Bug Summary

File: llvm/include/llvm/IR/IRBuilder.h
Warning: line 1702, column 30
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name OMPIRBuilder.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/lib/Frontend/OpenMP -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/lib/Frontend/OpenMP -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include 
-internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/lib/Frontend/OpenMP -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-21-164211-33944-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp

/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp

1//===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file implements the OpenMPIRBuilder class, which is used as a
11/// convenient way to create LLVM instructions for OpenMP directives.
12///
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
16
17#include "llvm/ADT/StringRef.h"
18#include "llvm/ADT/Triple.h"
19#include "llvm/IR/CFG.h"
20#include "llvm/IR/DebugInfo.h"
21#include "llvm/IR/IRBuilder.h"
22#include "llvm/IR/MDBuilder.h"
23#include "llvm/Support/CommandLine.h"
24#include "llvm/Support/Error.h"
25#include "llvm/Transforms/Utils/BasicBlockUtils.h"
26#include "llvm/Transforms/Utils/CodeExtractor.h"
27
28#include <sstream>
29
30#define DEBUG_TYPE "openmp-ir-builder"
31
32using namespace llvm;
33using namespace omp;
34
35static cl::opt<bool>
36 OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden,
37 cl::desc("Use optimistic attributes describing "
38 "'as-if' properties of runtime calls."),
39 cl::init(false));
40
41void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
42 LLVMContext &Ctx = Fn.getContext();
43
44 // Get the function's current attributes.
45 auto Attrs = Fn.getAttributes();
46 auto FnAttrs = Attrs.getFnAttributes();
47 auto RetAttrs = Attrs.getRetAttributes();
48 SmallVector<AttributeSet, 4> ArgAttrs;
49 for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo)
50 ArgAttrs.emplace_back(Attrs.getParamAttributes(ArgNo));
51
52#define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
53#include "llvm/Frontend/OpenMP/OMPKinds.def"
54
55 // Add attributes to the function declaration.
56 switch (FnID) {
57#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets) \
58 case Enum: \
59 FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet); \
60 RetAttrs = RetAttrs.addAttributes(Ctx, RetAttrSet); \
61 for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo) \
62 ArgAttrs[ArgNo] = \
63 ArgAttrs[ArgNo].addAttributes(Ctx, ArgAttrSets[ArgNo]); \
64 Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs)); \
65 break;
66#include "llvm/Frontend/OpenMP/OMPKinds.def"
67 default:
68 // Attributes are optional.
69 break;
70 }
71}
72
73FunctionCallee
74OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) {
75 FunctionType *FnTy = nullptr;
76 Function *Fn = nullptr;
77
78 // Try to find the declaration in the module first.
79 switch (FnID) {
80#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) \
81 case Enum: \
82 FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__}, \
83 IsVarArg); \
84 Fn = M.getFunction(Str); \
85 break;
86#include "llvm/Frontend/OpenMP/OMPKinds.def"
87 }
88
89 if (!Fn) {
90 // Create a new declaration if we need one.
91 switch (FnID) {
92#define OMP_RTL(Enum, Str, ...) \
93 case Enum: \
94 Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M); \
95 break;
96#include "llvm/Frontend/OpenMP/OMPKinds.def"
97 }
98
99 // Add information if the runtime function takes a callback function
100 if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) {
101 if (!Fn->hasMetadata(LLVMContext::MD_callback)) {
102 LLVMContext &Ctx = Fn->getContext();
103 MDBuilder MDB(Ctx);
104 // Annotate the callback behavior of the runtime function:
105 // - The callback callee is argument number 2 (microtask).
106 // - The first two arguments of the callback callee are unknown (-1).
107 // - All variadic arguments to the runtime function are passed to the
108 // callback callee.
109 Fn->addMetadata(
110 LLVMContext::MD_callback,
111 *MDNode::get(Ctx, {MDB.createCallbackEncoding(
112 2, {-1, -1}, /* VarArgsArePassed */ true)}));
113 }
114 }
115
116 LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Created OpenMP runtime function "
<< Fn->getName() << " with type " << *Fn
->getFunctionType() << "\n"; } } while (false)
117 << " with type " << *Fn->getFunctionType() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Created OpenMP runtime function "
<< Fn->getName() << " with type " << *Fn
->getFunctionType() << "\n"; } } while (false)
;
118 addAttributes(FnID, *Fn);
119
120 } else {
121 LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Found OpenMP runtime function "
<< Fn->getName() << " with type " << *Fn
->getFunctionType() << "\n"; } } while (false)
122 << " with type " << *Fn->getFunctionType() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Found OpenMP runtime function "
<< Fn->getName() << " with type " << *Fn
->getFunctionType() << "\n"; } } while (false)
;
123 }
124
125 assert(Fn && "Failed to create OpenMP runtime function")(static_cast <bool> (Fn && "Failed to create OpenMP runtime function"
) ? void (0) : __assert_fail ("Fn && \"Failed to create OpenMP runtime function\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 125, __extension__ __PRETTY_FUNCTION__))
;
126
127 // Cast the function to the expected type if necessary
128 Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo());
129 return {FnTy, C};
130}
131
132Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) {
133 FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID);
134 auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee());
135 assert(Fn && "Failed to create OpenMP runtime function pointer")(static_cast <bool> (Fn && "Failed to create OpenMP runtime function pointer"
) ? void (0) : __assert_fail ("Fn && \"Failed to create OpenMP runtime function pointer\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 135, __extension__ __PRETTY_FUNCTION__))
;
136 return Fn;
137}
138
139void OpenMPIRBuilder::initialize() { initializeTypes(M); }
140
141void OpenMPIRBuilder::finalize(Function *Fn, bool AllowExtractorSinking) {
142 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
143 SmallVector<BasicBlock *, 32> Blocks;
144 SmallVector<OutlineInfo, 16> DeferredOutlines;
145 for (OutlineInfo &OI : OutlineInfos) {
146 // Skip functions that have not been finalized yet; may happen with nested
147 // function generation.
148 if (Fn && OI.getFunction() != Fn) {
149 DeferredOutlines.push_back(OI);
150 continue;
151 }
152
153 ParallelRegionBlockSet.clear();
154 Blocks.clear();
155 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
156
157 Function *OuterFn = OI.getFunction();
158 CodeExtractorAnalysisCache CEAC(*OuterFn);
159 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
160 /* AggregateArgs */ false,
161 /* BlockFrequencyInfo */ nullptr,
162 /* BranchProbabilityInfo */ nullptr,
163 /* AssumptionCache */ nullptr,
164 /* AllowVarArgs */ true,
165 /* AllowAlloca */ true,
166 /* Suffix */ ".omp_par");
167
168 LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Before outlining: "
<< *OuterFn << "\n"; } } while (false)
;
169 LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Entry " << OI
.EntryBB->getName() << " Exit: " << OI.ExitBB->
getName() << "\n"; } } while (false)
170 << " Exit: " << OI.ExitBB->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Entry " << OI
.EntryBB->getName() << " Exit: " << OI.ExitBB->
getName() << "\n"; } } while (false)
;
171 assert(Extractor.isEligible() &&(static_cast <bool> (Extractor.isEligible() && "Expected OpenMP outlining to be possible!"
) ? void (0) : __assert_fail ("Extractor.isEligible() && \"Expected OpenMP outlining to be possible!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 172, __extension__ __PRETTY_FUNCTION__))
172 "Expected OpenMP outlining to be possible!")(static_cast <bool> (Extractor.isEligible() && "Expected OpenMP outlining to be possible!"
) ? void (0) : __assert_fail ("Extractor.isEligible() && \"Expected OpenMP outlining to be possible!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 172, __extension__ __PRETTY_FUNCTION__))
;
173
174 Function *OutlinedFn = Extractor.extractCodeRegion(CEAC);
175
176 LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "After outlining: "
<< *OuterFn << "\n"; } } while (false)
;
177 LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << " Outlined function: "
<< *OutlinedFn << "\n"; } } while (false)
;
178 assert(OutlinedFn->getReturnType()->isVoidTy() &&(static_cast <bool> (OutlinedFn->getReturnType()->
isVoidTy() && "OpenMP outlined functions should not return a value!"
) ? void (0) : __assert_fail ("OutlinedFn->getReturnType()->isVoidTy() && \"OpenMP outlined functions should not return a value!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 179, __extension__ __PRETTY_FUNCTION__))
179 "OpenMP outlined functions should not return a value!")(static_cast <bool> (OutlinedFn->getReturnType()->
isVoidTy() && "OpenMP outlined functions should not return a value!"
) ? void (0) : __assert_fail ("OutlinedFn->getReturnType()->isVoidTy() && \"OpenMP outlined functions should not return a value!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 179, __extension__ __PRETTY_FUNCTION__))
;
180
181 // For compatibility with the clang CG we move the outlined function after the
182 // one with the parallel region.
183 OutlinedFn->removeFromParent();
184 M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn);
185
186 // Remove the artificial entry introduced by the extractor right away, we
187 // made our own entry block after all.
188 {
189 BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock();
190 assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB)(static_cast <bool> (ArtificialEntry.getUniqueSuccessor
() == OI.EntryBB) ? void (0) : __assert_fail ("ArtificialEntry.getUniqueSuccessor() == OI.EntryBB"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 190, __extension__ __PRETTY_FUNCTION__))
;
191 assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry)(static_cast <bool> (OI.EntryBB->getUniquePredecessor
() == &ArtificialEntry) ? void (0) : __assert_fail ("OI.EntryBB->getUniquePredecessor() == &ArtificialEntry"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 191, __extension__ __PRETTY_FUNCTION__))
;
192 if (AllowExtractorSinking) {
193 // Move instructions from the to-be-deleted ArtificialEntry to the entry
194 // basic block of the parallel region. CodeExtractor may have sunk
195 // allocas/bitcasts for values that are solely used in the outlined
196 // region and do not escape.
197 assert(!ArtificialEntry.empty() &&(static_cast <bool> (!ArtificialEntry.empty() &&
"Expected instructions to sink in the outlined region") ? void
(0) : __assert_fail ("!ArtificialEntry.empty() && \"Expected instructions to sink in the outlined region\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 198, __extension__ __PRETTY_FUNCTION__))
198 "Expected instructions to sink in the outlined region")(static_cast <bool> (!ArtificialEntry.empty() &&
"Expected instructions to sink in the outlined region") ? void
(0) : __assert_fail ("!ArtificialEntry.empty() && \"Expected instructions to sink in the outlined region\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 198, __extension__ __PRETTY_FUNCTION__))
;
199 for (BasicBlock::iterator It = ArtificialEntry.begin(),
200 End = ArtificialEntry.end();
201 It != End;) {
202 Instruction &I = *It;
203 It++;
204
205 if (I.isTerminator())
206 continue;
207
208 I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
209 }
210 }
211 OI.EntryBB->moveBefore(&ArtificialEntry);
212 ArtificialEntry.eraseFromParent();
213 }
214 assert(&OutlinedFn->getEntryBlock() == OI.EntryBB)(static_cast <bool> (&OutlinedFn->getEntryBlock(
) == OI.EntryBB) ? void (0) : __assert_fail ("&OutlinedFn->getEntryBlock() == OI.EntryBB"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 214, __extension__ __PRETTY_FUNCTION__))
;
215 assert(OutlinedFn && OutlinedFn->getNumUses() == 1)(static_cast <bool> (OutlinedFn && OutlinedFn->
getNumUses() == 1) ? void (0) : __assert_fail ("OutlinedFn && OutlinedFn->getNumUses() == 1"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 215, __extension__ __PRETTY_FUNCTION__))
;
216
217 // Run a user callback, e.g. to add attributes.
218 if (OI.PostOutlineCB)
219 OI.PostOutlineCB(*OutlinedFn);
220 }
221
222 // Remove work items that have been completed.
223 OutlineInfos = std::move(DeferredOutlines);
224}
225
226OpenMPIRBuilder::~OpenMPIRBuilder() {
227 assert(OutlineInfos.empty() && "There must be no outstanding outlinings")(static_cast <bool> (OutlineInfos.empty() && "There must be no outstanding outlinings"
) ? void (0) : __assert_fail ("OutlineInfos.empty() && \"There must be no outstanding outlinings\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 227, __extension__ __PRETTY_FUNCTION__))
;
228}
229
230Value *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
231 IdentFlag LocFlags,
232 unsigned Reserve2Flags) {
233 // Enable "C-mode".
234 LocFlags |= OMP_IDENT_FLAG_KMPC;
235
236 Value *&Ident =
237 IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}];
238 if (!Ident) {
239 Constant *I32Null = ConstantInt::getNullValue(Int32);
240 Constant *IdentData[] = {
241 I32Null, ConstantInt::get(Int32, uint32_t(LocFlags)),
242 ConstantInt::get(Int32, Reserve2Flags), I32Null, SrcLocStr};
243 Constant *Initializer = ConstantStruct::get(
244 cast<StructType>(IdentPtr->getPointerElementType()), IdentData);
245
246 // Look for existing encoding of the location + flags, not needed but
247 // minimizes the difference to the existing solution while we transition.
248 for (GlobalVariable &GV : M.getGlobalList())
249 if (GV.getType() == IdentPtr && GV.hasInitializer())
250 if (GV.getInitializer() == Initializer)
251 return Ident = &GV;
252
253 auto *GV = new GlobalVariable(M, IdentPtr->getPointerElementType(),
254 /* isConstant = */ true,
255 GlobalValue::PrivateLinkage, Initializer);
256 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
257 GV->setAlignment(Align(8));
258 Ident = GV;
259 }
260 return Builder.CreatePointerCast(Ident, IdentPtr);
261}
262
263Type *OpenMPIRBuilder::getLanemaskType() {
264 LLVMContext &Ctx = M.getContext();
265 Triple triple(M.getTargetTriple());
266
267 // This test is adequate until deviceRTL has finer grained lane widths
268 return triple.isAMDGCN() ? Type::getInt64Ty(Ctx) : Type::getInt32Ty(Ctx);
269}
270
271Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr) {
272 Constant *&SrcLocStr = SrcLocStrMap[LocStr];
273 if (!SrcLocStr) {
274 Constant *Initializer =
275 ConstantDataArray::getString(M.getContext(), LocStr);
276
277 // Look for existing encoding of the location, not needed but minimizes the
278 // difference to the existing solution while we transition.
279 for (GlobalVariable &GV : M.getGlobalList())
280 if (GV.isConstant() && GV.hasInitializer() &&
281 GV.getInitializer() == Initializer)
282 return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr);
283
284 SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "",
285 /* AddressSpace */ 0, &M);
286 }
287 return SrcLocStr;
288}
289
290Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName,
291 StringRef FileName,
292 unsigned Line,
293 unsigned Column) {
294 SmallString<128> Buffer;
295 Buffer.push_back(';');
296 Buffer.append(FileName);
297 Buffer.push_back(';');
298 Buffer.append(FunctionName);
299 Buffer.push_back(';');
300 Buffer.append(std::to_string(Line));
301 Buffer.push_back(';');
302 Buffer.append(std::to_string(Column));
303 Buffer.push_back(';');
304 Buffer.push_back(';');
305 return getOrCreateSrcLocStr(Buffer.str());
306}
307
308Constant *OpenMPIRBuilder::getOrCreateDefaultSrcLocStr() {
309 return getOrCreateSrcLocStr(";unknown;unknown;0;0;;");
310}
311
312Constant *
313OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc) {
314 DILocation *DIL = Loc.DL.get();
315 if (!DIL)
316 return getOrCreateDefaultSrcLocStr();
317 StringRef FileName = M.getName();
318 if (DIFile *DIF = DIL->getFile())
319 if (Optional<StringRef> Source = DIF->getSource())
320 FileName = *Source;
321 StringRef Function = DIL->getScope()->getSubprogram()->getName();
322 Function =
323 !Function.empty() ? Function : Loc.IP.getBlock()->getParent()->getName();
324 return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(),
325 DIL->getColumn());
326}
327
328Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) {
329 return Builder.CreateCall(
330 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident,
331 "omp_global_thread_num");
332}
333
334OpenMPIRBuilder::InsertPointTy
335OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK,
336 bool ForceSimpleCall, bool CheckCancelFlag) {
337 if (!updateToLocation(Loc))
338 return Loc.IP;
339 return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag);
340}
341
342OpenMPIRBuilder::InsertPointTy
343OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind,
344 bool ForceSimpleCall, bool CheckCancelFlag) {
345 // Build call __kmpc_cancel_barrier(loc, thread_id) or
346 // __kmpc_barrier(loc, thread_id);
347
348 IdentFlag BarrierLocFlags;
349 switch (Kind) {
350 case OMPD_for:
351 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR;
352 break;
353 case OMPD_sections:
354 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS;
355 break;
356 case OMPD_single:
357 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE;
358 break;
359 case OMPD_barrier:
360 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL;
361 break;
362 default:
363 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL;
364 break;
365 }
366
367 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
368 Value *Args[] = {getOrCreateIdent(SrcLocStr, BarrierLocFlags),
369 getOrCreateThreadID(getOrCreateIdent(SrcLocStr))};
370
371 // If we are in a cancellable parallel region, barriers are cancellation
372 // points.
373 // TODO: Check why we would force simple calls or to ignore the cancel flag.
374 bool UseCancelBarrier =
375 !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel);
376
377 Value *Result =
378 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(
379 UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier
380 : OMPRTL___kmpc_barrier),
381 Args);
382
383 if (UseCancelBarrier && CheckCancelFlag)
384 emitCancelationCheckImpl(Result, OMPD_parallel);
385
386 return Builder.saveIP();
387}
388
389OpenMPIRBuilder::InsertPointTy
390OpenMPIRBuilder::createCancel(const LocationDescription &Loc,
391 Value *IfCondition,
392 omp::Directive CanceledDirective) {
393 if (!updateToLocation(Loc))
394 return Loc.IP;
395
396 // LLVM utilities like blocks with terminators.
397 auto *UI = Builder.CreateUnreachable();
398
399 Instruction *ThenTI = UI, *ElseTI = nullptr;
400 if (IfCondition)
401 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
402 Builder.SetInsertPoint(ThenTI);
403
404 Value *CancelKind = nullptr;
405 switch (CanceledDirective) {
406#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \
407 case DirectiveEnum: \
408 CancelKind = Builder.getInt32(Value); \
409 break;
410#include "llvm/Frontend/OpenMP/OMPKinds.def"
411 default:
412 llvm_unreachable("Unknown cancel kind!")::llvm::llvm_unreachable_internal("Unknown cancel kind!", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 412)
;
413 }
414
415 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
416 Value *Ident = getOrCreateIdent(SrcLocStr);
417 Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind};
418 Value *Result = Builder.CreateCall(
419 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args);
420 auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) {
421 if (CanceledDirective == OMPD_parallel) {
422 IRBuilder<>::InsertPointGuard IPG(Builder);
423 Builder.restoreIP(IP);
424 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
425 omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
426 /* CheckCancelFlag */ false);
427 }
428 };
429
430 // The actual cancel logic is shared with others, e.g., cancel_barriers.
431 emitCancelationCheckImpl(Result, CanceledDirective, ExitCB);
432
433 // Update the insertion point and remove the terminator we introduced.
434 Builder.SetInsertPoint(UI->getParent());
435 UI->eraseFromParent();
436
437 return Builder.saveIP();
438}
439
440void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag,
441 omp::Directive CanceledDirective,
442 FinalizeCallbackTy ExitCB) {
443 assert(isLastFinalizationInfoCancellable(CanceledDirective) &&(static_cast <bool> (isLastFinalizationInfoCancellable(
CanceledDirective) && "Unexpected cancellation!") ? void
(0) : __assert_fail ("isLastFinalizationInfoCancellable(CanceledDirective) && \"Unexpected cancellation!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 444, __extension__ __PRETTY_FUNCTION__))
444 "Unexpected cancellation!")(static_cast <bool> (isLastFinalizationInfoCancellable(
CanceledDirective) && "Unexpected cancellation!") ? void
(0) : __assert_fail ("isLastFinalizationInfoCancellable(CanceledDirective) && \"Unexpected cancellation!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 444, __extension__ __PRETTY_FUNCTION__))
;
445
446 // For a cancel barrier we create two new blocks.
447 BasicBlock *BB = Builder.GetInsertBlock();
448 BasicBlock *NonCancellationBlock;
449 if (Builder.GetInsertPoint() == BB->end()) {
450 // TODO: This branch will not be needed once we moved to the
451 // OpenMPIRBuilder codegen completely.
452 NonCancellationBlock = BasicBlock::Create(
453 BB->getContext(), BB->getName() + ".cont", BB->getParent());
454 } else {
455 NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint());
456 BB->getTerminator()->eraseFromParent();
457 Builder.SetInsertPoint(BB);
458 }
459 BasicBlock *CancellationBlock = BasicBlock::Create(
460 BB->getContext(), BB->getName() + ".cncl", BB->getParent());
461
462 // Jump to them based on the return value.
463 Value *Cmp = Builder.CreateIsNull(CancelFlag);
464 Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock,
465 /* TODO weight */ nullptr, nullptr);
466
467 // From the cancellation block we finalize all variables and go to the
468 // post finalization block that is known to the FiniCB callback.
469 Builder.SetInsertPoint(CancellationBlock);
470 if (ExitCB)
471 ExitCB(Builder.saveIP());
472 auto &FI = FinalizationStack.back();
473 FI.FiniCB(Builder.saveIP());
474
475 // The continuation block is where code generation continues.
476 Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
477}
478
479IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
480 const LocationDescription &Loc, InsertPointTy OuterAllocaIP,
481 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
482 FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads,
483 omp::ProcBindKind ProcBind, bool IsCancellable) {
484 if (!updateToLocation(Loc))
1
Taking false branch
485 return Loc.IP;
486
487 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
488 Value *Ident = getOrCreateIdent(SrcLocStr);
489 Value *ThreadID = getOrCreateThreadID(Ident);
490
491 if (NumThreads) {
2
Assuming 'NumThreads' is null
3
Taking false branch
492 // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads)
493 Value *Args[] = {
494 Ident, ThreadID,
495 Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)};
496 Builder.CreateCall(
497 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args);
498 }
499
500 if (ProcBind != OMP_PROC_BIND_default) {
4
Assuming 'ProcBind' is equal to 'OMP_PROC_BIND_default'
5
Taking false branch
501 // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind)
502 Value *Args[] = {
503 Ident, ThreadID,
504 ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)};
505 Builder.CreateCall(
506 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args);
507 }
508
509 BasicBlock *InsertBB = Builder.GetInsertBlock();
510 Function *OuterFn = InsertBB->getParent();
511
512 // Save the outer alloca block because the insertion iterator may get
513 // invalidated and we still need this later.
514 BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock();
515
516 // Vector to remember instructions we used only during the modeling but which
517 // we want to delete at the end.
518 SmallVector<Instruction *, 4> ToBeDeleted;
519
520 // Change the location to the outer alloca insertion point to create and
521 // initialize the allocas we pass into the parallel region.
522 Builder.restoreIP(OuterAllocaIP);
523 AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr");
524 AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr");
525
526 // If there is an if condition we actually use the TIDAddr and ZeroAddr in the
527 // program, otherwise we only need them for modeling purposes to get the
528 // associated arguments in the outlined function. In the former case,
529 // initialize the allocas properly, in the latter case, delete them later.
530 if (IfCondition) {
6
Assuming 'IfCondition' is null
7
Taking false branch
531 Builder.CreateStore(Constant::getNullValue(Int32), TIDAddr);
532 Builder.CreateStore(Constant::getNullValue(Int32), ZeroAddr);
533 } else {
534 ToBeDeleted.push_back(TIDAddr);
535 ToBeDeleted.push_back(ZeroAddr);
536 }
537
538 // Create an artificial insertion point that will also ensure the blocks we
539 // are about to split are not degenerated.
540 auto *UI = new UnreachableInst(Builder.getContext(), InsertBB);
541
542 Instruction *ThenTI = UI, *ElseTI = nullptr;
543 if (IfCondition
7.1
'IfCondition' is null
7.1
'IfCondition' is null
7.1
'IfCondition' is null
)
8
Taking false branch
544 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
545
546 BasicBlock *ThenBB = ThenTI->getParent();
547 BasicBlock *PRegEntryBB = ThenBB->splitBasicBlock(ThenTI, "omp.par.entry");
548 BasicBlock *PRegBodyBB =
549 PRegEntryBB->splitBasicBlock(ThenTI, "omp.par.region");
550 BasicBlock *PRegPreFiniBB =
551 PRegBodyBB->splitBasicBlock(ThenTI, "omp.par.pre_finalize");
552 BasicBlock *PRegExitBB =
553 PRegPreFiniBB->splitBasicBlock(ThenTI, "omp.par.exit");
554
555 auto FiniCBWrapper = [&](InsertPointTy IP) {
556 // Hide "open-ended" blocks from the given FiniCB by setting the right jump
557 // target to the region exit block.
558 if (IP.getBlock()->end() == IP.getPoint()) {
559 IRBuilder<>::InsertPointGuard IPG(Builder);
560 Builder.restoreIP(IP);
561 Instruction *I = Builder.CreateBr(PRegExitBB);
562 IP = InsertPointTy(I->getParent(), I->getIterator());
563 }
564 assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 &&(static_cast <bool> (IP.getBlock()->getTerminator()->
getNumSuccessors() == 1 && IP.getBlock()->getTerminator
()->getSuccessor(0) == PRegExitBB && "Unexpected insertion point for finalization call!"
) ? void (0) : __assert_fail ("IP.getBlock()->getTerminator()->getNumSuccessors() == 1 && IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB && \"Unexpected insertion point for finalization call!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 566, __extension__ __PRETTY_FUNCTION__))
565 IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB &&(static_cast <bool> (IP.getBlock()->getTerminator()->
getNumSuccessors() == 1 && IP.getBlock()->getTerminator
()->getSuccessor(0) == PRegExitBB && "Unexpected insertion point for finalization call!"
) ? void (0) : __assert_fail ("IP.getBlock()->getTerminator()->getNumSuccessors() == 1 && IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB && \"Unexpected insertion point for finalization call!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 566, __extension__ __PRETTY_FUNCTION__))
566 "Unexpected insertion point for finalization call!")(static_cast <bool> (IP.getBlock()->getTerminator()->
getNumSuccessors() == 1 && IP.getBlock()->getTerminator
()->getSuccessor(0) == PRegExitBB && "Unexpected insertion point for finalization call!"
) ? void (0) : __assert_fail ("IP.getBlock()->getTerminator()->getNumSuccessors() == 1 && IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB && \"Unexpected insertion point for finalization call!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 566, __extension__ __PRETTY_FUNCTION__))
;
567 return FiniCB(IP);
568 };
569
570 FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable});
571
572 // Generate the privatization allocas in the block that will become the entry
573 // of the outlined function.
574 Builder.SetInsertPoint(PRegEntryBB->getTerminator());
575 InsertPointTy InnerAllocaIP = Builder.saveIP();
576
577 AllocaInst *PrivTIDAddr =
578 Builder.CreateAlloca(Int32, nullptr, "tid.addr.local");
579 Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid");
580
581 // Add some fake uses for OpenMP provided arguments.
582 ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use"));
583 Instruction *ZeroAddrUse = Builder.CreateLoad(Int32, ZeroAddr,
584 "zero.addr.use");
585 ToBeDeleted.push_back(ZeroAddrUse);
586
587 // ThenBB
588 // |
589 // V
590 // PRegionEntryBB <- Privatization allocas are placed here.
591 // |
592 // V
593 // PRegionBodyBB <- BodeGen is invoked here.
594 // |
595 // V
596 // PRegPreFiniBB <- The block we will start finalization from.
597 // |
598 // V
599 // PRegionExitBB <- A common exit to simplify block collection.
600 //
601
602 LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Before body codegen: "
<< *OuterFn << "\n"; } } while (false)
;
9
Assuming 'DebugFlag' is false
10
Loop condition is false. Exiting loop
603
604 // Let the caller create the body.
605 assert(BodyGenCB && "Expected body generation callback!")(static_cast <bool> (BodyGenCB && "Expected body generation callback!"
) ? void (0) : __assert_fail ("BodyGenCB && \"Expected body generation callback!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 605, __extension__ __PRETTY_FUNCTION__))
;
11
Assuming the condition is true
12
'?' condition is true
606 InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin());
607 BodyGenCB(InnerAllocaIP, CodeGenIP, *PRegPreFiniBB);
608
609 LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "After body codegen: "
<< *OuterFn << "\n"; } } while (false)
;
13
Assuming 'DebugFlag' is false
14
Loop condition is false. Exiting loop
610
611 FunctionCallee RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call);
612 if (auto *F
15.1
'F' is null
15.1
'F' is null
15.1
'F' is null
= dyn_cast<llvm::Function>(RTLFn.getCallee())) {
15
Assuming the object is not a 'Function'
16
Taking false branch
613 if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
614 llvm::LLVMContext &Ctx = F->getContext();
615 MDBuilder MDB(Ctx);
616 // Annotate the callback behavior of the __kmpc_fork_call:
617 // - The callback callee is argument number 2 (microtask).
618 // - The first two arguments of the callback callee are unknown (-1).
619 // - All variadic arguments to the __kmpc_fork_call are passed to the
620 // callback callee.
621 F->addMetadata(
622 llvm::LLVMContext::MD_callback,
623 *llvm::MDNode::get(
624 Ctx, {MDB.createCallbackEncoding(2, {-1, -1},
625 /* VarArgsArePassed */ true)}));
626 }
627 }
628
629 OutlineInfo OI;
630 OI.PostOutlineCB = [=](Function &OutlinedFn) {
631 // Add some known attributes.
632 OutlinedFn.addParamAttr(0, Attribute::NoAlias);
633 OutlinedFn.addParamAttr(1, Attribute::NoAlias);
634 OutlinedFn.addFnAttr(Attribute::NoUnwind);
635 OutlinedFn.addFnAttr(Attribute::NoRecurse);
636
637 assert(OutlinedFn.arg_size() >= 2 &&(static_cast <bool> (OutlinedFn.arg_size() >= 2 &&
"Expected at least tid and bounded tid as arguments") ? void
(0) : __assert_fail ("OutlinedFn.arg_size() >= 2 && \"Expected at least tid and bounded tid as arguments\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 638, __extension__ __PRETTY_FUNCTION__))
638 "Expected at least tid and bounded tid as arguments")(static_cast <bool> (OutlinedFn.arg_size() >= 2 &&
"Expected at least tid and bounded tid as arguments") ? void
(0) : __assert_fail ("OutlinedFn.arg_size() >= 2 && \"Expected at least tid and bounded tid as arguments\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 638, __extension__ __PRETTY_FUNCTION__))
;
639 unsigned NumCapturedVars =
640 OutlinedFn.arg_size() - /* tid & bounded tid */ 2;
641
642 CallInst *CI = cast<CallInst>(OutlinedFn.user_back());
643 CI->getParent()->setName("omp_parallel");
644 Builder.SetInsertPoint(CI);
645
646 // Build call __kmpc_fork_call(Ident, n, microtask, var1, .., varn);
647 Value *ForkCallArgs[] = {
648 Ident, Builder.getInt32(NumCapturedVars),
649 Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)};
650
651 SmallVector<Value *, 16> RealArgs;
652 RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs));
653 RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end());
654
655 Builder.CreateCall(RTLFn, RealArgs);
656
657 LLVM_DEBUG(dbgs() << "With fork_call placed: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "With fork_call placed: "
<< *Builder.GetInsertBlock()->getParent() << "\n"
; } } while (false)
658 << *Builder.GetInsertBlock()->getParent() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "With fork_call placed: "
<< *Builder.GetInsertBlock()->getParent() << "\n"
; } } while (false)
;
659
660 InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end());
661
662 // Initialize the local TID stack location with the argument value.
663 Builder.SetInsertPoint(PrivTID);
664 Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin();
665 Builder.CreateStore(Builder.CreateLoad(Int32, OutlinedAI), PrivTIDAddr);
666
667 // If no "if" clause was present we do not need the call created during
668 // outlining, otherwise we reuse it in the serialized parallel region.
669 if (!ElseTI) {
670 CI->eraseFromParent();
671 } else {
672
673 // If an "if" clause was present we are now generating the serialized
674 // version into the "else" branch.
675 Builder.SetInsertPoint(ElseTI);
676
677 // Build calls __kmpc_serialized_parallel(&Ident, GTid);
678 Value *SerializedParallelCallArgs[] = {Ident, ThreadID};
679 Builder.CreateCall(
680 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_serialized_parallel),
681 SerializedParallelCallArgs);
682
683 // OutlinedFn(&GTid, &zero, CapturedStruct);
684 CI->removeFromParent();
685 Builder.Insert(CI);
686
687 // __kmpc_end_serialized_parallel(&Ident, GTid);
688 Value *EndArgs[] = {Ident, ThreadID};
689 Builder.CreateCall(
690 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_serialized_parallel),
691 EndArgs);
692
693 LLVM_DEBUG(dbgs() << "With serialized parallel region: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "With serialized parallel region: "
<< *Builder.GetInsertBlock()->getParent() << "\n"
; } } while (false)
694 << *Builder.GetInsertBlock()->getParent() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "With serialized parallel region: "
<< *Builder.GetInsertBlock()->getParent() << "\n"
; } } while (false)
;
695 }
696
697 for (Instruction *I : ToBeDeleted)
698 I->eraseFromParent();
699 };
700
701 // Adjust the finalization stack, verify the adjustment, and call the
702 // finalize function a last time to finalize values between the pre-fini
703 // block and the exit block if we left the parallel "the normal way".
704 auto FiniInfo = FinalizationStack.pop_back_val();
705 (void)FiniInfo;
706 assert(FiniInfo.DK == OMPD_parallel &&(static_cast <bool> (FiniInfo.DK == OMPD_parallel &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("FiniInfo.DK == OMPD_parallel && \"Unexpected finalization stack state!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 707, __extension__ __PRETTY_FUNCTION__))
17
Assuming 'OMPD_parallel' is equal to field 'DK'
18
'?' condition is true
707 "Unexpected finalization stack state!")(static_cast <bool> (FiniInfo.DK == OMPD_parallel &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("FiniInfo.DK == OMPD_parallel && \"Unexpected finalization stack state!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 707, __extension__ __PRETTY_FUNCTION__))
;
708
709 Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator();
710
711 InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator());
712 FiniCB(PreFiniIP);
713
714 OI.EntryBB = PRegEntryBB;
715 OI.ExitBB = PRegExitBB;
716
717 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
718 SmallVector<BasicBlock *, 32> Blocks;
719 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
720
721 // Ensure a single exit node for the outlined region by creating one.
722 // We might have multiple incoming edges to the exit now due to finalizations,
723 // e.g., cancel calls that cause the control flow to leave the region.
724 BasicBlock *PRegOutlinedExitBB = PRegExitBB;
725 PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt());
726 PRegOutlinedExitBB->setName("omp.par.outlined.exit");
727 Blocks.push_back(PRegOutlinedExitBB);
728
729 CodeExtractorAnalysisCache CEAC(*OuterFn);
730 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
731 /* AggregateArgs */ false,
732 /* BlockFrequencyInfo */ nullptr,
733 /* BranchProbabilityInfo */ nullptr,
734 /* AssumptionCache */ nullptr,
735 /* AllowVarArgs */ true,
736 /* AllowAlloca */ true,
737 /* Suffix */ ".omp_par");
738
739 // Find inputs to, outputs from the code region.
740 BasicBlock *CommonExit = nullptr;
741 SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands;
742 Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
743 Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands);
744
745 LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Before privatization: "
<< *OuterFn << "\n"; } } while (false)
;
19
Assuming 'DebugFlag' is false
20
Loop condition is false. Exiting loop
746
747 FunctionCallee TIDRTLFn =
748 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num);
749
750 auto PrivHelper = [&](Value &V) {
751 if (&V == TIDAddr || &V == ZeroAddr)
24
Assuming the condition is false
25
Assuming the condition is false
26
Taking false branch
752 return;
753
754 SetVector<Use *> Uses;
755 for (Use &U : V.uses())
756 if (auto *UserI = dyn_cast<Instruction>(U.getUser()))
757 if (ParallelRegionBlockSet.count(UserI->getParent()))
758 Uses.insert(&U);
759
760 // __kmpc_fork_call expects extra arguments as pointers. If the input
761 // already has a pointer type, everything is fine. Otherwise, store the
762 // value onto stack and load it back inside the to-be-outlined region. This
763 // will ensure only the pointer will be passed to the function.
764 // FIXME: if there are more than 15 trailing arguments, they must be
765 // additionally packed in a struct.
766 Value *Inner = &V;
767 if (!V.getType()->isPointerTy()) {
27
Taking true branch
768 IRBuilder<>::InsertPointGuard Guard(Builder);
769 LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { llvm::dbgs() << "Forwarding input as pointer: "
<< V << "\n"; } } while (false)
;
28
Assuming 'DebugFlag' is false
29
Loop condition is false. Exiting loop
770
771 Builder.restoreIP(OuterAllocaIP);
772 Value *Ptr =
773 Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded");
774
775 // Store to stack at end of the block that currently branches to the entry
776 // block of the to-be-outlined region.
777 Builder.SetInsertPoint(InsertBB,
778 InsertBB->getTerminator()->getIterator());
779 Builder.CreateStore(&V, Ptr);
780
781 // Load back next to allocations in the to-be-outlined region.
782 Builder.restoreIP(InnerAllocaIP);
30
Calling 'IRBuilderBase::restoreIP'
35
Returning from 'IRBuilderBase::restoreIP'
783 Inner = Builder.CreateLoad(V.getType(), Ptr);
36
Calling 'IRBuilderBase::CreateLoad'
784 }
785
786 Value *ReplacementValue = nullptr;
787 CallInst *CI = dyn_cast<CallInst>(&V);
788 if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) {
789 ReplacementValue = PrivTID;
790 } else {
791 Builder.restoreIP(
792 PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue));
793 assert(ReplacementValue &&(static_cast <bool> (ReplacementValue && "Expected copy/create callback to set replacement value!"
) ? void (0) : __assert_fail ("ReplacementValue && \"Expected copy/create callback to set replacement value!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 794, __extension__ __PRETTY_FUNCTION__))
794 "Expected copy/create callback to set replacement value!")(static_cast <bool> (ReplacementValue && "Expected copy/create callback to set replacement value!"
) ? void (0) : __assert_fail ("ReplacementValue && \"Expected copy/create callback to set replacement value!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 794, __extension__ __PRETTY_FUNCTION__))
;
795 if (ReplacementValue == &V)
796 return;
797 }
798
799 for (Use *UPtr : Uses)
800 UPtr->set(ReplacementValue);
801 };
802
803 // Reset the inner alloca insertion as it will be used for loading the values
804 // wrapped into pointers before passing them into the to-be-outlined region.
805 // Configure it to insert immediately after the fake use of zero address so
806 // that they are available in the generated body and so that the
807 // OpenMP-related values (thread ID and zero address pointers) remain leading
808 // in the argument list.
809 InnerAllocaIP = IRBuilder<>::InsertPoint(
810 ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator());
811
812 // Reset the outer alloca insertion point to the entry of the relevant block
813 // in case it was invalidated.
814 OuterAllocaIP = IRBuilder<>::InsertPoint(
815 OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt());
816
817 for (Value *Input : Inputs) {
818 LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Captured input: " <<
*Input << "\n"; } } while (false)
;
21
Assuming 'DebugFlag' is false
22
Loop condition is false. Exiting loop
819 PrivHelper(*Input);
23
Calling 'operator()'
820 }
821 LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (Value *Output : Outputs) do {
if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(
"openmp-ir-builder")) { dbgs() << "Captured output: " <<
*Output << "\n"; } } while (false); }; } } while (false
)
822 for (Value *Output : Outputs)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (Value *Output : Outputs) do {
if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(
"openmp-ir-builder")) { dbgs() << "Captured output: " <<
*Output << "\n"; } } while (false); }; } } while (false
)
823 LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (Value *Output : Outputs) do {
if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(
"openmp-ir-builder")) { dbgs() << "Captured output: " <<
*Output << "\n"; } } while (false); }; } } while (false
)
824 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (Value *Output : Outputs) do {
if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(
"openmp-ir-builder")) { dbgs() << "Captured output: " <<
*Output << "\n"; } } while (false); }; } } while (false
)
;
825 assert(Outputs.empty() &&(static_cast <bool> (Outputs.empty() && "OpenMP outlining should not produce live-out values!"
) ? void (0) : __assert_fail ("Outputs.empty() && \"OpenMP outlining should not produce live-out values!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 826, __extension__ __PRETTY_FUNCTION__))
826 "OpenMP outlining should not produce live-out values!")(static_cast <bool> (Outputs.empty() && "OpenMP outlining should not produce live-out values!"
) ? void (0) : __assert_fail ("Outputs.empty() && \"OpenMP outlining should not produce live-out values!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 826, __extension__ __PRETTY_FUNCTION__))
;
827
828 LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "After privatization: "
<< *OuterFn << "\n"; } } while (false)
;
829 LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() <<
" PBR: " << BB->getName() << "\n"; }; } } while
(false)
830 for (auto *BB : Blocks)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() <<
" PBR: " << BB->getName() << "\n"; }; } } while
(false)
831 dbgs() << " PBR: " << BB->getName() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() <<
" PBR: " << BB->getName() << "\n"; }; } } while
(false)
832 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() <<
" PBR: " << BB->getName() << "\n"; }; } } while
(false)
;
833
834 // Register the outlined info.
835 addOutlineInfo(std::move(OI));
836
837 InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end());
838 UI->eraseFromParent();
839
840 return AfterIP;
841}
842
843void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) {
844 // Build call void __kmpc_flush(ident_t *loc)
845 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
846 Value *Args[] = {getOrCreateIdent(SrcLocStr)};
847
848 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args);
849}
850
851void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) {
852 if (!updateToLocation(Loc))
853 return;
854 emitFlush(Loc);
855}
856
857void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) {
858 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
859 // global_tid);
860 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
861 Value *Ident = getOrCreateIdent(SrcLocStr);
862 Value *Args[] = {Ident, getOrCreateThreadID(Ident)};
863
864 // Ignore return result until untied tasks are supported.
865 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait),
866 Args);
867}
868
869void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) {
870 if (!updateToLocation(Loc))
871 return;
872 emitTaskwaitImpl(Loc);
873}
874
875void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) {
876 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
877 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
878 Value *Ident = getOrCreateIdent(SrcLocStr);
879 Constant *I32Null = ConstantInt::getNullValue(Int32);
880 Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null};
881
882 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield),
883 Args);
884}
885
886void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) {
887 if (!updateToLocation(Loc))
888 return;
889 emitTaskyieldImpl(Loc);
890}
891
892OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections(
893 const LocationDescription &Loc, InsertPointTy AllocaIP,
894 ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB,
895 FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) {
896 if (!updateToLocation(Loc))
897 return Loc.IP;
898
899 auto FiniCBWrapper = [&](InsertPointTy IP) {
900 if (IP.getBlock()->end() != IP.getPoint())
901 return FiniCB(IP);
902 // This must be done otherwise any nested constructs using FinalizeOMPRegion
903 // will fail because that function requires the Finalization Basic Block to
904 // have a terminator, which is already removed by EmitOMPRegionBody.
905 // IP is currently at cancelation block.
906 // We need to backtrack to the condition block to fetch
907 // the exit block and create a branch from cancelation
908 // to exit block.
909 IRBuilder<>::InsertPointGuard IPG(Builder);
910 Builder.restoreIP(IP);
911 auto *CaseBB = IP.getBlock()->getSinglePredecessor();
912 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
913 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
914 Instruction *I = Builder.CreateBr(ExitBB);
915 IP = InsertPointTy(I->getParent(), I->getIterator());
916 return FiniCB(IP);
917 };
918
919 FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable});
920
921 // Each section is emitted as a switch case
922 // Each finalization callback is handled from clang.EmitOMPSectionDirective()
923 // -> OMP.createSection() which generates the IR for each section
924 // Iterate through all sections and emit a switch construct:
925 // switch (IV) {
926 // case 0:
927 // <SectionStmt[0]>;
928 // break;
929 // ...
930 // case <NumSection> - 1:
931 // <SectionStmt[<NumSection> - 1]>;
932 // break;
933 // }
934 // ...
935 // section_loop.after:
936 // <FiniCB>;
937 auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) {
938 auto *CurFn = CodeGenIP.getBlock()->getParent();
939 auto *ForIncBB = CodeGenIP.getBlock()->getSingleSuccessor();
940 auto *ForExitBB = CodeGenIP.getBlock()
941 ->getSinglePredecessor()
942 ->getTerminator()
943 ->getSuccessor(1);
944 SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, ForIncBB);
945 Builder.restoreIP(CodeGenIP);
946 unsigned CaseNumber = 0;
947 for (auto SectionCB : SectionCBs) {
948 auto *CaseBB = BasicBlock::Create(M.getContext(),
949 "omp_section_loop.body.case", CurFn);
950 SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB);
951 Builder.SetInsertPoint(CaseBB);
952 SectionCB(InsertPointTy(), Builder.saveIP(), *ForExitBB);
953 CaseNumber++;
954 }
955 // remove the existing terminator from body BB since there can be no
956 // terminators after switch/case
957 CodeGenIP.getBlock()->getTerminator()->eraseFromParent();
958 };
959 // Loop body ends here
960 // LowerBound, UpperBound, and STride for createCanonicalLoop
961 Type *I32Ty = Type::getInt32Ty(M.getContext());
962 Value *LB = ConstantInt::get(I32Ty, 0);
963 Value *UB = ConstantInt::get(I32Ty, SectionCBs.size());
964 Value *ST = ConstantInt::get(I32Ty, 1);
965 llvm::CanonicalLoopInfo *LoopInfo = createCanonicalLoop(
966 Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop");
967 LoopInfo = createStaticWorkshareLoop(Loc, LoopInfo, AllocaIP, true);
968 BasicBlock *LoopAfterBB = LoopInfo->getAfter();
969 Instruction *SplitPos = LoopAfterBB->getTerminator();
970 if (!isa_and_nonnull<BranchInst>(SplitPos))
971 SplitPos = new UnreachableInst(Builder.getContext(), LoopAfterBB);
972 // ExitBB after LoopAfterBB because LoopAfterBB is used for FinalizationCB,
973 // which requires a BB with branch
974 BasicBlock *ExitBB =
975 LoopAfterBB->splitBasicBlock(SplitPos, "omp_sections.end");
976 SplitPos->eraseFromParent();
977
978 // Apply the finalization callback in LoopAfterBB
979 auto FiniInfo = FinalizationStack.pop_back_val();
980 assert(FiniInfo.DK == OMPD_sections &&(static_cast <bool> (FiniInfo.DK == OMPD_sections &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("FiniInfo.DK == OMPD_sections && \"Unexpected finalization stack state!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 981, __extension__ __PRETTY_FUNCTION__))
981 "Unexpected finalization stack state!")(static_cast <bool> (FiniInfo.DK == OMPD_sections &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("FiniInfo.DK == OMPD_sections && \"Unexpected finalization stack state!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 981, __extension__ __PRETTY_FUNCTION__))
;
982 Builder.SetInsertPoint(LoopAfterBB->getTerminator());
983 FiniInfo.FiniCB(Builder.saveIP());
984 Builder.SetInsertPoint(ExitBB);
985
986 return Builder.saveIP();
987}
988
989OpenMPIRBuilder::InsertPointTy
990OpenMPIRBuilder::createSection(const LocationDescription &Loc,
991 BodyGenCallbackTy BodyGenCB,
992 FinalizeCallbackTy FiniCB) {
993 if (!updateToLocation(Loc))
994 return Loc.IP;
995
996 auto FiniCBWrapper = [&](InsertPointTy IP) {
997 if (IP.getBlock()->end() != IP.getPoint())
998 return FiniCB(IP);
999 // This must be done otherwise any nested constructs using FinalizeOMPRegion
1000 // will fail because that function requires the Finalization Basic Block to
1001 // have a terminator, which is already removed by EmitOMPRegionBody.
1002 // IP is currently at cancelation block.
1003 // We need to backtrack to the condition block to fetch
1004 // the exit block and create a branch from cancelation
1005 // to exit block.
1006 IRBuilder<>::InsertPointGuard IPG(Builder);
1007 Builder.restoreIP(IP);
1008 auto *CaseBB = Loc.IP.getBlock();
1009 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
1010 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
1011 Instruction *I = Builder.CreateBr(ExitBB);
1012 IP = InsertPointTy(I->getParent(), I->getIterator());
1013 return FiniCB(IP);
1014 };
1015
1016 Directive OMPD = Directive::OMPD_sections;
1017 // Since we are using Finalization Callback here, HasFinalize
1018 // and IsCancellable have to be true
1019 return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper,
1020 /*Conditional*/ false, /*hasFinalize*/ true,
1021 /*IsCancellable*/ true);
1022}
1023
1024OpenMPIRBuilder::InsertPointTy
1025OpenMPIRBuilder::createMaster(const LocationDescription &Loc,
1026 BodyGenCallbackTy BodyGenCB,
1027 FinalizeCallbackTy FiniCB) {
1028
1029 if (!updateToLocation(Loc))
1030 return Loc.IP;
1031
1032 Directive OMPD = Directive::OMPD_master;
1033 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1034 Value *Ident = getOrCreateIdent(SrcLocStr);
1035 Value *ThreadId = getOrCreateThreadID(Ident);
1036 Value *Args[] = {Ident, ThreadId};
1037
1038 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master);
1039 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1040
1041 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master);
1042 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
1043
1044 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1045 /*Conditional*/ true, /*hasFinalize*/ true);
1046}
1047
1048OpenMPIRBuilder::InsertPointTy
1049OpenMPIRBuilder::createMasked(const LocationDescription &Loc,
1050 BodyGenCallbackTy BodyGenCB,
1051 FinalizeCallbackTy FiniCB, Value *Filter) {
1052 if (!updateToLocation(Loc))
1053 return Loc.IP;
1054
1055 Directive OMPD = Directive::OMPD_masked;
1056 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1057 Value *Ident = getOrCreateIdent(SrcLocStr);
1058 Value *ThreadId = getOrCreateThreadID(Ident);
1059 Value *Args[] = {Ident, ThreadId, Filter};
1060 Value *ArgsEnd[] = {Ident, ThreadId};
1061
1062 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked);
1063 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1064
1065 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked);
1066 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd);
1067
1068 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1069 /*Conditional*/ true, /*hasFinalize*/ true);
1070}
1071
1072CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
1073 DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
1074 BasicBlock *PostInsertBefore, const Twine &Name) {
1075 Module *M = F->getParent();
1076 LLVMContext &Ctx = M->getContext();
1077 Type *IndVarTy = TripCount->getType();
1078
1079 // Create the basic block structure.
1080 BasicBlock *Preheader =
1081 BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore);
1082 BasicBlock *Header =
1083 BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore);
1084 BasicBlock *Cond =
1085 BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore);
1086 BasicBlock *Body =
1087 BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore);
1088 BasicBlock *Latch =
1089 BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore);
1090 BasicBlock *Exit =
1091 BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore);
1092 BasicBlock *After =
1093 BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore);
1094
1095 // Use specified DebugLoc for new instructions.
1096 Builder.SetCurrentDebugLocation(DL);
1097
1098 Builder.SetInsertPoint(Preheader);
1099 Builder.CreateBr(Header);
1100
1101 Builder.SetInsertPoint(Header);
1102 PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv");
1103 IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader);
1104 Builder.CreateBr(Cond);
1105
1106 Builder.SetInsertPoint(Cond);
1107 Value *Cmp =
1108 Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp");
1109 Builder.CreateCondBr(Cmp, Body, Exit);
1110
1111 Builder.SetInsertPoint(Body);
1112 Builder.CreateBr(Latch);
1113
1114 Builder.SetInsertPoint(Latch);
1115 Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1),
1116 "omp_" + Name + ".next", /*HasNUW=*/true);
1117 Builder.CreateBr(Header);
1118 IndVarPHI->addIncoming(Next, Latch);
1119
1120 Builder.SetInsertPoint(Exit);
1121 Builder.CreateBr(After);
1122
1123 // Remember and return the canonical control flow.
1124 LoopInfos.emplace_front();
1125 CanonicalLoopInfo *CL = &LoopInfos.front();
1126
1127 CL->Preheader = Preheader;
1128 CL->Header = Header;
1129 CL->Cond = Cond;
1130 CL->Body = Body;
1131 CL->Latch = Latch;
1132 CL->Exit = Exit;
1133 CL->After = After;
1134
1135 CL->IsValid = true;
1136
1137#ifndef NDEBUG
1138 CL->assertOK();
1139#endif
1140 return CL;
1141}
1142
/// Create a canonical loop with a caller-provided trip count and connect it to
/// the CFG at the given insertion point. The loop skeleton is materialized
/// first; the body callback runs only after the loop is wired in, so it never
/// sees degenerate (terminator-less) blocks.
CanonicalLoopInfo *
OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc,
                                     LoopBodyGenCallbackTy BodyGenCB,
                                     Value *TripCount, const Twine &Name) {
  // Insert the skeleton's blocks right after the insertion-point block so the
  // new blocks appear in natural textual order within the function.
  BasicBlock *BB = Loc.IP.getBlock();
  BasicBlock *NextBB = BB->getNextNode();

  CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(),
                                             NextBB, NextBB, Name);
  BasicBlock *After = CL->getAfter();

  // If location is not set, don't connect the loop.
  if (updateToLocation(Loc)) {
    // Split the loop at the insertion point: Branch to the preheader and move
    // every following instruction to after the loop (the After BB). Also, the
    // new successor is the loop's after block.
    Builder.CreateBr(CL->Preheader);
    After->getInstList().splice(After->begin(), BB->getInstList(),
                                Builder.GetInsertPoint(), BB->end());
    After->replaceSuccessorsPhiUsesWith(BB, After);
  }

  // Emit the body content. We do it after connecting the loop to the CFG to
  // avoid that the callback encounters degenerate BBs.
  BodyGenCB(CL->getBodyIP(), CL->getIndVar());

#ifndef NDEBUG
  CL->assertOK();
#endif
  return CL;
}
1174
1175CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop(
1176 const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
1177 Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
1178 InsertPointTy ComputeIP, const Twine &Name) {
1179
1180 // Consider the following difficulties (assuming 8-bit signed integers):
1181 // * Adding \p Step to the loop counter which passes \p Stop may overflow:
1182 // DO I = 1, 100, 50
1183 /// * A \p Step of INT_MIN cannot not be normalized to a positive direction:
1184 // DO I = 100, 0, -128
1185
1186 // Start, Stop and Step must be of the same integer type.
1187 auto *IndVarTy = cast<IntegerType>(Start->getType());
1188 assert(IndVarTy == Stop->getType() && "Stop type mismatch")(static_cast <bool> (IndVarTy == Stop->getType() &&
"Stop type mismatch") ? void (0) : __assert_fail ("IndVarTy == Stop->getType() && \"Stop type mismatch\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1188, __extension__ __PRETTY_FUNCTION__))
;
1189 assert(IndVarTy == Step->getType() && "Step type mismatch")(static_cast <bool> (IndVarTy == Step->getType() &&
"Step type mismatch") ? void (0) : __assert_fail ("IndVarTy == Step->getType() && \"Step type mismatch\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1189, __extension__ __PRETTY_FUNCTION__))
;
1190
1191 LocationDescription ComputeLoc =
1192 ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
1193 updateToLocation(ComputeLoc);
1194
1195 ConstantInt *Zero = ConstantInt::get(IndVarTy, 0);
1196 ConstantInt *One = ConstantInt::get(IndVarTy, 1);
1197
1198 // Like Step, but always positive.
1199 Value *Incr = Step;
1200
1201 // Distance between Start and Stop; always positive.
1202 Value *Span;
1203
1204 // Condition whether there are no iterations are executed at all, e.g. because
1205 // UB < LB.
1206 Value *ZeroCmp;
1207
1208 if (IsSigned) {
1209 // Ensure that increment is positive. If not, negate and invert LB and UB.
1210 Value *IsNeg = Builder.CreateICmpSLT(Step, Zero);
1211 Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step);
1212 Value *LB = Builder.CreateSelect(IsNeg, Stop, Start);
1213 Value *UB = Builder.CreateSelect(IsNeg, Start, Stop);
1214 Span = Builder.CreateSub(UB, LB, "", false, true);
1215 ZeroCmp = Builder.CreateICmp(
1216 InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB);
1217 } else {
1218 Span = Builder.CreateSub(Stop, Start, "", true);
1219 ZeroCmp = Builder.CreateICmp(
1220 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start);
1221 }
1222
1223 Value *CountIfLooping;
1224 if (InclusiveStop) {
1225 CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One);
1226 } else {
1227 // Avoid incrementing past stop since it could overflow.
1228 Value *CountIfTwo = Builder.CreateAdd(
1229 Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One);
1230 Value *OneCmp = Builder.CreateICmp(
1231 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr);
1232 CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo);
1233 }
1234 Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping,
1235 "omp_" + Name + ".tripcount");
1236
1237 auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) {
1238 Builder.restoreIP(CodeGenIP);
1239 Value *Span = Builder.CreateMul(IV, Step);
1240 Value *IndVar = Builder.CreateAdd(Span, Start);
1241 BodyGenCB(Builder.saveIP(), IndVar);
1242 };
1243 LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP();
1244 return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name);
1245}
1246
1247// Returns an LLVM function to call for initializing loop bounds using OpenMP
1248// static scheduling depending on `type`. Only i32 and i64 are supported by the
1249// runtime. Always interpret integers as unsigned similarly to
1250// CanonicalLoopInfo.
1251static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M,
1252 OpenMPIRBuilder &OMPBuilder) {
1253 unsigned Bitwidth = Ty->getIntegerBitWidth();
1254 if (Bitwidth == 32)
1255 return OMPBuilder.getOrCreateRuntimeFunction(
1256 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u);
1257 if (Bitwidth == 64)
1258 return OMPBuilder.getOrCreateRuntimeFunction(
1259 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u);
1260 llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1260)
;
1261}
1262
1263// Sets the number of loop iterations to the given value. This value must be
1264// valid in the condition block (i.e., defined in the preheader) and is
1265// interpreted as an unsigned integer.
1266void setCanonicalLoopTripCount(CanonicalLoopInfo *CLI, Value *TripCount) {
1267 Instruction *CmpI = &CLI->getCond()->front();
1268 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount")(static_cast <bool> (isa<CmpInst>(CmpI) &&
"First inst must compare IV with TripCount") ? void (0) : __assert_fail
("isa<CmpInst>(CmpI) && \"First inst must compare IV with TripCount\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1268, __extension__ __PRETTY_FUNCTION__))
;
1269 CmpI->setOperand(1, TripCount);
1270 CLI->assertOK();
1271}
1272
/// Lower the canonical loop \p CLI to an OpenMP statically-scheduled
/// workshare loop: allocate the bound slots expected by __kmpc_for_static_init,
/// call "init" in the preheader, clamp the loop to the thread-local chunk, and
/// call __kmpc_for_static_fini on exit. Returns nullptr if \p Loc is unset.
CanonicalLoopInfo *OpenMPIRBuilder::createStaticWorkshareLoop(
    const LocationDescription &Loc, CanonicalLoopInfo *CLI,
    InsertPointTy AllocaIP, bool NeedsBarrier, Value *Chunk) {
  // Set up the source location value for OpenMP runtime.
  if (!updateToLocation(Loc))
    return nullptr;

  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
  Value *SrcLoc = getOrCreateIdent(SrcLocStr);

  // Declare useful OpenMP runtime functions.
  Value *IV = CLI->getIndVar();
  Type *IVTy = IV->getType();
  FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this);
  FunctionCallee StaticFini =
      getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);

  // Allocate space for computed loop bounds as expected by the "init" function.
  Builder.restoreIP(AllocaIP);
  Type *I32Type = Type::getInt32Ty(M.getContext());
  Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
  Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
  Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
  Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");

  // At the end of the preheader, prepare for calling the "init" function by
  // storing the current loop bounds into the allocated space. A canonical loop
  // always iterates from 0 to trip-count with step 1. Note that "init" expects
  // and produces an inclusive upper bound.
  Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
  Constant *Zero = ConstantInt::get(IVTy, 0);
  Constant *One = ConstantInt::get(IVTy, 1);
  Builder.CreateStore(Zero, PLowerBound);
  Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One);
  Builder.CreateStore(UpperBound, PUpperBound);
  Builder.CreateStore(One, PStride);

  // A null chunk selects the default chunking of one iteration.
  if (!Chunk)
    Chunk = One;

  Value *ThreadNum = getOrCreateThreadID(SrcLoc);

  Constant *SchedulingType =
      ConstantInt::get(I32Type, static_cast<int>(OMPScheduleType::Static));

  // Call the "init" function and update the trip count of the loop with the
  // value it produced.
  Builder.CreateCall(StaticInit,
                     {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound,
                      PUpperBound, PStride, One, Chunk});
  Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound);
  Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound);
  Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound);
  Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One);
  setCanonicalLoopTripCount(CLI, TripCount);

  // Update all uses of the induction variable except the one in the condition
  // block that compares it with the actual upper bound, and the increment in
  // the latch block.
  // TODO: this can eventually move to CanonicalLoopInfo or to a new
  // CanonicalLoopInfoUpdater interface.
  Builder.SetInsertPoint(CLI->getBody(), CLI->getBody()->getFirstInsertionPt());
  Value *UpdatedIV = Builder.CreateAdd(IV, LowerBound);
  IV->replaceUsesWithIf(UpdatedIV, [&](Use &U) {
    auto *Instr = dyn_cast<Instruction>(U.getUser());
    return !Instr ||
           (Instr->getParent() != CLI->getCond() &&
            Instr->getParent() != CLI->getLatch() && Instr != UpdatedIV);
  });

  // In the "exit" block, call the "fini" function.
  Builder.SetInsertPoint(CLI->getExit(),
                         CLI->getExit()->getTerminator()->getIterator());
  Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});

  // Add the barrier if requested.
  if (NeedsBarrier)
    createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
                  omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
                  /* CheckCancelFlag */ false);

  CLI->assertOK();
  return CLI;
}
1357
/// Generic workshare-loop entry point. Forwards to the static-schedule
/// implementation with the default (null) chunk size.
CanonicalLoopInfo *OpenMPIRBuilder::createWorkshareLoop(
    const LocationDescription &Loc, CanonicalLoopInfo *CLI,
    InsertPointTy AllocaIP, bool NeedsBarrier) {
  // Currently only supports static schedules.
  return createStaticWorkshareLoop(Loc, CLI, AllocaIP, NeedsBarrier);
}
1364
1365/// Returns an LLVM function to call for initializing loop bounds using OpenMP
1366/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
1367/// the runtime. Always interpret integers as unsigned similarly to
1368/// CanonicalLoopInfo.
1369static FunctionCallee
1370getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
1371 unsigned Bitwidth = Ty->getIntegerBitWidth();
1372 if (Bitwidth == 32)
1373 return OMPBuilder.getOrCreateRuntimeFunction(
1374 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u);
1375 if (Bitwidth == 64)
1376 return OMPBuilder.getOrCreateRuntimeFunction(
1377 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u);
1378 llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1378)
;
1379}
1380
1381/// Returns an LLVM function to call for updating the next loop using OpenMP
1382/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
1383/// the runtime. Always interpret integers as unsigned similarly to
1384/// CanonicalLoopInfo.
1385static FunctionCallee
1386getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
1387 unsigned Bitwidth = Ty->getIntegerBitWidth();
1388 if (Bitwidth == 32)
1389 return OMPBuilder.getOrCreateRuntimeFunction(
1390 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u);
1391 if (Bitwidth == 64)
1392 return OMPBuilder.getOrCreateRuntimeFunction(
1393 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u);
1394 llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1394)
;
1395}
1396
1397OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createDynamicWorkshareLoop(
1398 const LocationDescription &Loc, CanonicalLoopInfo *CLI,
1399 InsertPointTy AllocaIP, OMPScheduleType SchedType, bool NeedsBarrier,
1400 Value *Chunk) {
1401 // Set up the source location value for OpenMP runtime.
1402 Builder.SetCurrentDebugLocation(Loc.DL);
1403
1404 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1405 Value *SrcLoc = getOrCreateIdent(SrcLocStr);
1406
1407 // Declare useful OpenMP runtime functions.
1408 Value *IV = CLI->getIndVar();
1409 Type *IVTy = IV->getType();
1410 FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this);
1411 FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this);
1412
1413 // Allocate space for computed loop bounds as expected by the "init" function.
1414 Builder.restoreIP(AllocaIP);
1415 Type *I32Type = Type::getInt32Ty(M.getContext());
1416 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
1417 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
1418 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
1419 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");
1420
1421 // At the end of the preheader, prepare for calling the "init" function by
1422 // storing the current loop bounds into the allocated space. A canonical loop
1423 // always iterates from 0 to trip-count with step 1. Note that "init" expects
1424 // and produces an inclusive upper bound.
1425 BasicBlock *PreHeader = CLI->getPreheader();
1426 Builder.SetInsertPoint(PreHeader->getTerminator());
1427 Constant *One = ConstantInt::get(IVTy, 1);
1428 Builder.CreateStore(One, PLowerBound);
1429 Value *UpperBound = CLI->getTripCount();
1430 Builder.CreateStore(UpperBound, PUpperBound);
1431 Builder.CreateStore(One, PStride);
1432
1433 BasicBlock *Header = CLI->getHeader();
1434 BasicBlock *Exit = CLI->getExit();
1435 BasicBlock *Cond = CLI->getCond();
1436 InsertPointTy AfterIP = CLI->getAfterIP();
1437
1438 // The CLI will be "broken" in the code below, as the loop is no longer
1439 // a valid canonical loop.
1440
1441 if (!Chunk)
1442 Chunk = One;
1443
1444 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
1445
1446 Constant *SchedulingType =
1447 ConstantInt::get(I32Type, static_cast<int>(SchedType));
1448
1449 // Call the "init" function.
1450 Builder.CreateCall(DynamicInit,
1451 {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One,
1452 UpperBound, /* step */ One, Chunk});
1453
1454 // An outer loop around the existing one.
1455 BasicBlock *OuterCond = BasicBlock::Create(
1456 PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond",
1457 PreHeader->getParent());
1458 // This needs to be 32-bit always, so can't use the IVTy Zero above.
1459 Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt());
1460 Value *Res =
1461 Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter,
1462 PLowerBound, PUpperBound, PStride});
1463 Constant *Zero32 = ConstantInt::get(I32Type, 0);
1464 Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32);
1465 Value *LowerBound =
1466 Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb");
1467 Builder.CreateCondBr(MoreWork, Header, Exit);
1468
1469 // Change PHI-node in loop header to use outer cond rather than preheader,
1470 // and set IV to the LowerBound.
1471 Instruction *Phi = &Header->front();
1472 auto *PI = cast<PHINode>(Phi);
1473 PI->setIncomingBlock(0, OuterCond);
1474 PI->setIncomingValue(0, LowerBound);
1475
1476 // Then set the pre-header to jump to the OuterCond
1477 Instruction *Term = PreHeader->getTerminator();
1478 auto *Br = cast<BranchInst>(Term);
1479 Br->setSuccessor(0, OuterCond);
1480
1481 // Modify the inner condition:
1482 // * Use the UpperBound returned from the DynamicNext call.
1483 // * jump to the loop outer loop when done with one of the inner loops.
1484 Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt());
1485 UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub");
1486 Instruction *Comp = &*Builder.GetInsertPoint();
1487 auto *CI = cast<CmpInst>(Comp);
1488 CI->setOperand(1, UpperBound);
1489 // Redirect the inner exit to branch to outer condition.
1490 Instruction *Branch = &Cond->back();
1491 auto *BI = cast<BranchInst>(Branch);
1492 assert(BI->getSuccessor(1) == Exit)(static_cast <bool> (BI->getSuccessor(1) == Exit) ? void
(0) : __assert_fail ("BI->getSuccessor(1) == Exit", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1492, __extension__ __PRETTY_FUNCTION__))
;
1493 BI->setSuccessor(1, OuterCond);
1494
1495 // Add the barrier if requested.
1496 if (NeedsBarrier) {
1497 Builder.SetInsertPoint(&Exit->back());
1498 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
1499 omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
1500 /* CheckCancelFlag */ false);
1501 }
1502
1503 return AfterIP;
1504}
1505
1506/// Make \p Source branch to \p Target.
1507///
1508/// Handles two situations:
1509/// * \p Source already has an unconditional branch.
1510/// * \p Source is a degenerate block (no terminator because the BB is
1511/// the current head of the IR construction).
1512static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) {
1513 if (Instruction *Term = Source->getTerminator()) {
1514 auto *Br = cast<BranchInst>(Term);
1515 assert(!Br->isConditional() &&(static_cast <bool> (!Br->isConditional() &&
"BB's terminator must be an unconditional branch (or degenerate)"
) ? void (0) : __assert_fail ("!Br->isConditional() && \"BB's terminator must be an unconditional branch (or degenerate)\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1516, __extension__ __PRETTY_FUNCTION__))
1516 "BB's terminator must be an unconditional branch (or degenerate)")(static_cast <bool> (!Br->isConditional() &&
"BB's terminator must be an unconditional branch (or degenerate)"
) ? void (0) : __assert_fail ("!Br->isConditional() && \"BB's terminator must be an unconditional branch (or degenerate)\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1516, __extension__ __PRETTY_FUNCTION__))
;
1517 BasicBlock *Succ = Br->getSuccessor(0);
1518 Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true);
1519 Br->setSuccessor(0, Target);
1520 return;
1521 }
1522
1523 auto *NewBr = BranchInst::Create(Target, Source);
1524 NewBr->setDebugLoc(DL);
1525}
1526
1527/// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is,
1528/// after this \p OldTarget will be orphaned.
1529static void redirectAllPredecessorsTo(BasicBlock *OldTarget,
1530 BasicBlock *NewTarget, DebugLoc DL) {
1531 for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget)))
1532 redirectTo(Pred, NewTarget, DL);
1533}
1534
1535/// Determine which blocks in \p BBs are reachable from outside and remove the
1536/// ones that are not reachable from the function.
1537static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) {
1538 SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()};
1539 auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) {
1540 for (Use &U : BB->uses()) {
1541 auto *UseInst = dyn_cast<Instruction>(U.getUser());
1542 if (!UseInst)
1543 continue;
1544 if (BBsToErase.count(UseInst->getParent()))
1545 continue;
1546 return true;
1547 }
1548 return false;
1549 };
1550
1551 while (true) {
1552 bool Changed = false;
1553 for (BasicBlock *BB : make_early_inc_range(BBsToErase)) {
1554 if (HasRemainingUses(BB)) {
1555 BBsToErase.erase(BB);
1556 Changed = true;
1557 }
1558 }
1559 if (!Changed)
1560 break;
1561 }
1562
1563 SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end());
1564 DeleteDeadBlocks(BBVec);
1565}
1566
/// Collapse a perfect (or near-perfect) loop nest into a single canonical
/// loop whose trip count is the product of the input trip counts. The original
/// induction variables are re-derived from the collapsed one via div/mod, and
/// the now-unused control blocks of the input loops are deleted.
CanonicalLoopInfo *
OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
                               InsertPointTy ComputeIP) {
  assert(Loops.size() >= 1 && "At least one loop required");
  size_t NumLoops = Loops.size();

  // Nothing to do if there is already just one loop.
  if (NumLoops == 1)
    return Loops.front();

  CanonicalLoopInfo *Outermost = Loops.front();
  CanonicalLoopInfo *Innermost = Loops.back();
  BasicBlock *OrigPreheader = Outermost->getPreheader();
  BasicBlock *OrigAfter = Outermost->getAfter();
  Function *F = OrigPreheader->getParent();

  // Setup the IRBuilder for inserting the trip count computation.
  Builder.SetCurrentDebugLocation(DL);
  if (ComputeIP.isSet())
    Builder.restoreIP(ComputeIP);
  else
    Builder.restoreIP(Outermost->getPreheaderIP());

  // Derive the collapsed' loop trip count.
  // TODO: Find common/largest indvar type.
  Value *CollapsedTripCount = nullptr;
  for (CanonicalLoopInfo *L : Loops) {
    Value *OrigTripCount = L->getTripCount();
    if (!CollapsedTripCount) {
      // First loop: its trip count seeds the product.
      CollapsedTripCount = OrigTripCount;
      continue;
    }

    // TODO: Enable UndefinedSanitizer to diagnose an overflow here.
    CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount,
                                           {}, /*HasNUW=*/true);
  }

  // Create the collapsed loop control flow.
  CanonicalLoopInfo *Result =
      createLoopSkeleton(DL, CollapsedTripCount, F,
                         OrigPreheader->getNextNode(), OrigAfter, "collapsed");

  // Build the collapsed loop body code.
  // Start with deriving the input loop induction variables from the collapsed
  // one, using a divmod scheme. To preserve the original loops' order, the
  // innermost loop use the least significant bits.
  Builder.restoreIP(Result->getBodyIP());

  Value *Leftover = Result->getIndVar();
  SmallVector<Value *> NewIndVars;
  NewIndVars.set_size(NumLoops);
  // Peel indvars off from the innermost loop outwards: iv[i] = Leftover %
  // tc[i], then shift the remaining bits down with an unsigned division.
  for (int i = NumLoops - 1; i >= 1; --i) {
    Value *OrigTripCount = Loops[i]->getTripCount();

    Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount);
    NewIndVars[i] = NewIndVar;

    Leftover = Builder.CreateUDiv(Leftover, OrigTripCount);
  }
  // Outermost loop gets all the remaining bits.
  NewIndVars[0] = Leftover;

  // Construct the loop body control flow.
  // We progressively construct the branch structure following in direction of
  // the control flow, from the leading in-between code, the loop nest body, the
  // trailing in-between code, and rejoining the collapsed loop's latch.
  // ContinueBlock and ContinuePred keep track of the source(s) of next edge. If
  // the ContinueBlock is set, continue with that block. If ContinuePred, use
  // its predecessors as sources.
  BasicBlock *ContinueBlock = Result->getBody();
  BasicBlock *ContinuePred = nullptr;
  auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest,
                                                          BasicBlock *NextSrc) {
    if (ContinueBlock)
      redirectTo(ContinueBlock, Dest, DL);
    else
      redirectAllPredecessorsTo(ContinuePred, Dest, DL);

    ContinueBlock = nullptr;
    ContinuePred = NextSrc;
  };

  // The code before the nested loop of each level.
  // Because we are sinking it into the nest, it will be executed more often
  // that the original loop. More sophisticated schemes could keep track of what
  // the in-between code is and instantiate it only once per thread.
  for (size_t i = 0; i < NumLoops - 1; ++i)
    ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader());

  // Connect the loop nest body.
  ContinueWith(Innermost->getBody(), Innermost->getLatch());

  // The code after the nested loop at each level.
  for (size_t i = NumLoops - 1; i > 0; --i)
    ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch());

  // Connect the finished loop to the collapsed loop latch.
  ContinueWith(Result->getLatch(), nullptr);

  // Replace the input loops with the new collapsed loop.
  redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL);
  redirectTo(Result->getAfter(), Outermost->getAfter(), DL);

  // Replace the input loop indvars with the derived ones.
  for (size_t i = 0; i < NumLoops; ++i)
    Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]);

  // Remove unused parts of the input loops.
  SmallVector<BasicBlock *, 12> OldControlBBs;
  OldControlBBs.reserve(6 * Loops.size());
  for (CanonicalLoopInfo *Loop : Loops)
    Loop->collectControlBlocks(OldControlBBs);
  removeUnusedBlocksFromParent(OldControlBBs);

#ifndef NDEBUG
  Result->assertOK();
#endif
  return Result;
}
1687
1688std::vector<CanonicalLoopInfo *>
1689OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
1690 ArrayRef<Value *> TileSizes) {
1691 assert(TileSizes.size() == Loops.size() &&(static_cast <bool> (TileSizes.size() == Loops.size() &&
"Must pass as many tile sizes as there are loops") ? void (0
) : __assert_fail ("TileSizes.size() == Loops.size() && \"Must pass as many tile sizes as there are loops\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1692, __extension__ __PRETTY_FUNCTION__))
1692 "Must pass as many tile sizes as there are loops")(static_cast <bool> (TileSizes.size() == Loops.size() &&
"Must pass as many tile sizes as there are loops") ? void (0
) : __assert_fail ("TileSizes.size() == Loops.size() && \"Must pass as many tile sizes as there are loops\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1692, __extension__ __PRETTY_FUNCTION__))
;
1693 int NumLoops = Loops.size();
1694 assert(NumLoops >= 1 && "At least one loop to tile required")(static_cast <bool> (NumLoops >= 1 && "At least one loop to tile required"
) ? void (0) : __assert_fail ("NumLoops >= 1 && \"At least one loop to tile required\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1694, __extension__ __PRETTY_FUNCTION__))
;
1695
1696 CanonicalLoopInfo *OutermostLoop = Loops.front();
1697 CanonicalLoopInfo *InnermostLoop = Loops.back();
1698 Function *F = OutermostLoop->getBody()->getParent();
1699 BasicBlock *InnerEnter = InnermostLoop->getBody();
1700 BasicBlock *InnerLatch = InnermostLoop->getLatch();
1701
1702 // Collect original trip counts and induction variable to be accessible by
1703 // index. Also, the structure of the original loops is not preserved during
1704 // the construction of the tiled loops, so do it before we scavenge the BBs of
1705 // any original CanonicalLoopInfo.
1706 SmallVector<Value *, 4> OrigTripCounts, OrigIndVars;
1707 for (CanonicalLoopInfo *L : Loops) {
1708 OrigTripCounts.push_back(L->getTripCount());
1709 OrigIndVars.push_back(L->getIndVar());
1710 }
1711
1712 // Collect the code between loop headers. These may contain SSA definitions
1713 // that are used in the loop nest body. To be usable with in the innermost
1714 // body, these BasicBlocks will be sunk into the loop nest body. That is,
1715 // these instructions may be executed more often than before the tiling.
1716 // TODO: It would be sufficient to only sink them into body of the
1717 // corresponding tile loop.
1718 SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode;
1719 for (int i = 0; i < NumLoops - 1; ++i) {
1720 CanonicalLoopInfo *Surrounding = Loops[i];
1721 CanonicalLoopInfo *Nested = Loops[i + 1];
1722
1723 BasicBlock *EnterBB = Surrounding->getBody();
1724 BasicBlock *ExitBB = Nested->getHeader();
1725 InbetweenCode.emplace_back(EnterBB, ExitBB);
1726 }
1727
1728 // Compute the trip counts of the floor loops.
1729 Builder.SetCurrentDebugLocation(DL);
1730 Builder.restoreIP(OutermostLoop->getPreheaderIP());
1731 SmallVector<Value *, 4> FloorCount, FloorRems;
1732 for (int i = 0; i < NumLoops; ++i) {
1733 Value *TileSize = TileSizes[i];
1734 Value *OrigTripCount = OrigTripCounts[i];
1735 Type *IVType = OrigTripCount->getType();
1736
1737 Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize);
1738 Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize);
1739
1740 // 0 if tripcount divides the tilesize, 1 otherwise.
1741 // 1 means we need an additional iteration for a partial tile.
1742 //
1743 // Unfortunately we cannot just use the roundup-formula
1744 // (tripcount + tilesize - 1)/tilesize
1745 // because the summation might overflow. We do not want introduce undefined
1746 // behavior when the untiled loop nest did not.
1747 Value *FloorTripOverflow =
1748 Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0));
1749
1750 FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType);
1751 FloorTripCount =
1752 Builder.CreateAdd(FloorTripCount, FloorTripOverflow,
1753 "omp_floor" + Twine(i) + ".tripcount", true);
1754
1755 // Remember some values for later use.
1756 FloorCount.push_back(FloorTripCount);
1757 FloorRems.push_back(FloorTripRem);
1758 }
1759
1760 // Generate the new loop nest, from the outermost to the innermost.
1761 std::vector<CanonicalLoopInfo *> Result;
1762 Result.reserve(NumLoops * 2);
1763
1764 // The basic block of the surrounding loop that enters the nest generated
1765 // loop.
1766 BasicBlock *Enter = OutermostLoop->getPreheader();
1767
1768 // The basic block of the surrounding loop where the inner code should
1769 // continue.
1770 BasicBlock *Continue = OutermostLoop->getAfter();
1771
1772 // Where the next loop basic block should be inserted.
1773 BasicBlock *OutroInsertBefore = InnermostLoop->getExit();
1774
1775 auto EmbeddNewLoop =
1776 [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore](
1777 Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * {
1778 CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton(
1779 DL, TripCount, F, InnerEnter, OutroInsertBefore, Name);
1780 redirectTo(Enter, EmbeddedLoop->getPreheader(), DL);
1781 redirectTo(EmbeddedLoop->getAfter(), Continue, DL);
1782
1783 // Setup the position where the next embedded loop connects to this loop.
1784 Enter = EmbeddedLoop->getBody();
1785 Continue = EmbeddedLoop->getLatch();
1786 OutroInsertBefore = EmbeddedLoop->getLatch();
1787 return EmbeddedLoop;
1788 };
1789
1790 auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts,
1791 const Twine &NameBase) {
1792 for (auto P : enumerate(TripCounts)) {
1793 CanonicalLoopInfo *EmbeddedLoop =
1794 EmbeddNewLoop(P.value(), NameBase + Twine(P.index()));
1795 Result.push_back(EmbeddedLoop);
1796 }
1797 };
1798
1799 EmbeddNewLoops(FloorCount, "floor");
1800
1801 // Within the innermost floor loop, emit the code that computes the tile
1802 // sizes.
1803 Builder.SetInsertPoint(Enter->getTerminator());
1804 SmallVector<Value *, 4> TileCounts;
1805 for (int i = 0; i < NumLoops; ++i) {
1806 CanonicalLoopInfo *FloorLoop = Result[i];
1807 Value *TileSize = TileSizes[i];
1808
1809 Value *FloorIsEpilogue =
1810 Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]);
1811 Value *TileTripCount =
1812 Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize);
1813
1814 TileCounts.push_back(TileTripCount);
1815 }
1816
1817 // Create the tile loops.
1818 EmbeddNewLoops(TileCounts, "tile");
1819
1820 // Insert the inbetween code into the body.
1821 BasicBlock *BodyEnter = Enter;
1822 BasicBlock *BodyEntered = nullptr;
1823 for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) {
1824 BasicBlock *EnterBB = P.first;
1825 BasicBlock *ExitBB = P.second;
1826
1827 if (BodyEnter)
1828 redirectTo(BodyEnter, EnterBB, DL);
1829 else
1830 redirectAllPredecessorsTo(BodyEntered, EnterBB, DL);
1831
1832 BodyEnter = nullptr;
1833 BodyEntered = ExitBB;
1834 }
1835
1836 // Append the original loop nest body into the generated loop nest body.
1837 if (BodyEnter)
1838 redirectTo(BodyEnter, InnerEnter, DL);
1839 else
1840 redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL);
1841 redirectAllPredecessorsTo(InnerLatch, Continue, DL);
1842
1843 // Replace the original induction variable with an induction variable computed
1844 // from the tile and floor induction variables.
1845 Builder.restoreIP(Result.back()->getBodyIP());
1846 for (int i = 0; i < NumLoops; ++i) {
1847 CanonicalLoopInfo *FloorLoop = Result[i];
1848 CanonicalLoopInfo *TileLoop = Result[NumLoops + i];
1849 Value *OrigIndVar = OrigIndVars[i];
1850 Value *Size = TileSizes[i];
1851
1852 Value *Scale =
1853 Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true);
1854 Value *Shift =
1855 Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true);
1856 OrigIndVar->replaceAllUsesWith(Shift);
1857 }
1858
1859 // Remove unused parts of the original loops.
1860 SmallVector<BasicBlock *, 12> OldControlBBs;
1861 OldControlBBs.reserve(6 * Loops.size());
1862 for (CanonicalLoopInfo *Loop : Loops)
1863 Loop->collectControlBlocks(OldControlBBs);
1864 removeUnusedBlocksFromParent(OldControlBBs);
1865
1866#ifndef NDEBUG
1867 for (CanonicalLoopInfo *GenL : Result)
1868 GenL->assertOK();
1869#endif
1870 return Result;
1871}
1872
1873OpenMPIRBuilder::InsertPointTy
1874OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc,
1875 llvm::Value *BufSize, llvm::Value *CpyBuf,
1876 llvm::Value *CpyFn, llvm::Value *DidIt) {
1877 if (!updateToLocation(Loc))
1878 return Loc.IP;
1879
1880 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1881 Value *Ident = getOrCreateIdent(SrcLocStr);
1882 Value *ThreadId = getOrCreateThreadID(Ident);
1883
1884 llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt);
1885
1886 Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD};
1887
1888 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate);
1889 Builder.CreateCall(Fn, Args);
1890
1891 return Builder.saveIP();
1892}
1893
1894OpenMPIRBuilder::InsertPointTy
1895OpenMPIRBuilder::createSingle(const LocationDescription &Loc,
1896 BodyGenCallbackTy BodyGenCB,
1897 FinalizeCallbackTy FiniCB, llvm::Value *DidIt) {
1898
1899 if (!updateToLocation(Loc))
1900 return Loc.IP;
1901
1902 // If needed (i.e. not null), initialize `DidIt` with 0
1903 if (DidIt) {
1904 Builder.CreateStore(Builder.getInt32(0), DidIt);
1905 }
1906
1907 Directive OMPD = Directive::OMPD_single;
1908 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1909 Value *Ident = getOrCreateIdent(SrcLocStr);
1910 Value *ThreadId = getOrCreateThreadID(Ident);
1911 Value *Args[] = {Ident, ThreadId};
1912
1913 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single);
1914 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1915
1916 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single);
1917 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
1918
1919 // generates the following:
1920 // if (__kmpc_single()) {
1921 // .... single region ...
1922 // __kmpc_end_single
1923 // }
1924
1925 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1926 /*Conditional*/ true, /*hasFinalize*/ true);
1927}
1928
1929OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical(
1930 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
1931 FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) {
1932
1933 if (!updateToLocation(Loc))
1934 return Loc.IP;
1935
1936 Directive OMPD = Directive::OMPD_critical;
1937 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1938 Value *Ident = getOrCreateIdent(SrcLocStr);
1939 Value *ThreadId = getOrCreateThreadID(Ident);
1940 Value *LockVar = getOMPCriticalRegionLock(CriticalName);
1941 Value *Args[] = {Ident, ThreadId, LockVar};
1942
1943 SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args));
1944 Function *RTFn = nullptr;
1945 if (HintInst) {
1946 // Add Hint to entry Args and create call
1947 EnterArgs.push_back(HintInst);
1948 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint);
1949 } else {
1950 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical);
1951 }
1952 Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs);
1953
1954 Function *ExitRTLFn =
1955 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical);
1956 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
1957
1958 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1959 /*Conditional*/ false, /*hasFinalize*/ true);
1960}
1961
1962OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion(
1963 Directive OMPD, Instruction *EntryCall, Instruction *ExitCall,
1964 BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional,
1965 bool HasFinalize, bool IsCancellable) {
1966
1967 if (HasFinalize)
1968 FinalizationStack.push_back({FiniCB, OMPD, IsCancellable});
1969
1970 // Create inlined region's entry and body blocks, in preparation
1971 // for conditional creation
1972 BasicBlock *EntryBB = Builder.GetInsertBlock();
1973 Instruction *SplitPos = EntryBB->getTerminator();
1974 if (!isa_and_nonnull<BranchInst>(SplitPos))
1975 SplitPos = new UnreachableInst(Builder.getContext(), EntryBB);
1976 BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end");
1977 BasicBlock *FiniBB =
1978 EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize");
1979
1980 Builder.SetInsertPoint(EntryBB->getTerminator());
1981 emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional);
1982
1983 // generate body
1984 BodyGenCB(/* AllocaIP */ InsertPointTy(),
1985 /* CodeGenIP */ Builder.saveIP(), *FiniBB);
1986
1987 // If we didn't emit a branch to FiniBB during body generation, it means
1988 // FiniBB is unreachable (e.g. while(1);). stop generating all the
1989 // unreachable blocks, and remove anything we are not going to use.
1990 auto SkipEmittingRegion = FiniBB->hasNPredecessors(0);
1991 if (SkipEmittingRegion) {
1992 FiniBB->eraseFromParent();
1993 ExitCall->eraseFromParent();
1994 // Discard finalization if we have it.
1995 if (HasFinalize) {
1996 assert(!FinalizationStack.empty() &&(static_cast <bool> (!FinalizationStack.empty() &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("!FinalizationStack.empty() && \"Unexpected finalization stack state!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1997, __extension__ __PRETTY_FUNCTION__))
1997 "Unexpected finalization stack state!")(static_cast <bool> (!FinalizationStack.empty() &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("!FinalizationStack.empty() && \"Unexpected finalization stack state!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1997, __extension__ __PRETTY_FUNCTION__))
;
1998 FinalizationStack.pop_back();
1999 }
2000 } else {
2001 // emit exit call and do any needed finalization.
2002 auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt());
2003 assert(FiniBB->getTerminator()->getNumSuccessors() == 1 &&(static_cast <bool> (FiniBB->getTerminator()->getNumSuccessors
() == 1 && FiniBB->getTerminator()->getSuccessor
(0) == ExitBB && "Unexpected control flow graph state!!"
) ? void (0) : __assert_fail ("FiniBB->getTerminator()->getNumSuccessors() == 1 && FiniBB->getTerminator()->getSuccessor(0) == ExitBB && \"Unexpected control flow graph state!!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2005, __extension__ __PRETTY_FUNCTION__))
2004 FiniBB->getTerminator()->getSuccessor(0) == ExitBB &&(static_cast <bool> (FiniBB->getTerminator()->getNumSuccessors
() == 1 && FiniBB->getTerminator()->getSuccessor
(0) == ExitBB && "Unexpected control flow graph state!!"
) ? void (0) : __assert_fail ("FiniBB->getTerminator()->getNumSuccessors() == 1 && FiniBB->getTerminator()->getSuccessor(0) == ExitBB && \"Unexpected control flow graph state!!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2005, __extension__ __PRETTY_FUNCTION__))
2005 "Unexpected control flow graph state!!")(static_cast <bool> (FiniBB->getTerminator()->getNumSuccessors
() == 1 && FiniBB->getTerminator()->getSuccessor
(0) == ExitBB && "Unexpected control flow graph state!!"
) ? void (0) : __assert_fail ("FiniBB->getTerminator()->getNumSuccessors() == 1 && FiniBB->getTerminator()->getSuccessor(0) == ExitBB && \"Unexpected control flow graph state!!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2005, __extension__ __PRETTY_FUNCTION__))
;
2006 emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize);
2007 assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB &&(static_cast <bool> (FiniBB->getUniquePredecessor()->
getUniqueSuccessor() == FiniBB && "Unexpected Control Flow State!"
) ? void (0) : __assert_fail ("FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB && \"Unexpected Control Flow State!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2008, __extension__ __PRETTY_FUNCTION__))
2008 "Unexpected Control Flow State!")(static_cast <bool> (FiniBB->getUniquePredecessor()->
getUniqueSuccessor() == FiniBB && "Unexpected Control Flow State!"
) ? void (0) : __assert_fail ("FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB && \"Unexpected Control Flow State!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2008, __extension__ __PRETTY_FUNCTION__))
;
2009 MergeBlockIntoPredecessor(FiniBB);
2010 }
2011
2012 // If we are skipping the region of a non conditional, remove the exit
2013 // block, and clear the builder's insertion point.
2014 assert(SplitPos->getParent() == ExitBB &&(static_cast <bool> (SplitPos->getParent() == ExitBB
&& "Unexpected Insertion point location!") ? void (0
) : __assert_fail ("SplitPos->getParent() == ExitBB && \"Unexpected Insertion point location!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2015, __extension__ __PRETTY_FUNCTION__))
2015 "Unexpected Insertion point location!")(static_cast <bool> (SplitPos->getParent() == ExitBB
&& "Unexpected Insertion point location!") ? void (0
) : __assert_fail ("SplitPos->getParent() == ExitBB && \"Unexpected Insertion point location!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2015, __extension__ __PRETTY_FUNCTION__))
;
2016 if (!Conditional && SkipEmittingRegion) {
2017 ExitBB->eraseFromParent();
2018 Builder.ClearInsertionPoint();
2019 } else {
2020 auto merged = MergeBlockIntoPredecessor(ExitBB);
2021 BasicBlock *ExitPredBB = SplitPos->getParent();
2022 auto InsertBB = merged ? ExitPredBB : ExitBB;
2023 if (!isa_and_nonnull<BranchInst>(SplitPos))
2024 SplitPos->eraseFromParent();
2025 Builder.SetInsertPoint(InsertBB);
2026 }
2027
2028 return Builder.saveIP();
2029}
2030
2031OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry(
2032 Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) {
2033 // if nothing to do, Return current insertion point.
2034 if (!Conditional || !EntryCall)
2035 return Builder.saveIP();
2036
2037 BasicBlock *EntryBB = Builder.GetInsertBlock();
2038 Value *CallBool = Builder.CreateIsNotNull(EntryCall);
2039 auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body");
2040 auto *UI = new UnreachableInst(Builder.getContext(), ThenBB);
2041
2042 // Emit thenBB and set the Builder's insertion point there for
2043 // body generation next. Place the block after the current block.
2044 Function *CurFn = EntryBB->getParent();
2045 CurFn->getBasicBlockList().insertAfter(EntryBB->getIterator(), ThenBB);
2046
2047 // Move Entry branch to end of ThenBB, and replace with conditional
2048 // branch (If-stmt)
2049 Instruction *EntryBBTI = EntryBB->getTerminator();
2050 Builder.CreateCondBr(CallBool, ThenBB, ExitBB);
2051 EntryBBTI->removeFromParent();
2052 Builder.SetInsertPoint(UI);
2053 Builder.Insert(EntryBBTI);
2054 UI->eraseFromParent();
2055 Builder.SetInsertPoint(ThenBB->getTerminator());
2056
2057 // return an insertion point to ExitBB.
2058 return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt());
2059}
2060
2061OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit(
2062 omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall,
2063 bool HasFinalize) {
2064
2065 Builder.restoreIP(FinIP);
2066
2067 // If there is finalization to do, emit it before the exit call
2068 if (HasFinalize) {
2069 assert(!FinalizationStack.empty() &&(static_cast <bool> (!FinalizationStack.empty() &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("!FinalizationStack.empty() && \"Unexpected finalization stack state!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2070, __extension__ __PRETTY_FUNCTION__))
2070 "Unexpected finalization stack state!")(static_cast <bool> (!FinalizationStack.empty() &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("!FinalizationStack.empty() && \"Unexpected finalization stack state!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2070, __extension__ __PRETTY_FUNCTION__))
;
2071
2072 FinalizationInfo Fi = FinalizationStack.pop_back_val();
2073 assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!")(static_cast <bool> (Fi.DK == OMPD && "Unexpected Directive for Finalization call!"
) ? void (0) : __assert_fail ("Fi.DK == OMPD && \"Unexpected Directive for Finalization call!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2073, __extension__ __PRETTY_FUNCTION__))
;
2074
2075 Fi.FiniCB(FinIP);
2076
2077 BasicBlock *FiniBB = FinIP.getBlock();
2078 Instruction *FiniBBTI = FiniBB->getTerminator();
2079
2080 // set Builder IP for call creation
2081 Builder.SetInsertPoint(FiniBBTI);
2082 }
2083
2084 if (!ExitCall)
2085 return Builder.saveIP();
2086
2087 // place the Exitcall as last instruction before Finalization block terminator
2088 ExitCall->removeFromParent();
2089 Builder.Insert(ExitCall);
2090
2091 return IRBuilder<>::InsertPoint(ExitCall->getParent(),
2092 ExitCall->getIterator());
2093}
2094
2095OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks(
2096 InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr,
2097 llvm::IntegerType *IntPtrTy, bool BranchtoEnd) {
2098 if (!IP.isSet())
2099 return IP;
2100
2101 IRBuilder<>::InsertPointGuard IPG(Builder);
2102
2103 // creates the following CFG structure
2104 // OMP_Entry : (MasterAddr != PrivateAddr)?
2105 // F T
2106 // | \
2107 // | copin.not.master
2108 // | /
2109 // v /
2110 // copyin.not.master.end
2111 // |
2112 // v
2113 // OMP.Entry.Next
2114
2115 BasicBlock *OMP_Entry = IP.getBlock();
2116 Function *CurFn = OMP_Entry->getParent();
2117 BasicBlock *CopyBegin =
2118 BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn);
2119 BasicBlock *CopyEnd = nullptr;
2120
2121 // If entry block is terminated, split to preserve the branch to following
2122 // basic block (i.e. OMP.Entry.Next), otherwise, leave everything as is.
2123 if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) {
2124 CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(),
2125 "copyin.not.master.end");
2126 OMP_Entry->getTerminator()->eraseFromParent();
2127 } else {
2128 CopyEnd =
2129 BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn);
2130 }
2131
2132 Builder.SetInsertPoint(OMP_Entry);
2133 Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy);
2134 Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy);
2135 Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr);
2136 Builder.CreateCondBr(cmp, CopyBegin, CopyEnd);
2137
2138 Builder.SetInsertPoint(CopyBegin);
2139 if (BranchtoEnd)
2140 Builder.SetInsertPoint(Builder.CreateBr(CopyEnd));
2141
2142 return Builder.saveIP();
2143}
2144
2145CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc,
2146 Value *Size, Value *Allocator,
2147 std::string Name) {
2148 IRBuilder<>::InsertPointGuard IPG(Builder);
2149 Builder.restoreIP(Loc.IP);
2150
2151 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2152 Value *Ident = getOrCreateIdent(SrcLocStr);
2153 Value *ThreadId = getOrCreateThreadID(Ident);
2154 Value *Args[] = {ThreadId, Size, Allocator};
2155
2156 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc);
2157
2158 return Builder.CreateCall(Fn, Args, Name);
2159}
2160
2161CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc,
2162 Value *Addr, Value *Allocator,
2163 std::string Name) {
2164 IRBuilder<>::InsertPointGuard IPG(Builder);
2165 Builder.restoreIP(Loc.IP);
2166
2167 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2168 Value *Ident = getOrCreateIdent(SrcLocStr);
2169 Value *ThreadId = getOrCreateThreadID(Ident);
2170 Value *Args[] = {ThreadId, Addr, Allocator};
2171 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free);
2172 return Builder.CreateCall(Fn, Args, Name);
2173}
2174
2175CallInst *OpenMPIRBuilder::createCachedThreadPrivate(
2176 const LocationDescription &Loc, llvm::Value *Pointer,
2177 llvm::ConstantInt *Size, const llvm::Twine &Name) {
2178 IRBuilder<>::InsertPointGuard IPG(Builder);
2179 Builder.restoreIP(Loc.IP);
2180
2181 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2182 Value *Ident = getOrCreateIdent(SrcLocStr);
2183 Value *ThreadId = getOrCreateThreadID(Ident);
2184 Constant *ThreadPrivateCache =
2185 getOrCreateOMPInternalVariable(Int8PtrPtr, Name);
2186 llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache};
2187
2188 Function *Fn =
2189 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached);
2190
2191 return Builder.CreateCall(Fn, Args);
2192}
2193
2194std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts,
2195 StringRef FirstSeparator,
2196 StringRef Separator) {
2197 SmallString<128> Buffer;
2198 llvm::raw_svector_ostream OS(Buffer);
2199 StringRef Sep = FirstSeparator;
2200 for (StringRef Part : Parts) {
2201 OS << Sep << Part;
2202 Sep = Separator;
2203 }
2204 return OS.str().str();
2205}
2206
2207Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable(
2208 llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
2209 // TODO: Replace the twine arg with stringref to get rid of the conversion
2210 // logic. However This is taken from current implementation in clang as is.
2211 // Since this method is used in many places exclusively for OMP internal use
2212 // we will keep it as is for temporarily until we move all users to the
2213 // builder and then, if possible, fix it everywhere in one go.
2214 SmallString<256> Buffer;
2215 llvm::raw_svector_ostream Out(Buffer);
2216 Out << Name;
2217 StringRef RuntimeName = Out.str();
2218 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
2219 if (Elem.second) {
2220 assert(Elem.second->getType()->getPointerElementType() == Ty &&(static_cast <bool> (Elem.second->getType()->getPointerElementType
() == Ty && "OMP internal variable has different type than requested"
) ? void (0) : __assert_fail ("Elem.second->getType()->getPointerElementType() == Ty && \"OMP internal variable has different type than requested\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2221, __extension__ __PRETTY_FUNCTION__))
2221 "OMP internal variable has different type than requested")(static_cast <bool> (Elem.second->getType()->getPointerElementType
() == Ty && "OMP internal variable has different type than requested"
) ? void (0) : __assert_fail ("Elem.second->getType()->getPointerElementType() == Ty && \"OMP internal variable has different type than requested\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2221, __extension__ __PRETTY_FUNCTION__))
;
2222 } else {
2223 // TODO: investigate the appropriate linkage type used for the global
2224 // variable for possibly changing that to internal or private, or maybe
2225 // create different versions of the function for different OMP internal
2226 // variables.
2227 Elem.second = new llvm::GlobalVariable(
2228 M, Ty, /*IsConstant*/ false, llvm::GlobalValue::CommonLinkage,
2229 llvm::Constant::getNullValue(Ty), Elem.first(),
2230 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
2231 AddressSpace);
2232 }
2233
2234 return Elem.second;
2235}
2236
2237Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) {
2238 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
2239 std::string Name = getNameWithSeparators({Prefix, "var"}, ".", ".");
2240 return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name);
2241}
2242
2243GlobalVariable *
2244OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
2245 std::string VarName) {
2246 llvm::Constant *MaptypesArrayInit =
2247 llvm::ConstantDataArray::get(M.getContext(), Mappings);
2248 auto *MaptypesArrayGlobal = new llvm::GlobalVariable(
2249 M, MaptypesArrayInit->getType(),
2250 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MaptypesArrayInit,
2251 VarName);
2252 MaptypesArrayGlobal->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2253 return MaptypesArrayGlobal;
2254}
2255
2256bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic(
2257 const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) {
2258 assert(!(AO == AtomicOrdering::NotAtomic ||(static_cast <bool> (!(AO == AtomicOrdering::NotAtomic ||
AO == llvm::AtomicOrdering::Unordered) && "Unexpected Atomic Ordering."
) ? void (0) : __assert_fail ("!(AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered) && \"Unexpected Atomic Ordering.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2260, __extension__ __PRETTY_FUNCTION__))
2259 AO == llvm::AtomicOrdering::Unordered) &&(static_cast <bool> (!(AO == AtomicOrdering::NotAtomic ||
AO == llvm::AtomicOrdering::Unordered) && "Unexpected Atomic Ordering."
) ? void (0) : __assert_fail ("!(AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered) && \"Unexpected Atomic Ordering.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2260, __extension__ __PRETTY_FUNCTION__))
2260 "Unexpected Atomic Ordering.")(static_cast <bool> (!(AO == AtomicOrdering::NotAtomic ||
AO == llvm::AtomicOrdering::Unordered) && "Unexpected Atomic Ordering."
) ? void (0) : __assert_fail ("!(AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered) && \"Unexpected Atomic Ordering.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2260, __extension__ __PRETTY_FUNCTION__))
;
2261
2262 bool Flush = false;
2263 llvm::AtomicOrdering FlushAO = AtomicOrdering::Monotonic;
2264
2265 switch (AK) {
2266 case Read:
2267 if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease ||
2268 AO == AtomicOrdering::SequentiallyConsistent) {
2269 FlushAO = AtomicOrdering::Acquire;
2270 Flush = true;
2271 }
2272 break;
2273 case Write:
2274 case Update:
2275 if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease ||
2276 AO == AtomicOrdering::SequentiallyConsistent) {
2277 FlushAO = AtomicOrdering::Release;
2278 Flush = true;
2279 }
2280 break;
2281 case Capture:
2282 switch (AO) {
2283 case AtomicOrdering::Acquire:
2284 FlushAO = AtomicOrdering::Acquire;
2285 Flush = true;
2286 break;
2287 case AtomicOrdering::Release:
2288 FlushAO = AtomicOrdering::Release;
2289 Flush = true;
2290 break;
2291 case AtomicOrdering::AcquireRelease:
2292 case AtomicOrdering::SequentiallyConsistent:
2293 FlushAO = AtomicOrdering::AcquireRelease;
2294 Flush = true;
2295 break;
2296 default:
2297 // do nothing - leave silently.
2298 break;
2299 }
2300 }
2301
2302 if (Flush) {
2303 // Currently Flush RT call still doesn't take memory_ordering, so for when
2304 // that happens, this tries to do the resolution of which atomic ordering
2305 // to use with but issue the flush call
2306 // TODO: pass `FlushAO` after memory ordering support is added
2307 (void)FlushAO;
2308 emitFlush(Loc);
2309 }
2310
2311 // for AO == AtomicOrdering::Monotonic and all other case combinations
2312 // do nothing
2313 return Flush;
2314}
2315
2316OpenMPIRBuilder::InsertPointTy
2317OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc,
2318 AtomicOpValue &X, AtomicOpValue &V,
2319 AtomicOrdering AO) {
2320 if (!updateToLocation(Loc))
2321 return Loc.IP;
2322
2323 Type *XTy = X.Var->getType();
2324 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory")(static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2324, __extension__ __PRETTY_FUNCTION__))
;
2325 Type *XElemTy = XTy->getPointerElementType();
2326 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||(static_cast <bool> ((XElemTy->isFloatingPointTy() ||
XElemTy->isIntegerTy() || XElemTy->isPointerTy()) &&
"OMP atomic read expected a scalar type") ? void (0) : __assert_fail
("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic read expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2328, __extension__ __PRETTY_FUNCTION__))
2327 XElemTy->isPointerTy()) &&(static_cast <bool> ((XElemTy->isFloatingPointTy() ||
XElemTy->isIntegerTy() || XElemTy->isPointerTy()) &&
"OMP atomic read expected a scalar type") ? void (0) : __assert_fail
("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic read expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2328, __extension__ __PRETTY_FUNCTION__))
2328 "OMP atomic read expected a scalar type")(static_cast <bool> ((XElemTy->isFloatingPointTy() ||
XElemTy->isIntegerTy() || XElemTy->isPointerTy()) &&
"OMP atomic read expected a scalar type") ? void (0) : __assert_fail
("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic read expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2328, __extension__ __PRETTY_FUNCTION__))
;
2329
2330 Value *XRead = nullptr;
2331
2332 if (XElemTy->isIntegerTy()) {
2333 LoadInst *XLD =
2334 Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read");
2335 XLD->setAtomic(AO);
2336 XRead = cast<Value>(XLD);
2337 } else {
2338 // We need to bitcast and perform atomic op as integer
2339 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
2340 IntegerType *IntCastTy =
2341 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
2342 Value *XBCast = Builder.CreateBitCast(
2343 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast");
2344 LoadInst *XLoad =
2345 Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load");
2346 XLoad->setAtomic(AO);
2347 if (XElemTy->isFloatingPointTy()) {
2348 XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast");
2349 } else {
2350 XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast");
2351 }
2352 }
2353 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read);
2354 Builder.CreateStore(XRead, V.Var, V.IsVolatile);
2355 return Builder.saveIP();
2356}
2357
2358OpenMPIRBuilder::InsertPointTy
2359OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc,
2360 AtomicOpValue &X, Value *Expr,
2361 AtomicOrdering AO) {
2362 if (!updateToLocation(Loc))
2363 return Loc.IP;
2364
2365 Type *XTy = X.Var->getType();
2366 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory")(static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2366, __extension__ __PRETTY_FUNCTION__))
;
2367 Type *XElemTy = XTy->getPointerElementType();
2368 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||(static_cast <bool> ((XElemTy->isFloatingPointTy() ||
XElemTy->isIntegerTy() || XElemTy->isPointerTy()) &&
"OMP atomic write expected a scalar type") ? void (0) : __assert_fail
("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic write expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2370, __extension__ __PRETTY_FUNCTION__))
2369 XElemTy->isPointerTy()) &&(static_cast <bool> ((XElemTy->isFloatingPointTy() ||
XElemTy->isIntegerTy() || XElemTy->isPointerTy()) &&
"OMP atomic write expected a scalar type") ? void (0) : __assert_fail
("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic write expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2370, __extension__ __PRETTY_FUNCTION__))
2370 "OMP atomic write expected a scalar type")(static_cast <bool> ((XElemTy->isFloatingPointTy() ||
XElemTy->isIntegerTy() || XElemTy->isPointerTy()) &&
"OMP atomic write expected a scalar type") ? void (0) : __assert_fail
("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic write expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2370, __extension__ __PRETTY_FUNCTION__))
;
2371
2372 if (XElemTy->isIntegerTy()) {
2373 StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile);
2374 XSt->setAtomic(AO);
2375 } else {
2376 // We need to bitcast and perform atomic op as integers
2377 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
2378 IntegerType *IntCastTy =
2379 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
2380 Value *XBCast = Builder.CreateBitCast(
2381 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast");
2382 Value *ExprCast =
2383 Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast");
2384 StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile);
2385 XSt->setAtomic(AO);
2386 }
2387
2388 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write);
2389 return Builder.saveIP();
2390}
2391
// createAtomicUpdate: emit an OpenMP `atomic update` construct that atomically
// applies `X = X <RMWOp> Expr` (general updates go through the UpdateOp
// callback; see emitAtomicUpdate), emits any flush the ordering requires, and
// returns the insertion point after the generated code.
//
// NOTE(review): this listing is preprocessor-expanded static-analyzer output.
// The repeated `do { ... } while (false)` regions below are ONE
// LLVM_DEBUG({ assert(...); ... }) block from original source lines
// 2399-2410, re-expanded by the annotator once per physical source line; the
// checks only run when debug output for "openmp-ir-builder" is enabled.
2392OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate(
2393 const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
2394 Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
2395 AtomicUpdateCallbackTy &UpdateOp, bool IsXLHSInRHSPart) {
// If the requested location cannot be materialized, return the caller's
// insertion point unchanged (no code is emitted).
2396 if (!updateToLocation(Loc))
2397 return Loc.IP;
2398
// Debug-only sanity checks: X.Var must be a pointer to a scalar
// (float/int/pointer) element, and RMWOp must not be one of the min/max
// operations, which OpenMP atomics do not support here.
2399 LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2402, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2406, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2409, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2400 Type *XTy = X.Var->getType();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2402, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2406, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2409, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2401 assert(XTy->isPointerTy() &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2402, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2406, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2409, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2402 "OMP Atomic expects a pointer to target memory");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2402, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2406, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2409, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2403 Type *XElemTy = XTy->getPointerElementType();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2402, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2406, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2409, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2404 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2402, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2406, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2409, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2405 XElemTy->isPointerTy()) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2402, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2406, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2409, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2406 "OMP atomic update expected a scalar type");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2402, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2406, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2409, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2407 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2402, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2406, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2409, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2408 (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2402, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2406, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2409, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2409 "OpenMP atomic does not support LT or GT operations");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2402, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2406, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2409, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2410 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2402, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2406, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2409, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
;
2411
// Delegate code generation to emitAtomicUpdate, emit a flush if the
// ordering requires one, and return the updated insertion point.
2412 emitAtomicUpdate(AllocIP, X.Var, Expr, AO, RMWOp, UpdateOp, X.IsVolatile,
2413 IsXLHSInRHSPart);
2414 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update);
2415 return Builder.saveIP();
2416}
2417
2418Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2,
2419 AtomicRMWInst::BinOp RMWOp) {
2420 switch (RMWOp) {
2421 case AtomicRMWInst::Add:
2422 return Builder.CreateAdd(Src1, Src2);
2423 case AtomicRMWInst::Sub:
2424 return Builder.CreateSub(Src1, Src2);
2425 case AtomicRMWInst::And:
2426 return Builder.CreateAnd(Src1, Src2);
2427 case AtomicRMWInst::Nand:
2428 return Builder.CreateNeg(Builder.CreateAnd(Src1, Src2));
2429 case AtomicRMWInst::Or:
2430 return Builder.CreateOr(Src1, Src2);
2431 case AtomicRMWInst::Xor:
2432 return Builder.CreateXor(Src1, Src2);
2433 case AtomicRMWInst::Xchg:
2434 case AtomicRMWInst::FAdd:
2435 case AtomicRMWInst::FSub:
2436 case AtomicRMWInst::BAD_BINOP:
2437 case AtomicRMWInst::Max:
2438 case AtomicRMWInst::Min:
2439 case AtomicRMWInst::UMax:
2440 case AtomicRMWInst::UMin:
2441 llvm_unreachable("Unsupported atomic update operation")::llvm::llvm_unreachable_internal("Unsupported atomic update operation"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2441)
;
2442 }
2443 llvm_unreachable("Unsupported atomic update operation")::llvm::llvm_unreachable_internal("Unsupported atomic update operation"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2443)
;
2444}
2445
2446std::pair<Value *, Value *>
2447OpenMPIRBuilder::emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr,
2448 AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
2449 AtomicUpdateCallbackTy &UpdateOp,
2450 bool VolatileX, bool IsXLHSInRHSPart) {
2451 Type *XElemTy = X->getType()->getPointerElementType();
2452
2453 bool DoCmpExch =
2454 ((RMWOp == AtomicRMWInst::BAD_BINOP) || (RMWOp == AtomicRMWInst::FAdd)) ||
2455 (RMWOp == AtomicRMWInst::FSub) ||
2456 (RMWOp == AtomicRMWInst::Sub && !IsXLHSInRHSPart);
2457
2458 std::pair<Value *, Value *> Res;
2459 if (XElemTy->isIntegerTy() && !DoCmpExch) {
2460 Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO);
2461 // not needed except in case of postfix captures. Generate anyway for
2462 // consistency with the else part. Will be removed with any DCE pass.
2463 Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp);
2464 } else {
2465 unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace();
2466 IntegerType *IntCastTy =
2467 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
2468 Value *XBCast =
2469 Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
2470 LoadInst *OldVal =
2471 Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load");
2472 OldVal->setAtomic(AO);
2473 // CurBB
2474 // | /---\
2475 // ContBB |
2476 // | \---/
2477 // ExitBB
2478 BasicBlock *CurBB = Builder.GetInsertBlock();
2479 Instruction *CurBBTI = CurBB->getTerminator();
2480 CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable();
2481 BasicBlock *ExitBB =
2482 CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit");
2483 BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(),
2484 X->getName() + ".atomic.cont");
2485 ContBB->getTerminator()->eraseFromParent();
2486 Builder.SetInsertPoint(ContBB);
2487 llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2);
2488 PHI->addIncoming(OldVal, CurBB);
2489 AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy);
2490 NewAtomicAddr->setName(X->getName() + "x.new.val");
2491 NewAtomicAddr->moveBefore(AllocIP);
2492 IntegerType *NewAtomicCastTy =
2493 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
2494 bool IsIntTy = XElemTy->isIntegerTy();
2495 Value *NewAtomicIntAddr =
2496 (IsIntTy)
2497 ? NewAtomicAddr
2498 : Builder.CreateBitCast(NewAtomicAddr,
2499 NewAtomicCastTy->getPointerTo(Addrspace));
2500 Value *OldExprVal = PHI;
2501 if (!IsIntTy) {
2502 if (XElemTy->isFloatingPointTy()) {
2503 OldExprVal = Builder.CreateBitCast(PHI, XElemTy,
2504 X->getName() + ".atomic.fltCast");
2505 } else {
2506 OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy,
2507 X->getName() + ".atomic.ptrCast");
2508 }
2509 }
2510
2511 Value *Upd = UpdateOp(OldExprVal, Builder);
2512 Builder.CreateStore(Upd, NewAtomicAddr);
2513 LoadInst *DesiredVal = Builder.CreateLoad(XElemTy, NewAtomicIntAddr);
2514 Value *XAddr =
2515 (IsIntTy)
2516 ? X
2517 : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
2518 AtomicOrdering Failure =
2519 llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
2520 AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg(
2521 XAddr, OldExprVal, DesiredVal, llvm::MaybeAlign(), AO, Failure);
2522 Result->setVolatile(VolatileX);
2523 Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0);
2524 Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1);
2525 PHI->addIncoming(PreviousVal, Builder.GetInsertBlock());
2526 Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB);
2527
2528 Res.first = OldExprVal;
2529 Res.second = Upd;
2530
2531 // set Insertion point in exit block
2532 if (UnreachableInst *ExitTI =
2533 dyn_cast<UnreachableInst>(ExitBB->getTerminator())) {
2534 CurBBTI->eraseFromParent();
2535 Builder.SetInsertPoint(ExitBB);
2536 } else {
2537 Builder.SetInsertPoint(ExitTI);
2538 }
2539 }
2540
2541 return Res;
2542}
2543
2544OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture(
2545 const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
2546 AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
2547 AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
2548 bool UpdateExpr, bool IsPostfixUpdate, bool IsXLHSInRHSPart) {
2549 if (!updateToLocation(Loc))
2550 return Loc.IP;
2551
2552 LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2555, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2559, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2561, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2553 Type *XTy = X.Var->getType();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2555, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2559, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2561, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2554 assert(XTy->isPointerTy() &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2555, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2559, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2561, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2555 "OMP Atomic expects a pointer to target memory");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2555, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2559, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2561, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2556 Type *XElemTy = XTy->getPointerElementType();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2555, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2559, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2561, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2557 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2555, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2559, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2561, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2558 XElemTy->isPointerTy()) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2555, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2559, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2561, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2559 "OMP atomic capture expected a scalar type");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2555, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2559, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2561, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2560 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2555, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2559, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2561, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2561 "OpenMP atomic does not support LT or GT operations");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2555, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2559, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2561, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
2562 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2555, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2559, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2561, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
;
2563
2564 // If UpdateExpr is 'x' updated with some `expr` not based on 'x',
2565 // 'x' is simply atomically rewritten with 'expr'.
2566 AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg);
2567 std::pair<Value *, Value *> Result =
2568 emitAtomicUpdate(AllocIP, X.Var, Expr, AO, AtomicOp, UpdateOp,
2569 X.IsVolatile, IsXLHSInRHSPart);
2570
2571 Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second);
2572 Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile);
2573
2574 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture);
2575 return Builder.saveIP();
2576}
2577
2578GlobalVariable *
2579OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
2580 std::string VarName) {
2581 llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get(
2582 llvm::ArrayType::get(
2583 llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()),
2584 Names);
2585 auto *MapNamesArrayGlobal = new llvm::GlobalVariable(
2586 M, MapNamesArrayInit->getType(),
2587 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MapNamesArrayInit,
2588 VarName);
2589 return MapNamesArrayGlobal;
2590}
2591
2592// Create all simple and struct types exposed by the runtime and remember
2593// the llvm::PointerTypes of them for easy access later.
2594void OpenMPIRBuilder::initializeTypes(Module &M) {
2595 LLVMContext &Ctx = M.getContext();
2596 StructType *T;
2597#define OMP_TYPE(VarName, InitValue) VarName = InitValue;
2598#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
2599 VarName##Ty = ArrayType::get(ElemTy, ArraySize); \
2600 VarName##PtrTy = PointerType::getUnqual(VarName##Ty);
2601#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
2602 VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg); \
2603 VarName##Ptr = PointerType::getUnqual(VarName);
2604#define OMP_STRUCT_TYPE(VarName, StructName, ...) \
2605 T = StructType::getTypeByName(Ctx, StructName); \
2606 if (!T) \
2607 T = StructType::create(Ctx, {__VA_ARGS__}, StructName); \
2608 VarName = T; \
2609 VarName##Ptr = PointerType::getUnqual(T);
2610#include "llvm/Frontend/OpenMP/OMPKinds.def"
2611}
2612
2613void OpenMPIRBuilder::OutlineInfo::collectBlocks(
2614 SmallPtrSetImpl<BasicBlock *> &BlockSet,
2615 SmallVectorImpl<BasicBlock *> &BlockVector) {
2616 SmallVector<BasicBlock *, 32> Worklist;
2617 BlockSet.insert(EntryBB);
2618 BlockSet.insert(ExitBB);
2619
2620 Worklist.push_back(EntryBB);
2621 while (!Worklist.empty()) {
2622 BasicBlock *BB = Worklist.pop_back_val();
2623 BlockVector.push_back(BB);
2624 for (BasicBlock *SuccBB : successors(BB))
2625 if (BlockSet.insert(SuccBB).second)
2626 Worklist.push_back(SuccBB);
2627 }
2628}
2629
2630void CanonicalLoopInfo::collectControlBlocks(
2631 SmallVectorImpl<BasicBlock *> &BBs) {
2632 // We only count those BBs as control block for which we do not need to
2633 // reverse the CFG, i.e. not the loop body which can contain arbitrary control
2634 // flow. For consistency, this also means we do not add the Body block, which
2635 // is just the entry to the body code.
2636 BBs.reserve(BBs.size() + 6);
2637 BBs.append({Preheader, Header, Cond, Latch, Exit, After});
2638}
2639
2640void CanonicalLoopInfo::assertOK() const {
2641#ifndef NDEBUG
2642 if (!IsValid)
2643 return;
2644
2645 // Verify standard control-flow we use for OpenMP loops.
2646 assert(Preheader)(static_cast <bool> (Preheader) ? void (0) : __assert_fail
("Preheader", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2646, __extension__ __PRETTY_FUNCTION__))
;
2647 assert(isa<BranchInst>(Preheader->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Preheader->
getTerminator()) && "Preheader must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Preheader->getTerminator()) && \"Preheader must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2648, __extension__ __PRETTY_FUNCTION__))
2648 "Preheader must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Preheader->
getTerminator()) && "Preheader must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Preheader->getTerminator()) && \"Preheader must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2648, __extension__ __PRETTY_FUNCTION__))
;
2649 assert(Preheader->getSingleSuccessor() == Header &&(static_cast <bool> (Preheader->getSingleSuccessor()
== Header && "Preheader must jump to header") ? void
(0) : __assert_fail ("Preheader->getSingleSuccessor() == Header && \"Preheader must jump to header\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2650, __extension__ __PRETTY_FUNCTION__))
2650 "Preheader must jump to header")(static_cast <bool> (Preheader->getSingleSuccessor()
== Header && "Preheader must jump to header") ? void
(0) : __assert_fail ("Preheader->getSingleSuccessor() == Header && \"Preheader must jump to header\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2650, __extension__ __PRETTY_FUNCTION__))
;
2651
2652 assert(Header)(static_cast <bool> (Header) ? void (0) : __assert_fail
("Header", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2652, __extension__ __PRETTY_FUNCTION__))
;
2653 assert(isa<BranchInst>(Header->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Header->getTerminator
()) && "Header must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Header->getTerminator()) && \"Header must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2654, __extension__ __PRETTY_FUNCTION__))
2654 "Header must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Header->getTerminator
()) && "Header must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Header->getTerminator()) && \"Header must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2654, __extension__ __PRETTY_FUNCTION__))
;
2655 assert(Header->getSingleSuccessor() == Cond &&(static_cast <bool> (Header->getSingleSuccessor() ==
Cond && "Header must jump to exiting block") ? void (
0) : __assert_fail ("Header->getSingleSuccessor() == Cond && \"Header must jump to exiting block\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2656, __extension__ __PRETTY_FUNCTION__))
2656 "Header must jump to exiting block")(static_cast <bool> (Header->getSingleSuccessor() ==
Cond && "Header must jump to exiting block") ? void (
0) : __assert_fail ("Header->getSingleSuccessor() == Cond && \"Header must jump to exiting block\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2656, __extension__ __PRETTY_FUNCTION__))
;
2657
2658 assert(Cond)(static_cast <bool> (Cond) ? void (0) : __assert_fail (
"Cond", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2658, __extension__ __PRETTY_FUNCTION__))
;
2659 assert(Cond->getSinglePredecessor() == Header &&(static_cast <bool> (Cond->getSinglePredecessor() ==
Header && "Exiting block only reachable from header"
) ? void (0) : __assert_fail ("Cond->getSinglePredecessor() == Header && \"Exiting block only reachable from header\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2660, __extension__ __PRETTY_FUNCTION__))
2660 "Exiting block only reachable from header")(static_cast <bool> (Cond->getSinglePredecessor() ==
Header && "Exiting block only reachable from header"
) ? void (0) : __assert_fail ("Cond->getSinglePredecessor() == Header && \"Exiting block only reachable from header\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2660, __extension__ __PRETTY_FUNCTION__))
;
2661
2662 assert(isa<BranchInst>(Cond->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Cond->getTerminator
()) && "Exiting block must terminate with conditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Cond->getTerminator()) && \"Exiting block must terminate with conditional branch\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2663, __extension__ __PRETTY_FUNCTION__))
2663 "Exiting block must terminate with conditional branch")(static_cast <bool> (isa<BranchInst>(Cond->getTerminator
()) && "Exiting block must terminate with conditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Cond->getTerminator()) && \"Exiting block must terminate with conditional branch\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2663, __extension__ __PRETTY_FUNCTION__))
;
2664 assert(size(successors(Cond)) == 2 &&(static_cast <bool> (size(successors(Cond)) == 2 &&
"Exiting block must have two successors") ? void (0) : __assert_fail
("size(successors(Cond)) == 2 && \"Exiting block must have two successors\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2665, __extension__ __PRETTY_FUNCTION__))
2665 "Exiting block must have two successors")(static_cast <bool> (size(successors(Cond)) == 2 &&
"Exiting block must have two successors") ? void (0) : __assert_fail
("size(successors(Cond)) == 2 && \"Exiting block must have two successors\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2665, __extension__ __PRETTY_FUNCTION__))
;
2666 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body &&(static_cast <bool> (cast<BranchInst>(Cond->getTerminator
())->getSuccessor(0) == Body && "Exiting block's first successor jump to the body"
) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body && \"Exiting block's first successor jump to the body\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2667, __extension__ __PRETTY_FUNCTION__))
2667 "Exiting block's first successor jump to the body")(static_cast <bool> (cast<BranchInst>(Cond->getTerminator
())->getSuccessor(0) == Body && "Exiting block's first successor jump to the body"
) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body && \"Exiting block's first successor jump to the body\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2667, __extension__ __PRETTY_FUNCTION__))
;
2668 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit &&(static_cast <bool> (cast<BranchInst>(Cond->getTerminator
())->getSuccessor(1) == Exit && "Exiting block's second successor must exit the loop"
) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit && \"Exiting block's second successor must exit the loop\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2669, __extension__ __PRETTY_FUNCTION__))
2669 "Exiting block's second successor must exit the loop")(static_cast <bool> (cast<BranchInst>(Cond->getTerminator
())->getSuccessor(1) == Exit && "Exiting block's second successor must exit the loop"
) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit && \"Exiting block's second successor must exit the loop\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2669, __extension__ __PRETTY_FUNCTION__))
;
2670
2671 assert(Body)(static_cast <bool> (Body) ? void (0) : __assert_fail (
"Body", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2671, __extension__ __PRETTY_FUNCTION__))
;
2672 assert(Body->getSinglePredecessor() == Cond &&(static_cast <bool> (Body->getSinglePredecessor() ==
Cond && "Body only reachable from exiting block") ? void
(0) : __assert_fail ("Body->getSinglePredecessor() == Cond && \"Body only reachable from exiting block\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2673, __extension__ __PRETTY_FUNCTION__))
2673 "Body only reachable from exiting block")(static_cast <bool> (Body->getSinglePredecessor() ==
Cond && "Body only reachable from exiting block") ? void
(0) : __assert_fail ("Body->getSinglePredecessor() == Cond && \"Body only reachable from exiting block\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2673, __extension__ __PRETTY_FUNCTION__))
;
2674 assert(!isa<PHINode>(Body->front()))(static_cast <bool> (!isa<PHINode>(Body->front
())) ? void (0) : __assert_fail ("!isa<PHINode>(Body->front())"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2674, __extension__ __PRETTY_FUNCTION__))
;
2675
2676 assert(Latch)(static_cast <bool> (Latch) ? void (0) : __assert_fail (
"Latch", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2676, __extension__ __PRETTY_FUNCTION__))
;
2677 assert(isa<BranchInst>(Latch->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Latch->getTerminator
()) && "Latch must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Latch->getTerminator()) && \"Latch must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2678, __extension__ __PRETTY_FUNCTION__))
2678 "Latch must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Latch->getTerminator
()) && "Latch must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Latch->getTerminator()) && \"Latch must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2678, __extension__ __PRETTY_FUNCTION__))
;
2679 assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header")(static_cast <bool> (Latch->getSingleSuccessor() == Header
&& "Latch must jump to header") ? void (0) : __assert_fail
("Latch->getSingleSuccessor() == Header && \"Latch must jump to header\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2679, __extension__ __PRETTY_FUNCTION__))
;
2680 // TODO: To support simple redirecting of the end of the body code that has
2681 // multiple; introduce another auxiliary basic block like preheader and after.
2682 assert(Latch->getSinglePredecessor() != nullptr)(static_cast <bool> (Latch->getSinglePredecessor() !=
nullptr) ? void (0) : __assert_fail ("Latch->getSinglePredecessor() != nullptr"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2682, __extension__ __PRETTY_FUNCTION__))
;
2683 assert(!isa<PHINode>(Latch->front()))(static_cast <bool> (!isa<PHINode>(Latch->front
())) ? void (0) : __assert_fail ("!isa<PHINode>(Latch->front())"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2683, __extension__ __PRETTY_FUNCTION__))
;
2684
2685 assert(Exit)(static_cast <bool> (Exit) ? void (0) : __assert_fail (
"Exit", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2685, __extension__ __PRETTY_FUNCTION__))
;
2686 assert(isa<BranchInst>(Exit->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Exit->getTerminator
()) && "Exit block must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Exit->getTerminator()) && \"Exit block must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2687, __extension__ __PRETTY_FUNCTION__))
2687 "Exit block must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Exit->getTerminator
()) && "Exit block must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Exit->getTerminator()) && \"Exit block must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2687, __extension__ __PRETTY_FUNCTION__))
;
2688 assert(Exit->getSingleSuccessor() == After &&(static_cast <bool> (Exit->getSingleSuccessor() == After
&& "Exit block must jump to after block") ? void (0)
: __assert_fail ("Exit->getSingleSuccessor() == After && \"Exit block must jump to after block\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2689, __extension__ __PRETTY_FUNCTION__))
2689 "Exit block must jump to after block")(static_cast <bool> (Exit->getSingleSuccessor() == After
&& "Exit block must jump to after block") ? void (0)
: __assert_fail ("Exit->getSingleSuccessor() == After && \"Exit block must jump to after block\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2689, __extension__ __PRETTY_FUNCTION__))
;
2690
2691 assert(After)(static_cast <bool> (After) ? void (0) : __assert_fail (
"After", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2691, __extension__ __PRETTY_FUNCTION__))
;
2692 assert(After->getSinglePredecessor() == Exit &&(static_cast <bool> (After->getSinglePredecessor() ==
Exit && "After block only reachable from exit block"
) ? void (0) : __assert_fail ("After->getSinglePredecessor() == Exit && \"After block only reachable from exit block\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2693, __extension__ __PRETTY_FUNCTION__))
2693 "After block only reachable from exit block")(static_cast <bool> (After->getSinglePredecessor() ==
Exit && "After block only reachable from exit block"
) ? void (0) : __assert_fail ("After->getSinglePredecessor() == Exit && \"After block only reachable from exit block\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2693, __extension__ __PRETTY_FUNCTION__))
;
2694 assert(After->empty() || !isa<PHINode>(After->front()))(static_cast <bool> (After->empty() || !isa<PHINode
>(After->front())) ? void (0) : __assert_fail ("After->empty() || !isa<PHINode>(After->front())"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2694, __extension__ __PRETTY_FUNCTION__))
;
2695
2696 Instruction *IndVar = getIndVar();
2697 assert(IndVar && "Canonical induction variable not found?")(static_cast <bool> (IndVar && "Canonical induction variable not found?"
) ? void (0) : __assert_fail ("IndVar && \"Canonical induction variable not found?\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2697, __extension__ __PRETTY_FUNCTION__))
;
2698 assert(isa<IntegerType>(IndVar->getType()) &&(static_cast <bool> (isa<IntegerType>(IndVar->
getType()) && "Induction variable must be an integer"
) ? void (0) : __assert_fail ("isa<IntegerType>(IndVar->getType()) && \"Induction variable must be an integer\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2699, __extension__ __PRETTY_FUNCTION__))
2699 "Induction variable must be an integer")(static_cast <bool> (isa<IntegerType>(IndVar->
getType()) && "Induction variable must be an integer"
) ? void (0) : __assert_fail ("isa<IntegerType>(IndVar->getType()) && \"Induction variable must be an integer\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2699, __extension__ __PRETTY_FUNCTION__))
;
2700 assert(cast<PHINode>(IndVar)->getParent() == Header &&(static_cast <bool> (cast<PHINode>(IndVar)->getParent
() == Header && "Induction variable must be a PHI in the loop header"
) ? void (0) : __assert_fail ("cast<PHINode>(IndVar)->getParent() == Header && \"Induction variable must be a PHI in the loop header\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2701, __extension__ __PRETTY_FUNCTION__))
2701 "Induction variable must be a PHI in the loop header")(static_cast <bool> (cast<PHINode>(IndVar)->getParent
() == Header && "Induction variable must be a PHI in the loop header"
) ? void (0) : __assert_fail ("cast<PHINode>(IndVar)->getParent() == Header && \"Induction variable must be a PHI in the loop header\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2701, __extension__ __PRETTY_FUNCTION__))
;
2702 assert(cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader)(static_cast <bool> (cast<PHINode>(IndVar)->getIncomingBlock
(0) == Preheader) ? void (0) : __assert_fail ("cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2702, __extension__ __PRETTY_FUNCTION__))
;
2703 assert((static_cast <bool> (cast<ConstantInt>(cast<PHINode
>(IndVar)->getIncomingValue(0))->isZero()) ? void (0
) : __assert_fail ("cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero()"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2704, __extension__ __PRETTY_FUNCTION__))
2704 cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero())(static_cast <bool> (cast<ConstantInt>(cast<PHINode
>(IndVar)->getIncomingValue(0))->isZero()) ? void (0
) : __assert_fail ("cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero()"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2704, __extension__ __PRETTY_FUNCTION__))
;
2705 assert(cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch)(static_cast <bool> (cast<PHINode>(IndVar)->getIncomingBlock
(1) == Latch) ? void (0) : __assert_fail ("cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2705, __extension__ __PRETTY_FUNCTION__))
;
2706
2707 auto *NextIndVar = cast<PHINode>(IndVar)->getIncomingValue(1);
2708 assert(cast<Instruction>(NextIndVar)->getParent() == Latch)(static_cast <bool> (cast<Instruction>(NextIndVar
)->getParent() == Latch) ? void (0) : __assert_fail ("cast<Instruction>(NextIndVar)->getParent() == Latch"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2708, __extension__ __PRETTY_FUNCTION__))
;
2709 assert(cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add)(static_cast <bool> (cast<BinaryOperator>(NextIndVar
)->getOpcode() == BinaryOperator::Add) ? void (0) : __assert_fail
("cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2709, __extension__ __PRETTY_FUNCTION__))
;
2710 assert(cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar)(static_cast <bool> (cast<BinaryOperator>(NextIndVar
)->getOperand(0) == IndVar) ? void (0) : __assert_fail ("cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2710, __extension__ __PRETTY_FUNCTION__))
;
2711 assert(cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1))(static_cast <bool> (cast<ConstantInt>(cast<BinaryOperator
>(NextIndVar)->getOperand(1)) ->isOne()) ? void (0) :
__assert_fail ("cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1)) ->isOne()"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2712, __extension__ __PRETTY_FUNCTION__))
2712 ->isOne())(static_cast <bool> (cast<ConstantInt>(cast<BinaryOperator
>(NextIndVar)->getOperand(1)) ->isOne()) ? void (0) :
__assert_fail ("cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1)) ->isOne()"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2712, __extension__ __PRETTY_FUNCTION__))
;
2713
2714 Value *TripCount = getTripCount();
2715 assert(TripCount && "Loop trip count not found?")(static_cast <bool> (TripCount && "Loop trip count not found?"
) ? void (0) : __assert_fail ("TripCount && \"Loop trip count not found?\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2715, __extension__ __PRETTY_FUNCTION__))
;
2716 assert(IndVar->getType() == TripCount->getType() &&(static_cast <bool> (IndVar->getType() == TripCount->
getType() && "Trip count and induction variable must have the same type"
) ? void (0) : __assert_fail ("IndVar->getType() == TripCount->getType() && \"Trip count and induction variable must have the same type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2717, __extension__ __PRETTY_FUNCTION__))
2717 "Trip count and induction variable must have the same type")(static_cast <bool> (IndVar->getType() == TripCount->
getType() && "Trip count and induction variable must have the same type"
) ? void (0) : __assert_fail ("IndVar->getType() == TripCount->getType() && \"Trip count and induction variable must have the same type\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2717, __extension__ __PRETTY_FUNCTION__))
;
2718
2719 auto *CmpI = cast<CmpInst>(&Cond->front());
2720 assert(CmpI->getPredicate() == CmpInst::ICMP_ULT &&(static_cast <bool> (CmpI->getPredicate() == CmpInst
::ICMP_ULT && "Exit condition must be a signed less-than comparison"
) ? void (0) : __assert_fail ("CmpI->getPredicate() == CmpInst::ICMP_ULT && \"Exit condition must be a signed less-than comparison\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2721, __extension__ __PRETTY_FUNCTION__))
2721 "Exit condition must be a signed less-than comparison")(static_cast <bool> (CmpI->getPredicate() == CmpInst
::ICMP_ULT && "Exit condition must be a signed less-than comparison"
) ? void (0) : __assert_fail ("CmpI->getPredicate() == CmpInst::ICMP_ULT && \"Exit condition must be a signed less-than comparison\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2721, __extension__ __PRETTY_FUNCTION__))
;
2722 assert(CmpI->getOperand(0) == IndVar &&(static_cast <bool> (CmpI->getOperand(0) == IndVar &&
"Exit condition must compare the induction variable") ? void
(0) : __assert_fail ("CmpI->getOperand(0) == IndVar && \"Exit condition must compare the induction variable\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2723, __extension__ __PRETTY_FUNCTION__))
2723 "Exit condition must compare the induction variable")(static_cast <bool> (CmpI->getOperand(0) == IndVar &&
"Exit condition must compare the induction variable") ? void
(0) : __assert_fail ("CmpI->getOperand(0) == IndVar && \"Exit condition must compare the induction variable\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2723, __extension__ __PRETTY_FUNCTION__))
;
2724 assert(CmpI->getOperand(1) == TripCount &&(static_cast <bool> (CmpI->getOperand(1) == TripCount
&& "Exit condition must compare with the trip count"
) ? void (0) : __assert_fail ("CmpI->getOperand(1) == TripCount && \"Exit condition must compare with the trip count\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2725, __extension__ __PRETTY_FUNCTION__))
2725 "Exit condition must compare with the trip count")(static_cast <bool> (CmpI->getOperand(1) == TripCount
&& "Exit condition must compare with the trip count"
) ? void (0) : __assert_fail ("CmpI->getOperand(1) == TripCount && \"Exit condition must compare with the trip count\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 2725, __extension__ __PRETTY_FUNCTION__))
;
2726#endif
2727}

/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/ConstantFolder.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugInfoMetadata.h"
29#include "llvm/IR/DebugLoc.h"
30#include "llvm/IR/DerivedTypes.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/GlobalVariable.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/IntrinsicInst.h"
37#include "llvm/IR/LLVMContext.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/IR/Type.h"
41#include "llvm/IR/Value.h"
42#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/CBindingWrapping.h"
45#include "llvm/Support/Casting.h"
46#include <cassert>
47#include <cstddef>
48#include <cstdint>
49#include <functional>
50#include <utility>
51
52namespace llvm {
53
54class APInt;
55class MDNode;
56class Use;
57
58/// This provides the default implementation of the IRBuilder
59/// 'InsertHelper' method that is called whenever an instruction is created by
60/// IRBuilder and needs to be inserted.
61///
62/// By default, this inserts the instruction at the insertion point.
63class IRBuilderDefaultInserter {
64public:
65 virtual ~IRBuilderDefaultInserter();
66
67 virtual void InsertHelper(Instruction *I, const Twine &Name,
68 BasicBlock *BB,
69 BasicBlock::iterator InsertPt) const {
70 if (BB) BB->getInstList().insert(InsertPt, I);
71 I->setName(Name);
72 }
73};
74
75/// Provides an 'InsertHelper' that calls a user-provided callback after
76/// performing the default insertion.
77class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
78 std::function<void(Instruction *)> Callback;
79
80public:
81 virtual ~IRBuilderCallbackInserter();
82
83 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
84 : Callback(std::move(Callback)) {}
85
86 void InsertHelper(Instruction *I, const Twine &Name,
87 BasicBlock *BB,
88 BasicBlock::iterator InsertPt) const override {
89 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
90 Callback(I);
91 }
92};
93
94/// Common base class shared among various IRBuilders.
95class IRBuilderBase {
96 /// Pairs of (metadata kind, MDNode *) that should be added to all newly
97 /// created instructions, like !dbg metadata.
98 SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy;
99
100 /// Add or update the an entry (Kind, MD) to MetadataToCopy, if \p MD is not
101 /// null. If \p MD is null, remove the entry with \p Kind.
102 void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) {
103 if (!MD) {
104 erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) {
105 return KV.first == Kind;
106 });
107 return;
108 }
109
110 for (auto &KV : MetadataToCopy)
111 if (KV.first == Kind) {
112 KV.second = MD;
113 return;
114 }
115
116 MetadataToCopy.emplace_back(Kind, MD);
117 }
118
119protected:
120 BasicBlock *BB;
121 BasicBlock::iterator InsertPt;
122 LLVMContext &Context;
123 const IRBuilderFolder &Folder;
124 const IRBuilderDefaultInserter &Inserter;
125
126 MDNode *DefaultFPMathTag;
127 FastMathFlags FMF;
128
129 bool IsFPConstrained;
130 fp::ExceptionBehavior DefaultConstrainedExcept;
131 RoundingMode DefaultConstrainedRounding;
132
133 ArrayRef<OperandBundleDef> DefaultOperandBundles;
134
135public:
136 IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
137 const IRBuilderDefaultInserter &Inserter,
138 MDNode *FPMathTag, ArrayRef<OperandBundleDef> OpBundles)
139 : Context(context), Folder(Folder), Inserter(Inserter),
140 DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
141 DefaultConstrainedExcept(fp::ebStrict),
142 DefaultConstrainedRounding(RoundingMode::Dynamic),
143 DefaultOperandBundles(OpBundles) {
144 ClearInsertionPoint();
145 }
146
147 /// Insert and return the specified instruction.
148 template<typename InstTy>
149 InstTy *Insert(InstTy *I, const Twine &Name = "") const {
150 Inserter.InsertHelper(I, Name, BB, InsertPt);
151 AddMetadataToInst(I);
152 return I;
153 }
154
155 /// No-op overload to handle constants.
156 Constant *Insert(Constant *C, const Twine& = "") const {
157 return C;
158 }
159
160 Value *Insert(Value *V, const Twine &Name = "") const {
161 if (Instruction *I = dyn_cast<Instruction>(V))
162 return Insert(I, Name);
163 assert(isa<Constant>(V))(static_cast <bool> (isa<Constant>(V)) ? void (0)
: __assert_fail ("isa<Constant>(V)", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 163, __extension__ __PRETTY_FUNCTION__))
;
164 return V;
165 }
166
167 //===--------------------------------------------------------------------===//
168 // Builder configuration methods
169 //===--------------------------------------------------------------------===//
170
171 /// Clear the insertion point: created instructions will not be
172 /// inserted into a block.
173 void ClearInsertionPoint() {
174 BB = nullptr;
33
Null pointer value stored to field 'BB'
175 InsertPt = BasicBlock::iterator();
176 }
177
178 BasicBlock *GetInsertBlock() const { return BB; }
179 BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
180 LLVMContext &getContext() const { return Context; }
181
182 /// This specifies that created instructions should be appended to the
183 /// end of the specified block.
184 void SetInsertPoint(BasicBlock *TheBB) {
185 BB = TheBB;
186 InsertPt = BB->end();
187 }
188
189 /// This specifies that created instructions should be inserted before
190 /// the specified instruction.
191 void SetInsertPoint(Instruction *I) {
192 BB = I->getParent();
193 InsertPt = I->getIterator();
194 assert(InsertPt != BB->end() && "Can't read debug loc from end()")(static_cast <bool> (InsertPt != BB->end() &&
"Can't read debug loc from end()") ? void (0) : __assert_fail
("InsertPt != BB->end() && \"Can't read debug loc from end()\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 194, __extension__ __PRETTY_FUNCTION__))
;
195 SetCurrentDebugLocation(I->getDebugLoc());
196 }
197
198 /// This specifies that created instructions should be inserted at the
199 /// specified point.
200 void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
201 BB = TheBB;
202 InsertPt = IP;
203 if (IP != TheBB->end())
204 SetCurrentDebugLocation(IP->getDebugLoc());
205 }
206
207 /// Set location information used by debugging information.
208 void SetCurrentDebugLocation(DebugLoc L) {
209 AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
210 }
211
212 /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
213 /// added to all created instructions. Entries present in MedataDataToCopy but
214 /// not on \p Src will be dropped from MetadataToCopy.
215 void CollectMetadataToCopy(Instruction *Src,
216 ArrayRef<unsigned> MetadataKinds) {
217 for (unsigned K : MetadataKinds)
218 AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
219 }
220
221 /// Get location information used by debugging information.
222 DebugLoc getCurrentDebugLocation() const {
223 for (auto &KV : MetadataToCopy)
224 if (KV.first == LLVMContext::MD_dbg)
225 return {cast<DILocation>(KV.second)};
226
227 return {};
228 }
229
230 /// If this builder has a current debug location, set it on the
231 /// specified instruction.
232 void SetInstDebugLocation(Instruction *I) const {
233 for (const auto &KV : MetadataToCopy)
234 if (KV.first == LLVMContext::MD_dbg) {
235 I->setDebugLoc(DebugLoc(KV.second));
236 return;
237 }
238 }
239
240 /// Add all entries in MetadataToCopy to \p I.
241 void AddMetadataToInst(Instruction *I) const {
242 for (auto &KV : MetadataToCopy)
243 I->setMetadata(KV.first, KV.second);
244 }
245
246 /// Get the return type of the current function that we're emitting
247 /// into.
248 Type *getCurrentFunctionReturnType() const;
249
250 /// InsertPoint - A saved insertion point.
251 class InsertPoint {
252 BasicBlock *Block = nullptr;
253 BasicBlock::iterator Point;
254
255 public:
256 /// Creates a new insertion point which doesn't point to anything.
257 InsertPoint() = default;
258
259 /// Creates a new insertion point at the given location.
260 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
261 : Block(InsertBlock), Point(InsertPoint) {}
262
263 /// Returns true if this insert point is set.
264 bool isSet() const { return (Block != nullptr); }
265
266 BasicBlock *getBlock() const { return Block; }
267 BasicBlock::iterator getPoint() const { return Point; }
268 };
269
270 /// Returns the current insert point.
271 InsertPoint saveIP() const {
272 return InsertPoint(GetInsertBlock(), GetInsertPoint());
273 }
274
275 /// Returns the current insert point, clearing it in the process.
276 InsertPoint saveAndClearIP() {
277 InsertPoint IP(GetInsertBlock(), GetInsertPoint());
278 ClearInsertionPoint();
279 return IP;
280 }
281
282 /// Sets the current insert point to a previously-saved location.
283 void restoreIP(InsertPoint IP) {
284 if (IP.isSet())
31
Taking false branch
285 SetInsertPoint(IP.getBlock(), IP.getPoint());
286 else
287 ClearInsertionPoint();
32
Calling 'IRBuilderBase::ClearInsertionPoint'
34
Returning from 'IRBuilderBase::ClearInsertionPoint'
288 }
289
290 /// Get the floating point math metadata being used.
291 MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
292
293 /// Get the flags to be applied to created floating point ops
294 FastMathFlags getFastMathFlags() const { return FMF; }
295
296 FastMathFlags &getFastMathFlags() { return FMF; }
297
298 /// Clear the fast-math flags.
299 void clearFastMathFlags() { FMF.clear(); }
300
301 /// Set the floating point math metadata to be used.
302 void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
303
304 /// Set the fast-math flags to be used with generated fp-math operators
305 void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
306
307 /// Enable/Disable use of constrained floating point math. When
308 /// enabled the CreateF<op>() calls instead create constrained
309 /// floating point intrinsic calls. Fast math flags are unaffected
310 /// by this setting.
311 void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }
312
313 /// Query for the use of constrained floating point math
314 bool getIsFPConstrained() { return IsFPConstrained; }
315
316 /// Set the exception handling to be used with constrained floating point
317 void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
318#ifndef NDEBUG
319 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept);
320 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!")(static_cast <bool> (ExceptStr.hasValue() && "Garbage strict exception behavior!"
) ? void (0) : __assert_fail ("ExceptStr.hasValue() && \"Garbage strict exception behavior!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 320, __extension__ __PRETTY_FUNCTION__))
;
321#endif
322 DefaultConstrainedExcept = NewExcept;
323 }
324
325 /// Set the rounding mode handling to be used with constrained floating point
326 void setDefaultConstrainedRounding(RoundingMode NewRounding) {
327#ifndef NDEBUG
328 Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding);
329 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!")(static_cast <bool> (RoundingStr.hasValue() && "Garbage strict rounding mode!"
) ? void (0) : __assert_fail ("RoundingStr.hasValue() && \"Garbage strict rounding mode!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 329, __extension__ __PRETTY_FUNCTION__))
;
330#endif
331 DefaultConstrainedRounding = NewRounding;
332 }
333
334 /// Get the exception handling used with constrained floating point
335 fp::ExceptionBehavior getDefaultConstrainedExcept() {
336 return DefaultConstrainedExcept;
337 }
338
339 /// Get the rounding mode handling used with constrained floating point
340 RoundingMode getDefaultConstrainedRounding() {
341 return DefaultConstrainedRounding;
342 }
343
344 void setConstrainedFPFunctionAttr() {
345 assert(BB && "Must have a basic block to set any function attributes!")(static_cast <bool> (BB && "Must have a basic block to set any function attributes!"
) ? void (0) : __assert_fail ("BB && \"Must have a basic block to set any function attributes!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 345, __extension__ __PRETTY_FUNCTION__))
;
346
347 Function *F = BB->getParent();
348 if (!F->hasFnAttribute(Attribute::StrictFP)) {
349 F->addFnAttr(Attribute::StrictFP);
350 }
351 }
352
353 void setConstrainedFPCallAttr(CallBase *I) {
354 I->addAttribute(AttributeList::FunctionIndex, Attribute::StrictFP);
355 }
356
357 void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
358 DefaultOperandBundles = OpBundles;
359 }
360
361 //===--------------------------------------------------------------------===//
362 // RAII helpers.
363 //===--------------------------------------------------------------------===//
364
365 // RAII object that stores the current insertion point and restores it
366 // when the object is destroyed. This includes the debug location.
367 class InsertPointGuard {
368 IRBuilderBase &Builder;
369 AssertingVH<BasicBlock> Block;
370 BasicBlock::iterator Point;
371 DebugLoc DbgLoc;
372
373 public:
374 InsertPointGuard(IRBuilderBase &B)
375 : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
376 DbgLoc(B.getCurrentDebugLocation()) {}
377
378 InsertPointGuard(const InsertPointGuard &) = delete;
379 InsertPointGuard &operator=(const InsertPointGuard &) = delete;
380
381 ~InsertPointGuard() {
382 Builder.restoreIP(InsertPoint(Block, Point));
383 Builder.SetCurrentDebugLocation(DbgLoc);
384 }
385 };
386
387 // RAII object that stores the current fast math settings and restores
388 // them when the object is destroyed.
389 class FastMathFlagGuard {
390 IRBuilderBase &Builder;
391 FastMathFlags FMF;
392 MDNode *FPMathTag;
393 bool IsFPConstrained;
394 fp::ExceptionBehavior DefaultConstrainedExcept;
395 RoundingMode DefaultConstrainedRounding;
396
397 public:
398 FastMathFlagGuard(IRBuilderBase &B)
399 : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
400 IsFPConstrained(B.IsFPConstrained),
401 DefaultConstrainedExcept(B.DefaultConstrainedExcept),
402 DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}
403
404 FastMathFlagGuard(const FastMathFlagGuard &) = delete;
405 FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
406
407 ~FastMathFlagGuard() {
408 Builder.FMF = FMF;
409 Builder.DefaultFPMathTag = FPMathTag;
410 Builder.IsFPConstrained = IsFPConstrained;
411 Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
412 Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
413 }
414 };
415
416 // RAII object that stores the current default operand bundles and restores
417 // them when the object is destroyed.
418 class OperandBundlesGuard {
419 IRBuilderBase &Builder;
420 ArrayRef<OperandBundleDef> DefaultOperandBundles;
421
422 public:
423 OperandBundlesGuard(IRBuilderBase &B)
424 : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}
425
426 OperandBundlesGuard(const OperandBundlesGuard &) = delete;
427 OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;
428
429 ~OperandBundlesGuard() {
430 Builder.DefaultOperandBundles = DefaultOperandBundles;
431 }
432 };
433
434
435 //===--------------------------------------------------------------------===//
436 // Miscellaneous creation methods.
437 //===--------------------------------------------------------------------===//
438
439 /// Make a new global variable with initializer type i8*
440 ///
441 /// Make a new global variable with an initializer that has array of i8 type
442 /// filled in with the null terminated string value specified. The new global
443 /// variable will be marked mergable with any others of the same contents. If
444 /// Name is specified, it is the name of the global variable created.
445 ///
446 /// If no module is given via \p M, it is take from the insertion point basic
447 /// block.
448 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
449 unsigned AddressSpace = 0,
450 Module *M = nullptr);
451
452 /// Get a constant value representing either true or false.
453 ConstantInt *getInt1(bool V) {
454 return ConstantInt::get(getInt1Ty(), V);
455 }
456
457 /// Get the constant value for i1 true.
458 ConstantInt *getTrue() {
459 return ConstantInt::getTrue(Context);
460 }
461
462 /// Get the constant value for i1 false.
463 ConstantInt *getFalse() {
464 return ConstantInt::getFalse(Context);
465 }
466
467 /// Get a constant 8-bit value.
468 ConstantInt *getInt8(uint8_t C) {
469 return ConstantInt::get(getInt8Ty(), C);
470 }
471
472 /// Get a constant 16-bit value.
473 ConstantInt *getInt16(uint16_t C) {
474 return ConstantInt::get(getInt16Ty(), C);
475 }
476
477 /// Get a constant 32-bit value.
478 ConstantInt *getInt32(uint32_t C) {
479 return ConstantInt::get(getInt32Ty(), C);
480 }
481
482 /// Get a constant 64-bit value.
483 ConstantInt *getInt64(uint64_t C) {
484 return ConstantInt::get(getInt64Ty(), C);
485 }
486
487 /// Get a constant N-bit value, zero extended or truncated from
488 /// a 64-bit value.
489 ConstantInt *getIntN(unsigned N, uint64_t C) {
490 return ConstantInt::get(getIntNTy(N), C);
491 }
492
493 /// Get a constant integer value.
494 ConstantInt *getInt(const APInt &AI) {
495 return ConstantInt::get(Context, AI);
496 }
497
498 //===--------------------------------------------------------------------===//
499 // Type creation methods
500 //===--------------------------------------------------------------------===//
501
502 /// Fetch the type representing a single bit
503 IntegerType *getInt1Ty() {
504 return Type::getInt1Ty(Context);
505 }
506
507 /// Fetch the type representing an 8-bit integer.
508 IntegerType *getInt8Ty() {
509 return Type::getInt8Ty(Context);
510 }
511
512 /// Fetch the type representing a 16-bit integer.
513 IntegerType *getInt16Ty() {
514 return Type::getInt16Ty(Context);
515 }
516
517 /// Fetch the type representing a 32-bit integer.
518 IntegerType *getInt32Ty() {
519 return Type::getInt32Ty(Context);
520 }
521
522 /// Fetch the type representing a 64-bit integer.
523 IntegerType *getInt64Ty() {
524 return Type::getInt64Ty(Context);
525 }
526
527 /// Fetch the type representing a 128-bit integer.
528 IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
529
530 /// Fetch the type representing an N-bit integer.
531 IntegerType *getIntNTy(unsigned N) {
532 return Type::getIntNTy(Context, N);
533 }
534
535 /// Fetch the type representing a 16-bit floating point value.
536 Type *getHalfTy() {
537 return Type::getHalfTy(Context);
538 }
539
540 /// Fetch the type representing a 16-bit brain floating point value.
541 Type *getBFloatTy() {
542 return Type::getBFloatTy(Context);
543 }
544
545 /// Fetch the type representing a 32-bit floating point value.
546 Type *getFloatTy() {
547 return Type::getFloatTy(Context);
548 }
549
550 /// Fetch the type representing a 64-bit floating point value.
551 Type *getDoubleTy() {
552 return Type::getDoubleTy(Context);
553 }
554
555 /// Fetch the type representing void.
556 Type *getVoidTy() {
557 return Type::getVoidTy(Context);
558 }
559
560 /// Fetch the type representing a pointer to an 8-bit integer value.
561 PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
562 return Type::getInt8PtrTy(Context, AddrSpace);
563 }
564
565 /// Fetch the type representing a pointer to an integer value.
566 IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
567 return DL.getIntPtrType(Context, AddrSpace);
568 }
569
570 //===--------------------------------------------------------------------===//
571 // Intrinsic creation methods
572 //===--------------------------------------------------------------------===//
573
574 /// Create and insert a memset to the specified pointer and the
575 /// specified value.
576 ///
577 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
578 /// specified, it will be added to the instruction. Likewise with alias.scope
579 /// and noalias tags.
580 CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
581 MaybeAlign Align, bool isVolatile = false,
582 MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
583 MDNode *NoAliasTag = nullptr) {
584 return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
585 TBAATag, ScopeTag, NoAliasTag);
586 }
587
588 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
589 bool isVolatile = false, MDNode *TBAATag = nullptr,
590 MDNode *ScopeTag = nullptr,
591 MDNode *NoAliasTag = nullptr);
592
593 /// Create and insert an element unordered-atomic memset of the region of
594 /// memory starting at the given pointer to the given value.
595 ///
596 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
597 /// specified, it will be added to the instruction. Likewise with alias.scope
598 /// and noalias tags.
599 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
600 uint64_t Size, Align Alignment,
601 uint32_t ElementSize,
602 MDNode *TBAATag = nullptr,
603 MDNode *ScopeTag = nullptr,
604 MDNode *NoAliasTag = nullptr) {
605 return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
606 Align(Alignment), ElementSize,
607 TBAATag, ScopeTag, NoAliasTag);
608 }
609
610 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
611 Value *Size, Align Alignment,
612 uint32_t ElementSize,
613 MDNode *TBAATag = nullptr,
614 MDNode *ScopeTag = nullptr,
615 MDNode *NoAliasTag = nullptr);
616
617 /// Create and insert a memcpy between the specified pointers.
618 ///
619 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
620 /// specified, it will be added to the instruction. Likewise with alias.scope
621 /// and noalias tags.
622 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
623 MaybeAlign SrcAlign, uint64_t Size,
624 bool isVolatile = false, MDNode *TBAATag = nullptr,
625 MDNode *TBAAStructTag = nullptr,
626 MDNode *ScopeTag = nullptr,
627 MDNode *NoAliasTag = nullptr) {
628 return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
629 isVolatile, TBAATag, TBAAStructTag, ScopeTag,
630 NoAliasTag);
631 }
632
633 CallInst *CreateMemTransferInst(
634 Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
635 MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
636 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
637 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
638
639 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
640 MaybeAlign SrcAlign, Value *Size,
641 bool isVolatile = false, MDNode *TBAATag = nullptr,
642 MDNode *TBAAStructTag = nullptr,
643 MDNode *ScopeTag = nullptr,
644 MDNode *NoAliasTag = nullptr) {
645 return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
646 SrcAlign, Size, isVolatile, TBAATag,
647 TBAAStructTag, ScopeTag, NoAliasTag);
648 }
649
650 CallInst *CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
651 MaybeAlign SrcAlign, Value *Size);
652
653 /// Create and insert an element unordered-atomic memcpy between the
654 /// specified pointers.
655 ///
656 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
657 ///
658 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
659 /// specified, it will be added to the instruction. Likewise with alias.scope
660 /// and noalias tags.
661 CallInst *CreateElementUnorderedAtomicMemCpy(
662 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
663 uint32_t ElementSize, MDNode *TBAATag = nullptr,
664 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
665 MDNode *NoAliasTag = nullptr);
666
667 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
668 MaybeAlign SrcAlign, uint64_t Size,
669 bool isVolatile = false, MDNode *TBAATag = nullptr,
670 MDNode *ScopeTag = nullptr,
671 MDNode *NoAliasTag = nullptr) {
672 return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
673 isVolatile, TBAATag, ScopeTag, NoAliasTag);
674 }
675
676 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
677 MaybeAlign SrcAlign, Value *Size,
678 bool isVolatile = false, MDNode *TBAATag = nullptr,
679 MDNode *ScopeTag = nullptr,
680 MDNode *NoAliasTag = nullptr);
681
682 /// \brief Create and insert an element unordered-atomic memmove between the
683 /// specified pointers.
684 ///
685 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
686 /// respectively.
687 ///
688 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
689 /// specified, it will be added to the instruction. Likewise with alias.scope
690 /// and noalias tags.
691 CallInst *CreateElementUnorderedAtomicMemMove(
692 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
693 uint32_t ElementSize, MDNode *TBAATag = nullptr,
694 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
695 MDNode *NoAliasTag = nullptr);
696
697 /// Create a vector fadd reduction intrinsic of the source vector.
698 /// The first parameter is a scalar accumulator value for ordered reductions.
699 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
700
701 /// Create a vector fmul reduction intrinsic of the source vector.
702 /// The first parameter is a scalar accumulator value for ordered reductions.
703 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
704
705 /// Create a vector int add reduction intrinsic of the source vector.
706 CallInst *CreateAddReduce(Value *Src);
707
708 /// Create a vector int mul reduction intrinsic of the source vector.
709 CallInst *CreateMulReduce(Value *Src);
710
711 /// Create a vector int AND reduction intrinsic of the source vector.
712 CallInst *CreateAndReduce(Value *Src);
713
714 /// Create a vector int OR reduction intrinsic of the source vector.
715 CallInst *CreateOrReduce(Value *Src);
716
717 /// Create a vector int XOR reduction intrinsic of the source vector.
718 CallInst *CreateXorReduce(Value *Src);
719
720 /// Create a vector integer max reduction intrinsic of the source
721 /// vector.
722 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
723
724 /// Create a vector integer min reduction intrinsic of the source
725 /// vector.
726 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
727
728 /// Create a vector float max reduction intrinsic of the source
729 /// vector.
730 CallInst *CreateFPMaxReduce(Value *Src);
731
732 /// Create a vector float min reduction intrinsic of the source
733 /// vector.
734 CallInst *CreateFPMinReduce(Value *Src);
735
736 /// Create a lifetime.start intrinsic.
737 ///
738 /// If the pointer isn't i8* it will be converted.
739 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
740
741 /// Create a lifetime.end intrinsic.
742 ///
743 /// If the pointer isn't i8* it will be converted.
744 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
745
746 /// Create a call to invariant.start intrinsic.
747 ///
748 /// If the pointer isn't i8* it will be converted.
749 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
750
751 /// Create a call to Masked Load intrinsic
752 CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
753 Value *PassThru = nullptr, const Twine &Name = "");
754
755 /// Create a call to Masked Store intrinsic
756 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
757 Value *Mask);
758
759 /// Create a call to Masked Gather intrinsic
760 CallInst *CreateMaskedGather(Value *Ptrs, Align Alignment,
761 Value *Mask = nullptr, Value *PassThru = nullptr,
762 const Twine &Name = "");
763
764 /// Create a call to Masked Scatter intrinsic
765 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
766 Value *Mask = nullptr);
767
768 /// Create an assume intrinsic call that allows the optimizer to
769 /// assume that the provided condition will be true.
770 ///
771 /// The optional argument \p OpBundles specifies operand bundles that are
772 /// added to the call instruction.
773 CallInst *CreateAssumption(Value *Cond,
774 ArrayRef<OperandBundleDef> OpBundles = llvm::None);
775
776 /// Create a llvm.experimental.noalias.scope.decl intrinsic call.
777 Instruction *CreateNoAliasScopeDeclaration(Value *Scope);
778 Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) {
779 return CreateNoAliasScopeDeclaration(
780 MetadataAsValue::get(Context, ScopeTag));
781 }
782
783 /// Create a call to the experimental.gc.statepoint intrinsic to
784 /// start a new statepoint sequence.
785 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
786 Value *ActualCallee,
787 ArrayRef<Value *> CallArgs,
788 Optional<ArrayRef<Value *>> DeoptArgs,
789 ArrayRef<Value *> GCArgs,
790 const Twine &Name = "");
791
792 /// Create a call to the experimental.gc.statepoint intrinsic to
793 /// start a new statepoint sequence.
794 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
795 Value *ActualCallee, uint32_t Flags,
796 ArrayRef<Value *> CallArgs,
797 Optional<ArrayRef<Use>> TransitionArgs,
798 Optional<ArrayRef<Use>> DeoptArgs,
799 ArrayRef<Value *> GCArgs,
800 const Twine &Name = "");
801
802 /// Conveninence function for the common case when CallArgs are filled
803 /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
804 /// .get()'ed to get the Value pointer.
805 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
806 Value *ActualCallee, ArrayRef<Use> CallArgs,
807 Optional<ArrayRef<Value *>> DeoptArgs,
808 ArrayRef<Value *> GCArgs,
809 const Twine &Name = "");
810
811 /// Create an invoke to the experimental.gc.statepoint intrinsic to
812 /// start a new statepoint sequence.
813 InvokeInst *
814 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
815 Value *ActualInvokee, BasicBlock *NormalDest,
816 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
817 Optional<ArrayRef<Value *>> DeoptArgs,
818 ArrayRef<Value *> GCArgs, const Twine &Name = "");
819
820 /// Create an invoke to the experimental.gc.statepoint intrinsic to
821 /// start a new statepoint sequence.
822 InvokeInst *CreateGCStatepointInvoke(
823 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
824 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
825 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
826 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
827 const Twine &Name = "");
828
829 // Convenience function for the common case when CallArgs are filled in using
830 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
831 // get the Value *.
832 InvokeInst *
833 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
834 Value *ActualInvokee, BasicBlock *NormalDest,
835 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
836 Optional<ArrayRef<Value *>> DeoptArgs,
837 ArrayRef<Value *> GCArgs, const Twine &Name = "");
838
839 /// Create a call to the experimental.gc.result intrinsic to extract
840 /// the result from a call wrapped in a statepoint.
841 CallInst *CreateGCResult(Instruction *Statepoint,
842 Type *ResultType,
843 const Twine &Name = "");
844
845 /// Create a call to the experimental.gc.relocate intrinsics to
846 /// project the relocated value of one pointer from the statepoint.
847 CallInst *CreateGCRelocate(Instruction *Statepoint,
848 int BaseOffset,
849 int DerivedOffset,
850 Type *ResultType,
851 const Twine &Name = "");
852
853 /// Create a call to the experimental.gc.pointer.base intrinsic to get the
854 /// base pointer for the specified derived pointer.
855 CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = "");
856
857 /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get
858 /// the offset of the specified derived pointer from its base.
859 CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = "");
860
861 /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
862 /// will be the same type as that of \p Scaling.
863 Value *CreateVScale(Constant *Scaling, const Twine &Name = "");
864
865 /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...>
866 Value *CreateStepVector(Type *DstType, const Twine &Name = "");
867
868 /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
869 /// type.
870 CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
871 Instruction *FMFSource = nullptr,
872 const Twine &Name = "");
873
874 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
875 /// first type.
876 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
877 Instruction *FMFSource = nullptr,
878 const Twine &Name = "");
879
880 /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If
881 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
882 /// the intrinsic.
883 CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
884 ArrayRef<Value *> Args,
885 Instruction *FMFSource = nullptr,
886 const Twine &Name = "");
887
888 /// Create call to the minnum intrinsic.
889 CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
890 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
891 }
892
893 /// Create call to the maxnum intrinsic.
894 CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
895 return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
896 }
897
898 /// Create call to the minimum intrinsic.
899 CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
900 return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
901 }
902
903 /// Create call to the maximum intrinsic.
904 CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
905 return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
906 }
907
908 /// Create a call to the experimental.vector.extract intrinsic.
909 CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
910 const Twine &Name = "") {
911 return CreateIntrinsic(Intrinsic::experimental_vector_extract,
912 {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
913 Name);
914 }
915
916 /// Create a call to the experimental.vector.insert intrinsic.
917 CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
918 Value *Idx, const Twine &Name = "") {
919 return CreateIntrinsic(Intrinsic::experimental_vector_insert,
920 {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
921 nullptr, Name);
922 }
923
924private:
925 /// Create a call to a masked intrinsic with given Id.
926 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
927 ArrayRef<Type *> OverloadedTypes,
928 const Twine &Name = "");
929
930 Value *getCastedInt8PtrValue(Value *Ptr);
931
932 //===--------------------------------------------------------------------===//
933 // Instruction creation methods: Terminators
934 //===--------------------------------------------------------------------===//
935
936private:
937 /// Helper to add branch weight and unpredictable metadata onto an
938 /// instruction.
939 /// \returns The annotated instruction.
940 template <typename InstTy>
941 InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
942 if (Weights)
943 I->setMetadata(LLVMContext::MD_prof, Weights);
944 if (Unpredictable)
945 I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
946 return I;
947 }
948
949public:
950 /// Create a 'ret void' instruction.
951 ReturnInst *CreateRetVoid() {
952 return Insert(ReturnInst::Create(Context));
953 }
954
955 /// Create a 'ret <val>' instruction.
956 ReturnInst *CreateRet(Value *V) {
957 return Insert(ReturnInst::Create(Context, V));
958 }
959
960 /// Create a sequence of N insertvalue instructions,
961 /// with one Value from the retVals array each, that build a aggregate
962 /// return value one value at a time, and a ret instruction to return
963 /// the resulting aggregate value.
964 ///
965 /// This is a convenience function for code that uses aggregate return values
966 /// as a vehicle for having multiple return values.
967 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
968 Value *V = UndefValue::get(getCurrentFunctionReturnType());
969 for (unsigned i = 0; i != N; ++i)
970 V = CreateInsertValue(V, retVals[i], i, "mrv");
971 return Insert(ReturnInst::Create(Context, V));
972 }
973
974 /// Create an unconditional 'br label X' instruction.
975 BranchInst *CreateBr(BasicBlock *Dest) {
976 return Insert(BranchInst::Create(Dest));
977 }
978
979 /// Create a conditional 'br Cond, TrueDest, FalseDest'
980 /// instruction.
981 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
982 MDNode *BranchWeights = nullptr,
983 MDNode *Unpredictable = nullptr) {
984 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
985 BranchWeights, Unpredictable));
986 }
987
988 /// Create a conditional 'br Cond, TrueDest, FalseDest'
989 /// instruction. Copy branch meta data if available.
990 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
991 Instruction *MDSrc) {
992 BranchInst *Br = BranchInst::Create(True, False, Cond);
993 if (MDSrc) {
994 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
995 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
996 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
997 }
998 return Insert(Br);
999 }
1000
1001 /// Create a switch instruction with the specified value, default dest,
1002 /// and with a hint for the number of cases that will be added (for efficient
1003 /// allocation).
1004 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
1005 MDNode *BranchWeights = nullptr,
1006 MDNode *Unpredictable = nullptr) {
1007 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
1008 BranchWeights, Unpredictable));
1009 }
1010
1011 /// Create an indirect branch instruction with the specified address
1012 /// operand, with an optional hint for the number of destinations that will be
1013 /// added (for efficient allocation).
1014 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
1015 return Insert(IndirectBrInst::Create(Addr, NumDests));
1016 }
1017
1018 /// Create an invoke instruction.
1019 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1020 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1021 ArrayRef<Value *> Args,
1022 ArrayRef<OperandBundleDef> OpBundles,
1023 const Twine &Name = "") {
1024 InvokeInst *II =
1025 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
1026 if (IsFPConstrained)
1027 setConstrainedFPCallAttr(II);
1028 return Insert(II, Name);
1029 }
1030 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1031 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1032 ArrayRef<Value *> Args = None,
1033 const Twine &Name = "") {
1034 InvokeInst *II =
1035 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
1036 if (IsFPConstrained)
1037 setConstrainedFPCallAttr(II);
1038 return Insert(II, Name);
1039 }
1040
1041 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1042 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1043 ArrayRef<OperandBundleDef> OpBundles,
1044 const Twine &Name = "") {
1045 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1046 NormalDest, UnwindDest, Args, OpBundles, Name);
1047 }
1048
1049 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1050 BasicBlock *UnwindDest,
1051 ArrayRef<Value *> Args = None,
1052 const Twine &Name = "") {
1053 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1054 NormalDest, UnwindDest, Args, Name);
1055 }
1056
1057 /// \brief Create a callbr instruction.
1058 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1059 BasicBlock *DefaultDest,
1060 ArrayRef<BasicBlock *> IndirectDests,
1061 ArrayRef<Value *> Args = None,
1062 const Twine &Name = "") {
1063 return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
1064 Args), Name);
1065 }
1066 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1067 BasicBlock *DefaultDest,
1068 ArrayRef<BasicBlock *> IndirectDests,
1069 ArrayRef<Value *> Args,
1070 ArrayRef<OperandBundleDef> OpBundles,
1071 const Twine &Name = "") {
1072 return Insert(
1073 CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
1074 OpBundles), Name);
1075 }
1076
1077 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1078 ArrayRef<BasicBlock *> IndirectDests,
1079 ArrayRef<Value *> Args = None,
1080 const Twine &Name = "") {
1081 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1082 DefaultDest, IndirectDests, Args, Name);
1083 }
1084 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1085 ArrayRef<BasicBlock *> IndirectDests,
1086 ArrayRef<Value *> Args,
1087 ArrayRef<OperandBundleDef> OpBundles,
1088 const Twine &Name = "") {
1089 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1090 DefaultDest, IndirectDests, Args, Name);
1091 }
1092
1093 ResumeInst *CreateResume(Value *Exn) {
1094 return Insert(ResumeInst::Create(Exn));
1095 }
1096
1097 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
1098 BasicBlock *UnwindBB = nullptr) {
1099 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
1100 }
1101
1102 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
1103 unsigned NumHandlers,
1104 const Twine &Name = "") {
1105 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
1106 Name);
1107 }
1108
1109 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
1110 const Twine &Name = "") {
1111 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
1112 }
1113
1114 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
1115 ArrayRef<Value *> Args = None,
1116 const Twine &Name = "") {
1117 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
1118 }
1119
1120 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
1121 return Insert(CatchReturnInst::Create(CatchPad, BB));
1122 }
1123
1124 UnreachableInst *CreateUnreachable() {
1125 return Insert(new UnreachableInst(Context));
1126 }
1127
1128 //===--------------------------------------------------------------------===//
1129 // Instruction creation methods: Binary Operators
1130 //===--------------------------------------------------------------------===//
1131private:
1132 BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
1133 Value *LHS, Value *RHS,
1134 const Twine &Name,
1135 bool HasNUW, bool HasNSW) {
1136 BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
1137 if (HasNUW) BO->setHasNoUnsignedWrap();
1138 if (HasNSW) BO->setHasNoSignedWrap();
1139 return BO;
1140 }
1141
1142 Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
1143 FastMathFlags FMF) const {
1144 if (!FPMD)
1145 FPMD = DefaultFPMathTag;
1146 if (FPMD)
1147 I->setMetadata(LLVMContext::MD_fpmath, FPMD);
1148 I->setFastMathFlags(FMF);
1149 return I;
1150 }
1151
1152 Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
1153 Value *R, const Twine &Name) const {
1154 auto *LC = dyn_cast<Constant>(L);
1155 auto *RC = dyn_cast<Constant>(R);
1156 return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
1157 }
1158
1159 Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
1160 RoundingMode UseRounding = DefaultConstrainedRounding;
1161
1162 if (Rounding.hasValue())
1163 UseRounding = Rounding.getValue();
1164
1165 Optional<StringRef> RoundingStr = RoundingModeToStr(UseRounding);
1166 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!")(static_cast <bool> (RoundingStr.hasValue() && "Garbage strict rounding mode!"
) ? void (0) : __assert_fail ("RoundingStr.hasValue() && \"Garbage strict rounding mode!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1166, __extension__ __PRETTY_FUNCTION__))
;
1167 auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());
1168
1169 return MetadataAsValue::get(Context, RoundingMDS);
1170 }
1171
1172 Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
1173 fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
1174
1175 if (Except.hasValue())
1176 UseExcept = Except.getValue();
1177
1178 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(UseExcept);
1179 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!")(static_cast <bool> (ExceptStr.hasValue() && "Garbage strict exception behavior!"
) ? void (0) : __assert_fail ("ExceptStr.hasValue() && \"Garbage strict exception behavior!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1179, __extension__ __PRETTY_FUNCTION__))
;
1180 auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());
1181
1182 return MetadataAsValue::get(Context, ExceptMDS);
1183 }
1184
1185 Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
1186 assert(CmpInst::isFPPredicate(Predicate) &&(static_cast <bool> (CmpInst::isFPPredicate(Predicate) &&
Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst
::FCMP_TRUE && "Invalid constrained FP comparison predicate!"
) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1189, __extension__ __PRETTY_FUNCTION__))
1187 Predicate != CmpInst::FCMP_FALSE &&(static_cast <bool> (CmpInst::isFPPredicate(Predicate) &&
Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst
::FCMP_TRUE && "Invalid constrained FP comparison predicate!"
) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1189, __extension__ __PRETTY_FUNCTION__))
1188 Predicate != CmpInst::FCMP_TRUE &&(static_cast <bool> (CmpInst::isFPPredicate(Predicate) &&
Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst
::FCMP_TRUE && "Invalid constrained FP comparison predicate!"
) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1189, __extension__ __PRETTY_FUNCTION__))
1189 "Invalid constrained FP comparison predicate!")(static_cast <bool> (CmpInst::isFPPredicate(Predicate) &&
Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst
::FCMP_TRUE && "Invalid constrained FP comparison predicate!"
) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1189, __extension__ __PRETTY_FUNCTION__))
;
1190
1191 StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
1192 auto *PredicateMDS = MDString::get(Context, PredicateStr);
1193
1194 return MetadataAsValue::get(Context, PredicateMDS);
1195 }
1196
1197public:
1198 Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
1199 bool HasNUW = false, bool HasNSW = false) {
1200 if (auto *LC = dyn_cast<Constant>(LHS))
1201 if (auto *RC = dyn_cast<Constant>(RHS))
1202 return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
1203 return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
1204 HasNUW, HasNSW);
1205 }
1206
1207 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1208 return CreateAdd(LHS, RHS, Name, false, true);
1209 }
1210
1211 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1212 return CreateAdd(LHS, RHS, Name, true, false);
1213 }
1214
1215 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
1216 bool HasNUW = false, bool HasNSW = false) {
1217 if (auto *LC = dyn_cast<Constant>(LHS))
1218 if (auto *RC = dyn_cast<Constant>(RHS))
1219 return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
1220 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
1221 HasNUW, HasNSW);
1222 }
1223
1224 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1225 return CreateSub(LHS, RHS, Name, false, true);
1226 }
1227
1228 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1229 return CreateSub(LHS, RHS, Name, true, false);
1230 }
1231
1232 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
1233 bool HasNUW = false, bool HasNSW = false) {
1234 if (auto *LC = dyn_cast<Constant>(LHS))
1235 if (auto *RC = dyn_cast<Constant>(RHS))
1236 return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
1237 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
1238 HasNUW, HasNSW);
1239 }
1240
1241 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1242 return CreateMul(LHS, RHS, Name, false, true);
1243 }
1244
1245 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1246 return CreateMul(LHS, RHS, Name, true, false);
1247 }
1248
1249 Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1250 bool isExact = false) {
1251 if (auto *LC = dyn_cast<Constant>(LHS))
1252 if (auto *RC = dyn_cast<Constant>(RHS))
1253 return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
1254 if (!isExact)
1255 return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
1256 return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
1257 }
1258
1259 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1260 return CreateUDiv(LHS, RHS, Name, true);
1261 }
1262
1263 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1264 bool isExact = false) {
1265 if (auto *LC = dyn_cast<Constant>(LHS))
1266 if (auto *RC = dyn_cast<Constant>(RHS))
1267 return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
1268 if (!isExact)
1269 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
1270 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
1271 }
1272
1273 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1274 return CreateSDiv(LHS, RHS, Name, true);
1275 }
1276
1277 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
1278 if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
1279 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
1280 }
1281
1282 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
1283 if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
1284 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1285 }
1286
1287 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1288 bool HasNUW = false, bool HasNSW = false) {
1289 if (auto *LC = dyn_cast<Constant>(LHS))
1290 if (auto *RC = dyn_cast<Constant>(RHS))
1291 return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
1292 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1293 HasNUW, HasNSW);
1294 }
1295
1296 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1297 bool HasNUW = false, bool HasNSW = false) {
1298 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1299 HasNUW, HasNSW);
1300 }
1301
1302 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1303 bool HasNUW = false, bool HasNSW = false) {
1304 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1305 HasNUW, HasNSW);
1306 }
1307
1308 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1309 bool isExact = false) {
1310 if (auto *LC = dyn_cast<Constant>(LHS))
1311 if (auto *RC = dyn_cast<Constant>(RHS))
1312 return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
1313 if (!isExact)
1314 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1315 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1316 }
1317
1318 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1319 bool isExact = false) {
1320 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1321 }
1322
1323 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1324 bool isExact = false) {
1325 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1326 }
1327
1328 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1329 bool isExact = false) {
1330 if (auto *LC = dyn_cast<Constant>(LHS))
1331 if (auto *RC = dyn_cast<Constant>(RHS))
1332 return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
1333 if (!isExact)
1334 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1335 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1336 }
1337
1338 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1339 bool isExact = false) {
1340 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1341 }
1342
1343 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1344 bool isExact = false) {
1345 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1346 }
1347
1348 Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
1349 if (auto *RC = dyn_cast<Constant>(RHS)) {
1350 if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
1351 return LHS; // LHS & -1 -> LHS
1352 if (auto *LC = dyn_cast<Constant>(LHS))
1353 return Insert(Folder.CreateAnd(LC, RC), Name);
1354 }
1355 return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
1356 }
1357
1358 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1359 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1360 }
1361
1362 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1363 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1364 }
1365
1366 Value *CreateAnd(ArrayRef<Value*> Ops) {
1367 assert(!Ops.empty())(static_cast <bool> (!Ops.empty()) ? void (0) : __assert_fail
("!Ops.empty()", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1367, __extension__ __PRETTY_FUNCTION__))
;
1368 Value *Accum = Ops[0];
1369 for (unsigned i = 1; i < Ops.size(); i++)
1370 Accum = CreateAnd(Accum, Ops[i]);
1371 return Accum;
1372 }
1373
1374 Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
1375 if (auto *RC = dyn_cast<Constant>(RHS)) {
1376 if (RC->isNullValue())
1377 return LHS; // LHS | 0 -> LHS
1378 if (auto *LC = dyn_cast<Constant>(LHS))
1379 return Insert(Folder.CreateOr(LC, RC), Name);
1380 }
1381 return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
1382 }
1383
1384 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1385 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1386 }
1387
1388 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1389 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1390 }
1391
1392 Value *CreateOr(ArrayRef<Value*> Ops) {
1393 assert(!Ops.empty())(static_cast <bool> (!Ops.empty()) ? void (0) : __assert_fail
("!Ops.empty()", "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1393, __extension__ __PRETTY_FUNCTION__))
;
1394 Value *Accum = Ops[0];
1395 for (unsigned i = 1; i < Ops.size(); i++)
1396 Accum = CreateOr(Accum, Ops[i]);
1397 return Accum;
1398 }
1399
1400 Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
1401 if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
1402 return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
1403 }
1404
1405 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1406 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1407 }
1408
1409 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1410 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1411 }
1412
1413 Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
1414 MDNode *FPMD = nullptr) {
1415 if (IsFPConstrained)
1416 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1417 L, R, nullptr, Name, FPMD);
1418
1419 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1420 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
1421 return Insert(I, Name);
1422 }
1423
1424 /// Copy fast-math-flags from an instruction rather than using the builder's
1425 /// default FMF.
1426 Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
1427 const Twine &Name = "") {
1428 if (IsFPConstrained)
1429 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1430 L, R, FMFSource, Name);
1431
1432 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1433 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
1434 FMFSource->getFastMathFlags());
1435 return Insert(I, Name);
1436 }
1437
1438 Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
1439 MDNode *FPMD = nullptr) {
1440 if (IsFPConstrained)
1441 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1442 L, R, nullptr, Name, FPMD);
1443
1444 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1445 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
1446 return Insert(I, Name);
1447 }
1448
1449 /// Copy fast-math-flags from an instruction rather than using the builder's
1450 /// default FMF.
1451 Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
1452 const Twine &Name = "") {
1453 if (IsFPConstrained)
1454 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1455 L, R, FMFSource, Name);
1456
1457 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1458 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
1459 FMFSource->getFastMathFlags());
1460 return Insert(I, Name);
1461 }
1462
1463 Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
1464 MDNode *FPMD = nullptr) {
1465 if (IsFPConstrained)
1466 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1467 L, R, nullptr, Name, FPMD);
1468
1469 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1470 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
1471 return Insert(I, Name);
1472 }
1473
1474 /// Copy fast-math-flags from an instruction rather than using the builder's
1475 /// default FMF.
1476 Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
1477 const Twine &Name = "") {
1478 if (IsFPConstrained)
1479 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1480 L, R, FMFSource, Name);
1481
1482 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1483 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
1484 FMFSource->getFastMathFlags());
1485 return Insert(I, Name);
1486 }
1487
1488 Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
1489 MDNode *FPMD = nullptr) {
1490 if (IsFPConstrained)
1491 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1492 L, R, nullptr, Name, FPMD);
1493
1494 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1495 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
1496 return Insert(I, Name);
1497 }
1498
1499 /// Copy fast-math-flags from an instruction rather than using the builder's
1500 /// default FMF.
1501 Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
1502 const Twine &Name = "") {
1503 if (IsFPConstrained)
1504 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1505 L, R, FMFSource, Name);
1506
1507 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1508 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
1509 FMFSource->getFastMathFlags());
1510 return Insert(I, Name);
1511 }
1512
1513 Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
1514 MDNode *FPMD = nullptr) {
1515 if (IsFPConstrained)
1516 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1517 L, R, nullptr, Name, FPMD);
1518
1519 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1520 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
1521 return Insert(I, Name);
1522 }
1523
1524 /// Copy fast-math-flags from an instruction rather than using the builder's
1525 /// default FMF.
1526 Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
1527 const Twine &Name = "") {
1528 if (IsFPConstrained)
1529 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1530 L, R, FMFSource, Name);
1531
1532 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1533 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
1534 FMFSource->getFastMathFlags());
1535 return Insert(I, Name);
1536 }
1537
1538 Value *CreateBinOp(Instruction::BinaryOps Opc,
1539 Value *LHS, Value *RHS, const Twine &Name = "",
1540 MDNode *FPMathTag = nullptr) {
1541 if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
1542 Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
1543 if (isa<FPMathOperator>(BinOp))
1544 setFPAttrs(BinOp, FPMathTag, FMF);
1545 return Insert(BinOp, Name);
1546 }
1547
1548 Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1549 assert(Cond2->getType()->isIntOrIntVectorTy(1))(static_cast <bool> (Cond2->getType()->isIntOrIntVectorTy
(1)) ? void (0) : __assert_fail ("Cond2->getType()->isIntOrIntVectorTy(1)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1549, __extension__ __PRETTY_FUNCTION__))
;
1550 return CreateSelect(Cond1, Cond2,
1551 ConstantInt::getNullValue(Cond2->getType()), Name);
1552 }
1553
1554 Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1555 assert(Cond2->getType()->isIntOrIntVectorTy(1))(static_cast <bool> (Cond2->getType()->isIntOrIntVectorTy
(1)) ? void (0) : __assert_fail ("Cond2->getType()->isIntOrIntVectorTy(1)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1555, __extension__ __PRETTY_FUNCTION__))
;
1556 return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
1557 Cond2, Name);
1558 }
1559
1560 CallInst *CreateConstrainedFPBinOp(
1561 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
1562 const Twine &Name = "", MDNode *FPMathTag = nullptr,
1563 Optional<RoundingMode> Rounding = None,
1564 Optional<fp::ExceptionBehavior> Except = None);
1565
1566 Value *CreateNeg(Value *V, const Twine &Name = "",
1567 bool HasNUW = false, bool HasNSW = false) {
1568 if (auto *VC = dyn_cast<Constant>(V))
1569 return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
1570 BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
1571 if (HasNUW) BO->setHasNoUnsignedWrap();
1572 if (HasNSW) BO->setHasNoSignedWrap();
1573 return BO;
1574 }
1575
1576 Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
1577 return CreateNeg(V, Name, false, true);
1578 }
1579
1580 Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
1581 return CreateNeg(V, Name, true, false);
1582 }
1583
1584 Value *CreateFNeg(Value *V, const Twine &Name = "",
1585 MDNode *FPMathTag = nullptr) {
1586 if (auto *VC = dyn_cast<Constant>(V))
1587 return Insert(Folder.CreateFNeg(VC), Name);
1588 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
1589 Name);
1590 }
1591
1592 /// Copy fast-math-flags from an instruction rather than using the builder's
1593 /// default FMF.
1594 Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
1595 const Twine &Name = "") {
1596 if (auto *VC = dyn_cast<Constant>(V))
1597 return Insert(Folder.CreateFNeg(VC), Name);
1598 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
1599 FMFSource->getFastMathFlags()),
1600 Name);
1601 }
1602
1603 Value *CreateNot(Value *V, const Twine &Name = "") {
1604 if (auto *VC = dyn_cast<Constant>(V))
1605 return Insert(Folder.CreateNot(VC), Name);
1606 return Insert(BinaryOperator::CreateNot(V), Name);
1607 }
1608
1609 Value *CreateUnOp(Instruction::UnaryOps Opc,
1610 Value *V, const Twine &Name = "",
1611 MDNode *FPMathTag = nullptr) {
1612 if (auto *VC = dyn_cast<Constant>(V))
1613 return Insert(Folder.CreateUnOp(Opc, VC), Name);
1614 Instruction *UnOp = UnaryOperator::Create(Opc, V);
1615 if (isa<FPMathOperator>(UnOp))
1616 setFPAttrs(UnOp, FPMathTag, FMF);
1617 return Insert(UnOp, Name);
1618 }
1619
1620 /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
1621 /// Correct number of operands must be passed accordingly.
1622 Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
1623 const Twine &Name = "", MDNode *FPMathTag = nullptr);
1624
1625 //===--------------------------------------------------------------------===//
1626 // Instruction creation methods: Memory Instructions
1627 //===--------------------------------------------------------------------===//
1628
1629 AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
1630 Value *ArraySize = nullptr, const Twine &Name = "") {
1631 const DataLayout &DL = BB->getModule()->getDataLayout();
1632 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1633 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1634 }
1635
1636 AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
1637 const Twine &Name = "") {
1638 const DataLayout &DL = BB->getModule()->getDataLayout();
1639 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1640 unsigned AddrSpace = DL.getAllocaAddrSpace();
1641 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1642 }
1643
1644 /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
1645 /// converting the string to 'bool' for the isVolatile parameter.
1646 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
1647 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1648 }
1649
1650 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
1651 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
37
Calling 'IRBuilderBase::CreateAlignedLoad'
1652 }
1653
1654 LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
1655 const Twine &Name = "") {
1656 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
1657 }
1658
1659 // Deprecated [opaque pointer types]
1660 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
char *Name)
1661 const char *Name),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
char *Name)
1662 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
char *Name)
1663 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
char *Name)
{
1664 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1665 }
1666
1667 // Deprecated [opaque pointer types]
1668 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
Twine &Name = "")
1669 const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
Twine &Name = "")
1670 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
Twine &Name = "")
1671 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, const
Twine &Name = "")
{
1672 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1673 }
1674
1675 // Deprecated [opaque pointer types]
1676 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
1677 bool isVolatile,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
1678 const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
1679 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
1680 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateLoad(Value *Ptr, bool
isVolatile, const Twine &Name = "")
{
1681 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
1682 Name);
1683 }
1684
1685 StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
1686 return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
1687 }
1688
1689 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1690 const char *Name) {
1691 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1692 }
1693
1694 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1695 const Twine &Name = "") {
1696 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
38
Calling 'IRBuilderBase::CreateAlignedLoad'
1697 }
1698
1699 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1700 bool isVolatile, const Twine &Name = "") {
1701 if (!Align) {
39
Calling 'Optional::operator bool'
47
Returning from 'Optional::operator bool'
48
Taking true branch
1702 const DataLayout &DL = BB->getModule()->getDataLayout();
49
Called C++ object pointer is null
1703 Align = DL.getABITypeAlign(Ty);
1704 }
1705 return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
1706 }
1707
1708 // Deprecated [opaque pointer types]
1709 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
1710 MaybeAlign Align,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
1711 const char *Name),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
1712 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
1713 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const char *Name)
{
1714 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1715 Align, Name);
1716 }
1717 // Deprecated [opaque pointer types]
1718 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
1719 MaybeAlign Align,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
1720 const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
1721 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
1722 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, const Twine &Name = "")
{
1723 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1724 Align, Name);
1725 }
1726 // Deprecated [opaque pointer types]
1727 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1728 MaybeAlign Align,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1729 bool isVolatile,[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1730 const Twine &Name = ""),[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1731 "Use the version that explicitly specifies the "[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
1732 "loaded type instead")[[deprecated("Use the version that explicitly specifies the "
"loaded type instead")]] LoadInst *CreateAlignedLoad(Value *
Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name
= "")
{
1733 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1734 Align, isVolatile, Name);
1735 }
1736
1737 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
1738 bool isVolatile = false) {
1739 if (!Align) {
1740 const DataLayout &DL = BB->getModule()->getDataLayout();
1741 Align = DL.getABITypeAlign(Val->getType());
1742 }
1743 return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
1744 }
1745 FenceInst *CreateFence(AtomicOrdering Ordering,
1746 SyncScope::ID SSID = SyncScope::System,
1747 const Twine &Name = "") {
1748 return Insert(new FenceInst(Context, Ordering, SSID), Name);
1749 }
1750
1751 AtomicCmpXchgInst *
1752 CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align,
1753 AtomicOrdering SuccessOrdering,
1754 AtomicOrdering FailureOrdering,
1755 SyncScope::ID SSID = SyncScope::System) {
1756 if (!Align) {
1757 const DataLayout &DL = BB->getModule()->getDataLayout();
1758 Align = llvm::Align(DL.getTypeStoreSize(New->getType()));
1759 }
1760
1761 return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering,
1762 FailureOrdering, SSID));
1763 }
1764
1765 AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr,
1766 Value *Val, MaybeAlign Align,
1767 AtomicOrdering Ordering,
1768 SyncScope::ID SSID = SyncScope::System) {
1769 if (!Align) {
1770 const DataLayout &DL = BB->getModule()->getDataLayout();
1771 Align = llvm::Align(DL.getTypeStoreSize(Val->getType()));
1772 }
1773
1774 return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID));
1775 }
1776
1777 Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1778 const Twine &Name = "") {
1779 return CreateGEP(nullptr, Ptr, IdxList, Name);
1780 }
1781
1782 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1783 const Twine &Name = "") {
1784 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1785 // Every index must be constant.
1786 size_t i, e;
1787 for (i = 0, e = IdxList.size(); i != e; ++i)
1788 if (!isa<Constant>(IdxList[i]))
1789 break;
1790 if (i == e)
1791 return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
1792 }
1793 return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
1794 }
1795
1796 Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1797 const Twine &Name = "") {
1798 return CreateInBoundsGEP(nullptr, Ptr, IdxList, Name);
1799 }
1800
1801 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1802 const Twine &Name = "") {
1803 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1804 // Every index must be constant.
1805 size_t i, e;
1806 for (i = 0, e = IdxList.size(); i != e; ++i)
1807 if (!isa<Constant>(IdxList[i]))
1808 break;
1809 if (i == e)
1810 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
1811 Name);
1812 }
1813 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
1814 }
1815
1816 Value *CreateGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
1817 return CreateGEP(nullptr, Ptr, Idx, Name);
1818 }
1819
1820 Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
1821 if (auto *PC = dyn_cast<Constant>(Ptr))
1822 if (auto *IC = dyn_cast<Constant>(Idx))
1823 return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
1824 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1825 }
1826
1827 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
1828 const Twine &Name = "") {
1829 if (auto *PC = dyn_cast<Constant>(Ptr))
1830 if (auto *IC = dyn_cast<Constant>(Idx))
1831 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
1832 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1833 }
1834
1835 Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
1836 return CreateConstGEP1_32(nullptr, Ptr, Idx0, Name);
1837 }
1838
1839 Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1840 const Twine &Name = "") {
1841 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1842
1843 if (auto *PC = dyn_cast<Constant>(Ptr))
1844 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1845
1846 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1847 }
1848
1849 Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1850 const Twine &Name = "") {
1851 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1852
1853 if (auto *PC = dyn_cast<Constant>(Ptr))
1854 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1855
1856 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1857 }
1858
1859 Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
1860 const Twine &Name = "") {
1861 Value *Idxs[] = {
1862 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1863 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1864 };
1865
1866 if (auto *PC = dyn_cast<Constant>(Ptr))
1867 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1868
1869 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1870 }
1871
1872 Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
1873 unsigned Idx1, const Twine &Name = "") {
1874 Value *Idxs[] = {
1875 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1876 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1877 };
1878
1879 if (auto *PC = dyn_cast<Constant>(Ptr))
1880 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1881
1882 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1883 }
1884
1885 Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1886 const Twine &Name = "") {
1887 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1888
1889 if (auto *PC = dyn_cast<Constant>(Ptr))
1890 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1891
1892 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1893 }
1894
1895 Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
1896 return CreateConstGEP1_64(nullptr, Ptr, Idx0, Name);
1897 }
1898
1899 Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1900 const Twine &Name = "") {
1901 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1902
1903 if (auto *PC = dyn_cast<Constant>(Ptr))
1904 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1905
1906 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1907 }
1908
1909 Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
1910 const Twine &Name = "") {
1911 return CreateConstInBoundsGEP1_64(nullptr, Ptr, Idx0, Name);
1912 }
1913
1914 Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1915 const Twine &Name = "") {
1916 Value *Idxs[] = {
1917 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1918 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1919 };
1920
1921 if (auto *PC = dyn_cast<Constant>(Ptr))
1922 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1923
1924 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1925 }
1926
1927 Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1928 const Twine &Name = "") {
1929 return CreateConstGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
1930 }
1931
1932 Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1933 uint64_t Idx1, const Twine &Name = "") {
1934 Value *Idxs[] = {
1935 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1936 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1937 };
1938
1939 if (auto *PC = dyn_cast<Constant>(Ptr))
1940 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1941
1942 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1943 }
1944
1945 Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1946 const Twine &Name = "") {
1947 return CreateConstInBoundsGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
1948 }
1949
1950 Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
1951 const Twine &Name = "") {
1952 return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
1953 }
1954
1955 Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = "") {
1956 return CreateConstInBoundsGEP2_32(nullptr, Ptr, 0, Idx, Name);
1957 }
1958
1959 /// Same as CreateGlobalString, but return a pointer with "i8*" type
1960 /// instead of a pointer to array of i8.
1961 ///
1962 /// If no module is given via \p M, it is take from the insertion point basic
1963 /// block.
1964 Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
1965 unsigned AddressSpace = 0,
1966 Module *M = nullptr) {
1967 GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
1968 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1969 Constant *Indices[] = {Zero, Zero};
1970 return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
1971 Indices);
1972 }
1973
1974 //===--------------------------------------------------------------------===//
1975 // Instruction creation methods: Cast/Conversion Operators
1976 //===--------------------------------------------------------------------===//
1977
1978 Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
1979 return CreateCast(Instruction::Trunc, V, DestTy, Name);
1980 }
1981
1982 Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
1983 return CreateCast(Instruction::ZExt, V, DestTy, Name);
1984 }
1985
1986 Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
1987 return CreateCast(Instruction::SExt, V, DestTy, Name);
1988 }
1989
1990 /// Create a ZExt or Trunc from the integer value V to DestTy. Return
1991 /// the value untouched if the type of V is already DestTy.
1992 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
1993 const Twine &Name = "") {
1994 assert(V->getType()->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy
() && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!"
) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1996, __extension__ __PRETTY_FUNCTION__))
1995 DestTy->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy
() && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!"
) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1996, __extension__ __PRETTY_FUNCTION__))
1996 "Can only zero extend/truncate integers!")(static_cast <bool> (V->getType()->isIntOrIntVectorTy
() && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!"
) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 1996, __extension__ __PRETTY_FUNCTION__))
;
1997 Type *VTy = V->getType();
1998 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
1999 return CreateZExt(V, DestTy, Name);
2000 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2001 return CreateTrunc(V, DestTy, Name);
2002 return V;
2003 }
2004
2005 /// Create a SExt or Trunc from the integer value V to DestTy. Return
2006 /// the value untouched if the type of V is already DestTy.
2007 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
2008 const Twine &Name = "") {
2009 assert(V->getType()->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy
() && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!"
) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 2011, __extension__ __PRETTY_FUNCTION__))
2010 DestTy->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy
() && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!"
) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 2011, __extension__ __PRETTY_FUNCTION__))
2011 "Can only sign extend/truncate integers!")(static_cast <bool> (V->getType()->isIntOrIntVectorTy
() && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!"
) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/IRBuilder.h"
, 2011, __extension__ __PRETTY_FUNCTION__))
;
2012 Type *VTy = V->getType();
2013 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2014 return CreateSExt(V, DestTy, Name);
2015 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2016 return CreateTrunc(V, DestTy, Name);
2017 return V;
2018 }
2019
2020 Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
2021 if (IsFPConstrained)
2022 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
2023 V, DestTy, nullptr, Name);
2024 return CreateCast(Instruction::FPToUI, V, DestTy, Name);
2025 }
2026
2027 Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
2028 if (IsFPConstrained)
2029 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
2030 V, DestTy, nullptr, Name);
2031 return CreateCast(Instruction::FPToSI, V, DestTy, Name);
2032 }
2033
2034 Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2035 if (IsFPConstrained)
2036 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
2037 V, DestTy, nullptr, Name);
2038 return CreateCast(Instruction::UIToFP, V, DestTy, Name);
2039 }
2040
2041 Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2042 if (IsFPConstrained)
2043 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
2044 V, DestTy, nullptr, Name);
2045 return CreateCast(Instruction::SIToFP, V, DestTy, Name);
2046 }
2047
2048 Value *CreateFPTrunc(Value *V, Type *DestTy,
2049 const Twine &Name = "") {
2050 if (IsFPConstrained)
2051 return CreateConstrainedFPCast(
2052 Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
2053 Name);
2054 return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
2055 }
2056
2057 Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
2058 if (IsFPConstrained)
2059 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
2060 V, DestTy, nullptr, Name);
2061 return CreateCast(Instruction::FPExt, V, DestTy, Name);
2062 }
2063
2064 Value *CreatePtrToInt(Value *V, Type *DestTy,
2065 const Twine &Name = "") {
2066 return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
2067 }
2068
2069 Value *CreateIntToPtr(Value *V, Type *DestTy,
2070 const Twine &Name = "") {
2071 return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
2072 }
2073
2074 Value *CreateBitCast(Value *V, Type *DestTy,
2075 const Twine &Name = "") {
2076 return CreateCast(Instruction::BitCast, V, DestTy, Name);
2077 }
2078
2079 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
2080 const Twine &Name = "") {
2081 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
2082 }
2083
2084 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
2085 const Twine &Name = "") {
2086 if (V->getType() == DestTy)
2087 return V;
2088 if (auto *VC = dyn_cast<Constant>(V))
2089 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
2090 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
2091 }
2092
2093 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
2094 const Twine &Name = "") {
2095 if (V->getType() == DestTy)
2096 return V;
2097 if (auto *VC = dyn_cast<Constant>(V))
2098 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
2099 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
2100 }
2101
2102 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
2103 const Twine &Name = "") {
2104 if (V->getType() == DestTy)
2105 return V;
2106 if (auto *VC = dyn_cast<Constant>(V))
2107 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
2108 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
2109 }
2110
2111 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
2112 const Twine &Name = "") {
2113 if (V->getType() == DestTy)
2114 return V;
2115 if (auto *VC = dyn_cast<Constant>(V))
2116 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
2117 return Insert(CastInst::Create(Op, V, DestTy), Name);
2118 }
2119
2120 Value *CreatePointerCast(Value *V, Type *DestTy,
2121 const Twine &Name = "") {
2122 if (V->getType() == DestTy)
2123 return V;
2124 if (auto *VC = dyn_cast<Constant>(V))
2125 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
2126 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
2127 }
2128
2129 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
2130 const Twine &Name = "") {
2131 if (V->getType() == DestTy)
2132 return V;
2133
2134 if (auto *VC = dyn_cast<Constant>(V)) {
2135 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
2136 Name);
2137 }
2138
2139 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
2140 Name);
2141 }
2142
2143 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
2144 const Twine &Name = "") {
2145 if (V->getType() == DestTy)
2146 return V;
2147 if (auto *VC = dyn_cast<Constant>(V))
2148 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
2149 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
2150 }
2151
2152 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
2153 const Twine &Name = "") {
2154 if (V->getType() == DestTy)
2155 return V;
2156 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
2157 return CreatePtrToInt(V, DestTy, Name);
2158 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
2159 return CreateIntToPtr(V, DestTy, Name);
2160
2161 return CreateBitCast(V, DestTy, Name);
2162 }
2163
2164 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
2165 if (V->getType() == DestTy)
2166 return V;
2167 if (auto *VC = dyn_cast<Constant>(V))
2168 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
2169 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
2170 }
2171
2172 CallInst *CreateConstrainedFPCast(
2173 Intrinsic::ID ID, Value *V, Type *DestTy,
2174 Instruction *FMFSource = nullptr, const Twine &Name = "",
2175 MDNode *FPMathTag = nullptr,
2176 Optional<RoundingMode> Rounding = None,
2177 Optional<fp::ExceptionBehavior> Except = None);
2178
2179 // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
2180 // compile time error, instead of converting the string to bool for the
2181 // isSigned parameter.
2182 Value *CreateIntCast(Value *, Type *, const char *) = delete;
2183
2184 //===--------------------------------------------------------------------===//
2185 // Instruction creation methods: Compare Instructions
2186 //===--------------------------------------------------------------------===//
2187
2188 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
2189 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
2190 }
2191
2192 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
2193 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
2194 }
2195
2196 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2197 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
2198 }
2199
2200 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2201 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
2202 }
2203
2204 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
2205 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
2206 }
2207
2208 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
2209 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
2210 }
2211
2212 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2213 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
2214 }
2215
2216 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2217 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
2218 }
2219
2220 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
2221 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
2222 }
2223
2224 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
2225 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
2226 }
2227
2228 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2229 MDNode *FPMathTag = nullptr) {
2230 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
2231 }
2232
2233 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
2234 MDNode *FPMathTag = nullptr) {
2235 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
2236 }
2237
2238 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
2239 MDNode *FPMathTag = nullptr) {
2240 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
2241 }
2242
2243 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
2244 MDNode *FPMathTag = nullptr) {
2245 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
2246 }
2247
2248 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
2249 MDNode *FPMathTag = nullptr) {
2250 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
2251 }
2252
2253 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
2254 MDNode *FPMathTag = nullptr) {
2255 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
2256 }
2257
2258 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
2259 MDNode *FPMathTag = nullptr) {
2260 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
2261 }
2262
2263 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
2264 MDNode *FPMathTag = nullptr) {
2265 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
2266 }
2267
2268 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2269 MDNode *FPMathTag = nullptr) {
2270 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
2271 }
2272
2273 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
2274 MDNode *FPMathTag = nullptr) {
2275 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
2276 }
2277
2278 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
2279 MDNode *FPMathTag = nullptr) {
2280 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
2281 }
2282
2283 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
2284 MDNode *FPMathTag = nullptr) {
2285 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
2286 }
2287
2288 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
2289 MDNode *FPMathTag = nullptr) {
2290 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
2291 }
2292
2293 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
2294 MDNode *FPMathTag = nullptr) {
2295 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
2296 }
2297
2298 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2299 const Twine &Name = "") {
2300 if (auto *LC = dyn_cast<Constant>(LHS))
2301 if (auto *RC = dyn_cast<Constant>(RHS))
2302 return Insert(Folder.CreateICmp(P, LC, RC), Name);
2303 return Insert(new ICmpInst(P, LHS, RHS), Name);
2304 }
2305
2306 // Create a quiet floating-point comparison (i.e. one that raises an FP
2307 // exception only in the case where an input is a signaling NaN).
2308 // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
2309 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2310 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2311 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
2312 }
2313
2314 Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2315 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2316 return CmpInst::isFPPredicate(Pred)
2317 ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
2318 : CreateICmp(Pred, LHS, RHS, Name);
2319 }
2320
2321 // Create a signaling floating-point comparison (i.e. one that raises an FP
2322 // exception whenever an input is any NaN, signaling or quiet).
2323 // Note that this differs from CreateFCmp only if IsFPConstrained is true.
2324 Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
2325 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2326 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
2327 }
2328
2329private:
2330 // Helper routine to create either a signaling or a quiet FP comparison.
2331 Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
2332 const Twine &Name, MDNode *FPMathTag,
2333 bool IsSignaling);
2334
2335public:
2336 CallInst *CreateConstrainedFPCmp(
2337 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
2338 const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2339
2340 //===--------------------------------------------------------------------===//
2341 // Instruction creation methods: Other Instructions
2342 //===--------------------------------------------------------------------===//
2343
2344 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
2345 const Twine &Name = "") {
2346 PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
2347 if (isa<FPMathOperator>(Phi))
2348 setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
2349 return Insert(Phi, Name);
2350 }
2351
2352 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
2353 ArrayRef<Value *> Args = None, const Twine &Name = "",
2354 MDNode *FPMathTag = nullptr) {
2355 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
2356 if (IsFPConstrained)
2357 setConstrainedFPCallAttr(CI);
2358 if (isa<FPMathOperator>(CI))
2359 setFPAttrs(CI, FPMathTag, FMF);
2360 return Insert(CI, Name);
2361 }
2362
2363 CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
2364 ArrayRef<OperandBundleDef> OpBundles,
2365 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2366 CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
2367 if (IsFPConstrained)
2368 setConstrainedFPCallAttr(CI);
2369 if (isa<FPMathOperator>(CI))
2370 setFPAttrs(CI, FPMathTag, FMF);
2371 return Insert(CI, Name);
2372 }
2373
2374 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
2375 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2376 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
2377 FPMathTag);
2378 }
2379
2380 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
2381 ArrayRef<OperandBundleDef> OpBundles,
2382 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2383 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
2384 OpBundles, Name, FPMathTag);
2385 }
2386
2387 CallInst *CreateConstrainedFPCall(
2388 Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
2389 Optional<RoundingMode> Rounding = None,
2390 Optional<fp::ExceptionBehavior> Except = None);
2391
2392 Value *CreateSelect(Value *C, Value *True, Value *False,
2393 const Twine &Name = "", Instruction *MDFrom = nullptr);
2394
2395 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
2396 return Insert(new VAArgInst(List, Ty), Name);
2397 }
2398
2399 Value *CreateExtractElement(Value *Vec, Value *Idx,
2400 const Twine &Name = "") {
2401 if (auto *VC = dyn_cast<Constant>(Vec))
2402 if (auto *IC = dyn_cast<Constant>(Idx))
2403 return Insert(Folder.CreateExtractElement(VC, IC), Name);
2404 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
2405 }
2406
2407 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
2408 const Twine &Name = "") {
2409 return CreateExtractElement(Vec, getInt64(Idx), Name);
2410 }
2411
2412 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
2413 const Twine &Name = "") {
2414 if (auto *VC = dyn_cast<Constant>(Vec))
2415 if (auto *NC = dyn_cast<Constant>(NewElt))
2416 if (auto *IC = dyn_cast<Constant>(Idx))
2417 return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
2418 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
2419 }
2420
2421 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
2422 const Twine &Name = "") {
2423 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
2424 }
2425
2426 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
2427 const Twine &Name = "") {
2428 SmallVector<int, 16> IntMask;
2429 ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
2430 return CreateShuffleVector(V1, V2, IntMask, Name);
2431 }
2432
2433 LLVM_ATTRIBUTE_DEPRECATED(Value *CreateShuffleVector(Value *V1, Value *V2,[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector
(Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine
&Name = "")
2434 ArrayRef<uint32_t> Mask,[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector
(Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine
&Name = "")
2435 const Twine &Name = ""),[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector
(Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine
&Name = "")
2436 "Pass indices as 'int' instead")[[deprecated("Pass indices as 'int' instead")]] Value *CreateShuffleVector
(Value *V1, Value *V2, ArrayRef<uint32_t> Mask, const Twine
&Name = "")
{
2437 SmallVector<int, 16> IntMask;
2438 IntMask.assign(Mask.begin(), Mask.end());
2439 return CreateShuffleVector(V1, V2, IntMask, Name);
2440 }
2441
2442 /// See class ShuffleVectorInst for a description of the mask representation.
2443 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
2444 const Twine &Name = "") {
2445 if (auto *V1C = dyn_cast<Constant>(V1))
2446 if (auto *V2C = dyn_cast<Constant>(V2))
2447 return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name);
2448 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
2449 }
2450
2451 /// Create a unary shuffle. The second vector operand of the IR instruction
2452 /// is poison.
2453 Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
2454 const Twine &Name = "") {
2455 return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
2456 }
2457
2458 Value *CreateExtractValue(Value *Agg,
2459 ArrayRef<unsigned> Idxs,
2460 const Twine &Name = "") {
2461 if (auto *AggC = dyn_cast<Constant>(Agg))
2462 return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
2463 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
2464 }
2465
2466 Value *CreateInsertValue(Value *Agg, Value *Val,
2467 ArrayRef<unsigned> Idxs,
2468 const Twine &Name = "") {
2469 if (auto *AggC = dyn_cast<Constant>(Agg))
2470 if (auto *ValC = dyn_cast<Constant>(Val))
2471 return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
2472 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
2473 }
2474
2475 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
2476 const Twine &Name = "") {
2477 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
2478 }
2479
2480 Value *CreateFreeze(Value *V, const Twine &Name = "") {
2481 return Insert(new FreezeInst(V), Name);
2482 }
2483
2484 //===--------------------------------------------------------------------===//
2485 // Utility creation methods
2486 //===--------------------------------------------------------------------===//
2487
2488 /// Return an i1 value testing if \p Arg is null.
2489 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
2490 return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
2491 Name);
2492 }
2493
2494 /// Return an i1 value testing if \p Arg is not null.
2495 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
2496 return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
2497 Name);
2498 }
2499
2500 /// Return the i64 difference between two pointer values, dividing out
2501 /// the size of the pointed-to objects.
2502 ///
2503 /// This is intended to implement C-style pointer subtraction. As such, the
2504 /// pointers must be appropriately aligned for their element types and
2505 /// pointing into the same object.
2506 Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "");
2507
2508 /// Create a launder.invariant.group intrinsic call. If Ptr type is
2509 /// different from pointer to i8, it's casted to pointer to i8 in the same
2510 /// address space before call and casted back to Ptr type after call.
2511 Value *CreateLaunderInvariantGroup(Value *Ptr);
2512
2513 /// \brief Create a strip.invariant.group intrinsic call. If Ptr type is
2514 /// different from pointer to i8, it's casted to pointer to i8 in the same
2515 /// address space before call and casted back to Ptr type after call.
2516 Value *CreateStripInvariantGroup(Value *Ptr);
2517
2518 /// Return a vector value that contains the vector V reversed
2519 Value *CreateVectorReverse(Value *V, const Twine &Name = "");
2520
2521 /// Return a vector splice intrinsic if using scalable vectors, otherwise
2522 /// return a shufflevector. If the immediate is positive, a vector is
2523 /// extracted from concat(V1, V2), starting at Imm. If the immediate
2524 /// is negative, we extract -Imm elements from V1 and the remaining
2525 /// elements from V2. Imm is a signed integer in the range
2526 /// -VL <= Imm < VL (where VL is the runtime vector length of the
2527 /// source/result vector)
2528 Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
2529 const Twine &Name = "");
2530
2531 /// Return a vector value that contains \arg V broadcasted to \p
2532 /// NumElts elements.
2533 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
2534
2535 /// Return a vector value that contains \arg V broadcasted to \p
2536 /// EC elements.
2537 Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");
2538
2539 /// Return a value that has been extracted from a larger integer type.
2540 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
2541 IntegerType *ExtractedTy, uint64_t Offset,
2542 const Twine &Name);
2543
2544 Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
2545 unsigned Dimension, unsigned LastIndex,
2546 MDNode *DbgInfo);
2547
2548 Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
2549 MDNode *DbgInfo);
2550
2551 Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
2552 unsigned Index, unsigned FieldIndex,
2553 MDNode *DbgInfo);
2554
2555private:
2556 /// Helper function that creates an assume intrinsic call that
2557 /// represents an alignment assumption on the provided pointer \p PtrValue
2558 /// with offset \p OffsetValue and alignment value \p AlignValue.
2559 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
2560 Value *PtrValue, Value *AlignValue,
2561 Value *OffsetValue);
2562
2563public:
2564 /// Create an assume intrinsic call that represents an alignment
2565 /// assumption on the provided pointer.
2566 ///
2567 /// An optional offset can be provided, and if it is provided, the offset
2568 /// must be subtracted from the provided pointer to get the pointer with the
2569 /// specified alignment.
2570 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2571 unsigned Alignment,
2572 Value *OffsetValue = nullptr);
2573
2574 /// Create an assume intrinsic call that represents an alignment
2575 /// assumption on the provided pointer.
2576 ///
2577 /// An optional offset can be provided, and if it is provided, the offset
2578 /// must be subtracted from the provided pointer to get the pointer with the
2579 /// specified alignment.
2580 ///
2581 /// This overload handles the condition where the Alignment is dependent
2582 /// on an existing value rather than a static value.
2583 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2584 Value *Alignment,
2585 Value *OffsetValue = nullptr);
2586};
2587
2588/// This provides a uniform API for creating instructions and inserting
2589/// them into a basic block: either at the end of a BasicBlock, or at a specific
2590/// iterator location in a block.