Bug Summary

File: llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
Warning: line 1275, column 20
Called C++ object pointer is null
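
For orientation, this diagnostic means the analyzer found an execution path on which a member function is called through a pointer that is known to be null at that point. A minimal, hypothetical C++ sketch of the pattern (illustrative only, not the code actually flagged at line 1275) looks like this:

  #include "llvm/IR/BasicBlock.h"
  using namespace llvm;

  void sketch(BasicBlock *BB) {
    // getSinglePredecessor() returns nullptr when BB does not have exactly
    // one predecessor, and the analyzer tracks that possibility.
    BasicBlock *Pred = BB->getSinglePredecessor();
    // Calling a member function through the possibly-null result without a
    // guard is what triggers "Called C++ object pointer is null".
    Instruction *Term = Pred->getTerminator();
    (void)Term;
  }

Guarding the call with an explicit null check (or asserting the invariant that rules out null) is the usual way to silence the report.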

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name OMPIRBuilder.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Frontend/OpenMP -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP -I include -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -ferror-limit 19 -fvisibility-inlines-hidden -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-26-234817-15343-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
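
The line above is the exact -cc1 invocation the analyzer recorded for this translation unit. As a rough, hypothetical way to re-run the same analysis by hand (the -I and -D flags recorded above are still required), a driver-level form would be something like:

  clang --analyze -std=c++14 \
      /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp

In practice these reports come from wrapping the build with scan-build, which injects the checker and output options shown above automatically.
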
1//===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file implements the OpenMPIRBuilder class, which is used as a
11/// convenient way to create LLVM instructions for OpenMP directives.
12///
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
16#include "llvm/ADT/StringRef.h"
17#include "llvm/ADT/Triple.h"
18#include "llvm/Analysis/AssumptionCache.h"
19#include "llvm/Analysis/CodeMetrics.h"
20#include "llvm/Analysis/OptimizationRemarkEmitter.h"
21#include "llvm/Analysis/ScalarEvolution.h"
22#include "llvm/Analysis/TargetLibraryInfo.h"
23#include "llvm/IR/CFG.h"
24#include "llvm/IR/DebugInfo.h"
25#include "llvm/IR/IRBuilder.h"
26#include "llvm/IR/MDBuilder.h"
27#include "llvm/IR/PassManager.h"
28#include "llvm/IR/Value.h"
29#include "llvm/Support/CommandLine.h"
30#include "llvm/Support/Error.h"
31#include "llvm/Support/TargetRegistry.h"
32#include "llvm/Target/TargetMachine.h"
33#include "llvm/Target/TargetOptions.h"
34#include "llvm/Transforms/Utils/BasicBlockUtils.h"
35#include "llvm/Transforms/Utils/CodeExtractor.h"
36#include "llvm/Transforms/Utils/LoopPeel.h"
37#include "llvm/Transforms/Utils/ModuleUtils.h"
38#include "llvm/Transforms/Utils/UnrollLoop.h"
39
40#include <sstream>
41
42#define DEBUG_TYPE "openmp-ir-builder"
43
44using namespace llvm;
45using namespace omp;
46
47static cl::opt<bool>
48 OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden,
49 cl::desc("Use optimistic attributes describing "
50 "'as-if' properties of runtime calls."),
51 cl::init(false));
52
53static cl::opt<double> UnrollThresholdFactor(
54 "openmp-ir-builder-unroll-threshold-factor", cl::Hidden,
55 cl::desc("Factor for the unroll threshold to account for code "
56 "simplifications still taking place"),
57 cl::init(1.5));
58
59void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
60 LLVMContext &Ctx = Fn.getContext();
61
62 // Get the function's current attributes.
63 auto Attrs = Fn.getAttributes();
64 auto FnAttrs = Attrs.getFnAttrs();
65 auto RetAttrs = Attrs.getRetAttrs();
66 SmallVector<AttributeSet, 4> ArgAttrs;
67 for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo)
68 ArgAttrs.emplace_back(Attrs.getParamAttrs(ArgNo));
69
70#define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
71#include "llvm/Frontend/OpenMP/OMPKinds.def"
72
73 // Add attributes to the function declaration.
74 switch (FnID) {
75#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets) \
76 case Enum: \
77 FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet); \
78 RetAttrs = RetAttrs.addAttributes(Ctx, RetAttrSet); \
79 for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo) \
80 ArgAttrs[ArgNo] = \
81 ArgAttrs[ArgNo].addAttributes(Ctx, ArgAttrSets[ArgNo]); \
82 Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs)); \
83 break;
84#include "llvm/Frontend/OpenMP/OMPKinds.def"
85 default:
86 // Attributes are optional.
87 break;
88 }
89}
90
91FunctionCallee
92OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) {
93 FunctionType *FnTy = nullptr;
94 Function *Fn = nullptr;
95
96 // Try to find the declaration in the module first.
97 switch (FnID) {
98#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) \
99 case Enum: \
100 FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__}, \
101 IsVarArg); \
102 Fn = M.getFunction(Str); \
103 break;
104#include "llvm/Frontend/OpenMP/OMPKinds.def"
105 }
106
107 if (!Fn) {
108 // Create a new declaration if we need one.
109 switch (FnID) {
110#define OMP_RTL(Enum, Str, ...) \
111 case Enum: \
112 Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M); \
113 break;
114#include "llvm/Frontend/OpenMP/OMPKinds.def"
115 }
116
117 // Add information if the runtime function takes a callback function
118 if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) {
119 if (!Fn->hasMetadata(LLVMContext::MD_callback)) {
120 LLVMContext &Ctx = Fn->getContext();
121 MDBuilder MDB(Ctx);
122 // Annotate the callback behavior of the runtime function:
123 // - The callback callee is argument number 2 (microtask).
124 // - The first two arguments of the callback callee are unknown (-1).
125 // - All variadic arguments to the runtime function are passed to the
126 // callback callee.
127 Fn->addMetadata(
128 LLVMContext::MD_callback,
129 *MDNode::get(Ctx, {MDB.createCallbackEncoding(
130 2, {-1, -1}, /* VarArgsArePassed */ true)}));
131 }
132 }
133
134 LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()
135 << " with type " << *Fn->getFunctionType() << "\n");
136 addAttributes(FnID, *Fn);
137
138 } else {
139 LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()
140 << " with type " << *Fn->getFunctionType() << "\n");
141 }
142
143 assert(Fn && "Failed to create OpenMP runtime function");
144
145 // Cast the function to the expected type if necessary
146 Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo());
147 return {FnTy, C};
148}
149
150Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) {
151 FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID);
152 auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee());
153 assert(Fn && "Failed to create OpenMP runtime function pointer");
154 return Fn;
155}
156
157void OpenMPIRBuilder::initialize() { initializeTypes(M); }
158
159void OpenMPIRBuilder::finalize(Function *Fn, bool AllowExtractorSinking) {
160 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
161 SmallVector<BasicBlock *, 32> Blocks;
162 SmallVector<OutlineInfo, 16> DeferredOutlines;
163 for (OutlineInfo &OI : OutlineInfos) {
164 // Skip functions that have not finalized yet; may happen with nested
165 // function generation.
166 if (Fn && OI.getFunction() != Fn) {
167 DeferredOutlines.push_back(OI);
168 continue;
169 }
170
171 ParallelRegionBlockSet.clear();
172 Blocks.clear();
173 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
174
175 Function *OuterFn = OI.getFunction();
176 CodeExtractorAnalysisCache CEAC(*OuterFn);
177 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
178 /* AggregateArgs */ false,
179 /* BlockFrequencyInfo */ nullptr,
180 /* BranchProbabilityInfo */ nullptr,
181 /* AssumptionCache */ nullptr,
182 /* AllowVarArgs */ true,
183 /* AllowAlloca */ true,
184 /* Suffix */ ".omp_par");
185
186 LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n");
187 LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()
188 << " Exit: " << OI.ExitBB->getName() << "\n");
189 assert(Extractor.isEligible() &&
190 "Expected OpenMP outlining to be possible!");
191
192 Function *OutlinedFn = Extractor.extractCodeRegion(CEAC);
193
194 LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n");
195 LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n");
196 assert(OutlinedFn->getReturnType()->isVoidTy() &&
197 "OpenMP outlined functions should not return a value!");
198
199 // For compatibility with the clang CG we move the outlined function after the
200 // one with the parallel region.
201 OutlinedFn->removeFromParent();
202 M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn);
203
204 // Remove the artificial entry introduced by the extractor right away, we
205 // made our own entry block after all.
206 {
207 BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock();
208 assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB);
209 assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry);
210 if (AllowExtractorSinking) {
211 // Move instructions from the to-be-deleted ArtificialEntry to the entry
212 // basic block of the parallel region. CodeExtractor may have sunk
213 // allocas/bitcasts for values that are solely used in the outlined
214 // region and do not escape.
215 assert(!ArtificialEntry.empty() &&
216 "Expected instructions to sink in the outlined region");
217 for (BasicBlock::iterator It = ArtificialEntry.begin(),
218 End = ArtificialEntry.end();
219 It != End;) {
220 Instruction &I = *It;
221 It++;
222
223 if (I.isTerminator())
224 continue;
225
226 I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
227 }
228 }
229 OI.EntryBB->moveBefore(&ArtificialEntry);
230 ArtificialEntry.eraseFromParent();
231 }
232 assert(&OutlinedFn->getEntryBlock() == OI.EntryBB);
233 assert(OutlinedFn && OutlinedFn->getNumUses() == 1);
234
235 // Run a user callback, e.g. to add attributes.
236 if (OI.PostOutlineCB)
237 OI.PostOutlineCB(*OutlinedFn);
238 }
239
240 // Remove work items that have been completed.
241 OutlineInfos = std::move(DeferredOutlines);
242}
243
244OpenMPIRBuilder::~OpenMPIRBuilder() {
245 assert(OutlineInfos.empty() && "There must be no outstanding outlinings");
246}
247
248GlobalValue *OpenMPIRBuilder::createDebugKind(unsigned DebugKind) {
249 IntegerType *I32Ty = Type::getInt32Ty(M.getContext());
250 auto *GV = new GlobalVariable(
251 M, I32Ty,
252 /* isConstant = */ true, GlobalValue::WeakODRLinkage,
253 ConstantInt::get(I32Ty, DebugKind), "__omp_rtl_debug_kind");
254
255 return GV;
256}
257
258Value *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
259 IdentFlag LocFlags,
260 unsigned Reserve2Flags) {
261 // Enable "C-mode".
262 LocFlags |= OMP_IDENT_FLAG_KMPC;
263
264 Value *&Ident =
265 IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}];
266 if (!Ident) {
267 Constant *I32Null = ConstantInt::getNullValue(Int32);
268 Constant *IdentData[] = {
269 I32Null, ConstantInt::get(Int32, uint32_t(LocFlags)),
270 ConstantInt::get(Int32, Reserve2Flags), I32Null, SrcLocStr};
271 Constant *Initializer =
272 ConstantStruct::get(OpenMPIRBuilder::Ident, IdentData);
273
274 // Look for existing encoding of the location + flags, not needed but
275 // minimizes the difference to the existing solution while we transition.
276 for (GlobalVariable &GV : M.getGlobalList())
277 if (GV.getValueType() == OpenMPIRBuilder::Ident && GV.hasInitializer())
278 if (GV.getInitializer() == Initializer)
279 return Ident = &GV;
280
281 auto *GV = new GlobalVariable(M, OpenMPIRBuilder::Ident,
282 /* isConstant = */ true,
283 GlobalValue::PrivateLinkage, Initializer);
284 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
285 GV->setAlignment(Align(8));
286 Ident = GV;
287 }
288 return Builder.CreatePointerCast(Ident, IdentPtr);
289}
290
291Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr) {
292 Constant *&SrcLocStr = SrcLocStrMap[LocStr];
293 if (!SrcLocStr) {
294 Constant *Initializer =
295 ConstantDataArray::getString(M.getContext(), LocStr);
296
297 // Look for existing encoding of the location, not needed but minimizes the
298 // difference to the existing solution while we transition.
299 for (GlobalVariable &GV : M.getGlobalList())
300 if (GV.isConstant() && GV.hasInitializer() &&
301 GV.getInitializer() == Initializer)
302 return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr);
303
304 SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "",
305 /* AddressSpace */ 0, &M);
306 }
307 return SrcLocStr;
308}
309
310Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName,
311 StringRef FileName,
312 unsigned Line,
313 unsigned Column) {
314 SmallString<128> Buffer;
315 Buffer.push_back(';');
316 Buffer.append(FileName);
317 Buffer.push_back(';');
318 Buffer.append(FunctionName);
319 Buffer.push_back(';');
320 Buffer.append(std::to_string(Line));
321 Buffer.push_back(';');
322 Buffer.append(std::to_string(Column));
323 Buffer.push_back(';');
324 Buffer.push_back(';');
325 return getOrCreateSrcLocStr(Buffer.str());
326}
327
328Constant *OpenMPIRBuilder::getOrCreateDefaultSrcLocStr() {
329 return getOrCreateSrcLocStr(";unknown;unknown;0;0;;");
330}
331
332Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL, Function *F) {
333 DILocation *DIL = DL.get();
334 if (!DIL)
335 return getOrCreateDefaultSrcLocStr();
336 StringRef FileName = M.getName();
337 if (DIFile *DIF = DIL->getFile())
338 if (Optional<StringRef> Source = DIF->getSource())
339 FileName = *Source;
340 StringRef Function = DIL->getScope()->getSubprogram()->getName();
341 if (Function.empty() && F)
342 Function = F->getName();
343 return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(),
344 DIL->getColumn());
345}
346
347Constant *
348OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc) {
349 return getOrCreateSrcLocStr(Loc.DL, Loc.IP.getBlock()->getParent());
350}
351
352Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) {
353 return Builder.CreateCall(
354 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident,
355 "omp_global_thread_num");
356}
357
358OpenMPIRBuilder::InsertPointTy
359OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK,
360 bool ForceSimpleCall, bool CheckCancelFlag) {
361 if (!updateToLocation(Loc))
362 return Loc.IP;
363 return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag);
364}
365
366OpenMPIRBuilder::InsertPointTy
367OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind,
368 bool ForceSimpleCall, bool CheckCancelFlag) {
369 // Build call __kmpc_cancel_barrier(loc, thread_id) or
370 // __kmpc_barrier(loc, thread_id);
371
372 IdentFlag BarrierLocFlags;
373 switch (Kind) {
374 case OMPD_for:
375 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR;
376 break;
377 case OMPD_sections:
378 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS;
379 break;
380 case OMPD_single:
381 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE;
382 break;
383 case OMPD_barrier:
384 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL;
385 break;
386 default:
387 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL;
388 break;
389 }
390
391 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
392 Value *Args[] = {getOrCreateIdent(SrcLocStr, BarrierLocFlags),
393 getOrCreateThreadID(getOrCreateIdent(SrcLocStr))};
394
395 // If we are in a cancellable parallel region, barriers are cancellation
396 // points.
397 // TODO: Check why we would force simple calls or to ignore the cancel flag.
398 bool UseCancelBarrier =
399 !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel);
400
401 Value *Result =
402 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(
403 UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier
404 : OMPRTL___kmpc_barrier),
405 Args);
406
407 if (UseCancelBarrier && CheckCancelFlag)
408 emitCancelationCheckImpl(Result, OMPD_parallel);
409
410 return Builder.saveIP();
411}
412
413OpenMPIRBuilder::InsertPointTy
414OpenMPIRBuilder::createCancel(const LocationDescription &Loc,
415 Value *IfCondition,
416 omp::Directive CanceledDirective) {
417 if (!updateToLocation(Loc))
418 return Loc.IP;
419
420 // LLVM utilities like blocks with terminators.
421 auto *UI = Builder.CreateUnreachable();
422
423 Instruction *ThenTI = UI, *ElseTI = nullptr;
424 if (IfCondition)
425 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
426 Builder.SetInsertPoint(ThenTI);
427
428 Value *CancelKind = nullptr;
429 switch (CanceledDirective) {
430#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \
431 case DirectiveEnum: \
432 CancelKind = Builder.getInt32(Value); \
433 break;
434#include "llvm/Frontend/OpenMP/OMPKinds.def"
435 default:
436 llvm_unreachable("Unknown cancel kind!");
437 }
438
439 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
440 Value *Ident = getOrCreateIdent(SrcLocStr);
441 Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind};
442 Value *Result = Builder.CreateCall(
443 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args);
444 auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) {
445 if (CanceledDirective == OMPD_parallel) {
446 IRBuilder<>::InsertPointGuard IPG(Builder);
447 Builder.restoreIP(IP);
448 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
449 omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
450 /* CheckCancelFlag */ false);
451 }
452 };
453
454 // The actual cancel logic is shared with others, e.g., cancel_barriers.
455 emitCancelationCheckImpl(Result, CanceledDirective, ExitCB);
456
457 // Update the insertion point and remove the terminator we introduced.
458 Builder.SetInsertPoint(UI->getParent());
459 UI->eraseFromParent();
460
461 return Builder.saveIP();
462}
463
464void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag,
465 omp::Directive CanceledDirective,
466 FinalizeCallbackTy ExitCB) {
467 assert(isLastFinalizationInfoCancellable(CanceledDirective) &&
468 "Unexpected cancellation!");
469
470 // For a cancel barrier we create two new blocks.
471 BasicBlock *BB = Builder.GetInsertBlock();
472 BasicBlock *NonCancellationBlock;
473 if (Builder.GetInsertPoint() == BB->end()) {
474 // TODO: This branch will not be needed once we moved to the
475 // OpenMPIRBuilder codegen completely.
476 NonCancellationBlock = BasicBlock::Create(
477 BB->getContext(), BB->getName() + ".cont", BB->getParent());
478 } else {
479 NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint());
480 BB->getTerminator()->eraseFromParent();
481 Builder.SetInsertPoint(BB);
482 }
483 BasicBlock *CancellationBlock = BasicBlock::Create(
484 BB->getContext(), BB->getName() + ".cncl", BB->getParent());
485
486 // Jump to them based on the return value.
487 Value *Cmp = Builder.CreateIsNull(CancelFlag);
488 Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock,
489 /* TODO weight */ nullptr, nullptr);
490
491 // From the cancellation block we finalize all variables and go to the
492 // post finalization block that is known to the FiniCB callback.
493 Builder.SetInsertPoint(CancellationBlock);
494 if (ExitCB)
495 ExitCB(Builder.saveIP());
496 auto &FI = FinalizationStack.back();
497 FI.FiniCB(Builder.saveIP());
498
499 // The continuation block is where code generation continues.
500 Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
501}
502
503IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
504 const LocationDescription &Loc, InsertPointTy OuterAllocaIP,
505 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
506 FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads,
507 omp::ProcBindKind ProcBind, bool IsCancellable) {
508 if (!updateToLocation(Loc))
509 return Loc.IP;
510
511 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
512 Value *Ident = getOrCreateIdent(SrcLocStr);
513 Value *ThreadID = getOrCreateThreadID(Ident);
514
515 if (NumThreads) {
516 // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads)
517 Value *Args[] = {
518 Ident, ThreadID,
519 Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)};
520 Builder.CreateCall(
521 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args);
522 }
523
524 if (ProcBind != OMP_PROC_BIND_default) {
525 // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind)
526 Value *Args[] = {
527 Ident, ThreadID,
528 ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)};
529 Builder.CreateCall(
530 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args);
531 }
532
533 BasicBlock *InsertBB = Builder.GetInsertBlock();
534 Function *OuterFn = InsertBB->getParent();
535
536 // Save the outer alloca block because the insertion iterator may get
537 // invalidated and we still need this later.
538 BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock();
539
540 // Vector to remember instructions we used only during the modeling but which
541 // we want to delete at the end.
542 SmallVector<Instruction *, 4> ToBeDeleted;
543
544 // Change the location to the outer alloca insertion point to create and
545 // initialize the allocas we pass into the parallel region.
546 Builder.restoreIP(OuterAllocaIP);
547 AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr");
548 AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr");
549
550 // If there is an if condition we actually use the TIDAddr and ZeroAddr in the
551 // program, otherwise we only need them for modeling purposes to get the
552 // associated arguments in the outlined function. In the former case,
553 // initialize the allocas properly, in the latter case, delete them later.
554 if (IfCondition) {
555 Builder.CreateStore(Constant::getNullValue(Int32), TIDAddr);
556 Builder.CreateStore(Constant::getNullValue(Int32), ZeroAddr);
557 } else {
558 ToBeDeleted.push_back(TIDAddr);
559 ToBeDeleted.push_back(ZeroAddr);
560 }
561
562 // Create an artificial insertion point that will also ensure the blocks we
563 // are about to split are not degenerated.
564 auto *UI = new UnreachableInst(Builder.getContext(), InsertBB);
565
566 Instruction *ThenTI = UI, *ElseTI = nullptr;
567 if (IfCondition)
568 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
569
570 BasicBlock *ThenBB = ThenTI->getParent();
571 BasicBlock *PRegEntryBB = ThenBB->splitBasicBlock(ThenTI, "omp.par.entry");
572 BasicBlock *PRegBodyBB =
573 PRegEntryBB->splitBasicBlock(ThenTI, "omp.par.region");
574 BasicBlock *PRegPreFiniBB =
575 PRegBodyBB->splitBasicBlock(ThenTI, "omp.par.pre_finalize");
576 BasicBlock *PRegExitBB =
577 PRegPreFiniBB->splitBasicBlock(ThenTI, "omp.par.exit");
578
579 auto FiniCBWrapper = [&](InsertPointTy IP) {
580 // Hide "open-ended" blocks from the given FiniCB by setting the right jump
581 // target to the region exit block.
582 if (IP.getBlock()->end() == IP.getPoint()) {
583 IRBuilder<>::InsertPointGuard IPG(Builder);
584 Builder.restoreIP(IP);
585 Instruction *I = Builder.CreateBr(PRegExitBB);
586 IP = InsertPointTy(I->getParent(), I->getIterator());
587 }
588 assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 &&
589 IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB &&
590 "Unexpected insertion point for finalization call!");
591 return FiniCB(IP);
592 };
593
594 FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable});
595
596 // Generate the privatization allocas in the block that will become the entry
597 // of the outlined function.
598 Builder.SetInsertPoint(PRegEntryBB->getTerminator());
599 InsertPointTy InnerAllocaIP = Builder.saveIP();
600
601 AllocaInst *PrivTIDAddr =
602 Builder.CreateAlloca(Int32, nullptr, "tid.addr.local");
603 Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid");
604
605 // Add some fake uses for OpenMP provided arguments.
606 ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use"));
607 Instruction *ZeroAddrUse =
608 Builder.CreateLoad(Int32, ZeroAddr, "zero.addr.use");
609 ToBeDeleted.push_back(ZeroAddrUse);
610
611 // ThenBB
612 // |
613 // V
614 // PRegionEntryBB <- Privatization allocas are placed here.
615 // |
616 // V
617 // PRegionBodyBB <- BodeGen is invoked here.
618 // |
619 // V
620 // PRegPreFiniBB <- The block we will start finalization from.
621 // |
622 // V
623 // PRegionExitBB <- A common exit to simplify block collection.
624 //
625
626 LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n");
627
628 // Let the caller create the body.
629 assert(BodyGenCB && "Expected body generation callback!");
630 InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin());
631 BodyGenCB(InnerAllocaIP, CodeGenIP, *PRegPreFiniBB);
632
633 LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n");
634
635 FunctionCallee RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call);
636 if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
637 if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
638 llvm::LLVMContext &Ctx = F->getContext();
639 MDBuilder MDB(Ctx);
640 // Annotate the callback behavior of the __kmpc_fork_call:
641 // - The callback callee is argument number 2 (microtask).
642 // - The first two arguments of the callback callee are unknown (-1).
643 // - All variadic arguments to the __kmpc_fork_call are passed to the
644 // callback callee.
645 F->addMetadata(
646 llvm::LLVMContext::MD_callback,
647 *llvm::MDNode::get(
648 Ctx, {MDB.createCallbackEncoding(2, {-1, -1},
649 /* VarArgsArePassed */ true)}));
650 }
651 }
652
653 OutlineInfo OI;
654 OI.PostOutlineCB = [=](Function &OutlinedFn) {
655 // Add some known attributes.
656 OutlinedFn.addParamAttr(0, Attribute::NoAlias);
657 OutlinedFn.addParamAttr(1, Attribute::NoAlias);
658 OutlinedFn.addFnAttr(Attribute::NoUnwind);
659 OutlinedFn.addFnAttr(Attribute::NoRecurse);
660
661 assert(OutlinedFn.arg_size() >= 2 &&
662 "Expected at least tid and bounded tid as arguments");
663 unsigned NumCapturedVars =
664 OutlinedFn.arg_size() - /* tid & bounded tid */ 2;
665
666 CallInst *CI = cast<CallInst>(OutlinedFn.user_back());
667 CI->getParent()->setName("omp_parallel");
668 Builder.SetInsertPoint(CI);
669
670 // Build call __kmpc_fork_call(Ident, n, microtask, var1, .., varn);
671 Value *ForkCallArgs[] = {
672 Ident, Builder.getInt32(NumCapturedVars),
673 Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)};
674
675 SmallVector<Value *, 16> RealArgs;
676 RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs));
677 RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end());
678
679 Builder.CreateCall(RTLFn, RealArgs);
680
681 LLVM_DEBUG(dbgs() << "With fork_call placed: "
682 << *Builder.GetInsertBlock()->getParent() << "\n");
683
684 InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end());
685
686 // Initialize the local TID stack location with the argument value.
687 Builder.SetInsertPoint(PrivTID);
688 Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin();
689 Builder.CreateStore(Builder.CreateLoad(Int32, OutlinedAI), PrivTIDAddr);
690
691 // If no "if" clause was present we do not need the call created during
692 // outlining, otherwise we reuse it in the serialized parallel region.
693 if (!ElseTI) {
694 CI->eraseFromParent();
695 } else {
696
697 // If an "if" clause was present we are now generating the serialized
698 // version into the "else" branch.
699 Builder.SetInsertPoint(ElseTI);
700
701 // Build calls __kmpc_serialized_parallel(&Ident, GTid);
702 Value *SerializedParallelCallArgs[] = {Ident, ThreadID};
703 Builder.CreateCall(
704 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_serialized_parallel),
705 SerializedParallelCallArgs);
706
707 // OutlinedFn(&GTid, &zero, CapturedStruct);
708 CI->removeFromParent();
709 Builder.Insert(CI);
710
711 // __kmpc_end_serialized_parallel(&Ident, GTid);
712 Value *EndArgs[] = {Ident, ThreadID};
713 Builder.CreateCall(
714 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_serialized_parallel),
715 EndArgs);
716
717 LLVM_DEBUG(dbgs() << "With serialized parallel region: "
718 << *Builder.GetInsertBlock()->getParent() << "\n");
719 }
720
721 for (Instruction *I : ToBeDeleted)
722 I->eraseFromParent();
723 };
724
725 // Adjust the finalization stack, verify the adjustment, and call the
726 // finalize function a last time to finalize values between the pre-fini
727 // block and the exit block if we left the parallel "the normal way".
728 auto FiniInfo = FinalizationStack.pop_back_val();
729 (void)FiniInfo;
730 assert(FiniInfo.DK == OMPD_parallel &&
731 "Unexpected finalization stack state!");
732
733 Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator();
734
735 InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator());
736 FiniCB(PreFiniIP);
737
738 OI.EntryBB = PRegEntryBB;
739 OI.ExitBB = PRegExitBB;
740
741 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
742 SmallVector<BasicBlock *, 32> Blocks;
743 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
744
745 // Ensure a single exit node for the outlined region by creating one.
746 // We might have multiple incoming edges to the exit now due to finalizations,
747 // e.g., cancel calls that cause the control flow to leave the region.
748 BasicBlock *PRegOutlinedExitBB = PRegExitBB;
749 PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt());
750 PRegOutlinedExitBB->setName("omp.par.outlined.exit");
751 Blocks.push_back(PRegOutlinedExitBB);
752
753 CodeExtractorAnalysisCache CEAC(*OuterFn);
754 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
755 /* AggregateArgs */ false,
756 /* BlockFrequencyInfo */ nullptr,
757 /* BranchProbabilityInfo */ nullptr,
758 /* AssumptionCache */ nullptr,
759 /* AllowVarArgs */ true,
760 /* AllowAlloca */ true,
761 /* Suffix */ ".omp_par");
762
763 // Find inputs to, outputs from the code region.
764 BasicBlock *CommonExit = nullptr;
765 SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands;
766 Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
767 Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands);
768
769 LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n");
770
771 FunctionCallee TIDRTLFn =
772 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num);
773
774 auto PrivHelper = [&](Value &V) {
775 if (&V == TIDAddr || &V == ZeroAddr)
776 return;
777
778 SetVector<Use *> Uses;
779 for (Use &U : V.uses())
780 if (auto *UserI = dyn_cast<Instruction>(U.getUser()))
781 if (ParallelRegionBlockSet.count(UserI->getParent()))
782 Uses.insert(&U);
783
784 // __kmpc_fork_call expects extra arguments as pointers. If the input
785 // already has a pointer type, everything is fine. Otherwise, store the
786 // value onto stack and load it back inside the to-be-outlined region. This
787 // will ensure only the pointer will be passed to the function.
788 // FIXME: if there are more than 15 trailing arguments, they must be
789 // additionally packed in a struct.
790 Value *Inner = &V;
791 if (!V.getType()->isPointerTy()) {
792 IRBuilder<>::InsertPointGuard Guard(Builder);
793 LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n");
794
795 Builder.restoreIP(OuterAllocaIP);
796 Value *Ptr =
797 Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded");
798
799 // Store to stack at end of the block that currently branches to the entry
800 // block of the to-be-outlined region.
801 Builder.SetInsertPoint(InsertBB,
802 InsertBB->getTerminator()->getIterator());
803 Builder.CreateStore(&V, Ptr);
804
805 // Load back next to allocations in the to-be-outlined region.
806 Builder.restoreIP(InnerAllocaIP);
807 Inner = Builder.CreateLoad(V.getType(), Ptr);
808 }
809
810 Value *ReplacementValue = nullptr;
811 CallInst *CI = dyn_cast<CallInst>(&V);
812 if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) {
813 ReplacementValue = PrivTID;
814 } else {
815 Builder.restoreIP(
816 PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue));
817 assert(ReplacementValue &&
818 "Expected copy/create callback to set replacement value!");
819 if (ReplacementValue == &V)
820 return;
821 }
822
823 for (Use *UPtr : Uses)
824 UPtr->set(ReplacementValue);
825 };
826
827 // Reset the inner alloca insertion as it will be used for loading the values
828 // wrapped into pointers before passing them into the to-be-outlined region.
829 // Configure it to insert immediately after the fake use of zero address so
830 // that they are available in the generated body and so that the
831 // OpenMP-related values (thread ID and zero address pointers) remain leading
832 // in the argument list.
833 InnerAllocaIP = IRBuilder<>::InsertPoint(
834 ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator());
835
836 // Reset the outer alloca insertion point to the entry of the relevant block
837 // in case it was invalidated.
838 OuterAllocaIP = IRBuilder<>::InsertPoint(
839 OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt());
840
841 for (Value *Input : Inputs) {
842 LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n");
843 PrivHelper(*Input);
844 }
845 LLVM_DEBUG({
846 for (Value *Output : Outputs)
847 LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n");
848 });
849 assert(Outputs.empty() &&
850 "OpenMP outlining should not produce live-out values!");
851
852 LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n");
853 LLVM_DEBUG({
854 for (auto *BB : Blocks)
855 dbgs() << " PBR: " << BB->getName() << "\n";
856 });
857
858 // Register the outlined info.
859 addOutlineInfo(std::move(OI));
860
861 InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end());
862 UI->eraseFromParent();
863
864 return AfterIP;
865}
866
867void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) {
868 // Build call void __kmpc_flush(ident_t *loc)
869 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
870 Value *Args[] = {getOrCreateIdent(SrcLocStr)};
871
872 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args);
873}
874
875void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) {
876 if (!updateToLocation(Loc))
877 return;
878 emitFlush(Loc);
879}
880
881void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) {
882 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
883 // global_tid);
884 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
885 Value *Ident = getOrCreateIdent(SrcLocStr);
886 Value *Args[] = {Ident, getOrCreateThreadID(Ident)};
887
888 // Ignore return result until untied tasks are supported.
889 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait),
890 Args);
891}
892
893void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) {
894 if (!updateToLocation(Loc))
895 return;
896 emitTaskwaitImpl(Loc);
897}
898
899void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) {
900 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
901 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
902 Value *Ident = getOrCreateIdent(SrcLocStr);
903 Constant *I32Null = ConstantInt::getNullValue(Int32);
904 Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null};
905
906 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield),
907 Args);
908}
909
910void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) {
911 if (!updateToLocation(Loc))
912 return;
913 emitTaskyieldImpl(Loc);
914}
915
916OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections(
917 const LocationDescription &Loc, InsertPointTy AllocaIP,
918 ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB,
919 FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) {
920 if (!updateToLocation(Loc))
921 return Loc.IP;
922
923 auto FiniCBWrapper = [&](InsertPointTy IP) {
924 if (IP.getBlock()->end() != IP.getPoint())
925 return FiniCB(IP);
926 // This must be done otherwise any nested constructs using FinalizeOMPRegion
927 // will fail because that function requires the Finalization Basic Block to
928 // have a terminator, which is already removed by EmitOMPRegionBody.
929 // IP is currently at cancelation block.
930 // We need to backtrack to the condition block to fetch
931 // the exit block and create a branch from cancelation
932 // to exit block.
933 IRBuilder<>::InsertPointGuard IPG(Builder);
934 Builder.restoreIP(IP);
935 auto *CaseBB = IP.getBlock()->getSinglePredecessor();
936 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
937 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
938 Instruction *I = Builder.CreateBr(ExitBB);
939 IP = InsertPointTy(I->getParent(), I->getIterator());
940 return FiniCB(IP);
941 };
942
943 FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable});
944
945 // Each section is emitted as a switch case
946 // Each finalization callback is handled from clang.EmitOMPSectionDirective()
947 // -> OMP.createSection() which generates the IR for each section
948 // Iterate through all sections and emit a switch construct:
949 // switch (IV) {
950 // case 0:
951 // <SectionStmt[0]>;
952 // break;
953 // ...
954 // case <NumSection> - 1:
955 // <SectionStmt[<NumSection> - 1]>;
956 // break;
957 // }
958 // ...
959 // section_loop.after:
960 // <FiniCB>;
961 auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) {
962 auto *CurFn = CodeGenIP.getBlock()->getParent();
963 auto *ForIncBB = CodeGenIP.getBlock()->getSingleSuccessor();
964 auto *ForExitBB = CodeGenIP.getBlock()
965 ->getSinglePredecessor()
966 ->getTerminator()
967 ->getSuccessor(1);
968 SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, ForIncBB);
969 Builder.restoreIP(CodeGenIP);
970 unsigned CaseNumber = 0;
971 for (auto SectionCB : SectionCBs) {
972 auto *CaseBB = BasicBlock::Create(M.getContext(),
973 "omp_section_loop.body.case", CurFn);
974 SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB);
975 Builder.SetInsertPoint(CaseBB);
976 SectionCB(InsertPointTy(), Builder.saveIP(), *ForExitBB);
977 CaseNumber++;
978 }
979 // remove the existing terminator from body BB since there can be no
980 // terminators after switch/case
981 CodeGenIP.getBlock()->getTerminator()->eraseFromParent();
982 };
983 // Loop body ends here
984 // LowerBound, UpperBound, and Stride for createCanonicalLoop
985 Type *I32Ty = Type::getInt32Ty(M.getContext());
986 Value *LB = ConstantInt::get(I32Ty, 0);
987 Value *UB = ConstantInt::get(I32Ty, SectionCBs.size());
988 Value *ST = ConstantInt::get(I32Ty, 1);
989 llvm::CanonicalLoopInfo *LoopInfo = createCanonicalLoop(
990 Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop");
991 InsertPointTy AfterIP =
992 applyStaticWorkshareLoop(Loc.DL, LoopInfo, AllocaIP, true);
993 BasicBlock *LoopAfterBB = AfterIP.getBlock();
994 Instruction *SplitPos = LoopAfterBB->getTerminator();
995 if (!isa_and_nonnull<BranchInst>(SplitPos))
996 SplitPos = new UnreachableInst(Builder.getContext(), LoopAfterBB);
997 // ExitBB after LoopAfterBB because LoopAfterBB is used for FinalizationCB,
998 // which requires a BB with branch
999 BasicBlock *ExitBB =
1000 LoopAfterBB->splitBasicBlock(SplitPos, "omp_sections.end");
1001 SplitPos->eraseFromParent();
1002
1003 // Apply the finalization callback in LoopAfterBB
1004 auto FiniInfo = FinalizationStack.pop_back_val();
1005 assert(FiniInfo.DK == OMPD_sections &&
1006 "Unexpected finalization stack state!");
1007 Builder.SetInsertPoint(LoopAfterBB->getTerminator());
1008 FiniInfo.FiniCB(Builder.saveIP());
1009 Builder.SetInsertPoint(ExitBB);
1010
1011 return Builder.saveIP();
1012}
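// A minimal host-language analogue of the lowering sketched in the comments
// above, assuming two hypothetical section bodies (runSection0/runSection1).
// It mirrors the switch-in-a-workshare-loop shape that createSections emits
// as IR; it is not the builder API itself.
#include <cstdio>

static void runSection0() { std::printf("section 0\n"); }
static void runSection1() { std::printf("section 1\n"); }

int main() {
  const unsigned NumSections = 2;                 // UB of the canonical loop
  for (unsigned IV = 0; IV < NumSections; ++IV) { // omp_section_loop
    switch (IV) {                                 // SwitchInst on the IndVar
    case 0: runSection0(); break;                 // omp_section_loop.body.case
    case 1: runSection1(); break;
    default: break;                               // default edge -> loop increment
    }
  }
  // The finalization callback runs in omp_section_loop.after, i.e. here.
  return 0;
}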
1013
1014OpenMPIRBuilder::InsertPointTy
1015OpenMPIRBuilder::createSection(const LocationDescription &Loc,
1016 BodyGenCallbackTy BodyGenCB,
1017 FinalizeCallbackTy FiniCB) {
1018 if (!updateToLocation(Loc))
1019 return Loc.IP;
1020
1021 auto FiniCBWrapper = [&](InsertPointTy IP) {
1022 if (IP.getBlock()->end() != IP.getPoint())
1023 return FiniCB(IP);
1024 // This must be done; otherwise any nested constructs using FinalizeOMPRegion
1025 // will fail because that function requires the finalization basic block to
1026 // have a terminator, which was already removed by EmitOMPRegionBody.
1027 // IP is currently at the cancellation block.
1028 // We need to backtrack to the condition block to fetch
1029 // the exit block and create a branch from the cancellation
1030 // block to the exit block.
1031 IRBuilder<>::InsertPointGuard IPG(Builder);
1032 Builder.restoreIP(IP);
1033 auto *CaseBB = Loc.IP.getBlock();
1034 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
1035 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
1036 Instruction *I = Builder.CreateBr(ExitBB);
1037 IP = InsertPointTy(I->getParent(), I->getIterator());
1038 return FiniCB(IP);
1039 };
1040
1041 Directive OMPD = Directive::OMPD_sections;
1042 // Since we are using Finalization Callback here, HasFinalize
1043 // and IsCancellable have to be true
1044 return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper,
1045 /*Conditional*/ false, /*hasFinalize*/ true,
1046 /*IsCancellable*/ true);
1047}
1048
1049/// Create a function with a unique name and a "void (i8*, i8*)" signature in
1050/// the given module and return it.
1051Function *getFreshReductionFunc(Module &M) {
1052 Type *VoidTy = Type::getVoidTy(M.getContext());
1053 Type *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
1054 auto *FuncTy =
1055 FunctionType::get(VoidTy, {Int8PtrTy, Int8PtrTy}, /* IsVarArg */ false);
1056 return Function::Create(FuncTy, GlobalVariable::InternalLinkage,
1057 M.getDataLayout().getDefaultGlobalsAddressSpace(),
1058 ".omp.reduction.func", &M);
1059}
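// A host-language sketch of the callback shape that the helper above creates:
// the runtime hands it two type-erased arrays of pointers to the partial
// values. The body is populated later by createReductions (see
// ReductionFuncBlock below); the names used here are hypothetical.
static void ompReductionFuncSketch(char *LHSArrayPtr, char *RHSArrayPtr) {
  // For each reduction element i: load the i-th pointer from both arrays,
  // combine the pointees with ReductionGen, and store the result back into
  // the LHS pointee. Left empty here; this only illustrates the signature.
  (void)LHSArrayPtr;
  (void)RHSArrayPtr;
}

int main() {
  char LHS[sizeof(void *)] = {}, RHS[sizeof(void *)] = {};
  ompReductionFuncSketch(LHS, RHS); // shape check only; the body is a no-op
  return 0;
}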
1060
1061OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createReductions(
1062 const LocationDescription &Loc, InsertPointTy AllocaIP,
1063 ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait) {
1064 for (const ReductionInfo &RI : ReductionInfos) {
1065 (void)RI;
1066 assert(RI.Variable && "expected non-null variable");
1067 assert(RI.PrivateVariable && "expected non-null private variable");
1068 assert(RI.ReductionGen && "expected non-null reduction generator callback");
1069 assert(RI.Variable->getType() == RI.PrivateVariable->getType() &&
1070        "expected variables and their private equivalents to have the same "
1071        "type");
1072 assert(RI.Variable->getType()->isPointerTy() &&
1073        "expected variables to be pointers");
1074 }
1075
1076 if (!updateToLocation(Loc))
1077 return InsertPointTy();
1078
1079 BasicBlock *InsertBlock = Loc.IP.getBlock();
1080 BasicBlock *ContinuationBlock =
1081 InsertBlock->splitBasicBlock(Loc.IP.getPoint(), "reduce.finalize");
1082 InsertBlock->getTerminator()->eraseFromParent();
1083
1084 // Create and populate array of type-erased pointers to private reduction
1085 // values.
1086 unsigned NumReductions = ReductionInfos.size();
1087 Type *RedArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumReductions);
1088 Builder.restoreIP(AllocaIP);
1089 Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array");
1090
1091 Builder.SetInsertPoint(InsertBlock, InsertBlock->end());
1092
1093 for (auto En : enumerate(ReductionInfos)) {
1094 unsigned Index = En.index();
1095 const ReductionInfo &RI = En.value();
1096 Value *RedArrayElemPtr = Builder.CreateConstInBoundsGEP2_64(
1097 RedArrayTy, RedArray, 0, Index, "red.array.elem." + Twine(Index));
1098 Value *Casted =
1099 Builder.CreateBitCast(RI.PrivateVariable, Builder.getInt8PtrTy(),
1100 "private.red.var." + Twine(Index) + ".casted");
1101 Builder.CreateStore(Casted, RedArrayElemPtr);
1102 }
1103
1104 // Emit a call to the runtime function that orchestrates the reduction.
1105 // Declare the reduction function in the process.
1106 Function *Func = Builder.GetInsertBlock()->getParent();
1107 Module *Module = Func->getParent();
1108 Value *RedArrayPtr =
1109 Builder.CreateBitCast(RedArray, Builder.getInt8PtrTy(), "red.array.ptr");
1110 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1111 bool CanGenerateAtomic =
1112 llvm::all_of(ReductionInfos, [](const ReductionInfo &RI) {
1113 return RI.AtomicReductionGen;
1114 });
1115 Value *Ident = getOrCreateIdent(
1116 SrcLocStr, CanGenerateAtomic ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE
1117 : IdentFlag(0));
1118 Value *ThreadId = getOrCreateThreadID(Ident);
1119 Constant *NumVariables = Builder.getInt32(NumReductions);
1120 const DataLayout &DL = Module->getDataLayout();
1121 unsigned RedArrayByteSize = DL.getTypeStoreSize(RedArrayTy);
1122 Constant *RedArraySize = Builder.getInt64(RedArrayByteSize);
1123 Function *ReductionFunc = getFreshReductionFunc(*Module);
1124 Value *Lock = getOMPCriticalRegionLock(".reduction");
1125 Function *ReduceFunc = getOrCreateRuntimeFunctionPtr(
1126 IsNoWait ? RuntimeFunction::OMPRTL___kmpc_reduce_nowait
1127 : RuntimeFunction::OMPRTL___kmpc_reduce);
1128 CallInst *ReduceCall =
1129 Builder.CreateCall(ReduceFunc,
1130 {Ident, ThreadId, NumVariables, RedArraySize,
1131 RedArrayPtr, ReductionFunc, Lock},
1132 "reduce");
1133
1134 // Create final reduction entry blocks for the atomic and non-atomic case.
1135 // Emit IR that dispatches control flow to one of the blocks based on whether
1136 // the reduction supports the atomic mode.
1137 BasicBlock *NonAtomicRedBlock =
1138 BasicBlock::Create(Module->getContext(), "reduce.switch.nonatomic", Func);
1139 BasicBlock *AtomicRedBlock =
1140 BasicBlock::Create(Module->getContext(), "reduce.switch.atomic", Func);
1141 SwitchInst *Switch =
1142 Builder.CreateSwitch(ReduceCall, ContinuationBlock, /* NumCases */ 2);
1143 Switch->addCase(Builder.getInt32(1), NonAtomicRedBlock);
1144 Switch->addCase(Builder.getInt32(2), AtomicRedBlock);
1145
1146 // Populate the non-atomic reduction using the elementwise reduction function.
1147 // This loads the elements from the global and private variables and reduces
1148 // them before storing back the result to the global variable.
1149 Builder.SetInsertPoint(NonAtomicRedBlock);
1150 for (auto En : enumerate(ReductionInfos)) {
1151 const ReductionInfo &RI = En.value();
1152 Type *ValueType = RI.getElementType();
1153 Value *RedValue = Builder.CreateLoad(ValueType, RI.Variable,
1154 "red.value." + Twine(En.index()));
1155 Value *PrivateRedValue =
1156 Builder.CreateLoad(ValueType, RI.PrivateVariable,
1157 "red.private.value." + Twine(En.index()));
1158 Value *Reduced;
1159 Builder.restoreIP(
1160 RI.ReductionGen(Builder.saveIP(), RedValue, PrivateRedValue, Reduced));
1161 if (!Builder.GetInsertBlock())
1162 return InsertPointTy();
1163 Builder.CreateStore(Reduced, RI.Variable);
1164 }
1165 Function *EndReduceFunc = getOrCreateRuntimeFunctionPtr(
1166 IsNoWait ? RuntimeFunction::OMPRTL___kmpc_end_reduce_nowait
1167 : RuntimeFunction::OMPRTL___kmpc_end_reduce);
1168 Builder.CreateCall(EndReduceFunc, {Ident, ThreadId, Lock});
1169 Builder.CreateBr(ContinuationBlock);
1170
1171 // Populate the atomic reduction using the atomic elementwise reduction
1172 // function. There are no loads/stores here because they will be happening
1173 // inside the atomic elementwise reduction.
1174 Builder.SetInsertPoint(AtomicRedBlock);
1175 if (CanGenerateAtomic) {
1176 for (const ReductionInfo &RI : ReductionInfos) {
1177 Builder.restoreIP(RI.AtomicReductionGen(Builder.saveIP(), RI.Variable,
1178 RI.PrivateVariable));
1179 if (!Builder.GetInsertBlock())
1180 return InsertPointTy();
1181 }
1182 Builder.CreateBr(ContinuationBlock);
1183 } else {
1184 Builder.CreateUnreachable();
1185 }
1186
1187 // Populate the outlined reduction function using the elementwise reduction
1188 // function. Partial values are extracted from the type-erased array of
1189 // pointers to private variables.
1190 BasicBlock *ReductionFuncBlock =
1191 BasicBlock::Create(Module->getContext(), "", ReductionFunc);
1192 Builder.SetInsertPoint(ReductionFuncBlock);
1193 Value *LHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(0),
1194 RedArrayTy->getPointerTo());
1195 Value *RHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(1),
1196 RedArrayTy->getPointerTo());
1197 for (auto En : enumerate(ReductionInfos)) {
1198 const ReductionInfo &RI = En.value();
1199 Value *LHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
1200 RedArrayTy, LHSArrayPtr, 0, En.index());
1201 Value *LHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), LHSI8PtrPtr);
1202 Value *LHSPtr = Builder.CreateBitCast(LHSI8Ptr, RI.Variable->getType());
1203 Value *LHS = Builder.CreateLoad(RI.getElementType(), LHSPtr);
1204 Value *RHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
1205 RedArrayTy, RHSArrayPtr, 0, En.index());
1206 Value *RHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), RHSI8PtrPtr);
1207 Value *RHSPtr =
1208 Builder.CreateBitCast(RHSI8Ptr, RI.PrivateVariable->getType());
1209 Value *RHS = Builder.CreateLoad(RI.getElementType(), RHSPtr);
1210 Value *Reduced;
1211 Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced));
1212 if (!Builder.GetInsertBlock())
1213 return InsertPointTy();
1214 Builder.CreateStore(Reduced, LHSPtr);
1215 }
1216 Builder.CreateRetVoid();
1217
1218 Builder.SetInsertPoint(ContinuationBlock);
1219 return Builder.saveIP();
1220}
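// An illustrative analogue of the dispatch emitted above, assuming a single
// integer "sum" reduction. kmpcReduceSketch/kmpcEndReduceSketch are
// hypothetical stand-ins for the __kmpc_reduce(_nowait)/__kmpc_end_reduce
// calls; the real runtime decides which path (1 = non-atomic, 2 = atomic,
// 0 = nothing to do) each thread takes.
#include <atomic>
#include <cstdio>

static int GlobalSum = 0;                     // plays the role of RI.Variable
static std::atomic<int> AtomicGlobalSum{0};   // target of the atomic variant

static int kmpcReduceSketch() { return 1; }   // pretend: non-atomic path
static void kmpcEndReduceSketch() {}

static void finishReduction(int PrivateSum) { // PrivateSum ~ RI.PrivateVariable
  switch (kmpcReduceSketch()) {               // SwitchInst on the call result
  case 1:                                     // reduce.switch.nonatomic
    GlobalSum = GlobalSum + PrivateSum;       // ReductionGen + store
    kmpcEndReduceSketch();
    break;
  case 2:                                     // reduce.switch.atomic
    AtomicGlobalSum.fetch_add(PrivateSum);    // AtomicReductionGen
    break;
  default:                                    // falls through to reduce.finalize
    break;
  }
}

int main() {
  finishReduction(42);
  std::printf("sum = %d\n", GlobalSum);
  return 0;
}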
1221
1222OpenMPIRBuilder::InsertPointTy
1223OpenMPIRBuilder::createMaster(const LocationDescription &Loc,
1224 BodyGenCallbackTy BodyGenCB,
1225 FinalizeCallbackTy FiniCB) {
1226
1227 if (!updateToLocation(Loc))
1228 return Loc.IP;
1229
1230 Directive OMPD = Directive::OMPD_master;
1231 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1232 Value *Ident = getOrCreateIdent(SrcLocStr);
1233 Value *ThreadId = getOrCreateThreadID(Ident);
1234 Value *Args[] = {Ident, ThreadId};
1235
1236 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master);
1237 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1238
1239 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master);
1240 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
1241
1242 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1243 /*Conditional*/ true, /*hasFinalize*/ true);
1244}
1245
1246OpenMPIRBuilder::InsertPointTy
1247OpenMPIRBuilder::createMasked(const LocationDescription &Loc,
1248 BodyGenCallbackTy BodyGenCB,
1249 FinalizeCallbackTy FiniCB, Value *Filter) {
1250 if (!updateToLocation(Loc))
1251 return Loc.IP;
1252
1253 Directive OMPD = Directive::OMPD_masked;
1254 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1255 Value *Ident = getOrCreateIdent(SrcLocStr);
1256 Value *ThreadId = getOrCreateThreadID(Ident);
1257 Value *Args[] = {Ident, ThreadId, Filter};
1258 Value *ArgsEnd[] = {Ident, ThreadId};
1259
1260 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked);
1261 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1262
1263 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked);
1264 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd);
1265
1266 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1267 /*Conditional*/ true, /*hasFinalize*/ true);
1268}
1269
1270CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
1271 DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
1272 BasicBlock *PostInsertBefore, const Twine &Name) {
1273 Module *M = F->getParent();
1274 LLVMContext &Ctx = M->getContext();
1275 Type *IndVarTy = TripCount->getType();
10. Called C++ object pointer is null
1276
1277 // Create the basic block structure.
1278 BasicBlock *Preheader =
1279 BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore);
1280 BasicBlock *Header =
1281 BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore);
1282 BasicBlock *Cond =
1283 BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore);
1284 BasicBlock *Body =
1285 BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore);
1286 BasicBlock *Latch =
1287 BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore);
1288 BasicBlock *Exit =
1289 BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore);
1290 BasicBlock *After =
1291 BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore);
1292
1293 // Use specified DebugLoc for new instructions.
1294 Builder.SetCurrentDebugLocation(DL);
1295
1296 Builder.SetInsertPoint(Preheader);
1297 Builder.CreateBr(Header);
1298
1299 Builder.SetInsertPoint(Header);
1300 PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv");
1301 IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader);
1302 Builder.CreateBr(Cond);
1303
1304 Builder.SetInsertPoint(Cond);
1305 Value *Cmp =
1306 Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp");
1307 Builder.CreateCondBr(Cmp, Body, Exit);
1308
1309 Builder.SetInsertPoint(Body);
1310 Builder.CreateBr(Latch);
1311
1312 Builder.SetInsertPoint(Latch);
1313 Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1),
1314 "omp_" + Name + ".next", /*HasNUW=*/true);
1315 Builder.CreateBr(Header);
1316 IndVarPHI->addIncoming(Next, Latch);
1317
1318 Builder.SetInsertPoint(Exit);
1319 Builder.CreateBr(After);
1320
1321 // Remember and return the canonical control flow.
1322 LoopInfos.emplace_front();
1323 CanonicalLoopInfo *CL = &LoopInfos.front();
1324
1325 CL->Preheader = Preheader;
1326 CL->Header = Header;
1327 CL->Cond = Cond;
1328 CL->Body = Body;
1329 CL->Latch = Latch;
1330 CL->Exit = Exit;
1331 CL->After = After;
1332
1333#ifndef NDEBUG
1334 CL->assertOK();
1335#endif
1336 return CL;
1337}
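// A scalar analogue of the skeleton built above, with each basic block of the
// canonical loop marked; TripCount is a hypothetical placeholder value.
#include <cstdio>

int main() {
  unsigned TripCount = 4;               // value passed to createLoopSkeleton
  // omp_<name>.preheader: unconditionally branches into the header.
  for (unsigned IV = 0;                 // omp_<name>.header: PHI starts at 0
       IV < TripCount;                  // omp_<name>.cond: unsigned compare
       ++IV) {                          // omp_<name>.inc: add 1 (nuw)
    std::printf("body, iv = %u\n", IV); // omp_<name>.body (filled in later)
  }
  // omp_<name>.exit branches to omp_<name>.after; code after the loop goes there.
  return 0;
}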
1338
1339CanonicalLoopInfo *
1340OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc,
1341 LoopBodyGenCallbackTy BodyGenCB,
1342 Value *TripCount, const Twine &Name) {
1343 BasicBlock *BB = Loc.IP.getBlock();
1344 BasicBlock *NextBB = BB->getNextNode();
1345
1346 CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(),
1347 NextBB, NextBB, Name);
1348 BasicBlock *After = CL->getAfter();
1349
1350 // If location is not set, don't connect the loop.
1351 if (updateToLocation(Loc)) {
1352 // Split the loop at the insertion point: Branch to the preheader and move
1353 // every following instruction to after the loop (the After BB). Also, the
1354 // new successor is the loop's after block.
1355 Builder.CreateBr(CL->Preheader);
1356 After->getInstList().splice(After->begin(), BB->getInstList(),
1357 Builder.GetInsertPoint(), BB->end());
1358 After->replaceSuccessorsPhiUsesWith(BB, After);
1359 }
1360
1361 // Emit the body content. We do it after connecting the loop to the CFG so
1362 // that the callback does not encounter degenerate BBs.
1363 BodyGenCB(CL->getBodyIP(), CL->getIndVar());
1364
1365#ifndef NDEBUG
1366 CL->assertOK();
1367#endif
1368 return CL;
1369}
1370
1371CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop(
1372 const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
1373 Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
1374 InsertPointTy ComputeIP, const Twine &Name) {
1375
1376 // Consider the following difficulties (assuming 8-bit signed integers):
1377 // * Adding \p Step to the loop counter which passes \p Stop may overflow:
1378 // DO I = 1, 100, 50
1379 // * A \p Step of INT_MIN cannot be normalized to a positive direction:
1380 // DO I = 100, 0, -128
1381
1382 // Start, Stop and Step must be of the same integer type.
1383 auto *IndVarTy = cast<IntegerType>(Start->getType());
1384 assert(IndVarTy == Stop->getType() && "Stop type mismatch");
1385 assert(IndVarTy == Step->getType() && "Step type mismatch");
1386
1387 LocationDescription ComputeLoc =
1388 ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
1389 updateToLocation(ComputeLoc);
1390
1391 ConstantInt *Zero = ConstantInt::get(IndVarTy, 0);
1392 ConstantInt *One = ConstantInt::get(IndVarTy, 1);
1393
1394 // Like Step, but always positive.
1395 Value *Incr = Step;
1396
1397 // Distance between Start and Stop; always positive.
1398 Value *Span;
1399
1400 // Condition checking whether no iterations are executed at all, e.g. because
1401 // UB < LB.
1402 Value *ZeroCmp;
1403
1404 if (IsSigned) {
1405 // Ensure that the increment is positive. If not, negate it and swap LB and UB.
1406 Value *IsNeg = Builder.CreateICmpSLT(Step, Zero);
1407 Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step);
1408 Value *LB = Builder.CreateSelect(IsNeg, Stop, Start);
1409 Value *UB = Builder.CreateSelect(IsNeg, Start, Stop);
1410 Span = Builder.CreateSub(UB, LB, "", false, true);
1411 ZeroCmp = Builder.CreateICmp(
1412 InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB);
1413 } else {
1414 Span = Builder.CreateSub(Stop, Start, "", true);
1415 ZeroCmp = Builder.CreateICmp(
1416 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start);
1417 }
1418
1419 Value *CountIfLooping;
1420 if (InclusiveStop) {
1421 CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One);
1422 } else {
1423 // Avoid incrementing past stop since it could overflow.
1424 Value *CountIfTwo = Builder.CreateAdd(
1425 Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One);
1426 Value *OneCmp = Builder.CreateICmp(
1427 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr);
1428 CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo);
1429 }
1430 Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping,
1431 "omp_" + Name + ".tripcount");
1432
1433 auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) {
1434 Builder.restoreIP(CodeGenIP);
1435 Value *Span = Builder.CreateMul(IV, Step);
1436 Value *IndVar = Builder.CreateAdd(Span, Start);
1437 BodyGenCB(Builder.saveIP(), IndVar);
1438 };
1439 LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP();
1440 return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name);
1441}
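// A scalar sketch of the trip-count arithmetic above (signed path), assuming
// no INT_MIN step; the builder emits the same computation with IR selects
// instead of branches.
#include <cassert>
#include <cstdint>

static uint64_t tripCount(int64_t Start, int64_t Stop, int64_t Step,
                          bool InclusiveStop) {
  // Normalize to a positive increment; if Step is negative, swap the bounds.
  uint64_t Incr = Step < 0 ? uint64_t(-Step) : uint64_t(Step);
  int64_t LB = Step < 0 ? Stop : Start;
  int64_t UB = Step < 0 ? Start : Stop;
  if (InclusiveStop ? UB < LB : UB <= LB)        // ZeroCmp: no iterations at all
    return 0;
  uint64_t Span = uint64_t(UB - LB);             // non-negative by now
  if (InclusiveStop)
    return Span / Incr + 1;
  // Exclusive stop: never step past Stop, so round down on Span - 1.
  return Span <= Incr ? 1 : (Span - 1) / Incr + 1;
}

int main() {
  // "DO I = 1, 100, 50" from the comment above visits I = 1 and I = 51.
  assert(tripCount(1, 100, 50, /*InclusiveStop=*/true) == 2);
  // A downward loop: I = 10, 7, 4, 1.
  assert(tripCount(10, 1, -3, /*InclusiveStop=*/true) == 4);
  // Exclusive stop: i = 0, 2, 4 for (i = 0; i < 5; i += 2).
  assert(tripCount(0, 5, 2, /*InclusiveStop=*/false) == 3);
  return 0;
}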
1442
1443// Returns an LLVM function to call for initializing loop bounds using OpenMP
1444// static scheduling depending on `type`. Only i32 and i64 are supported by the
1445// runtime. Always interpret integers as unsigned similarly to
1446// CanonicalLoopInfo.
1447static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M,
1448 OpenMPIRBuilder &OMPBuilder) {
1449 unsigned Bitwidth = Ty->getIntegerBitWidth();
1450 if (Bitwidth == 32)
1451 return OMPBuilder.getOrCreateRuntimeFunction(
1452 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u);
1453 if (Bitwidth == 64)
1454 return OMPBuilder.getOrCreateRuntimeFunction(
1455 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u);
1456 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
1457}
1458
1459// Sets the number of loop iterations to the given value. This value must be
1460// valid in the condition block (i.e., defined in the preheader) and is
1461// interpreted as an unsigned integer.
1462void setCanonicalLoopTripCount(CanonicalLoopInfo *CLI, Value *TripCount) {
1463 Instruction *CmpI = &CLI->getCond()->front();
1464 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
1465 CmpI->setOperand(1, TripCount);
1466 CLI->assertOK();
1467}
1468
1469OpenMPIRBuilder::InsertPointTy
1470OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
1471 InsertPointTy AllocaIP,
1472 bool NeedsBarrier, Value *Chunk) {
1473 assert(CLI->isValid() && "Requires a valid canonical loop");
1474
1475 // Set up the source location value for OpenMP runtime.
1476 Builder.restoreIP(CLI->getPreheaderIP());
1477 Builder.SetCurrentDebugLocation(DL);
1478
1479 Constant *SrcLocStr = getOrCreateSrcLocStr(DL);
1480 Value *SrcLoc = getOrCreateIdent(SrcLocStr);
1481
1482 // Declare useful OpenMP runtime functions.
1483 Value *IV = CLI->getIndVar();
1484 Type *IVTy = IV->getType();
1485 FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this);
1486 FunctionCallee StaticFini =
1487 getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);
1488
1489 // Allocate space for computed loop bounds as expected by the "init" function.
1490 Builder.restoreIP(AllocaIP);
1491 Type *I32Type = Type::getInt32Ty(M.getContext());
1492 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
1493 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
1494 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
1495 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");
1496
1497 // At the end of the preheader, prepare for calling the "init" function by
1498 // storing the current loop bounds into the allocated space. A canonical loop
1499 // always iterates from 0 to trip-count with step 1. Note that "init" expects
1500 // and produces an inclusive upper bound.
1501 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
1502 Constant *Zero = ConstantInt::get(IVTy, 0);
1503 Constant *One = ConstantInt::get(IVTy, 1);
1504 Builder.CreateStore(Zero, PLowerBound);
1505 Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One);
1506 Builder.CreateStore(UpperBound, PUpperBound);
1507 Builder.CreateStore(One, PStride);
1508
1509 // FIXME: schedule(static) is NOT the same as schedule(static,1)
1510 if (!Chunk)
1511 Chunk = One;
1512
1513 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
1514
1515 Constant *SchedulingType =
1516 ConstantInt::get(I32Type, static_cast<int>(OMPScheduleType::Static));
1517
1518 // Call the "init" function and update the trip count of the loop with the
1519 // value it produced.
1520 Builder.CreateCall(StaticInit,
1521 {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound,
1522 PUpperBound, PStride, One, Chunk});
1523 Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound);
1524 Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound);
1525 Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound);
1526 Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One);
1527 setCanonicalLoopTripCount(CLI, TripCount);
1528
1529 // Update all uses of the induction variable except the one in the condition
1530 // block that compares it with the actual upper bound, and the increment in
1531 // the latch block.
1532 // TODO: this can eventually move to CanonicalLoopInfo or to a new
1533 // CanonicalLoopInfoUpdater interface.
1534 Builder.SetInsertPoint(CLI->getBody(), CLI->getBody()->getFirstInsertionPt());
1535 Value *UpdatedIV = Builder.CreateAdd(IV, LowerBound);
1536 IV->replaceUsesWithIf(UpdatedIV, [&](Use &U) {
1537 auto *Instr = dyn_cast<Instruction>(U.getUser());
1538 return !Instr ||
1539 (Instr->getParent() != CLI->getCond() &&
1540 Instr->getParent() != CLI->getLatch() && Instr != UpdatedIV);
1541 });
1542
1543 // In the "exit" block, call the "fini" function.
1544 Builder.SetInsertPoint(CLI->getExit(),
1545 CLI->getExit()->getTerminator()->getIterator());
1546 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
1547
1548 // Add the barrier if requested.
1549 if (NeedsBarrier)
1550 createBarrier(LocationDescription(Builder.saveIP(), DL),
1551 omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
1552 /* CheckCancelFlag */ false);
1553
1554 InsertPointTy AfterIP = CLI->getAfterIP();
1555 CLI->invalidate();
1556
1557 return AfterIP;
1558}
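// A per-thread sketch of what the static workshare lowering above amounts to:
// the runtime rewrites [0, TripCount) into an inclusive [LB, UB] slice, the
// canonical loop's trip count becomes UB - LB + 1, and each use of the
// induction variable is rebased by LB. staticInitSketch is a hypothetical
// stand-in for __kmpc_for_static_init_*.
#include <cstdio>

static void staticInitSketch(unsigned ThreadId, unsigned NumThreads,
                             unsigned TripCount, unsigned &LB, unsigned &UB) {
  // Simplistic block partition; the real runtime also honors chunk sizes.
  unsigned PerThread = (TripCount + NumThreads - 1) / NumThreads;
  LB = ThreadId * PerThread;
  UB = LB + PerThread - 1;
  if (TripCount && UB >= TripCount)
    UB = TripCount - 1;
}

int main() {
  unsigned TripCount = 10, ThreadId = 1, NumThreads = 4;
  unsigned LB = 0, UB = 0;
  staticInitSketch(ThreadId, NumThreads, TripCount, LB, UB); // the "init" call
  if (LB < TripCount) {
    unsigned LocalTrip = UB - LB + 1;          // setCanonicalLoopTripCount
    for (unsigned IV = 0; IV < LocalTrip; ++IV)
      std::printf("thread %u runs iteration %u\n", ThreadId, LB + IV); // rebased IV
  }
  // __kmpc_for_static_fini and the optional barrier would follow here.
  return 0;
}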
1559
1560OpenMPIRBuilder::InsertPointTy
1561OpenMPIRBuilder::applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
1562 InsertPointTy AllocaIP, bool NeedsBarrier) {
1563 // Currently only supports static schedules.
1564 return applyStaticWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier);
1565}
1566
1567/// Returns an LLVM function to call for initializing loop bounds using OpenMP
1568/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
1569/// the runtime. Always interpret integers as unsigned similarly to
1570/// CanonicalLoopInfo.
1571static FunctionCallee
1572getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
1573 unsigned Bitwidth = Ty->getIntegerBitWidth();
1574 if (Bitwidth == 32)
1575 return OMPBuilder.getOrCreateRuntimeFunction(
1576 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u);
1577 if (Bitwidth == 64)
1578 return OMPBuilder.getOrCreateRuntimeFunction(
1579 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u);
1580 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
1581}
1582
1583/// Returns an LLVM function to call for updating the next loop using OpenMP
1584/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
1585/// the runtime. Always interpret integers as unsigned similarly to
1586/// CanonicalLoopInfo.
1587static FunctionCallee
1588getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
1589 unsigned Bitwidth = Ty->getIntegerBitWidth();
1590 if (Bitwidth == 32)
1591 return OMPBuilder.getOrCreateRuntimeFunction(
1592 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u);
1593 if (Bitwidth == 64)
1594 return OMPBuilder.getOrCreateRuntimeFunction(
1595 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u);
1596 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
1597}
1598
1599OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
1600 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
1601 OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk) {
1602 assert(CLI->isValid() && "Requires a valid canonical loop");
1603
1604 // Set up the source location value for OpenMP runtime.
1605 Builder.SetCurrentDebugLocation(DL);
1606
1607 Constant *SrcLocStr = getOrCreateSrcLocStr(DL);
1608 Value *SrcLoc = getOrCreateIdent(SrcLocStr);
1609
1610 // Declare useful OpenMP runtime functions.
1611 Value *IV = CLI->getIndVar();
1612 Type *IVTy = IV->getType();
1613 FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this);
1614 FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this);
1615
1616 // Allocate space for computed loop bounds as expected by the "init" function.
1617 Builder.restoreIP(AllocaIP);
1618 Type *I32Type = Type::getInt32Ty(M.getContext());
1619 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
1620 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
1621 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
1622 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");
1623
1624 // At the end of the preheader, prepare for calling the "init" function by
1625 // storing the current loop bounds into the allocated space. A canonical loop
1626 // always iterates from 0 to trip-count with step 1. Note that "init" expects
1627 // and produces an inclusive upper bound.
1628 BasicBlock *PreHeader = CLI->getPreheader();
1629 Builder.SetInsertPoint(PreHeader->getTerminator());
1630 Constant *One = ConstantInt::get(IVTy, 1);
1631 Builder.CreateStore(One, PLowerBound);
1632 Value *UpperBound = CLI->getTripCount();
1633 Builder.CreateStore(UpperBound, PUpperBound);
1634 Builder.CreateStore(One, PStride);
1635
1636 BasicBlock *Header = CLI->getHeader();
1637 BasicBlock *Exit = CLI->getExit();
1638 BasicBlock *Cond = CLI->getCond();
1639 InsertPointTy AfterIP = CLI->getAfterIP();
1640
1641 // The CLI will be "broken" in the code below, as the loop is no longer
1642 // a valid canonical loop.
1643
1644 if (!Chunk)
1645 Chunk = One;
1646
1647 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
1648
1649 Constant *SchedulingType =
1650 ConstantInt::get(I32Type, static_cast<int>(SchedType));
1651
1652 // Call the "init" function.
1653 Builder.CreateCall(DynamicInit,
1654 {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One,
1655 UpperBound, /* step */ One, Chunk});
1656
1657 // An outer loop around the existing one.
1658 BasicBlock *OuterCond = BasicBlock::Create(
1659 PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond",
1660 PreHeader->getParent());
1661 // The dispatch_next result is always i32, so the comparison below needs a 32-bit zero rather than an IVTy constant.
1662 Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt());
1663 Value *Res =
1664 Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter,
1665 PLowerBound, PUpperBound, PStride});
1666 Constant *Zero32 = ConstantInt::get(I32Type, 0);
1667 Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32);
1668 Value *LowerBound =
1669 Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb");
1670 Builder.CreateCondBr(MoreWork, Header, Exit);
1671
1672 // Change PHI-node in loop header to use outer cond rather than preheader,
1673 // and set IV to the LowerBound.
1674 Instruction *Phi = &Header->front();
1675 auto *PI = cast<PHINode>(Phi);
1676 PI->setIncomingBlock(0, OuterCond);
1677 PI->setIncomingValue(0, LowerBound);
1678
1679 // Then set the pre-header to jump to the OuterCond
1680 Instruction *Term = PreHeader->getTerminator();
1681 auto *Br = cast<BranchInst>(Term);
1682 Br->setSuccessor(0, OuterCond);
1683
1684 // Modify the inner condition:
1685 // * Use the UpperBound returned from the DynamicNext call.
1686 // * Jump to the outer loop when done with one of the inner loops.
1687 Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt());
1688 UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub");
1689 Instruction *Comp = &*Builder.GetInsertPoint();
1690 auto *CI = cast<CmpInst>(Comp);
1691 CI->setOperand(1, UpperBound);
1692 // Redirect the inner exit to branch to outer condition.
1693 Instruction *Branch = &Cond->back();
1694 auto *BI = cast<BranchInst>(Branch);
1695 assert(BI->getSuccessor(1) == Exit);
1696 BI->setSuccessor(1, OuterCond);
1697
1698 // Add the barrier if requested.
1699 if (NeedsBarrier) {
1700 Builder.SetInsertPoint(&Exit->back());
1701 createBarrier(LocationDescription(Builder.saveIP(), DL),
1702 omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
1703 /* CheckCancelFlag */ false);
1704 }
1705
1706 CLI->invalidate();
1707 return AfterIP;
1708}
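// A sketch of the control flow produced above for dynamic scheduling: an
// outer loop repeatedly asks the runtime for the next chunk, and the original
// loop body runs over that chunk. dispatchNextSketch is a hypothetical
// stand-in for __kmpc_dispatch_next_*; like the bounds the init code above
// seeds, the chunks here are 1-based inclusive, which the header PHI rebases
// with LB - 1.
#include <cstdio>

static bool dispatchNextSketch(unsigned &LB, unsigned &UB) {
  // Hand out two fixed chunks of a 10-iteration loop, then report "no work".
  static int Call = 0;
  if (Call == 0) { LB = 1; UB = 5; }
  else if (Call == 1) { LB = 6; UB = 10; }
  else return false;
  ++Call;
  return true;
}

int main() {
  unsigned LB = 0, UB = 0;
  while (dispatchNextSketch(LB, UB)) {         // omp_*.preheader.outer.cond
    for (unsigned IV = LB - 1; IV < UB; ++IV)  // inner cond now compares to UB
      std::printf("iteration %u\n", IV);       // original loop body
  }
  // The exit block follows: optional barrier, then the code after the loop.
  return 0;
}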
1709
1710/// Make \p Source branch to \p Target.
1711///
1712/// Handles two situations:
1713/// * \p Source already has an unconditional branch.
1714/// * \p Source is a degenerate block (no terminator because the BB is
1715/// the current head of the IR construction).
1716static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) {
1717 if (Instruction *Term = Source->getTerminator()) {
1718 auto *Br = cast<BranchInst>(Term);
1719 assert(!Br->isConditional() &&
1720        "BB's terminator must be an unconditional branch (or degenerate)");
1721 BasicBlock *Succ = Br->getSuccessor(0);
1722 Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true);
1723 Br->setSuccessor(0, Target);
1724 return;
1725 }
1726
1727 auto *NewBr = BranchInst::Create(Target, Source);
1728 NewBr->setDebugLoc(DL);
1729}
1730
1731/// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is,
1732/// after this \p OldTarget will be orphaned.
1733static void redirectAllPredecessorsTo(BasicBlock *OldTarget,
1734 BasicBlock *NewTarget, DebugLoc DL) {
1735 for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget)))
1736 redirectTo(Pred, NewTarget, DL);
1737}
1738
1739 /// Determine which blocks in \p BBs are still referenced from outside the set
1740 /// and remove the unreferenced ones from the function.
1741static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) {
1742 SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()};
1743 auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) {
1744 for (Use &U : BB->uses()) {
1745 auto *UseInst = dyn_cast<Instruction>(U.getUser());
1746 if (!UseInst)
1747 continue;
1748 if (BBsToErase.count(UseInst->getParent()))
1749 continue;
1750 return true;
1751 }
1752 return false;
1753 };
1754
1755 while (true) {
1756 bool Changed = false;
1757 for (BasicBlock *BB : make_early_inc_range(BBsToErase)) {
1758 if (HasRemainingUses(BB)) {
1759 BBsToErase.erase(BB);
1760 Changed = true;
1761 }
1762 }
1763 if (!Changed)
1764 break;
1765 }
1766
1767 SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end());
1768 DeleteDeadBlocks(BBVec);
1769}
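// A small standalone model of the fixed-point pruning above: starting from a
// candidate erase set, keep any block that is still referenced from outside
// the set, and repeat until the set stabilizes. Blocks and uses are modeled
// with plain strings here; the names are hypothetical.
#include <cassert>
#include <map>
#include <set>
#include <string>
#include <vector>

int main() {
  // user block -> blocks it references (branch targets, blockaddress, ...).
  std::map<std::string, std::vector<std::string>> Uses = {
      {"entry", {"keep"}},     // "entry" is not a removal candidate
      {"keep", {"chained"}},   // once "keep" survives, it rescues "chained"
      {"deadA", {"deadB"}},    // a cycle only referenced from inside the set
      {"deadB", {"deadA"}},
  };
  std::set<std::string> ToErase = {"chained", "deadA", "deadB", "keep"};

  bool Changed = true;
  while (Changed) {                      // iterate to a fixed point
    Changed = false;
    for (auto It = ToErase.begin(); It != ToErase.end();) {
      bool HasOutsideUse = false;
      for (const auto &P : Uses)
        for (const std::string &Target : P.second)
          if (Target == *It && !ToErase.count(P.first))
            HasOutsideUse = true;
      if (HasOutsideUse) {               // still referenced from kept code
        It = ToErase.erase(It);
        Changed = true;
      } else {
        ++It;
      }
    }
  }
  // "keep" survives (used by "entry") and thereby rescues "chained" on the
  // second pass; only the self-contained "deadA"/"deadB" cycle is deleted.
  assert((ToErase == std::set<std::string>{"deadA", "deadB"}));
  return 0;
}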
1770
1771CanonicalLoopInfo *
1772OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
1773 InsertPointTy ComputeIP) {
1774 assert(Loops.size() >= 1 && "At least one loop required");
1. Assuming the condition is true
2. '?' condition is true
1775 size_t NumLoops = Loops.size();
1776
1777 // Nothing to do if there is already just one loop.
1778 if (NumLoops == 1)
3. Assuming 'NumLoops' is not equal to 1
4. Taking false branch
1779 return Loops.front();
1780
1781 CanonicalLoopInfo *Outermost = Loops.front();
1782 CanonicalLoopInfo *Innermost = Loops.back();
1783 BasicBlock *OrigPreheader = Outermost->getPreheader();
1784 BasicBlock *OrigAfter = Outermost->getAfter();
1785 Function *F = OrigPreheader->getParent();
1786
1787 // Setup the IRBuilder for inserting the trip count computation.
1788 Builder.SetCurrentDebugLocation(DL);
1789 if (ComputeIP.isSet())
5. Taking true branch
1790 Builder.restoreIP(ComputeIP);
1791 else
1792 Builder.restoreIP(Outermost->getPreheaderIP());
1793
1794 // Derive the collapsed loop's trip count.
1795 // TODO: Find common/largest indvar type.
1796 Value *CollapsedTripCount = nullptr;
6. 'CollapsedTripCount' initialized to a null pointer value
1797 for (CanonicalLoopInfo *L : Loops) {
7. Assuming '__begin1' is equal to '__end1'
1798 assert(L->isValid() &&
1799        "All loops to collapse must be valid canonical loops");
1800 Value *OrigTripCount = L->getTripCount();
1801 if (!CollapsedTripCount) {
1802 CollapsedTripCount = OrigTripCount;
1803 continue;
1804 }
1805
1806 // TODO: Enable UndefinedSanitizer to diagnose an overflow here.
1807 CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount,
1808 {}, /*HasNUW=*/true);
1809 }
1810
1811 // Create the collapsed loop control flow.
1812 CanonicalLoopInfo *Result =
1813 createLoopSkeleton(DL, CollapsedTripCount, F,
8. Passing null pointer value via 2nd parameter 'TripCount'
9. Calling 'OpenMPIRBuilder::createLoopSkeleton'
1814 OrigPreheader->getNextNode(), OrigAfter, "collapsed");
1815
1816 // Build the collapsed loop body code.
1817 // Start with deriving the input loop induction variables from the collapsed
1818 // one, using a divmod scheme. To preserve the original loops' order, the
1819 // innermost loop uses the least significant bits.
1820 Builder.restoreIP(Result->getBodyIP());
1821
1822 Value *Leftover = Result->getIndVar();
1823 SmallVector<Value *> NewIndVars;
1824 NewIndVars.set_size(NumLoops);
1825 for (int i = NumLoops - 1; i >= 1; --i) {
1826 Value *OrigTripCount = Loops[i]->getTripCount();
1827
1828 Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount);
1829 NewIndVars[i] = NewIndVar;
1830
1831 Leftover = Builder.CreateUDiv(Leftover, OrigTripCount);
1832 }
1833 // Outermost loop gets all the remaining bits.
1834 NewIndVars[0] = Leftover;
1835
1836 // Construct the loop body control flow.
1837 // We progressively construct the branch structure following the direction of
1838 // the control flow, from the leading in-between code, through the loop nest
1839 // body and the trailing in-between code, to rejoining the collapsed loop's latch.
1840 // ContinueBlock and ContinuePred keep track of the source(s) of the next edge. If
1841 // the ContinueBlock is set, continue with that block. If ContinuePred, use
1842 // its predecessors as sources.
1843 BasicBlock *ContinueBlock = Result->getBody();
1844 BasicBlock *ContinuePred = nullptr;
1845 auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest,
1846 BasicBlock *NextSrc) {
1847 if (ContinueBlock)
1848 redirectTo(ContinueBlock, Dest, DL);
1849 else
1850 redirectAllPredecessorsTo(ContinuePred, Dest, DL);
1851
1852 ContinueBlock = nullptr;
1853 ContinuePred = NextSrc;
1854 };
1855
1856 // The code before the nested loop of each level.
1857 // Because we are sinking it into the nest, it will be executed more often
1858 // than in the original loop. More sophisticated schemes could keep track of what
1859 // the in-between code is and instantiate it only once per thread.
1860 for (size_t i = 0; i < NumLoops - 1; ++i)
1861 ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader());
1862
1863 // Connect the loop nest body.
1864 ContinueWith(Innermost->getBody(), Innermost->getLatch());
1865
1866 // The code after the nested loop at each level.
1867 for (size_t i = NumLoops - 1; i > 0; --i)
1868 ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch());
1869
1870 // Connect the finished loop to the collapsed loop latch.
1871 ContinueWith(Result->getLatch(), nullptr);
1872
1873 // Replace the input loops with the new collapsed loop.
1874 redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL);
1875 redirectTo(Result->getAfter(), Outermost->getAfter(), DL);
1876
1877 // Replace the input loop indvars with the derived ones.
1878 for (size_t i = 0; i < NumLoops; ++i)
1879 Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]);
1880
1881 // Remove unused parts of the input loops.
1882 SmallVector<BasicBlock *, 12> OldControlBBs;
1883 OldControlBBs.reserve(6 * Loops.size());
1884 for (CanonicalLoopInfo *Loop : Loops)
1885 Loop->collectControlBlocks(OldControlBBs);
1886 removeUnusedBlocksFromParent(OldControlBBs);
1887
1888 for (CanonicalLoopInfo *L : Loops)
1889 L->invalidate();
1890
1891#ifndef NDEBUG
1892 Result->assertOK();
1893#endif
1894 return Result;
1895}
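// A scalar check of the divmod scheme described above for a two-level nest:
// the collapsed induction variable enumerates (i, j) pairs with the innermost
// loop in the least significant "digits". Trip counts are hypothetical.
#include <cassert>

int main() {
  const unsigned TC0 = 3, TC1 = 4;            // original trip counts
  const unsigned Collapsed = TC0 * TC1;       // CollapsedTripCount (nuw mul)
  unsigned Visited = 0;
  for (unsigned IV = 0; IV < Collapsed; ++IV) {
    unsigned J = IV % TC1;                    // NewIndVars[1]: urem by inner count
    unsigned I = IV / TC1;                    // NewIndVars[0]: leftover udiv
    assert(I < TC0 && J < TC1);
    assert(IV == I * TC1 + J);                // order-preserving enumeration
    ++Visited;
  }
  assert(Visited == Collapsed);
  return 0;
}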
1896
1897std::vector<CanonicalLoopInfo *>
1898OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
1899 ArrayRef<Value *> TileSizes) {
1900 assert(TileSizes.size() == Loops.size() &&
1901        "Must pass as many tile sizes as there are loops");
1902 int NumLoops = Loops.size();
1903 assert(NumLoops >= 1 && "At least one loop to tile required");
1904
1905 CanonicalLoopInfo *OutermostLoop = Loops.front();
1906 CanonicalLoopInfo *InnermostLoop = Loops.back();
1907 Function *F = OutermostLoop->getBody()->getParent();
1908 BasicBlock *InnerEnter = InnermostLoop->getBody();
1909 BasicBlock *InnerLatch = InnermostLoop->getLatch();
1910
1911 // Collect the original trip counts and induction variables to be accessible by
1912 // index. Also, the structure of the original loops is not preserved during
1913 // the construction of the tiled loops, so do it before we scavenge the BBs of
1914 // any original CanonicalLoopInfo.
1915 SmallVector<Value *, 4> OrigTripCounts, OrigIndVars;
1916 for (CanonicalLoopInfo *L : Loops) {
1917 assert(L->isValid() && "All input loops must be valid canonical loops");
1918 OrigTripCounts.push_back(L->getTripCount());
1919 OrigIndVars.push_back(L->getIndVar());
1920 }
1921
1922 // Collect the code between loop headers. These may contain SSA definitions
1923 // that are used in the loop nest body. To be usable within the innermost
1924 // body, these BasicBlocks will be sunk into the loop nest body. That is,
1925 // these instructions may be executed more often than before the tiling.
1926 // TODO: It would be sufficient to only sink them into the body of the
1927 // corresponding tile loop.
1928 SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode;
1929 for (int i = 0; i < NumLoops - 1; ++i) {
1930 CanonicalLoopInfo *Surrounding = Loops[i];
1931 CanonicalLoopInfo *Nested = Loops[i + 1];
1932
1933 BasicBlock *EnterBB = Surrounding->getBody();
1934 BasicBlock *ExitBB = Nested->getHeader();
1935 InbetweenCode.emplace_back(EnterBB, ExitBB);
1936 }
1937
1938 // Compute the trip counts of the floor loops.
1939 Builder.SetCurrentDebugLocation(DL);
1940 Builder.restoreIP(OutermostLoop->getPreheaderIP());
1941 SmallVector<Value *, 4> FloorCount, FloorRems;
1942 for (int i = 0; i < NumLoops; ++i) {
1943 Value *TileSize = TileSizes[i];
1944 Value *OrigTripCount = OrigTripCounts[i];
1945 Type *IVType = OrigTripCount->getType();
1946
1947 Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize);
1948 Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize);
1949
1950 // 0 if the tilesize divides the tripcount, 1 otherwise.
1951 // 1 means we need an additional iteration for a partial tile.
1952 //
1953 // Unfortunately we cannot just use the round-up formula
1954 //   (tripcount + tilesize - 1) / tilesize
1955 // because the summation might overflow. We do not want to introduce undefined
1956 // behavior where the untiled loop nest did not have any.
1957 Value *FloorTripOverflow =
1958 Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0));
1959
1960 FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType);
1961 FloorTripCount =
1962 Builder.CreateAdd(FloorTripCount, FloorTripOverflow,
1963 "omp_floor" + Twine(i) + ".tripcount", true);
1964
1965 // Remember some values for later use.
1966 FloorCount.push_back(FloorTripCount);
1967 FloorRems.push_back(FloorTripRem);
1968 }
1969
1970 // Generate the new loop nest, from the outermost to the innermost.
1971 std::vector<CanonicalLoopInfo *> Result;
1972 Result.reserve(NumLoops * 2);
1973
1974 // The basic block of the surrounding loop that enters the generated loop
1975 // nest.
1976 BasicBlock *Enter = OutermostLoop->getPreheader();
1977
1978 // The basic block of the surrounding loop where the inner code should
1979 // continue.
1980 BasicBlock *Continue = OutermostLoop->getAfter();
1981
1982 // Where the next loop basic block should be inserted.
1983 BasicBlock *OutroInsertBefore = InnermostLoop->getExit();
1984
1985 auto EmbeddNewLoop =
1986 [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore](
1987 Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * {
1988 CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton(
1989 DL, TripCount, F, InnerEnter, OutroInsertBefore, Name);
1990 redirectTo(Enter, EmbeddedLoop->getPreheader(), DL);
1991 redirectTo(EmbeddedLoop->getAfter(), Continue, DL);
1992
1993 // Setup the position where the next embedded loop connects to this loop.
1994 Enter = EmbeddedLoop->getBody();
1995 Continue = EmbeddedLoop->getLatch();
1996 OutroInsertBefore = EmbeddedLoop->getLatch();
1997 return EmbeddedLoop;
1998 };
1999
2000 auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts,
2001 const Twine &NameBase) {
2002 for (auto P : enumerate(TripCounts)) {
2003 CanonicalLoopInfo *EmbeddedLoop =
2004 EmbeddNewLoop(P.value(), NameBase + Twine(P.index()));
2005 Result.push_back(EmbeddedLoop);
2006 }
2007 };
2008
2009 EmbeddNewLoops(FloorCount, "floor");
2010
2011 // Within the innermost floor loop, emit the code that computes the tile
2012 // sizes.
2013 Builder.SetInsertPoint(Enter->getTerminator());
2014 SmallVector<Value *, 4> TileCounts;
2015 for (int i = 0; i < NumLoops; ++i) {
2016 CanonicalLoopInfo *FloorLoop = Result[i];
2017 Value *TileSize = TileSizes[i];
2018
2019 Value *FloorIsEpilogue =
2020 Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]);
2021 Value *TileTripCount =
2022 Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize);
2023
2024 TileCounts.push_back(TileTripCount);
2025 }
2026
2027 // Create the tile loops.
2028 EmbeddNewLoops(TileCounts, "tile");
2029
2030 // Insert the inbetween code into the body.
2031 BasicBlock *BodyEnter = Enter;
2032 BasicBlock *BodyEntered = nullptr;
2033 for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) {
2034 BasicBlock *EnterBB = P.first;
2035 BasicBlock *ExitBB = P.second;
2036
2037 if (BodyEnter)
2038 redirectTo(BodyEnter, EnterBB, DL);
2039 else
2040 redirectAllPredecessorsTo(BodyEntered, EnterBB, DL);
2041
2042 BodyEnter = nullptr;
2043 BodyEntered = ExitBB;
2044 }
2045
2046 // Append the original loop nest body into the generated loop nest body.
2047 if (BodyEnter)
2048 redirectTo(BodyEnter, InnerEnter, DL);
2049 else
2050 redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL);
2051 redirectAllPredecessorsTo(InnerLatch, Continue, DL);
2052
2053 // Replace the original induction variable with an induction variable computed
2054 // from the tile and floor induction variables.
2055 Builder.restoreIP(Result.back()->getBodyIP());
2056 for (int i = 0; i < NumLoops; ++i) {
2057 CanonicalLoopInfo *FloorLoop = Result[i];
2058 CanonicalLoopInfo *TileLoop = Result[NumLoops + i];
2059 Value *OrigIndVar = OrigIndVars[i];
2060 Value *Size = TileSizes[i];
2061
2062 Value *Scale =
2063 Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true);
2064 Value *Shift =
2065 Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true);
2066 OrigIndVar->replaceAllUsesWith(Shift);
2067 }
2068
2069 // Remove unused parts of the original loops.
2070 SmallVector<BasicBlock *, 12> OldControlBBs;
2071 OldControlBBs.reserve(6 * Loops.size());
2072 for (CanonicalLoopInfo *Loop : Loops)
2073 Loop->collectControlBlocks(OldControlBBs);
2074 removeUnusedBlocksFromParent(OldControlBBs);
2075
2076 for (CanonicalLoopInfo *L : Loops)
2077 L->invalidate();
2078
2079#ifndef NDEBUG
2080 for (CanonicalLoopInfo *GenL : Result)
2081 GenL->assertOK();
2082#endif
2083 return Result;
2084}
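// A scalar check of the floor/tile counting scheme above for one loop level:
// the overflow-safe round-up, the per-tile trip count, and the reconstructed
// induction variable. The sizes are hypothetical, and the "last partial tile
// runs only the remainder" selection is modeled with an explicit check rather
// than the compare-and-select emitted above.
#include <cassert>

int main() {
  const unsigned TripCount = 10, TileSize = 4;
  unsigned FloorTrip = TripCount / TileSize;           // udiv
  unsigned FloorRem = TripCount % TileSize;            // urem
  FloorTrip += (FloorRem != 0);                        // round up w/o overflow
  assert(FloorTrip == 3);

  unsigned Visited = 0;
  for (unsigned Floor = 0; Floor < FloorTrip; ++Floor) {
    // The last (partial) tile runs only the remainder; full tiles run TileSize.
    bool IsEpilogue = (FloorRem != 0) && (Floor == FloorTrip - 1);
    unsigned TileTrip = IsEpilogue ? FloorRem : TileSize;
    for (unsigned Tile = 0; Tile < TileTrip; ++Tile) {
      unsigned OrigIV = TileSize * Floor + Tile;       // Scale + Shift rebuild
      assert(OrigIV < TripCount);
      ++Visited;
    }
  }
  assert(Visited == TripCount);                        // every iteration covered once
  return 0;
}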
2085
2086/// Attach loop metadata \p Properties to the loop described by \p Loop. If the
2087/// loop already has metadata, the loop properties are appended.
2088static void addLoopMetadata(CanonicalLoopInfo *Loop,
2089 ArrayRef<Metadata *> Properties) {
2090 assert(Loop->isValid() && "Expecting a valid CanonicalLoopInfo");
2091
2092 // Nothing to do if no property to attach.
2093 if (Properties.empty())
2094 return;
2095
2096 LLVMContext &Ctx = Loop->getFunction()->getContext();
2097 SmallVector<Metadata *> NewLoopProperties;
2098 NewLoopProperties.push_back(nullptr);
2099
2100 // If the loop already has metadata, prepend it to the new metadata.
2101 BasicBlock *Latch = Loop->getLatch();
2102 assert(Latch && "A valid CanonicalLoopInfo must have a unique latch");
2103 MDNode *Existing = Latch->getTerminator()->getMetadata(LLVMContext::MD_loop);
2104 if (Existing)
2105 append_range(NewLoopProperties, drop_begin(Existing->operands(), 1));
2106
2107 append_range(NewLoopProperties, Properties);
2108 MDNode *LoopID = MDNode::getDistinct(Ctx, NewLoopProperties);
2109 LoopID->replaceOperandWith(0, LoopID);
2110
2111 Latch->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopID);
2112}
2113
2114void OpenMPIRBuilder::unrollLoopFull(DebugLoc, CanonicalLoopInfo *Loop) {
2115 LLVMContext &Ctx = Builder.getContext();
2116 addLoopMetadata(
2117 Loop, {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
2118 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.full"))});
2119}
2120
2121void OpenMPIRBuilder::unrollLoopHeuristic(DebugLoc, CanonicalLoopInfo *Loop) {
2122 LLVMContext &Ctx = Builder.getContext();
2123 addLoopMetadata(
2124 Loop, {
2125 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
2126 });
2127}
2128
2129/// Create the TargetMachine object to query the backend for optimization
2130/// preferences.
2131///
2132/// Ideally, this would be passed from the front-end to the OpenMPBuilder, but
2133/// e.g. Clang does not pass it to its CodeGen layer and creates it only when
2134 /// needed for the LLVM pass pipeline. We use some default options to avoid
2135/// having to pass too many settings from the frontend that probably do not
2136/// matter.
2137///
2138/// Currently, TargetMachine is only used sometimes by the unrollLoopPartial
2139/// method. If we are going to use TargetMachine for more purposes, especially
2140/// those that are sensitive to TargetOptions, RelocModel and CodeModel, it
2141 /// might be worth requiring front-ends to pass on their TargetMachine,
2142 /// or at least cache it between methods. Note that while frontends such as Clang
2143 /// have just a single main TargetMachine per translation unit, "target-cpu" and
2144 /// "target-features" that determine the TargetMachine are per-function and can
2145 /// be overridden using __attribute__((target("OPTIONS"))).
2146static std::unique_ptr<TargetMachine>
2147createTargetMachine(Function *F, CodeGenOpt::Level OptLevel) {
2148 Module *M = F->getParent();
2149
2150 StringRef CPU = F->getFnAttribute("target-cpu").getValueAsString();
2151 StringRef Features = F->getFnAttribute("target-features").getValueAsString();
2152 const std::string &Triple = M->getTargetTriple();
2153
2154 std::string Error;
2155 const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
2156 if (!TheTarget)
2157 return {};
2158
2159 llvm::TargetOptions Options;
2160 return std::unique_ptr<TargetMachine>(TheTarget->createTargetMachine(
2161 Triple, CPU, Features, Options, /*RelocModel=*/None, /*CodeModel=*/None,
2162 OptLevel));
2163}
2164
2165 /// Heuristically determine the best-performing unroll factor for \p CLI. This
2166/// depends on the target processor. We are re-using the same heuristics as the
2167/// LoopUnrollPass.
2168static int32_t computeHeuristicUnrollFactor(CanonicalLoopInfo *CLI) {
2169 Function *F = CLI->getFunction();
2170
2171 // Assume the user requests the most aggressive unrolling, even if the rest of
2172 // the code is optimized using a lower setting.
2173 CodeGenOpt::Level OptLevel = CodeGenOpt::Aggressive;
2174 std::unique_ptr<TargetMachine> TM = createTargetMachine(F, OptLevel);
2175
2176 FunctionAnalysisManager FAM;
2177 FAM.registerPass([]() { return TargetLibraryAnalysis(); });
2178 FAM.registerPass([]() { return AssumptionAnalysis(); });
2179 FAM.registerPass([]() { return DominatorTreeAnalysis(); });
2180 FAM.registerPass([]() { return LoopAnalysis(); });
2181 FAM.registerPass([]() { return ScalarEvolutionAnalysis(); });
2182 FAM.registerPass([]() { return PassInstrumentationAnalysis(); });
2183 TargetIRAnalysis TIRA;
2184 if (TM)
2185 TIRA = TargetIRAnalysis(
2186 [&](const Function &F) { return TM->getTargetTransformInfo(F); });
2187 FAM.registerPass([&]() { return TIRA; });
2188
2189 TargetIRAnalysis::Result &&TTI = TIRA.run(*F, FAM);
2190 ScalarEvolutionAnalysis SEA;
2191 ScalarEvolution &&SE = SEA.run(*F, FAM);
2192 DominatorTreeAnalysis DTA;
2193 DominatorTree &&DT = DTA.run(*F, FAM);
2194 LoopAnalysis LIA;
2195 LoopInfo &&LI = LIA.run(*F, FAM);
2196 AssumptionAnalysis ACT;
2197 AssumptionCache &&AC = ACT.run(*F, FAM);
2198 OptimizationRemarkEmitter ORE{F};
2199
2200 Loop *L = LI.getLoopFor(CLI->getHeader());
2201 assert(L && "Expecting CanonicalLoopInfo to be recognized as a loop");
2202
2203 TargetTransformInfo::UnrollingPreferences UP =
2204 gatherUnrollingPreferences(L, SE, TTI,
2205 /*BlockFrequencyInfo=*/nullptr,
2206 /*ProfileSummaryInfo=*/nullptr, ORE, OptLevel,
2207 /*UserThreshold=*/None,
2208 /*UserCount=*/None,
2209 /*UserAllowPartial=*/true,
2210 /*UserAllowRuntime=*/true,
2211 /*UserUpperBound=*/None,
2212 /*UserFullUnrollMaxCount=*/None);
2213
2214 UP.Force = true;
2215
2216 // Account for additional optimizations taking place before the LoopUnrollPass
2217 // would unroll the loop.
2218 UP.Threshold *= UnrollThresholdFactor;
2219 UP.PartialThreshold *= UnrollThresholdFactor;
2220
2221 // Use normal unroll factors even if the rest of the code is optimized for
2222 // size.
2223 UP.OptSizeThreshold = UP.Threshold;
2224 UP.PartialOptSizeThreshold = UP.PartialThreshold;
2225
2226 LLVM_DEBUG(dbgs() << "Unroll heuristic thresholds:\n"
2227                   << " Threshold=" << UP.Threshold << "\n"
2228                   << " PartialThreshold=" << UP.PartialThreshold << "\n"
2229                   << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n"
2230                   << " PartialOptSizeThreshold="
2231                   << UP.PartialOptSizeThreshold << "\n");
2232
2233 // Disable peeling.
2234 TargetTransformInfo::PeelingPreferences PP =
2235 gatherPeelingPreferences(L, SE, TTI,
2236 /*UserAllowPeeling=*/false,
2237 /*UserAllowProfileBasedPeeling=*/false,
2238 /*UserUnrollingSpecficValues=*/false);
2239
2240 SmallPtrSet<const Value *, 32> EphValues;
2241 CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
2242
2243 // Assume that reads and writes to stack variables can be eliminated by
2244 // Mem2Reg, SROA or LICM. That is, don't count them towards the loop body's
2245 // size.
2246 for (BasicBlock *BB : L->blocks()) {
2247 for (Instruction &I : *BB) {
2248 Value *Ptr;
2249 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2250 Ptr = Load->getPointerOperand();
2251 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2252 Ptr = Store->getPointerOperand();
2253 } else
2254 continue;
2255
2256 Ptr = Ptr->stripPointerCasts();
2257
2258 if (auto *Alloca = dyn_cast<AllocaInst>(Ptr)) {
2259 if (Alloca->getParent() == &F->getEntryBlock())
2260 EphValues.insert(&I);
2261 }
2262 }
2263 }
2264
2265 unsigned NumInlineCandidates;
2266 bool NotDuplicatable;
2267 bool Convergent;
2268 unsigned LoopSize =
2269 ApproximateLoopSize(L, NumInlineCandidates, NotDuplicatable, Convergent,
2270 TTI, EphValues, UP.BEInsns);
2271 LLVM_DEBUG(dbgs() << "Estimated loop size is " << LoopSize << "\n");
2272
2273 // Loop is not unrollable if the loop contains certain instructions.
2274 if (NotDuplicatable || Convergent) {
2275 LLVM_DEBUG(dbgs() << "Loop not considered unrollable\n");
2276 return 1;
2277 }
2278
2279 // TODO: Determine trip count of \p CLI if constant, computeUnrollCount might
2280 // be able to use it.
2281 int TripCount = 0;
2282 int MaxTripCount = 0;
2283 bool MaxOrZero = false;
2284 unsigned TripMultiple = 0;
2285
2286 bool UseUpperBound = false;
2287 computeUnrollCount(L, TTI, DT, &LI, SE, EphValues, &ORE, TripCount,
2288 MaxTripCount, MaxOrZero, TripMultiple, LoopSize, UP, PP,
2289 UseUpperBound);
2290 unsigned Factor = UP.Count;
2291 LLVM_DEBUG(dbgs() << "Suggesting unroll factor of " << Factor << "\n");
2292
2293 // This function returns 1 to signal to not unroll a loop.
2294 if (Factor == 0)
2295 return 1;
2296 return Factor;
2297}
2298
2299void OpenMPIRBuilder::unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop,
2300 int32_t Factor,
2301 CanonicalLoopInfo **UnrolledCLI) {
2302 assert(Factor >= 0 && "Unroll factor must not be negative");
2303
2304 Function *F = Loop->getFunction();
2305 LLVMContext &Ctx = F->getContext();
2306
2307 // If the unrolled loop is not used for another loop-associated directive, it
2308 // is sufficient to add metadata for the LoopUnrollPass.
2309 if (!UnrolledCLI) {
2310 SmallVector<Metadata *, 2> LoopMetadata;
2311 LoopMetadata.push_back(
2312 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")));
2313
2314 if (Factor >= 1) {
2315 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get(
2316 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor)));
2317 LoopMetadata.push_back(MDNode::get(
2318 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst}));
2319 }
2320
2321 addLoopMetadata(Loop, LoopMetadata);
2322 return;
2323 }
2324
2325 // Heuristically determine the unroll factor.
2326 if (Factor == 0)
2327 Factor = computeHeuristicUnrollFactor(Loop);
2328
2329 // No change required with unroll factor 1.
2330 if (Factor == 1) {
2331 *UnrolledCLI = Loop;
2332 return;
2333 }
2334
2335 assert(Factor >= 2 &&
2336        "unrolling only makes sense with a factor of 2 or larger");
2337
2338 Type *IndVarTy = Loop->getIndVarType();
2339
2340 // Apply partial unrolling by tiling the loop by the unroll-factor, then fully
2341 // unroll the inner loop.
2342 Value *FactorVal =
2343 ConstantInt::get(IndVarTy, APInt(IndVarTy->getIntegerBitWidth(), Factor,
2344 /*isSigned=*/false));
2345 std::vector<CanonicalLoopInfo *> LoopNest =
2346 tileLoops(DL, {Loop}, {FactorVal});
2347 assert(LoopNest.size() == 2 && "Expect 2 loops after tiling");
2348 *UnrolledCLI = LoopNest[0];
2349 CanonicalLoopInfo *InnerLoop = LoopNest[1];
2350
2351 // LoopUnrollPass can only fully unroll loops with constant trip count.
2352 // Unroll by the unroll factor with a fallback epilog for the remainder
2353 // iterations if necessary.
2354 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get(
2355 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor)));
2356 addLoopMetadata(
2357 InnerLoop,
2358 {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
2359 MDNode::get(
2360 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})});
2361
2362#ifndef NDEBUG
2363 (*UnrolledCLI)->assertOK();
2364#endif
2365}
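
For orientation, a hedged caller sketch for unrollLoopPartial based only on the signature above; `OMPBuilder`, `DL`, `LoopA` and `LoopB` are assumptions of the example:

    // Request a fixed factor and keep a handle to the resulting outer loop,
    // e.g. for a subsequent loop-associated directive. tileLoops invalidates
    // the input loop, so it must not be reused afterwards.
    CanonicalLoopInfo *Unrolled = nullptr;
    OMPBuilder.unrollLoopPartial(DL, LoopA, /*Factor=*/4, &Unrolled);

    // Metadata-only variant: no result loop is needed, so only
    // llvm.loop.unroll.* hints are attached and LoopUnrollPass does the work.
    OMPBuilder.unrollLoopPartial(DL, LoopB, /*Factor=*/0, /*UnrolledCLI=*/nullptr);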
2366
2367OpenMPIRBuilder::InsertPointTy
2368OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc,
2369 llvm::Value *BufSize, llvm::Value *CpyBuf,
2370 llvm::Value *CpyFn, llvm::Value *DidIt) {
2371 if (!updateToLocation(Loc))
2372 return Loc.IP;
2373
2374 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2375 Value *Ident = getOrCreateIdent(SrcLocStr);
2376 Value *ThreadId = getOrCreateThreadID(Ident);
2377
2378 llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt);
2379
2380 Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD};
2381
2382 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate);
2383 Builder.CreateCall(Fn, Args);
2384
2385 return Builder.saveIP();
2386}
2387
2388OpenMPIRBuilder::InsertPointTy
2389OpenMPIRBuilder::createSingle(const LocationDescription &Loc,
2390 BodyGenCallbackTy BodyGenCB,
2391 FinalizeCallbackTy FiniCB, llvm::Value *DidIt) {
2392
2393 if (!updateToLocation(Loc))
2394 return Loc.IP;
2395
2396 // If needed (i.e. not null), initialize `DidIt` with 0
2397 if (DidIt) {
2398 Builder.CreateStore(Builder.getInt32(0), DidIt);
2399 }
2400
2401 Directive OMPD = Directive::OMPD_single;
2402 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2403 Value *Ident = getOrCreateIdent(SrcLocStr);
2404 Value *ThreadId = getOrCreateThreadID(Ident);
2405 Value *Args[] = {Ident, ThreadId};
2406
2407 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single);
2408 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
2409
2410 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single);
2411 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
2412
2413 // generates the following:
2414 // if (__kmpc_single()) {
2415 // .... single region ...
2416 // __kmpc_end_single
2417 // }
2418
2419 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
2420 /*Conditional*/ true, /*hasFinalize*/ true);
2421}
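
A hedged sketch of how a front end might drive createSingle, assuming the function_ref-based callback shapes implied by the calls in EmitOMPInlinedRegion below; `OMPBuilder`, `Builder` and `Loc` are assumptions of the example:

    // The body callback is invoked with an insertion point inside the region
    // guarded by __kmpc_single(); the finalization callback runs before
    // __kmpc_end_single via the finalization stack.
    auto BodyGenCB = [&](OpenMPIRBuilder::InsertPointTy AllocaIP,
                         OpenMPIRBuilder::InsertPointTy CodeGenIP,
                         BasicBlock &FiniBB) {
      Builder.restoreIP(CodeGenIP);
      // ... emit the single region's body here ...
    };
    auto FiniCB = [&](OpenMPIRBuilder::InsertPointTy CodeGenIP) {
      // ... emit any cleanup required at region exit here ...
    };
    Builder.restoreIP(
        OMPBuilder.createSingle(Loc, BodyGenCB, FiniCB, /*DidIt=*/nullptr));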
2422
2423OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical(
2424 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
2425 FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) {
2426
2427 if (!updateToLocation(Loc))
2428 return Loc.IP;
2429
2430 Directive OMPD = Directive::OMPD_critical;
2431 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2432 Value *Ident = getOrCreateIdent(SrcLocStr);
2433 Value *ThreadId = getOrCreateThreadID(Ident);
2434 Value *LockVar = getOMPCriticalRegionLock(CriticalName);
2435 Value *Args[] = {Ident, ThreadId, LockVar};
2436
2437 SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args));
2438 Function *RTFn = nullptr;
2439 if (HintInst) {
2440 // Add Hint to entry Args and create call
2441 EnterArgs.push_back(HintInst);
2442 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint);
2443 } else {
2444 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical);
2445 }
2446 Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs);
2447
2448 Function *ExitRTLFn =
2449 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical);
2450 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
2451
2452 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
2453 /*Conditional*/ false, /*hasFinalize*/ true);
2454}
2455
2456OpenMPIRBuilder::InsertPointTy
2457OpenMPIRBuilder::createOrderedDepend(const LocationDescription &Loc,
2458 InsertPointTy AllocaIP, unsigned NumLoops,
2459 ArrayRef<llvm::Value *> StoreValues,
2460 const Twine &Name, bool IsDependSource) {
2461 if (!updateToLocation(Loc))
2462 return Loc.IP;
2463
2464 // Allocate space for vector and generate alloc instruction.
2465 auto *ArrI64Ty = ArrayType::get(Int64, NumLoops);
2466 Builder.restoreIP(AllocaIP);
2467 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI64Ty, nullptr, Name);
2468 ArgsBase->setAlignment(Align(8));
2469 Builder.restoreIP(Loc.IP);
2470
2471 // Store the index value with offset in depend vector.
2472 for (unsigned I = 0; I < NumLoops; ++I) {
2473 Value *DependAddrGEPIter = Builder.CreateInBoundsGEP(
2474 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(I)});
2475 Builder.CreateStore(StoreValues[I], DependAddrGEPIter);
2476 }
2477
2478 Value *DependBaseAddrGEP = Builder.CreateInBoundsGEP(
2479 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(0)});
2480
2481 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2482 Value *Ident = getOrCreateIdent(SrcLocStr);
2483 Value *ThreadId = getOrCreateThreadID(Ident);
2484 Value *Args[] = {Ident, ThreadId, DependBaseAddrGEP};
2485
2486 Function *RTLFn = nullptr;
2487 if (IsDependSource)
2488 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_post);
2489 else
2490 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_wait);
2491 Builder.CreateCall(RTLFn, Args);
2492
2493 return Builder.saveIP();
2494}
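
As a usage illustration only (a sketch against the signature above, not code taken from a front end), a two-level doacross nest would post its own iteration vector for `ordered depend(source)` and wait on a sink vector for `ordered depend(sink: ...)`; the i64 iteration values are assumptions of the example:

    // depend(source): post the current iteration vector via __kmpc_doacross_post.
    SmallVector<Value *> SrcIters = {IterOuter, IterInner};
    OMPBuilder.createOrderedDepend(Loc, AllocaIP, /*NumLoops=*/2, SrcIters,
                                   ".dep.src", /*IsDependSource=*/true);

    // depend(sink: i-1, j): wait on the given vector via __kmpc_doacross_wait.
    SmallVector<Value *> SinkIters = {IterOuterMinus1, IterInner};
    OMPBuilder.createOrderedDepend(Loc, AllocaIP, /*NumLoops=*/2, SinkIters,
                                   ".dep.sink", /*IsDependSource=*/false);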
2495
2496OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createOrderedThreadsSimd(
2497 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
2498 FinalizeCallbackTy FiniCB, bool IsThreads) {
2499 if (!updateToLocation(Loc))
2500 return Loc.IP;
2501
2502 Directive OMPD = Directive::OMPD_ordered;
2503 Instruction *EntryCall = nullptr;
2504 Instruction *ExitCall = nullptr;
2505
2506 if (IsThreads) {
2507 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2508 Value *Ident = getOrCreateIdent(SrcLocStr);
2509 Value *ThreadId = getOrCreateThreadID(Ident);
2510 Value *Args[] = {Ident, ThreadId};
2511
2512 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_ordered);
2513 EntryCall = Builder.CreateCall(EntryRTLFn, Args);
2514
2515 Function *ExitRTLFn =
2516 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_ordered);
2517 ExitCall = Builder.CreateCall(ExitRTLFn, Args);
2518 }
2519
2520 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
2521 /*Conditional*/ false, /*hasFinalize*/ true);
2522}
2523
2524OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion(
2525 Directive OMPD, Instruction *EntryCall, Instruction *ExitCall,
2526 BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional,
2527 bool HasFinalize, bool IsCancellable) {
2528
2529 if (HasFinalize)
2530 FinalizationStack.push_back({FiniCB, OMPD, IsCancellable});
2531
2532 // Create inlined region's entry and body blocks, in preparation
2533 // for conditional creation
2534 BasicBlock *EntryBB = Builder.GetInsertBlock();
2535 Instruction *SplitPos = EntryBB->getTerminator();
2536 if (!isa_and_nonnull<BranchInst>(SplitPos))
2537 SplitPos = new UnreachableInst(Builder.getContext(), EntryBB);
2538 BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end");
2539 BasicBlock *FiniBB =
2540 EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize");
2541
2542 Builder.SetInsertPoint(EntryBB->getTerminator());
2543 emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional);
2544
2545 // generate body
2546 BodyGenCB(/* AllocaIP */ InsertPointTy(),
2547 /* CodeGenIP */ Builder.saveIP(), *FiniBB);
2548
2549 // If we didn't emit a branch to FiniBB during body generation, it means
2550 // FiniBB is unreachable (e.g. while(1);). Stop generating all the
2551 // unreachable blocks, and remove anything we are not going to use.
2552 auto SkipEmittingRegion = FiniBB->hasNPredecessors(0);
2553 if (SkipEmittingRegion) {
2554 FiniBB->eraseFromParent();
2555 ExitCall->eraseFromParent();
2556 // Discard finalization if we have it.
2557 if (HasFinalize) {
2558 assert(!FinalizationStack.empty() &&
2559        "Unexpected finalization stack state!");
2560 FinalizationStack.pop_back();
2561 }
2562 } else {
2563 // emit exit call and do any needed finalization.
2564 auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt());
2565 assert(FiniBB->getTerminator()->getNumSuccessors() == 1 &&
2566        FiniBB->getTerminator()->getSuccessor(0) == ExitBB &&
2567        "Unexpected control flow graph state!!");
2568 emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize);
2569 assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB &&
2570        "Unexpected Control Flow State!");
2571 MergeBlockIntoPredecessor(FiniBB);
2572 }
2573
2574 // If we are skipping the region of a non-conditional, remove the exit
2575 // block, and clear the builder's insertion point.
2576 assert(SplitPos->getParent() == ExitBB &&
2577        "Unexpected Insertion point location!");
2578 if (!Conditional && SkipEmittingRegion) {
2579 ExitBB->eraseFromParent();
2580 Builder.ClearInsertionPoint();
2581 } else {
2582 auto merged = MergeBlockIntoPredecessor(ExitBB);
2583 BasicBlock *ExitPredBB = SplitPos->getParent();
2584 auto InsertBB = merged ? ExitPredBB : ExitBB;
2585 if (!isa_and_nonnull<BranchInst>(SplitPos))
2586 SplitPos->eraseFromParent();
2587 Builder.SetInsertPoint(InsertBB);
2588 }
2589
2590 return Builder.saveIP();
2591}
2592
2593OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry(
2594 Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) {
2595 // If there is nothing to do, return the current insertion point.
2596 if (!Conditional || !EntryCall)
2597 return Builder.saveIP();
2598
2599 BasicBlock *EntryBB = Builder.GetInsertBlock();
2600 Value *CallBool = Builder.CreateIsNotNull(EntryCall);
2601 auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body");
2602 auto *UI = new UnreachableInst(Builder.getContext(), ThenBB);
2603
2604 // Emit thenBB and set the Builder's insertion point there for
2605 // body generation next. Place the block after the current block.
2606 Function *CurFn = EntryBB->getParent();
2607 CurFn->getBasicBlockList().insertAfter(EntryBB->getIterator(), ThenBB);
2608
2609 // Move Entry branch to end of ThenBB, and replace with conditional
2610 // branch (If-stmt)
2611 Instruction *EntryBBTI = EntryBB->getTerminator();
2612 Builder.CreateCondBr(CallBool, ThenBB, ExitBB);
2613 EntryBBTI->removeFromParent();
2614 Builder.SetInsertPoint(UI);
2615 Builder.Insert(EntryBBTI);
2616 UI->eraseFromParent();
2617 Builder.SetInsertPoint(ThenBB->getTerminator());
2618
2619 // return an insertion point to ExitBB.
2620 return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt());
2621}
2622
2623OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit(
2624 omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall,
2625 bool HasFinalize) {
2626
2627 Builder.restoreIP(FinIP);
2628
2629 // If there is finalization to do, emit it before the exit call
2630 if (HasFinalize) {
2631 assert(!FinalizationStack.empty() &&
2632        "Unexpected finalization stack state!");
2633
2634 FinalizationInfo Fi = FinalizationStack.pop_back_val();
2635 assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!");
2636
2637 Fi.FiniCB(FinIP);
2638
2639 BasicBlock *FiniBB = FinIP.getBlock();
2640 Instruction *FiniBBTI = FiniBB->getTerminator();
2641
2642 // set Builder IP for call creation
2643 Builder.SetInsertPoint(FiniBBTI);
2644 }
2645
2646 if (!ExitCall)
2647 return Builder.saveIP();
2648
2649 // Place the exit call as the last instruction before the finalization block terminator.
2650 ExitCall->removeFromParent();
2651 Builder.Insert(ExitCall);
2652
2653 return IRBuilder<>::InsertPoint(ExitCall->getParent(),
2654 ExitCall->getIterator());
2655}
2656
2657OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks(
2658 InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr,
2659 llvm::IntegerType *IntPtrTy, bool BranchtoEnd) {
2660 if (!IP.isSet())
2661 return IP;
2662
2663 IRBuilder<>::InsertPointGuard IPG(Builder);
2664
2665 // Creates the following CFG structure:
2666 //    OMP_Entry : (MasterAddr != PrivateAddr)?
2667 //       F            T
2668 //       |             \
2669 //       |        copyin.not.master
2670 //       |             /
2671 //       v            /
2672 //   copyin.not.master.end
2673 //            |
2674 //            v
2675 //      OMP.Entry.Next
2676
2677 BasicBlock *OMP_Entry = IP.getBlock();
2678 Function *CurFn = OMP_Entry->getParent();
2679 BasicBlock *CopyBegin =
2680 BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn);
2681 BasicBlock *CopyEnd = nullptr;
2682
2683 // If the entry block is terminated, split it to preserve the branch to the
2684 // following basic block (i.e. OMP.Entry.Next); otherwise, leave everything as is.
2685 if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) {
2686 CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(),
2687 "copyin.not.master.end");
2688 OMP_Entry->getTerminator()->eraseFromParent();
2689 } else {
2690 CopyEnd =
2691 BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn);
2692 }
2693
2694 Builder.SetInsertPoint(OMP_Entry);
2695 Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy);
2696 Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy);
2697 Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr);
2698 Builder.CreateCondBr(cmp, CopyBegin, CopyEnd);
2699
2700 Builder.SetInsertPoint(CopyBegin);
2701 if (BranchtoEnd)
2702 Builder.SetInsertPoint(Builder.CreateBr(CopyEnd));
2703
2704 return Builder.saveIP();
2705}
2706
2707CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc,
2708 Value *Size, Value *Allocator,
2709 std::string Name) {
2710 IRBuilder<>::InsertPointGuard IPG(Builder);
2711 Builder.restoreIP(Loc.IP);
2712
2713 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2714 Value *Ident = getOrCreateIdent(SrcLocStr);
2715 Value *ThreadId = getOrCreateThreadID(Ident);
2716 Value *Args[] = {ThreadId, Size, Allocator};
2717
2718 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc);
2719
2720 return Builder.CreateCall(Fn, Args, Name);
2721}
2722
2723CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc,
2724 Value *Addr, Value *Allocator,
2725 std::string Name) {
2726 IRBuilder<>::InsertPointGuard IPG(Builder);
2727 Builder.restoreIP(Loc.IP);
2728
2729 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2730 Value *Ident = getOrCreateIdent(SrcLocStr);
2731 Value *ThreadId = getOrCreateThreadID(Ident);
2732 Value *Args[] = {ThreadId, Addr, Allocator};
2733 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free);
2734 return Builder.CreateCall(Fn, Args, Name);
2735}
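
A minimal pairing sketch for the two helpers above; the allocator handle and the use of the buffer are assumptions of the example, while the calls themselves follow the signatures visible in this file:

    // Allocate 1024 bytes from the given allocator via __kmpc_alloc, use the
    // returned pointer, then release it via __kmpc_free.
    Value *Size = Builder.getInt64(1024);
    Value *Allocator = AllocatorHandle; // assumed omp_allocator_handle_t value
    CallInst *Buf = OMPBuilder.createOMPAlloc(Loc, Size, Allocator, "omp.buf");
    // ... code that uses Buf ...
    OMPBuilder.createOMPFree(Loc, Buf, Allocator, "");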
2736
2737CallInst *OpenMPIRBuilder::createCachedThreadPrivate(
2738 const LocationDescription &Loc, llvm::Value *Pointer,
2739 llvm::ConstantInt *Size, const llvm::Twine &Name) {
2740 IRBuilder<>::InsertPointGuard IPG(Builder);
2741 Builder.restoreIP(Loc.IP);
2742
2743 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2744 Value *Ident = getOrCreateIdent(SrcLocStr);
2745 Value *ThreadId = getOrCreateThreadID(Ident);
2746 Constant *ThreadPrivateCache =
2747 getOrCreateOMPInternalVariable(Int8PtrPtr, Name);
2748 llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache};
2749
2750 Function *Fn =
2751 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached);
2752
2753 return Builder.CreateCall(Fn, Args);
2754}
2755
2756OpenMPIRBuilder::InsertPointTy
2757OpenMPIRBuilder::createTargetInit(const LocationDescription &Loc, bool IsSPMD,
2758 bool RequiresFullRuntime) {
2759 if (!updateToLocation(Loc))
2760 return Loc.IP;
2761
2762 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2763 Value *Ident = getOrCreateIdent(SrcLocStr);
2764 ConstantInt *IsSPMDVal = ConstantInt::getSigned(
2765 IntegerType::getInt8Ty(Int8->getContext()),
2766 IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC);
2767 ConstantInt *UseGenericStateMachine =
2768 ConstantInt::getBool(Int32->getContext(), !IsSPMD);
2769 ConstantInt *RequiresFullRuntimeVal =
2770 ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime);
2771
2772 Function *Fn = getOrCreateRuntimeFunctionPtr(
2773 omp::RuntimeFunction::OMPRTL___kmpc_target_init);
2774
2775 CallInst *ThreadKind = Builder.CreateCall(
2776 Fn, {Ident, IsSPMDVal, UseGenericStateMachine, RequiresFullRuntimeVal});
2777
2778 Value *ExecUserCode = Builder.CreateICmpEQ(
2779 ThreadKind, ConstantInt::get(ThreadKind->getType(), -1),
2780 "exec_user_code");
2781
2782 // ThreadKind = __kmpc_target_init(...)
2783 // if (ThreadKind == -1)
2784 // user_code
2785 // else
2786 // return;
2787
2788 auto *UI = Builder.CreateUnreachable();
2789 BasicBlock *CheckBB = UI->getParent();
2790 BasicBlock *UserCodeEntryBB = CheckBB->splitBasicBlock(UI, "user_code.entry");
2791
2792 BasicBlock *WorkerExitBB = BasicBlock::Create(
2793 CheckBB->getContext(), "worker.exit", CheckBB->getParent());
2794 Builder.SetInsertPoint(WorkerExitBB);
2795 Builder.CreateRetVoid();
2796
2797 auto *CheckBBTI = CheckBB->getTerminator();
2798 Builder.SetInsertPoint(CheckBBTI);
2799 Builder.CreateCondBr(ExecUserCode, UI->getParent(), WorkerExitBB);
2800
2801 CheckBBTI->eraseFromParent();
2802 UI->eraseFromParent();
2803
2804 // Continue in the "user_code" block, see diagram above and in
2805 // openmp/libomptarget/deviceRTLs/common/include/target.h .
2806 return InsertPointTy(UserCodeEntryBB, UserCodeEntryBB->getFirstInsertionPt());
2807}
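
A hedged sketch of the expected init/deinit bracketing for a target region entry point, using only the two entry points defined in this file (`OMPBuilder`, `Builder` and `Loc` are assumptions of the example):

    // Emit the __kmpc_target_init guard; the returned insertion point is inside
    // the user_code.entry block shown above, i.e. only threads that should run
    // the user code reach it.
    OpenMPIRBuilder::InsertPointTy UserCodeIP =
        OMPBuilder.createTargetInit(Loc, /*IsSPMD=*/false,
                                    /*RequiresFullRuntime=*/true);
    Builder.restoreIP(UserCodeIP);
    // ... emit the target region body ...
    OMPBuilder.createTargetDeinit(Loc, /*IsSPMD=*/false,
                                  /*RequiresFullRuntime=*/true);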
2808
2809void OpenMPIRBuilder::createTargetDeinit(const LocationDescription &Loc,
2810 bool IsSPMD,
2811 bool RequiresFullRuntime) {
2812 if (!updateToLocation(Loc))
2813 return;
2814
2815 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2816 Value *Ident = getOrCreateIdent(SrcLocStr);
2817 ConstantInt *IsSPMDVal = ConstantInt::getSigned(
2818 IntegerType::getInt8Ty(Int8->getContext()),
2819 IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC);
2820 ConstantInt *RequiresFullRuntimeVal =
2821 ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime);
2822
2823 Function *Fn = getOrCreateRuntimeFunctionPtr(
2824 omp::RuntimeFunction::OMPRTL___kmpc_target_deinit);
2825
2826 Builder.CreateCall(Fn, {Ident, IsSPMDVal, RequiresFullRuntimeVal});
2827}
2828
2829std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts,
2830 StringRef FirstSeparator,
2831 StringRef Separator) {
2832 SmallString<128> Buffer;
2833 llvm::raw_svector_ostream OS(Buffer);
2834 StringRef Sep = FirstSeparator;
2835 for (StringRef Part : Parts) {
2836 OS << Sep << Part;
2837 Sep = Separator;
2838 }
2839 return OS.str().str();
2840}
2841
2842Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable(
2843 llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
2844 // TODO: Replace the twine arg with stringref to get rid of the conversion
2845 // logic. However, this is taken from the current implementation in clang as is.
2846 // Since this method is used in many places exclusively for OMP internal use,
2847 // we will keep it as is temporarily until we move all users to the
2848 // builder and then, if possible, fix it everywhere in one go.
2849 SmallString<256> Buffer;
2850 llvm::raw_svector_ostream Out(Buffer);
2851 Out << Name;
2852 StringRef RuntimeName = Out.str();
2853 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
2854 if (Elem.second) {
2855 assert(Elem.second->getType()->getPointerElementType() == Ty &&
2856        "OMP internal variable has different type than requested");
2857 } else {
2858 // TODO: investigate the appropriate linkage type used for the global
2859 // variable for possibly changing that to internal or private, or maybe
2860 // create different versions of the function for different OMP internal
2861 // variables.
2862 Elem.second = new llvm::GlobalVariable(
2863 M, Ty, /*IsConstant*/ false, llvm::GlobalValue::CommonLinkage,
2864 llvm::Constant::getNullValue(Ty), Elem.first(),
2865 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
2866 AddressSpace);
2867 }
2868
2869 return Elem.second;
2870}
2871
2872Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) {
2873 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
2874 std::string Name = getNameWithSeparators({Prefix, "var"}, ".", ".");
2875 return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name);
2876}
2877
2878GlobalVariable *
2879OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
2880 std::string VarName) {
2881 llvm::Constant *MaptypesArrayInit =
2882 llvm::ConstantDataArray::get(M.getContext(), Mappings);
2883 auto *MaptypesArrayGlobal = new llvm::GlobalVariable(
2884 M, MaptypesArrayInit->getType(),
2885 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MaptypesArrayInit,
2886 VarName);
2887 MaptypesArrayGlobal->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2888 return MaptypesArrayGlobal;
2889}
2890
2891void OpenMPIRBuilder::createMapperAllocas(const LocationDescription &Loc,
2892 InsertPointTy AllocaIP,
2893 unsigned NumOperands,
2894 struct MapperAllocas &MapperAllocas) {
2895 if (!updateToLocation(Loc))
2896 return;
2897
2898 auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands);
2899 auto *ArrI64Ty = ArrayType::get(Int64, NumOperands);
2900 Builder.restoreIP(AllocaIP);
2901 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI8PtrTy);
2902 AllocaInst *Args = Builder.CreateAlloca(ArrI8PtrTy);
2903 AllocaInst *ArgSizes = Builder.CreateAlloca(ArrI64Ty);
2904 Builder.restoreIP(Loc.IP);
2905 MapperAllocas.ArgsBase = ArgsBase;
2906 MapperAllocas.Args = Args;
2907 MapperAllocas.ArgSizes = ArgSizes;
2908}
2909
2910void OpenMPIRBuilder::emitMapperCall(const LocationDescription &Loc,
2911 Function *MapperFunc, Value *SrcLocInfo,
2912 Value *MaptypesArg, Value *MapnamesArg,
2913 struct MapperAllocas &MapperAllocas,
2914 int64_t DeviceID, unsigned NumOperands) {
2915 if (!updateToLocation(Loc))
2916 return;
2917
2918 auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands);
2919 auto *ArrI64Ty = ArrayType::get(Int64, NumOperands);
2920 Value *ArgsBaseGEP =
2921 Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.ArgsBase,
2922 {Builder.getInt32(0), Builder.getInt32(0)});
2923 Value *ArgsGEP =
2924 Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.Args,
2925 {Builder.getInt32(0), Builder.getInt32(0)});
2926 Value *ArgSizesGEP =
2927 Builder.CreateInBoundsGEP(ArrI64Ty, MapperAllocas.ArgSizes,
2928 {Builder.getInt32(0), Builder.getInt32(0)});
2929 Value *NullPtr = Constant::getNullValue(Int8Ptr->getPointerTo());
2930 Builder.CreateCall(MapperFunc,
2931 {SrcLocInfo, Builder.getInt64(DeviceID),
2932 Builder.getInt32(NumOperands), ArgsBaseGEP, ArgsGEP,
2933 ArgSizesGEP, MaptypesArg, MapnamesArg, NullPtr});
2934}
2935
2936bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic(
2937 const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) {
2938 assert(!(AO == AtomicOrdering::NotAtomic ||
2939          AO == llvm::AtomicOrdering::Unordered) &&
2940        "Unexpected Atomic Ordering.");
2941
2942 bool Flush = false;
2943 llvm::AtomicOrdering FlushAO = AtomicOrdering::Monotonic;
2944
2945 switch (AK) {
2946 case Read:
2947 if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease ||
2948 AO == AtomicOrdering::SequentiallyConsistent) {
2949 FlushAO = AtomicOrdering::Acquire;
2950 Flush = true;
2951 }
2952 break;
2953 case Write:
2954 case Update:
2955 if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease ||
2956 AO == AtomicOrdering::SequentiallyConsistent) {
2957 FlushAO = AtomicOrdering::Release;
2958 Flush = true;
2959 }
2960 break;
2961 case Capture:
2962 switch (AO) {
2963 case AtomicOrdering::Acquire:
2964 FlushAO = AtomicOrdering::Acquire;
2965 Flush = true;
2966 break;
2967 case AtomicOrdering::Release:
2968 FlushAO = AtomicOrdering::Release;
2969 Flush = true;
2970 break;
2971 case AtomicOrdering::AcquireRelease:
2972 case AtomicOrdering::SequentiallyConsistent:
2973 FlushAO = AtomicOrdering::AcquireRelease;
2974 Flush = true;
2975 break;
2976 default:
2977 // do nothing - leave silently.
2978 break;
2979 }
2980 }
2981
2982 if (Flush) {
2983 // The flush runtime call currently does not take a memory_ordering argument.
2984 // Until it does, resolve which atomic ordering would be required here, but
2985 // issue the flush call without it.
2986 // TODO: pass `FlushAO` after memory ordering support is added
2987 (void)FlushAO;
2988 emitFlush(Loc);
2989 }
2990
2991 // For AO == AtomicOrdering::Monotonic and all other combinations,
2992 // do nothing.
2993 return Flush;
2994}
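
For convenience, the decision implemented by the switch above can be summarized as follows (derived directly from the code, not an external reference); note the resolved flush ordering is computed but, per the comment, not yet passed to the runtime:

    //  AtomicKind      | orderings that trigger a flush      | resolved ordering
    //  ----------------+-------------------------------------+------------------
    //  Read            | acquire, acq_rel, seq_cst           | acquire
    //  Write, Update   | release, acq_rel, seq_cst           | release
    //  Capture         | acquire                             | acquire
    //                  | release                             | release
    //                  | acq_rel, seq_cst                    | acq_rel
    //  All other combinations (e.g. monotonic): no flush is emitted.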
2995
2996OpenMPIRBuilder::InsertPointTy
2997OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc,
2998 AtomicOpValue &X, AtomicOpValue &V,
2999 AtomicOrdering AO) {
3000 if (!updateToLocation(Loc))
3001 return Loc.IP;
3002
3003 Type *XTy = X.Var->getType();
3004 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
3005 Type *XElemTy = XTy->getPointerElementType();
3006 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
3007         XElemTy->isPointerTy()) &&
3008        "OMP atomic read expected a scalar type");
3009
3010 Value *XRead = nullptr;
3011
3012 if (XElemTy->isIntegerTy()) {
3013 LoadInst *XLD =
3014 Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read");
3015 XLD->setAtomic(AO);
3016 XRead = cast<Value>(XLD);
3017 } else {
3018 // We need to bitcast and perform atomic op as integer
3019 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
3020 IntegerType *IntCastTy =
3021 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
3022 Value *XBCast = Builder.CreateBitCast(
3023 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast");
3024 LoadInst *XLoad =
3025 Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load");
3026 XLoad->setAtomic(AO);
3027 if (XElemTy->isFloatingPointTy()) {
3028 XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast");
3029 } else {
3030 XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast");
3031 }
3032 }
3033 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read);
3034 Builder.CreateStore(XRead, V.Var, V.IsVolatile);
3035 return Builder.saveIP();
3036}
3037
3038OpenMPIRBuilder::InsertPointTy
3039OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc,
3040 AtomicOpValue &X, Value *Expr,
3041 AtomicOrdering AO) {
3042 if (!updateToLocation(Loc))
3043 return Loc.IP;
3044
3045 Type *XTy = X.Var->getType();
3046 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
3047 Type *XElemTy = XTy->getPointerElementType();
3048 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
3049         XElemTy->isPointerTy()) &&
3050        "OMP atomic write expected a scalar type");
3051
3052 if (XElemTy->isIntegerTy()) {
3053 StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile);
3054 XSt->setAtomic(AO);
3055 } else {
3056 // We need to bitcast and perform atomic op as integers
3057 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
3058 IntegerType *IntCastTy =
3059 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
3060 Value *XBCast = Builder.CreateBitCast(
3061 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast");
3062 Value *ExprCast =
3063 Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast");
3064 StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile);
3065 XSt->setAtomic(AO);
3066 }
3067
3068 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write);
3069 return Builder.saveIP();
3070}
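
A short usage sketch covering createAtomicRead and createAtomicWrite together; only the AtomicOpValue fields used above (Var, IsVolatile) are populated, it is assumed AtomicOpValue can be default-constructed, and `XAddr`, `VAddr` and `Expr` are assumptions of the example:

    // x is read atomically into v, then Expr is stored atomically to x.
    OpenMPIRBuilder::AtomicOpValue X, V;
    X.Var = XAddr;        // pointer to the atomic location
    X.IsVolatile = false;
    V.Var = VAddr;        // pointer receiving the read value
    Builder.restoreIP(
        OMPBuilder.createAtomicRead(Loc, X, V, AtomicOrdering::Monotonic));
    Builder.restoreIP(
        OMPBuilder.createAtomicWrite(Loc, X, Expr, AtomicOrdering::Release));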
3071
3072OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate(
3073 const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
3074 Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
3075 AtomicUpdateCallbackTy &UpdateOp, bool IsXLHSInRHSPart) {
3076 if (!updateToLocation(Loc))
3077 return Loc.IP;
3078
3079 LLVM_DEBUG({
3080   Type *XTy = X.Var->getType();
3081   assert(XTy->isPointerTy() &&
3082          "OMP Atomic expects a pointer to target memory");
3083   Type *XElemTy = XTy->getPointerElementType();
3084 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3082, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3086, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3089, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
3085 XElemTy->isPointerTy()) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3082, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3086, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3089, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
3086 "OMP atomic update expected a scalar type");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3082, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3086, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3089, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
3087 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3082, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3086, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3089, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
3088 (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3082, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3086, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3089, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
3089 "OpenMP atomic does not support LT or GT operations");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3082, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3086, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3089, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
3090 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3082, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = XTy
->getPointerElementType(); (static_cast <bool> ((XElemTy
->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy
->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3086, __extension__ __PRETTY_FUNCTION__)); (static_cast <
bool> ((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst
::Min) && (RMWOp != AtomicRMWInst::UMax) && (
RMWOp != AtomicRMWInst::UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3089, __extension__ __PRETTY_FUNCTION__)); }; } } while (false
)
;
3091
3092 emitAtomicUpdate(AllocIP, X.Var, Expr, AO, RMWOp, UpdateOp, X.IsVolatile,
3093 IsXLHSInRHSPart);
3094 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update);
3095 return Builder.saveIP();
3096}
3097
3098Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2,
3099 AtomicRMWInst::BinOp RMWOp) {
3100 switch (RMWOp) {
3101 case AtomicRMWInst::Add:
3102 return Builder.CreateAdd(Src1, Src2);
3103 case AtomicRMWInst::Sub:
3104 return Builder.CreateSub(Src1, Src2);
3105 case AtomicRMWInst::And:
3106 return Builder.CreateAnd(Src1, Src2);
3107 case AtomicRMWInst::Nand:
3108 return Builder.CreateNeg(Builder.CreateAnd(Src1, Src2));
3109 case AtomicRMWInst::Or:
3110 return Builder.CreateOr(Src1, Src2);
3111 case AtomicRMWInst::Xor:
3112 return Builder.CreateXor(Src1, Src2);
3113 case AtomicRMWInst::Xchg:
3114 case AtomicRMWInst::FAdd:
3115 case AtomicRMWInst::FSub:
3116 case AtomicRMWInst::BAD_BINOP:
3117 case AtomicRMWInst::Max:
3118 case AtomicRMWInst::Min:
3119 case AtomicRMWInst::UMax:
3120 case AtomicRMWInst::UMin:
3121    llvm_unreachable("Unsupported atomic update operation");
3122 }
3123  llvm_unreachable("Unsupported atomic update operation");
3124}
3125
3126std::pair<Value *, Value *>
3127OpenMPIRBuilder::emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr,
3128 AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
3129 AtomicUpdateCallbackTy &UpdateOp,
3130 bool VolatileX, bool IsXLHSInRHSPart) {
3131 Type *XElemTy = X->getType()->getPointerElementType();
3132
3133 bool DoCmpExch =
3134 ((RMWOp == AtomicRMWInst::BAD_BINOP) || (RMWOp == AtomicRMWInst::FAdd)) ||
3135 (RMWOp == AtomicRMWInst::FSub) ||
3136 (RMWOp == AtomicRMWInst::Sub && !IsXLHSInRHSPart);
3137
3138 std::pair<Value *, Value *> Res;
3139 if (XElemTy->isIntegerTy() && !DoCmpExch) {
3140 Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO);
3141 // not needed except in case of postfix captures. Generate anyway for
3142 // consistency with the else part. Will be removed with any DCE pass.
3143 Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp);
3144 } else {
3145 unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace();
3146 IntegerType *IntCastTy =
3147 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
3148 Value *XBCast =
3149 Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
3150 LoadInst *OldVal =
3151 Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load");
3152 OldVal->setAtomic(AO);
3153 // CurBB
3154 // | /---\
3155 // ContBB |
3156 // | \---/
3157 // ExitBB
3158 BasicBlock *CurBB = Builder.GetInsertBlock();
3159 Instruction *CurBBTI = CurBB->getTerminator();
3160 CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable();
3161 BasicBlock *ExitBB =
3162 CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit");
3163 BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(),
3164 X->getName() + ".atomic.cont");
3165 ContBB->getTerminator()->eraseFromParent();
3166 Builder.SetInsertPoint(ContBB);
3167 llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2);
3168 PHI->addIncoming(OldVal, CurBB);
3169 AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy);
3170 NewAtomicAddr->setName(X->getName() + "x.new.val");
3171 NewAtomicAddr->moveBefore(AllocIP);
3172 IntegerType *NewAtomicCastTy =
3173 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
3174 bool IsIntTy = XElemTy->isIntegerTy();
3175 Value *NewAtomicIntAddr =
3176 (IsIntTy)
3177 ? NewAtomicAddr
3178 : Builder.CreateBitCast(NewAtomicAddr,
3179 NewAtomicCastTy->getPointerTo(Addrspace));
3180 Value *OldExprVal = PHI;
3181 if (!IsIntTy) {
3182 if (XElemTy->isFloatingPointTy()) {
3183 OldExprVal = Builder.CreateBitCast(PHI, XElemTy,
3184 X->getName() + ".atomic.fltCast");
3185 } else {
3186 OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy,
3187 X->getName() + ".atomic.ptrCast");
3188 }
3189 }
3190
3191 Value *Upd = UpdateOp(OldExprVal, Builder);
3192 Builder.CreateStore(Upd, NewAtomicAddr);
3193 LoadInst *DesiredVal = Builder.CreateLoad(XElemTy, NewAtomicIntAddr);
3194 Value *XAddr =
3195 (IsIntTy)
3196 ? X
3197 : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
3198 AtomicOrdering Failure =
3199 llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
3200 AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg(
3201 XAddr, OldExprVal, DesiredVal, llvm::MaybeAlign(), AO, Failure);
3202 Result->setVolatile(VolatileX);
3203 Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0);
3204 Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1);
3205 PHI->addIncoming(PreviousVal, Builder.GetInsertBlock());
3206 Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB);
3207
3208 Res.first = OldExprVal;
3209 Res.second = Upd;
3210
3211 // set Insertion point in exit block
3212 if (UnreachableInst *ExitTI =
3213 dyn_cast<UnreachableInst>(ExitBB->getTerminator())) {
3214 CurBBTI->eraseFromParent();
3215 Builder.SetInsertPoint(ExitBB);
3216 } else {
3217 Builder.SetInsertPoint(ExitTI);
3218 }
3219 }
3220
3221 return Res;
3222}
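
For reference, the control flow emitted above (a single atomicrmw on the plain-integer fast path, otherwise an atomic load followed by the CurBB -> ContBB -> ExitBB compare-exchange retry loop) behaves like the following C++-level sketch. This is only an illustration of the runtime semantics, not code from this file; the name atomic_update_model and the UpdateOp signature used here are assumptions.

#include <atomic>
#include <functional>
#include <utility>

// Illustrative model of emitAtomicUpdate's two strategies (assumes an
// integral T); all names here are made up.
template <typename T>
std::pair<T, T> atomic_update_model(std::atomic<T> &X, T Expr,
                                    const std::function<T(T)> &UpdateOp,
                                    bool UseAtomicRMWAdd) {
  if (UseAtomicRMWAdd) {
    // Fast path: one atomicrmw. It yields the *old* value, so the *new*
    // value is recomputed separately (cf. emitRMWOpAsInstruction above).
    T Old = X.fetch_add(Expr, std::memory_order_seq_cst);
    return {Old, Old + Expr};
  }
  // General path: atomic load, then a compare-exchange retry loop, which is
  // what the CurBB -> ContBB -> ExitBB blocks above implement in IR.
  T Old = X.load(std::memory_order_seq_cst);
  T Upd = UpdateOp(Old);
  while (!X.compare_exchange_strong(Old, Upd, std::memory_order_seq_cst))
    Upd = UpdateOp(Old); // Old was refreshed by the failed exchange.
  return {Old, Upd};     // Res.first = old value, Res.second = updated value.
}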
3223
3224OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture(
3225 const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
3226 AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
3227 AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
3228 bool UpdateExpr, bool IsPostfixUpdate, bool IsXLHSInRHSPart) {
3229 if (!updateToLocation(Loc))
3230 return Loc.IP;
3231
3232  LLVM_DEBUG({
3233    Type *XTy = X.Var->getType();
3234    assert(XTy->isPointerTy() &&
3235           "OMP Atomic expects a pointer to target memory");
3236    Type *XElemTy = XTy->getPointerElementType();
3237    assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
3238            XElemTy->isPointerTy()) &&
3239           "OMP atomic capture expected a scalar type");
3240    assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
3241           "OpenMP atomic does not support LT or GT operations");
3242  });
3243
3244 // If UpdateExpr is 'x' updated with some `expr` not based on 'x',
3245 // 'x' is simply atomically rewritten with 'expr'.
3246 AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg);
3247 std::pair<Value *, Value *> Result =
3248 emitAtomicUpdate(AllocIP, X.Var, Expr, AO, AtomicOp, UpdateOp,
3249 X.IsVolatile, IsXLHSInRHSPart);
3250
3251 Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second);
3252 Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile);
3253
3254 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture);
3255 return Builder.saveIP();
3256}
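
The IsPostfixUpdate flag above selects which of the two values returned by emitAtomicUpdate is stored into V: the value of X before the update (Result.first) or after it (Result.second). At the source level this corresponds to the two forms of the OpenMP capture construct, sketched below in plain C/C++ with OpenMP pragmas; this is illustrative and not code from this file.

// Compile with -fopenmp; the variable names and 'expr' are illustrative.
void capture_forms(int &v, int &x, int expr) {
  // Postfix capture: v receives x's value *before* the update
  // (Result.first, i.e. the old value).
  #pragma omp atomic capture
  { v = x; x = x + expr; }

  // Prefix capture: v receives x's value *after* the update
  // (Result.second, i.e. the updated value).
  #pragma omp atomic capture
  { x = x + expr; v = x; }
}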
3257
3258GlobalVariable *
3259OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
3260 std::string VarName) {
3261 llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get(
3262 llvm::ArrayType::get(
3263 llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()),
3264 Names);
3265 auto *MapNamesArrayGlobal = new llvm::GlobalVariable(
3266 M, MapNamesArrayInit->getType(),
3267 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MapNamesArrayInit,
3268 VarName);
3269 return MapNamesArrayGlobal;
3270}
3271
3272// Create all simple and struct types exposed by the runtime and remember
3273// the llvm::PointerTypes of them for easy access later.
3274void OpenMPIRBuilder::initializeTypes(Module &M) {
3275 LLVMContext &Ctx = M.getContext();
3276 StructType *T;
3277#define OMP_TYPE(VarName, InitValue) VarName = InitValue;
3278#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
3279 VarName##Ty = ArrayType::get(ElemTy, ArraySize); \
3280 VarName##PtrTy = PointerType::getUnqual(VarName##Ty);
3281#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
3282 VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg); \
3283 VarName##Ptr = PointerType::getUnqual(VarName);
3284#define OMP_STRUCT_TYPE(VarName, StructName, ...) \
3285 T = StructType::getTypeByName(Ctx, StructName); \
3286 if (!T) \
3287 T = StructType::create(Ctx, {__VA_ARGS__}, StructName); \
3288 VarName = T; \
3289 VarName##Ptr = PointerType::getUnqual(T);
3290#include "llvm/Frontend/OpenMP/OMPKinds.def"
3291}
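
initializeTypes relies on the X-macro pattern: OMPKinds.def supplies one macro invocation per runtime type, and each #define above rewrites that invocation into member initializations when the .def file is included. The standalone sketch below demonstrates the same technique; the table entries and struct names in it are made up for illustration and need not match OMPKinds.def.

#include <iostream>

// Hypothetical entry table standing in for OMPKinds.def.
#define DEMO_TYPES(X)                                                          \
  X(Ident, "struct.ident_t")                                                   \
  X(Task, "struct.kmp_task_t")

int main() {
  // Each expansion of DEMO_TYPE turns one table entry into a statement, just
  // as the #defines above turn each OMPKinds.def entry into initializations
  // of the corresponding OpenMPIRBuilder members.
#define DEMO_TYPE(VarName, StructName)                                         \
  std::cout << #VarName << " -> " << StructName << "\n";
  DEMO_TYPES(DEMO_TYPE)
#undef DEMO_TYPE
  return 0;
}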
3292
3293void OpenMPIRBuilder::OutlineInfo::collectBlocks(
3294 SmallPtrSetImpl<BasicBlock *> &BlockSet,
3295 SmallVectorImpl<BasicBlock *> &BlockVector) {
3296 SmallVector<BasicBlock *, 32> Worklist;
3297 BlockSet.insert(EntryBB);
3298 BlockSet.insert(ExitBB);
3299
3300 Worklist.push_back(EntryBB);
3301 while (!Worklist.empty()) {
3302 BasicBlock *BB = Worklist.pop_back_val();
3303 BlockVector.push_back(BB);
3304 for (BasicBlock *SuccBB : successors(BB))
3305 if (BlockSet.insert(SuccBB).second)
3306 Worklist.push_back(SuccBB);
3307 }
3308}
3309
3310void CanonicalLoopInfo::collectControlBlocks(
3311 SmallVectorImpl<BasicBlock *> &BBs) {
3312 // We only count those BBs as control block for which we do not need to
3313 // reverse the CFG, i.e. not the loop body which can contain arbitrary control
3314 // flow. For consistency, this also means we do not add the Body block, which
3315 // is just the entry to the body code.
3316 BBs.reserve(BBs.size() + 6);
3317 BBs.append({Preheader, Header, Cond, Latch, Exit, After});
3318}
3319
3320void CanonicalLoopInfo::assertOK() const {
3321#ifndef NDEBUG
3322 // No constraints if this object currently does not describe a loop.
3323 if (!isValid())
3324 return;
3325
3326 // Verify standard control-flow we use for OpenMP loops.
3327 assert(Preheader)(static_cast <bool> (Preheader) ? void (0) : __assert_fail
("Preheader", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3327, __extension__ __PRETTY_FUNCTION__))
;
3328 assert(isa<BranchInst>(Preheader->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Preheader->
getTerminator()) && "Preheader must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Preheader->getTerminator()) && \"Preheader must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3329, __extension__ __PRETTY_FUNCTION__))
3329 "Preheader must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Preheader->
getTerminator()) && "Preheader must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Preheader->getTerminator()) && \"Preheader must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3329, __extension__ __PRETTY_FUNCTION__))
;
3330 assert(Preheader->getSingleSuccessor() == Header &&(static_cast <bool> (Preheader->getSingleSuccessor()
== Header && "Preheader must jump to header") ? void
(0) : __assert_fail ("Preheader->getSingleSuccessor() == Header && \"Preheader must jump to header\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3331, __extension__ __PRETTY_FUNCTION__))
3331 "Preheader must jump to header")(static_cast <bool> (Preheader->getSingleSuccessor()
== Header && "Preheader must jump to header") ? void
(0) : __assert_fail ("Preheader->getSingleSuccessor() == Header && \"Preheader must jump to header\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3331, __extension__ __PRETTY_FUNCTION__))
;
3332
3333 assert(Header)(static_cast <bool> (Header) ? void (0) : __assert_fail
("Header", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3333, __extension__ __PRETTY_FUNCTION__))
;
3334 assert(isa<BranchInst>(Header->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Header->getTerminator
()) && "Header must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Header->getTerminator()) && \"Header must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3335, __extension__ __PRETTY_FUNCTION__))
3335 "Header must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Header->getTerminator
()) && "Header must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Header->getTerminator()) && \"Header must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3335, __extension__ __PRETTY_FUNCTION__))
;
3336 assert(Header->getSingleSuccessor() == Cond &&(static_cast <bool> (Header->getSingleSuccessor() ==
Cond && "Header must jump to exiting block") ? void (
0) : __assert_fail ("Header->getSingleSuccessor() == Cond && \"Header must jump to exiting block\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3337, __extension__ __PRETTY_FUNCTION__))
3337 "Header must jump to exiting block")(static_cast <bool> (Header->getSingleSuccessor() ==
Cond && "Header must jump to exiting block") ? void (
0) : __assert_fail ("Header->getSingleSuccessor() == Cond && \"Header must jump to exiting block\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3337, __extension__ __PRETTY_FUNCTION__))
;
3338
3339 assert(Cond)(static_cast <bool> (Cond) ? void (0) : __assert_fail (
"Cond", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3339, __extension__ __PRETTY_FUNCTION__))
;
3340 assert(Cond->getSinglePredecessor() == Header &&(static_cast <bool> (Cond->getSinglePredecessor() ==
Header && "Exiting block only reachable from header"
) ? void (0) : __assert_fail ("Cond->getSinglePredecessor() == Header && \"Exiting block only reachable from header\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3341, __extension__ __PRETTY_FUNCTION__))
3341 "Exiting block only reachable from header")(static_cast <bool> (Cond->getSinglePredecessor() ==
Header && "Exiting block only reachable from header"
) ? void (0) : __assert_fail ("Cond->getSinglePredecessor() == Header && \"Exiting block only reachable from header\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3341, __extension__ __PRETTY_FUNCTION__))
;
3342
3343 assert(isa<BranchInst>(Cond->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Cond->getTerminator
()) && "Exiting block must terminate with conditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Cond->getTerminator()) && \"Exiting block must terminate with conditional branch\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3344, __extension__ __PRETTY_FUNCTION__))
3344 "Exiting block must terminate with conditional branch")(static_cast <bool> (isa<BranchInst>(Cond->getTerminator
()) && "Exiting block must terminate with conditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Cond->getTerminator()) && \"Exiting block must terminate with conditional branch\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3344, __extension__ __PRETTY_FUNCTION__))
;
3345 assert(size(successors(Cond)) == 2 &&(static_cast <bool> (size(successors(Cond)) == 2 &&
"Exiting block must have two successors") ? void (0) : __assert_fail
("size(successors(Cond)) == 2 && \"Exiting block must have two successors\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3346, __extension__ __PRETTY_FUNCTION__))
3346 "Exiting block must have two successors")(static_cast <bool> (size(successors(Cond)) == 2 &&
"Exiting block must have two successors") ? void (0) : __assert_fail
("size(successors(Cond)) == 2 && \"Exiting block must have two successors\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3346, __extension__ __PRETTY_FUNCTION__))
;
3347 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body &&(static_cast <bool> (cast<BranchInst>(Cond->getTerminator
())->getSuccessor(0) == Body && "Exiting block's first successor jump to the body"
) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body && \"Exiting block's first successor jump to the body\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3348, __extension__ __PRETTY_FUNCTION__))
3348 "Exiting block's first successor jump to the body")(static_cast <bool> (cast<BranchInst>(Cond->getTerminator
())->getSuccessor(0) == Body && "Exiting block's first successor jump to the body"
) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body && \"Exiting block's first successor jump to the body\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3348, __extension__ __PRETTY_FUNCTION__))
;
3349 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit &&(static_cast <bool> (cast<BranchInst>(Cond->getTerminator
())->getSuccessor(1) == Exit && "Exiting block's second successor must exit the loop"
) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit && \"Exiting block's second successor must exit the loop\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3350, __extension__ __PRETTY_FUNCTION__))
3350 "Exiting block's second successor must exit the loop")(static_cast <bool> (cast<BranchInst>(Cond->getTerminator
())->getSuccessor(1) == Exit && "Exiting block's second successor must exit the loop"
) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit && \"Exiting block's second successor must exit the loop\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3350, __extension__ __PRETTY_FUNCTION__))
;
3351
3352 assert(Body)(static_cast <bool> (Body) ? void (0) : __assert_fail (
"Body", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3352, __extension__ __PRETTY_FUNCTION__))
;
3353 assert(Body->getSinglePredecessor() == Cond &&(static_cast <bool> (Body->getSinglePredecessor() ==
Cond && "Body only reachable from exiting block") ? void
(0) : __assert_fail ("Body->getSinglePredecessor() == Cond && \"Body only reachable from exiting block\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3354, __extension__ __PRETTY_FUNCTION__))
3354 "Body only reachable from exiting block")(static_cast <bool> (Body->getSinglePredecessor() ==
Cond && "Body only reachable from exiting block") ? void
(0) : __assert_fail ("Body->getSinglePredecessor() == Cond && \"Body only reachable from exiting block\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3354, __extension__ __PRETTY_FUNCTION__))
;
3355 assert(!isa<PHINode>(Body->front()))(static_cast <bool> (!isa<PHINode>(Body->front
())) ? void (0) : __assert_fail ("!isa<PHINode>(Body->front())"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3355, __extension__ __PRETTY_FUNCTION__))
;
3356
3357 assert(Latch)(static_cast <bool> (Latch) ? void (0) : __assert_fail (
"Latch", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3357, __extension__ __PRETTY_FUNCTION__))
;
3358 assert(isa<BranchInst>(Latch->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Latch->getTerminator
()) && "Latch must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Latch->getTerminator()) && \"Latch must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3359, __extension__ __PRETTY_FUNCTION__))
3359 "Latch must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Latch->getTerminator
()) && "Latch must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Latch->getTerminator()) && \"Latch must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3359, __extension__ __PRETTY_FUNCTION__))
;
3360 assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header")(static_cast <bool> (Latch->getSingleSuccessor() == Header
&& "Latch must jump to header") ? void (0) : __assert_fail
("Latch->getSingleSuccessor() == Header && \"Latch must jump to header\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3360, __extension__ __PRETTY_FUNCTION__))
;
3361 // TODO: To support simple redirecting of the end of the body code that has
3362 // multiple; introduce another auxiliary basic block like preheader and after.
3363 assert(Latch->getSinglePredecessor() != nullptr)(static_cast <bool> (Latch->getSinglePredecessor() !=
nullptr) ? void (0) : __assert_fail ("Latch->getSinglePredecessor() != nullptr"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3363, __extension__ __PRETTY_FUNCTION__))
;
3364 assert(!isa<PHINode>(Latch->front()))(static_cast <bool> (!isa<PHINode>(Latch->front
())) ? void (0) : __assert_fail ("!isa<PHINode>(Latch->front())"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3364, __extension__ __PRETTY_FUNCTION__))
;
3365
3366 assert(Exit)(static_cast <bool> (Exit) ? void (0) : __assert_fail (
"Exit", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3366, __extension__ __PRETTY_FUNCTION__))
;
3367 assert(isa<BranchInst>(Exit->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Exit->getTerminator
()) && "Exit block must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Exit->getTerminator()) && \"Exit block must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3368, __extension__ __PRETTY_FUNCTION__))
3368 "Exit block must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Exit->getTerminator
()) && "Exit block must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Exit->getTerminator()) && \"Exit block must terminate with unconditional branch\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3368, __extension__ __PRETTY_FUNCTION__))
;
3369 assert(Exit->getSingleSuccessor() == After &&(static_cast <bool> (Exit->getSingleSuccessor() == After
&& "Exit block must jump to after block") ? void (0)
: __assert_fail ("Exit->getSingleSuccessor() == After && \"Exit block must jump to after block\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3370, __extension__ __PRETTY_FUNCTION__))
3370 "Exit block must jump to after block")(static_cast <bool> (Exit->getSingleSuccessor() == After
&& "Exit block must jump to after block") ? void (0)
: __assert_fail ("Exit->getSingleSuccessor() == After && \"Exit block must jump to after block\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3370, __extension__ __PRETTY_FUNCTION__))
;
3371
3372 assert(After)(static_cast <bool> (After) ? void (0) : __assert_fail (
"After", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3372, __extension__ __PRETTY_FUNCTION__))
;
3373 assert(After->getSinglePredecessor() == Exit &&(static_cast <bool> (After->getSinglePredecessor() ==
Exit && "After block only reachable from exit block"
) ? void (0) : __assert_fail ("After->getSinglePredecessor() == Exit && \"After block only reachable from exit block\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3374, __extension__ __PRETTY_FUNCTION__))
3374 "After block only reachable from exit block")(static_cast <bool> (After->getSinglePredecessor() ==
Exit && "After block only reachable from exit block"
) ? void (0) : __assert_fail ("After->getSinglePredecessor() == Exit && \"After block only reachable from exit block\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3374, __extension__ __PRETTY_FUNCTION__))
;
3375 assert(After->empty() || !isa<PHINode>(After->front()))(static_cast <bool> (After->empty() || !isa<PHINode
>(After->front())) ? void (0) : __assert_fail ("After->empty() || !isa<PHINode>(After->front())"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 3375, __extension__ __PRETTY_FUNCTION__))
;
3376
3377 Instruction *IndVar = getIndVar();
3378 assert(IndVar && "Canonical induction variable not found?");
3379 assert(isa<IntegerType>(IndVar->getType()) &&
3380        "Induction variable must be an integer");
3381 assert(cast<PHINode>(IndVar)->getParent() == Header &&
3382        "Induction variable must be a PHI in the loop header");
3383 assert(cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader);
3384 assert(
3385     cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero());
3386 assert(cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch);
3387
3388 auto *NextIndVar = cast<PHINode>(IndVar)->getIncomingValue(1);
3389 assert(cast<Instruction>(NextIndVar)->getParent() == Latch);
3390 assert(cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add);
3391 assert(cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar);
3392 assert(cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1))
3393            ->isOne());
3394
3395 Value *TripCount = getTripCount();
3396 assert(TripCount && "Loop trip count not found?");
3397 assert(IndVar->getType() == TripCount->getType() &&
3398        "Trip count and induction variable must have the same type");
3399
3400 auto *CmpI = cast<CmpInst>(&Cond->front());
3401 assert(CmpI->getPredicate() == CmpInst::ICMP_ULT &&
3402        "Exit condition must be a signed less-than comparison");
3403 assert(CmpI->getOperand(0) == IndVar &&
3404        "Exit condition must compare the induction variable");
3405 assert(CmpI->getOperand(1) == TripCount &&
3406        "Exit condition must compare with the trip count");
3407#endif
3408}
3409
3410void CanonicalLoopInfo::invalidate() {
3411 Preheader = nullptr;
3412 Header = nullptr;
3413 Cond = nullptr;
3414 Body = nullptr;
3415 Latch = nullptr;
3416 Exit = nullptr;
3417 After = nullptr;
3418}
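
For orientation, the assertions in assertOK() above (source lines 3377-3406) pin down one specific loop shape: an integer induction variable that is a PHI node in Header, enters as zero from Preheader, is incremented by exactly one in Latch, and is compared against the trip count in Cond with the ICMP_ULT predicate (unsigned less-than, even though the message at line 3402 says "signed"). The sketch below restates that shape as ordinary C++; it is illustrative only and not part of OMPIRBuilder.cpp, and the names canonicalLoopShape, TripCount, and IV are invented for the example.

#include <cstdint>

// Illustrative sketch of the control flow that CanonicalLoopInfo::assertOK()
// verifies at the IR level. The comments name the blocks checked above
// (Preheader, Header, Cond, Body, Latch, Exit, After).
void canonicalLoopShape(uint64_t TripCount) {
  uint64_t IV = 0;           // Preheader: induction variable starts at zero.
  for (;;) {
    // Header feeds Cond: unsigned less-than comparison against the trip count.
    if (!(IV < TripCount))
      break;                 // Cond branches to Exit, which jumps to After.
    // Body: the loop payload runs here with the current value of IV.
    IV = IV + 1;             // Latch: increment by one, branch back to Header.
  }
}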