Bug Summary

File: tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp
Warning: line 6178, column 5
Called C++ object pointer is null
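
This diagnostic is produced by the analyzer's core checkers (enabled with -analyzer-checker=core in the invocation shown below): it fires when a non-static member function is called through an object pointer that is null on at least one feasible path. A minimal sketch of the defect class, using hypothetical names rather than code from CGOpenMPRuntime.cpp:

#include <cstdio>

struct Helper {
  void emit() { std::puts("emit"); }
};

// Hypothetical illustration only. Because H is compared against null,
// the analyzer tracks a path on which H == nullptr; the unconditional
// call below is then flagged on that path.
void use(Helper *H) {
  if (H)
    std::puts("have a helper");
  H->emit();   // warning: Called C++ object pointer is null
}

The annotated listing that follows records the path the analyzer walked through CGOpenMPRuntime.cpp to reach the flagged call at line 6178.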

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGOpenMPRuntime.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -relaxed-aliasing -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn325118/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-7~svn325118/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-7~svn325118/tools/clang/include -I /build/llvm-toolchain-snapshot-7~svn325118/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-7~svn325118/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn325118/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn325118/build-llvm/tools/clang/lib/CodeGen -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fno-common -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-02-14-150435-17243-1 -x c++ /build/llvm-toolchain-snapshot-7~svn325118/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp

/build/llvm-toolchain-snapshot-7~svn325118/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp

1//===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This provides a class for OpenMP runtime code generation.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGCXXABI.h"
15#include "CGCleanup.h"
16#include "CGOpenMPRuntime.h"
17#include "CodeGenFunction.h"
18#include "clang/CodeGen/ConstantInitBuilder.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/StmtOpenMP.h"
21#include "llvm/ADT/ArrayRef.h"
22#include "llvm/ADT/BitmaskEnum.h"
23#include "llvm/Bitcode/BitcodeReader.h"
24#include "llvm/IR/CallSite.h"
25#include "llvm/IR/DerivedTypes.h"
26#include "llvm/IR/GlobalValue.h"
27#include "llvm/IR/Value.h"
28#include "llvm/Support/Format.h"
29#include "llvm/Support/raw_ostream.h"
30#include <cassert>
31
32using namespace clang;
33using namespace CodeGen;
34
35namespace {
36/// \brief Base class for handling code generation inside OpenMP regions.
37class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
38public:
39 /// \brief Kinds of OpenMP regions used in codegen.
40 enum CGOpenMPRegionKind {
41 /// \brief Region with outlined function for standalone 'parallel'
42 /// directive.
43 ParallelOutlinedRegion,
44 /// \brief Region with outlined function for standalone 'task' directive.
45 TaskOutlinedRegion,
46 /// \brief Region for constructs that do not require function outlining,
47 /// like 'for', 'sections', 'atomic' etc. directives.
48 InlinedRegion,
49 /// \brief Region with outlined function for standalone 'target' directive.
50 TargetRegion,
51 };
52
53 CGOpenMPRegionInfo(const CapturedStmt &CS,
54 const CGOpenMPRegionKind RegionKind,
55 const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
56 bool HasCancel)
57 : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
58 CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
59
60 CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
61 const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
62 bool HasCancel)
63 : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
64 Kind(Kind), HasCancel(HasCancel) {}
65
66 /// \brief Get a variable or parameter for storing global thread id
67 /// inside OpenMP construct.
68 virtual const VarDecl *getThreadIDVariable() const = 0;
69
70 /// \brief Emit the captured statement body.
71 void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
72
73 /// \brief Get an LValue for the current ThreadID variable.
74 /// \return LValue for thread id variable. This LValue always has type int32*.
75 virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
76
77 virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}
78
79 CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
80
81 OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
82
83 bool hasCancel() const { return HasCancel; }
84
85 static bool classof(const CGCapturedStmtInfo *Info) {
86 return Info->getKind() == CR_OpenMP;
87 }
88
89 ~CGOpenMPRegionInfo() override = default;
90
91protected:
92 CGOpenMPRegionKind RegionKind;
93 RegionCodeGenTy CodeGen;
94 OpenMPDirectiveKind Kind;
95 bool HasCancel;
96};
97
98/// \brief API for captured statement code generation in OpenMP constructs.
99class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
100public:
101 CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
102 const RegionCodeGenTy &CodeGen,
103 OpenMPDirectiveKind Kind, bool HasCancel,
104 StringRef HelperName)
105 : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
106 HasCancel),
107 ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
108 assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
109 }
110
111 /// \brief Get a variable or parameter for storing global thread id
112 /// inside OpenMP construct.
113 const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
114
115 /// \brief Get the name of the capture helper.
116 StringRef getHelperName() const override { return HelperName; }
117
118 static bool classof(const CGCapturedStmtInfo *Info) {
119 return CGOpenMPRegionInfo::classof(Info) &&
120 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
121 ParallelOutlinedRegion;
122 }
123
124private:
125 /// \brief A variable or parameter storing global thread id for OpenMP
126 /// constructs.
127 const VarDecl *ThreadIDVar;
128 StringRef HelperName;
129};
130
131/// \brief API for captured statement code generation in OpenMP constructs.
132class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
133public:
134 class UntiedTaskActionTy final : public PrePostActionTy {
135 bool Untied;
136 const VarDecl *PartIDVar;
137 const RegionCodeGenTy UntiedCodeGen;
138 llvm::SwitchInst *UntiedSwitch = nullptr;
139
140 public:
141 UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
142 const RegionCodeGenTy &UntiedCodeGen)
143 : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
144 void Enter(CodeGenFunction &CGF) override {
145 if (Untied) {
146 // Emit task switching point.
147 auto PartIdLVal = CGF.EmitLoadOfPointerLValue(
148 CGF.GetAddrOfLocalVar(PartIDVar),
149 PartIDVar->getType()->castAs<PointerType>());
150 auto *Res = CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
151 auto *DoneBB = CGF.createBasicBlock(".untied.done.");
152 UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
153 CGF.EmitBlock(DoneBB);
154 CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
155 CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
156 UntiedSwitch->addCase(CGF.Builder.getInt32(0),
157 CGF.Builder.GetInsertBlock());
158 emitUntiedSwitch(CGF);
159 }
160 }
161 void emitUntiedSwitch(CodeGenFunction &CGF) const {
162 if (Untied) {
163 auto PartIdLVal = CGF.EmitLoadOfPointerLValue(
164 CGF.GetAddrOfLocalVar(PartIDVar),
165 PartIDVar->getType()->castAs<PointerType>());
166 CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
167 PartIdLVal);
168 UntiedCodeGen(CGF);
169 CodeGenFunction::JumpDest CurPoint =
170 CGF.getJumpDestInCurrentScope(".untied.next.");
171 CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
172 CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
173 UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
174 CGF.Builder.GetInsertBlock());
175 CGF.EmitBranchThroughCleanup(CurPoint);
176 CGF.EmitBlock(CurPoint.getBlock());
177 }
178 }
179 unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
180 };
181 CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
182 const VarDecl *ThreadIDVar,
183 const RegionCodeGenTy &CodeGen,
184 OpenMPDirectiveKind Kind, bool HasCancel,
185 const UntiedTaskActionTy &Action)
186 : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
187 ThreadIDVar(ThreadIDVar), Action(Action) {
188 assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
189 }
190
191 /// \brief Get a variable or parameter for storing global thread id
192 /// inside OpenMP construct.
193 const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
194
195 /// \brief Get an LValue for the current ThreadID variable.
196 LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
197
198 /// \brief Get the name of the capture helper.
199 StringRef getHelperName() const override { return ".omp_outlined."; }
200
201 void emitUntiedSwitch(CodeGenFunction &CGF) override {
202 Action.emitUntiedSwitch(CGF);
203 }
204
205 static bool classof(const CGCapturedStmtInfo *Info) {
206 return CGOpenMPRegionInfo::classof(Info) &&
207 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
208 TaskOutlinedRegion;
209 }
210
211private:
212 /// \brief A variable or parameter storing global thread id for OpenMP
213 /// constructs.
214 const VarDecl *ThreadIDVar;
215 /// Action for emitting code for untied tasks.
216 const UntiedTaskActionTy &Action;
217};
218
219/// \brief API for inlined captured statement code generation in OpenMP
220/// constructs.
221class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
222public:
223 CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
224 const RegionCodeGenTy &CodeGen,
225 OpenMPDirectiveKind Kind, bool HasCancel)
226 : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
227 OldCSI(OldCSI),
228 OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
229
230 // \brief Retrieve the value of the context parameter.
231 llvm::Value *getContextValue() const override {
232 if (OuterRegionInfo)
233 return OuterRegionInfo->getContextValue();
234 llvm_unreachable("No context value for inlined OpenMP region");
235 }
236
237 void setContextValue(llvm::Value *V) override {
238 if (OuterRegionInfo) {
239 OuterRegionInfo->setContextValue(V);
240 return;
241 }
242 llvm_unreachable("No context value for inlined OpenMP region");
243 }
244
245 /// \brief Lookup the captured field decl for a variable.
246 const FieldDecl *lookup(const VarDecl *VD) const override {
247 if (OuterRegionInfo)
248 return OuterRegionInfo->lookup(VD);
249 // If there is no outer outlined region, no need to look up in a list of
250 // captured variables, we can use the original one.
251 return nullptr;
252 }
253
254 FieldDecl *getThisFieldDecl() const override {
255 if (OuterRegionInfo)
256 return OuterRegionInfo->getThisFieldDecl();
257 return nullptr;
258 }
259
260 /// \brief Get a variable or parameter for storing global thread id
261 /// inside OpenMP construct.
262 const VarDecl *getThreadIDVariable() const override {
263 if (OuterRegionInfo)
264 return OuterRegionInfo->getThreadIDVariable();
265 return nullptr;
266 }
267
268 /// \brief Get an LValue for the current ThreadID variable.
269 LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
270 if (OuterRegionInfo)
271 return OuterRegionInfo->getThreadIDVariableLValue(CGF);
272 llvm_unreachable("No LValue for inlined OpenMP construct");
273 }
274
275 /// \brief Get the name of the capture helper.
276 StringRef getHelperName() const override {
277 if (auto *OuterRegionInfo = getOldCSI())
278 return OuterRegionInfo->getHelperName();
279 llvm_unreachable("No helper name for inlined OpenMP construct");
280 }
281
282 void emitUntiedSwitch(CodeGenFunction &CGF) override {
283 if (OuterRegionInfo)
284 OuterRegionInfo->emitUntiedSwitch(CGF);
285 }
286
287 CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
288
289 static bool classof(const CGCapturedStmtInfo *Info) {
290 return CGOpenMPRegionInfo::classof(Info) &&
291 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
292 }
293
294 ~CGOpenMPInlinedRegionInfo() override = default;
295
296private:
297 /// \brief CodeGen info about outer OpenMP region.
298 CodeGenFunction::CGCapturedStmtInfo *OldCSI;
299 CGOpenMPRegionInfo *OuterRegionInfo;
300};
301
302/// \brief API for captured statement code generation in OpenMP target
303/// constructs. For these captures, implicit parameters are used instead of the
304/// captured fields. The name of the target region has to be unique in a given
305/// application so it is provided by the client, because only the client has
306/// the information to generate that.
307class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
308public:
309 CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
310 const RegionCodeGenTy &CodeGen, StringRef HelperName)
311 : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
312 /*HasCancel=*/false),
313 HelperName(HelperName) {}
314
315 /// \brief This is unused for target regions because each starts executing
316 /// with a single thread.
317 const VarDecl *getThreadIDVariable() const override { return nullptr; }
318
319 /// \brief Get the name of the capture helper.
320 StringRef getHelperName() const override { return HelperName; }
321
322 static bool classof(const CGCapturedStmtInfo *Info) {
323 return CGOpenMPRegionInfo::classof(Info) &&
324 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
325 }
326
327private:
328 StringRef HelperName;
329};
330
331static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
332 llvm_unreachable("No codegen for expressions");
333}
334/// \brief API for generation of expressions captured in an innermost OpenMP
335/// region.
336class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
337public:
338 CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
339 : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
340 OMPD_unknown,
341 /*HasCancel=*/false),
342 PrivScope(CGF) {
343 // Make sure the globals captured in the provided statement are local by
344 // using the privatization logic. We assume the same variable is not
345 // captured more than once.
346 for (auto &C : CS.captures()) {
347 if (!C.capturesVariable() && !C.capturesVariableByCopy())
348 continue;
349
350 const VarDecl *VD = C.getCapturedVar();
351 if (VD->isLocalVarDeclOrParm())
352 continue;
353
354 DeclRefExpr DRE(const_cast<VarDecl *>(VD),
355 /*RefersToEnclosingVariableOrCapture=*/false,
356 VD->getType().getNonReferenceType(), VK_LValue,
357 C.getLocation());
358 PrivScope.addPrivate(VD, [&CGF, &DRE]() -> Address {
359 return CGF.EmitLValue(&DRE).getAddress();
360 });
361 }
362 (void)PrivScope.Privatize();
363 }
364
365 /// \brief Lookup the captured field decl for a variable.
366 const FieldDecl *lookup(const VarDecl *VD) const override {
367 if (auto *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
368 return FD;
369 return nullptr;
370 }
371
372 /// \brief Emit the captured statement body.
373 void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
374 llvm_unreachable("No body for expressions");
375 }
376
377 /// \brief Get a variable or parameter for storing global thread id
378 /// inside OpenMP construct.
379 const VarDecl *getThreadIDVariable() const override {
380 llvm_unreachable("No thread id for expressions");
381 }
382
383 /// \brief Get the name of the capture helper.
384 StringRef getHelperName() const override {
385 llvm_unreachable("No helper name for expressions");
386 }
387
388 static bool classof(const CGCapturedStmtInfo *Info) { return false; }
389
390private:
391 /// Private scope to capture global variables.
392 CodeGenFunction::OMPPrivateScope PrivScope;
393};
394
395/// \brief RAII for emitting code of OpenMP constructs.
396class InlinedOpenMPRegionRAII {
397 CodeGenFunction &CGF;
398 llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
399 FieldDecl *LambdaThisCaptureField = nullptr;
400 const CodeGen::CGBlockInfo *BlockInfo = nullptr;
401
402public:
403 /// \brief Constructs region for combined constructs.
404 /// \param CodeGen Code generation sequence for combined directives. Includes
405 /// a list of functions used for code generation of implicitly inlined
406 /// regions.
407 InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
408 OpenMPDirectiveKind Kind, bool HasCancel)
409 : CGF(CGF) {
410 // Start emission for the construct.
411 CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
412 CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
413 std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
414 LambdaThisCaptureField = CGF.LambdaThisCaptureField;
415 CGF.LambdaThisCaptureField = nullptr;
416 BlockInfo = CGF.BlockInfo;
417 CGF.BlockInfo = nullptr;
418 }
419
420 ~InlinedOpenMPRegionRAII() {
421 // Restore original CapturedStmtInfo only if we're done with code emission.
422 auto *OldCSI =
423 cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
424 delete CGF.CapturedStmtInfo;
425 CGF.CapturedStmtInfo = OldCSI;
426 std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
427 CGF.LambdaThisCaptureField = LambdaThisCaptureField;
428 CGF.BlockInfo = BlockInfo;
429 }
430};
431
432/// \brief Values for bit flags used in the ident_t to describe the fields.
433/// All enumerated elements are named and described in accordance with the code
434/// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
435enum OpenMPLocationFlags : unsigned {
436 /// \brief Use trampoline for internal microtask.
437 OMP_IDENT_IMD = 0x01,
438 /// \brief Use c-style ident structure.
439 OMP_IDENT_KMPC = 0x02,
440 /// \brief Atomic reduction option for kmpc_reduce.
441 OMP_ATOMIC_REDUCE = 0x10,
442 /// \brief Explicit 'barrier' directive.
443 OMP_IDENT_BARRIER_EXPL = 0x20,
444 /// \brief Implicit barrier in code.
445 OMP_IDENT_BARRIER_IMPL = 0x40,
446 /// \brief Implicit barrier in 'for' directive.
447 OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
448 /// \brief Implicit barrier in 'sections' directive.
449 OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
450 /// \brief Implicit barrier in 'single' directive.
451 OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
452 /// Call of __kmp_for_static_init for static loop.
453 OMP_IDENT_WORK_LOOP = 0x200,
454 /// Call of __kmp_for_static_init for sections.
455 OMP_IDENT_WORK_SECTIONS = 0x400,
456 /// Call of __kmp_for_static_init for distribute.
457 OMP_IDENT_WORK_DISTRIBUTE = 0x800,
458 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
459};
460
461/// \brief Describes ident structure that describes a source location.
462/// All descriptions are taken from
463/// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
464/// Original structure:
465/// typedef struct ident {
466/// kmp_int32 reserved_1; /**< might be used in Fortran;
467/// see above */
468/// kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags;
469/// KMP_IDENT_KMPC identifies this union
470/// member */
471/// kmp_int32 reserved_2; /**< not really used in Fortran any more;
472/// see above */
473///#if USE_ITT_BUILD
474/// /* but currently used for storing
475/// region-specific ITT */
476/// /* contextual information. */
477///#endif /* USE_ITT_BUILD */
478/// kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for
479/// C++ */
480/// char const *psource; /**< String describing the source location.
481/// The string is composed of semi-colon separated
482// fields which describe the source file,
483/// the function and a pair of line numbers that
484/// delimit the construct.
485/// */
486/// } ident_t;
487enum IdentFieldIndex {
488 /// \brief might be used in Fortran
489 IdentField_Reserved_1,
490 /// \brief OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
491 IdentField_Flags,
492 /// \brief Not really used in Fortran any more
493 IdentField_Reserved_2,
494 /// \brief Source[4] in Fortran, do not use for C++
495 IdentField_Reserved_3,
496 /// \brief String describing the source location. The string is composed of
497 /// semi-colon separated fields which describe the source file, the function
498 /// and a pair of line numbers that delimit the construct.
499 IdentField_PSource
500};
501
502/// \brief Schedule types for 'omp for' loops (these enumerators are taken from
503/// the enum sched_type in kmp.h).
504enum OpenMPSchedType {
505 /// \brief Lower bound for default (unordered) versions.
506 OMP_sch_lower = 32,
507 OMP_sch_static_chunked = 33,
508 OMP_sch_static = 34,
509 OMP_sch_dynamic_chunked = 35,
510 OMP_sch_guided_chunked = 36,
511 OMP_sch_runtime = 37,
512 OMP_sch_auto = 38,
513 /// static with chunk adjustment (e.g., simd)
514 OMP_sch_static_balanced_chunked = 45,
515 /// \brief Lower bound for 'ordered' versions.
516 OMP_ord_lower = 64,
517 OMP_ord_static_chunked = 65,
518 OMP_ord_static = 66,
519 OMP_ord_dynamic_chunked = 67,
520 OMP_ord_guided_chunked = 68,
521 OMP_ord_runtime = 69,
522 OMP_ord_auto = 70,
523 OMP_sch_default = OMP_sch_static,
524 /// \brief dist_schedule types
525 OMP_dist_sch_static_chunked = 91,
526 OMP_dist_sch_static = 92,
527 /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
528 /// Set if the monotonic schedule modifier was present.
529 OMP_sch_modifier_monotonic = (1 << 29),
530 /// Set if the nonmonotonic schedule modifier was present.
531 OMP_sch_modifier_nonmonotonic = (1 << 30),
532};
533
534enum OpenMPRTLFunction {
535 /// \brief Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
536 /// kmpc_micro microtask, ...);
537 OMPRTL__kmpc_fork_call,
538 /// \brief Call to void *__kmpc_threadprivate_cached(ident_t *loc,
539 /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
540 OMPRTL__kmpc_threadprivate_cached,
541 /// \brief Call to void __kmpc_threadprivate_register( ident_t *,
542 /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
543 OMPRTL__kmpc_threadprivate_register,
544 // Call to __kmpc_int32 kmpc_global_thread_num(ident_t *loc);
545 OMPRTL__kmpc_global_thread_num,
546 // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
547 // kmp_critical_name *crit);
548 OMPRTL__kmpc_critical,
549 // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
550 // global_tid, kmp_critical_name *crit, uintptr_t hint);
551 OMPRTL__kmpc_critical_with_hint,
552 // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
553 // kmp_critical_name *crit);
554 OMPRTL__kmpc_end_critical,
555 // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
556 // global_tid);
557 OMPRTL__kmpc_cancel_barrier,
558 // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
559 OMPRTL__kmpc_barrier,
560 // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
561 OMPRTL__kmpc_for_static_fini,
562 // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
563 // global_tid);
564 OMPRTL__kmpc_serialized_parallel,
565 // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
566 // global_tid);
567 OMPRTL__kmpc_end_serialized_parallel,
568 // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
569 // kmp_int32 num_threads);
570 OMPRTL__kmpc_push_num_threads,
571 // Call to void __kmpc_flush(ident_t *loc);
572 OMPRTL__kmpc_flush,
573 // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
574 OMPRTL__kmpc_master,
575 // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
576 OMPRTL__kmpc_end_master,
577 // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
578 // int end_part);
579 OMPRTL__kmpc_omp_taskyield,
580 // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
581 OMPRTL__kmpc_single,
582 // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
583 OMPRTL__kmpc_end_single,
584 // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
585 // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
586 // kmp_routine_entry_t *task_entry);
587 OMPRTL__kmpc_omp_task_alloc,
588 // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
589 // new_task);
590 OMPRTL__kmpc_omp_task,
591 // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
592 // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
593 // kmp_int32 didit);
594 OMPRTL__kmpc_copyprivate,
595 // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
596 // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
597 // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
598 OMPRTL__kmpc_reduce,
599 // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
600 // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
601 // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
602 // *lck);
603 OMPRTL__kmpc_reduce_nowait,
604 // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
605 // kmp_critical_name *lck);
606 OMPRTL__kmpc_end_reduce,
607 // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
608 // kmp_critical_name *lck);
609 OMPRTL__kmpc_end_reduce_nowait,
610 // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
611 // kmp_task_t * new_task);
612 OMPRTL__kmpc_omp_task_begin_if0,
613 // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
614 // kmp_task_t * new_task);
615 OMPRTL__kmpc_omp_task_complete_if0,
616 // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
617 OMPRTL__kmpc_ordered,
618 // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
619 OMPRTL__kmpc_end_ordered,
620 // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
621 // global_tid);
622 OMPRTL__kmpc_omp_taskwait,
623 // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
624 OMPRTL__kmpc_taskgroup,
625 // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
626 OMPRTL__kmpc_end_taskgroup,
627 // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
628 // int proc_bind);
629 OMPRTL__kmpc_push_proc_bind,
630 // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
631 // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
632 // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
633 OMPRTL__kmpc_omp_task_with_deps,
634 // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
635 // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
636 // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
637 OMPRTL__kmpc_omp_wait_deps,
638 // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
639 // global_tid, kmp_int32 cncl_kind);
640 OMPRTL__kmpc_cancellationpoint,
641 // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
642 // kmp_int32 cncl_kind);
643 OMPRTL__kmpc_cancel,
644 // Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
645 // kmp_int32 num_teams, kmp_int32 thread_limit);
646 OMPRTL__kmpc_push_num_teams,
647 // Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
648 // microtask, ...);
649 OMPRTL__kmpc_fork_teams,
650 // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
651 // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
652 // sched, kmp_uint64 grainsize, void *task_dup);
653 OMPRTL__kmpc_taskloop,
654 // Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
655 // num_dims, struct kmp_dim *dims);
656 OMPRTL__kmpc_doacross_init,
657 // Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
658 OMPRTL__kmpc_doacross_fini,
659 // Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
660 // *vec);
661 OMPRTL__kmpc_doacross_post,
662 // Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
663 // *vec);
664 OMPRTL__kmpc_doacross_wait,
665 // Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
666 // *data);
667 OMPRTL__kmpc_task_reduction_init,
668 // Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
669 // *d);
670 OMPRTL__kmpc_task_reduction_get_th_data,
671
672 //
673 // Offloading related calls
674 //
675 // Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
676 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
677 // *arg_types);
678 OMPRTL__tgt_target,
679 // Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
680 // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
681 // *arg_types);
682 OMPRTL__tgt_target_nowait,
683 // Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
684 // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
685 // *arg_types, int32_t num_teams, int32_t thread_limit);
686 OMPRTL__tgt_target_teams,
687 // Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
688 // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
689 // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
690 OMPRTL__tgt_target_teams_nowait,
691 // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
692 OMPRTL__tgt_register_lib,
693 // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
694 OMPRTL__tgt_unregister_lib,
695 // Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
696 // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
697 OMPRTL__tgt_target_data_begin,
698 // Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
699 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
700 // *arg_types);
701 OMPRTL__tgt_target_data_begin_nowait,
702 // Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
703 // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
704 OMPRTL__tgt_target_data_end,
705 // Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
706 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
707 // *arg_types);
708 OMPRTL__tgt_target_data_end_nowait,
709 // Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
710 // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
711 OMPRTL__tgt_target_data_update,
712 // Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
713 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
714 // *arg_types);
715 OMPRTL__tgt_target_data_update_nowait,
716};
717
718/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
719/// region.
720class CleanupTy final : public EHScopeStack::Cleanup {
721 PrePostActionTy *Action;
722
723public:
724 explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
725 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
726 if (!CGF.HaveInsertPoint())
727 return;
728 Action->Exit(CGF);
729 }
730};
731
732} // anonymous namespace
733
734void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
735 CodeGenFunction::RunCleanupsScope Scope(CGF);
736 if (PrePostAction) {
737 CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
738 Callback(CodeGen, CGF, *PrePostAction);
739 } else {
740 PrePostActionTy Action;
741 Callback(CodeGen, CGF, Action);
742 }
743}
744
745/// Check if the combiner is a call to a UDR combiner and, if so, return the
746/// UDR decl used for reduction.
747static const OMPDeclareReductionDecl *
748getReductionInit(const Expr *ReductionOp) {
749 if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
750 if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
751 if (auto *DRE =
752 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
753 if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
754 return DRD;
755 return nullptr;
756}
757
758static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
759 const OMPDeclareReductionDecl *DRD,
760 const Expr *InitOp,
761 Address Private, Address Original,
762 QualType Ty) {
763 if (DRD->getInitializer()) {
764 std::pair<llvm::Function *, llvm::Function *> Reduction =
765 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
766 auto *CE = cast<CallExpr>(InitOp);
767 auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
768 const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
769 const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
770 auto *LHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
771 auto *RHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
772 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
773 PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
774 [=]() -> Address { return Private; });
775 PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
776 [=]() -> Address { return Original; });
777 (void)PrivateScope.Privatize();
778 RValue Func = RValue::get(Reduction.second);
779 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
780 CGF.EmitIgnoredExpr(InitOp);
781 } else {
782 llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
783 auto *GV = new llvm::GlobalVariable(
784 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
785 llvm::GlobalValue::PrivateLinkage, Init, ".init");
786 LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
787 RValue InitRVal;
788 switch (CGF.getEvaluationKind(Ty)) {
789 case TEK_Scalar:
790 InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
791 break;
792 case TEK_Complex:
793 InitRVal =
794 RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
795 break;
796 case TEK_Aggregate:
797 InitRVal = RValue::getAggregate(LV.getAddress());
798 break;
799 }
800 OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
801 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
802 CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
803 /*IsInitializer=*/false);
804 }
805}
806
807/// \brief Emit initialization of arrays of complex types.
808/// \param DestAddr Address of the array.
809/// \param Type Type of array.
810/// \param Init Initial expression of array.
811/// \param SrcAddr Address of the original array.
812static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
813 QualType Type, bool EmitDeclareReductionInit,
814 const Expr *Init,
815 const OMPDeclareReductionDecl *DRD,
816 Address SrcAddr = Address::invalid()) {
817 // Perform element-by-element initialization.
818 QualType ElementTy;
819
820 // Drill down to the base element type on both arrays.
821 auto ArrayTy = Type->getAsArrayTypeUnsafe();
822 auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
823 DestAddr =
824 CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
825 if (DRD)
826 SrcAddr =
827 CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
828
829 llvm::Value *SrcBegin = nullptr;
830 if (DRD)
831 SrcBegin = SrcAddr.getPointer();
832 auto DestBegin = DestAddr.getPointer();
833 // Cast from pointer to array type to pointer to single element.
834 auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
835 // The basic structure here is a while-do loop.
836 auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
837 auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
838 auto IsEmpty =
839 CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
840 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
841
842 // Enter the loop body, making that address the current address.
843 auto EntryBB = CGF.Builder.GetInsertBlock();
844 CGF.EmitBlock(BodyBB);
845
846 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
847
848 llvm::PHINode *SrcElementPHI = nullptr;
849 Address SrcElementCurrent = Address::invalid();
850 if (DRD) {
851 SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
852 "omp.arraycpy.srcElementPast");
853 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
854 SrcElementCurrent =
855 Address(SrcElementPHI,
856 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
857 }
858 llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
859 DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
860 DestElementPHI->addIncoming(DestBegin, EntryBB);
861 Address DestElementCurrent =
862 Address(DestElementPHI,
863 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
864
865 // Emit copy.
866 {
867 CodeGenFunction::RunCleanupsScope InitScope(CGF);
868 if (EmitDeclareReductionInit) {
869 emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
870 SrcElementCurrent, ElementTy);
871 } else
872 CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
873 /*IsInitializer=*/false);
874 }
875
876 if (DRD) {
877 // Shift the address forward by one element.
878 auto SrcElementNext = CGF.Builder.CreateConstGEP1_32(
879 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
880 SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
881 }
882
883 // Shift the address forward by one element.
884 auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
885 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
886 // Check whether we've reached the end.
887 auto Done =
888 CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
889 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
890 DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
891
892 // Done.
893 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
894}
895
896LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
897 return CGF.EmitOMPSharedLValue(E);
898}
899
900LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
901 const Expr *E) {
902 if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
903 return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
904 return LValue();
905}
906
907void ReductionCodeGen::emitAggregateInitialization(
908 CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
909 const OMPDeclareReductionDecl *DRD) {
910 // Emit VarDecl with copy init for arrays.
911 // Get the address of the original variable captured in current
912 // captured region.
913 auto *PrivateVD =
914 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
915 bool EmitDeclareReductionInit =
916 DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
917 EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
918 EmitDeclareReductionInit,
919 EmitDeclareReductionInit ? ClausesData[N].ReductionOp
920 : PrivateVD->getInit(),
921 DRD, SharedLVal.getAddress());
922}
923
924ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
925 ArrayRef<const Expr *> Privates,
926 ArrayRef<const Expr *> ReductionOps) {
927 ClausesData.reserve(Shareds.size());
928 SharedAddresses.reserve(Shareds.size());
929 Sizes.reserve(Shareds.size());
930 BaseDecls.reserve(Shareds.size());
931 auto IPriv = Privates.begin();
932 auto IRed = ReductionOps.begin();
933 for (const auto *Ref : Shareds) {
934 ClausesData.emplace_back(Ref, *IPriv, *IRed);
935 std::advance(IPriv, 1);
936 std::advance(IRed, 1);
937 }
938}
939
940void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
941 assert(SharedAddresses.size() == N &&
942 "Number of generated lvalues must be exactly N.");
943 LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
944 LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
945 SharedAddresses.emplace_back(First, Second);
946}
947
948void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
949 auto *PrivateVD =
950 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
951 QualType PrivateType = PrivateVD->getType();
952 bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
953 if (!PrivateType->isVariablyModifiedType()) {
954 Sizes.emplace_back(
955 CGF.getTypeSize(
956 SharedAddresses[N].first.getType().getNonReferenceType()),
957 nullptr);
958 return;
959 }
960 llvm::Value *Size;
961 llvm::Value *SizeInChars;
962 llvm::Type *ElemType =
963 cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
964 ->getElementType();
965 auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
966 if (AsArraySection) {
967 Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(),
968 SharedAddresses[N].first.getPointer());
969 Size = CGF.Builder.CreateNUWAdd(
970 Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
971 SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
972 } else {
973 SizeInChars = CGF.getTypeSize(
974 SharedAddresses[N].first.getType().getNonReferenceType());
975 Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
976 }
977 Sizes.emplace_back(SizeInChars, Size);
978 CodeGenFunction::OpaqueValueMapping OpaqueMap(
979 CGF,
980 cast<OpaqueValueExpr>(
981 CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
982 RValue::get(Size));
983 CGF.EmitVariablyModifiedType(PrivateType);
984}
985
986void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
987 llvm::Value *Size) {
988 auto *PrivateVD =
989 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
990 QualType PrivateType = PrivateVD->getType();
991 if (!PrivateType->isVariablyModifiedType()) {
992 assert(!Size && !Sizes[N].second &&
993 "Size should be nullptr for non-variably modified reduction "
994 "items.");
995 return;
996 }
997 CodeGenFunction::OpaqueValueMapping OpaqueMap(
998 CGF,
999 cast<OpaqueValueExpr>(
1000 CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
1001 RValue::get(Size));
1002 CGF.EmitVariablyModifiedType(PrivateType);
1003}
1004
1005void ReductionCodeGen::emitInitialization(
1006 CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
1007 llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
1008 assert(SharedAddresses.size() > N && "No variable was generated");
1009 auto *PrivateVD =
1010 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1011 auto *DRD = getReductionInit(ClausesData[N].ReductionOp);
1012 QualType PrivateType = PrivateVD->getType();
1013 PrivateAddr = CGF.Builder.CreateElementBitCast(
1014 PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1015 QualType SharedType = SharedAddresses[N].first.getType();
1016 SharedLVal = CGF.MakeAddrLValue(
1017 CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(),
1018 CGF.ConvertTypeForMem(SharedType)),
1019 SharedType, SharedAddresses[N].first.getBaseInfo(),
1020 CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
1021 if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
1022 emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
1023 } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1024 emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
1025 PrivateAddr, SharedLVal.getAddress(),
1026 SharedLVal.getType());
1027 } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
1028 !CGF.isTrivialInitializer(PrivateVD->getInit())) {
1029 CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
1030 PrivateVD->getType().getQualifiers(),
1031 /*IsInitializer=*/false);
1032 }
1033}
1034
1035bool ReductionCodeGen::needCleanups(unsigned N) {
1036 auto *PrivateVD =
1037 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1038 QualType PrivateType = PrivateVD->getType();
1039 QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1040 return DTorKind != QualType::DK_none;
1041}
1042
1043void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
1044 Address PrivateAddr) {
1045 auto *PrivateVD =
1046 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1047 QualType PrivateType = PrivateVD->getType();
1048 QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1049 if (needCleanups(N)) {
1050 PrivateAddr = CGF.Builder.CreateElementBitCast(
1051 PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1052 CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
1053 }
1054}
1055
1056static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1057 LValue BaseLV) {
1058 BaseTy = BaseTy.getNonReferenceType();
1059 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1060 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1061 if (auto *PtrTy = BaseTy->getAs<PointerType>())
1062 BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
1063 else {
1064 LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
1065 BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
1066 }
1067 BaseTy = BaseTy->getPointeeType();
1068 }
1069 return CGF.MakeAddrLValue(
1070 CGF.Builder.CreateElementBitCast(BaseLV.getAddress(),
1071 CGF.ConvertTypeForMem(ElTy)),
1072 BaseLV.getType(), BaseLV.getBaseInfo(),
1073 CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
1074}
1075
1076static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1077 llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
1078 llvm::Value *Addr) {
1079 Address Tmp = Address::invalid();
1080 Address TopTmp = Address::invalid();
1081 Address MostTopTmp = Address::invalid();
1082 BaseTy = BaseTy.getNonReferenceType();
1083 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1084 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1085 Tmp = CGF.CreateMemTemp(BaseTy);
1086 if (TopTmp.isValid())
1087 CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
1088 else
1089 MostTopTmp = Tmp;
1090 TopTmp = Tmp;
1091 BaseTy = BaseTy->getPointeeType();
1092 }
1093 llvm::Type *Ty = BaseLVType;
1094 if (Tmp.isValid())
1095 Ty = Tmp.getElementType();
1096 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
1097 if (Tmp.isValid()) {
1098 CGF.Builder.CreateStore(Addr, Tmp);
1099 return MostTopTmp;
1100 }
1101 return Address(Addr, BaseLVAlignment);
1102}
1103
1104Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
1105 Address PrivateAddr) {
1106 const DeclRefExpr *DE;
1107 const VarDecl *OrigVD = nullptr;
1108 if (auto *OASE = dyn_cast<OMPArraySectionExpr>(ClausesData[N].Ref)) {
1109 auto *Base = OASE->getBase()->IgnoreParenImpCasts();
1110 while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
1111 Base = TempOASE->getBase()->IgnoreParenImpCasts();
1112 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1113 Base = TempASE->getBase()->IgnoreParenImpCasts();
1114 DE = cast<DeclRefExpr>(Base);
1115 OrigVD = cast<VarDecl>(DE->getDecl());
1116 } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(ClausesData[N].Ref)) {
1117 auto *Base = ASE->getBase()->IgnoreParenImpCasts();
1118 while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1119 Base = TempASE->getBase()->IgnoreParenImpCasts();
1120 DE = cast<DeclRefExpr>(Base);
1121 OrigVD = cast<VarDecl>(DE->getDecl());
1122 }
1123 if (OrigVD) {
1124 BaseDecls.emplace_back(OrigVD);
1125 auto OriginalBaseLValue = CGF.EmitLValue(DE);
1126 LValue BaseLValue =
1127 loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
1128 OriginalBaseLValue);
1129 llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
1130 BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
1131 llvm::Value *PrivatePointer =
1132 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1133 PrivateAddr.getPointer(),
1134 SharedAddresses[N].first.getAddress().getType());
1135 llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
1136 return castToBase(CGF, OrigVD->getType(),
1137 SharedAddresses[N].first.getType(),
1138 OriginalBaseLValue.getAddress().getType(),
1139 OriginalBaseLValue.getAlignment(), Ptr);
1140 }
1141 BaseDecls.emplace_back(
1142 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
1143 return PrivateAddr;
1144}
1145
1146bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
1147 auto *DRD = getReductionInit(ClausesData[N].ReductionOp);
1148 return DRD && DRD->getInitializer();
1149}
1150
1151LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
1152 return CGF.EmitLoadOfPointerLValue(
1153 CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1154 getThreadIDVariable()->getType()->castAs<PointerType>());
1155}
1156
1157void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
1158 if (!CGF.HaveInsertPoint())
1159 return;
1160 // 1.2.2 OpenMP Language Terminology
1161 // Structured block - An executable statement with a single entry at the
1162 // top and a single exit at the bottom.
1163 // The point of exit cannot be a branch out of the structured block.
1164 // longjmp() and throw() must not violate the entry/exit criteria.
1165 CGF.EHStack.pushTerminate();
1166 CodeGen(CGF);
1167 CGF.EHStack.popTerminate();
1168}
1169
1170LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1171 CodeGenFunction &CGF) {
1172 return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1173 getThreadIDVariable()->getType(),
1174 AlignmentSource::Decl);
1175}
1176
1177CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
1178 : CGM(CGM), OffloadEntriesInfoManager(CGM) {
1179 IdentTy = llvm::StructType::create(
1180 "ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */,
1181 CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */,
1182 CGM.Int8PtrTy /* psource */);
1183 KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
1184
1185 loadOffloadInfoMetadata();
1186}
1187
1188void CGOpenMPRuntime::clear() {
1189 InternalVars.clear();
1190}
1191
1192static llvm::Function *
1193emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
1194 const Expr *CombinerInitializer, const VarDecl *In,
1195 const VarDecl *Out, bool IsCombiner) {
1196 // void .omp_combiner.(Ty *in, Ty *out);
1197 auto &C = CGM.getContext();
1198 QualType PtrTy = C.getPointerType(Ty).withRestrict();
1199 FunctionArgList Args;
1200 ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
1201 /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1202 ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
1203 /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1204 Args.push_back(&OmpOutParm);
1205 Args.push_back(&OmpInParm);
1206 auto &FnInfo =
1207 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1208 auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
1209 auto *Fn = llvm::Function::Create(
1210 FnTy, llvm::GlobalValue::InternalLinkage,
1211 IsCombiner ? ".omp_combiner." : ".omp_initializer.", &CGM.getModule());
1212 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
1213 Fn->removeFnAttr(llvm::Attribute::NoInline);
1214 Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
1215 Fn->addFnAttr(llvm::Attribute::AlwaysInline);
1216 CodeGenFunction CGF(CGM);
1217 // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
1218 // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
1219 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
1220 Out->getLocation());
1221 CodeGenFunction::OMPPrivateScope Scope(CGF);
1222 Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
1223 Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() -> Address {
1224 return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
1225 .getAddress();
1226 });
1227 Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
1228 Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() -> Address {
1229 return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
1230 .getAddress();
1231 });
1232 (void)Scope.Privatize();
1233 if (!IsCombiner && Out->hasInit() &&
1234 !CGF.isTrivialInitializer(Out->getInit())) {
1235 CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
1236 Out->getType().getQualifiers(),
1237 /*IsInitializer=*/true);
1238 }
1239 if (CombinerInitializer)
1240 CGF.EmitIgnoredExpr(CombinerInitializer);
1241 Scope.ForceCleanup();
1242 CGF.FinishFunction();
1243 return Fn;
1244}
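To see what this helper produces, consider a user-defined reduction. The sketch below shows roughly what the emitted .omp_combiner. body does, using a hypothetical type T and user combiner combine; the real helper is emitted directly as IR, and the parameter order follows the argument list built above:

// Hypothetical user code:
//   #pragma omp declare reduction(merge : T : omp_out = combine(omp_out, omp_in))
struct T { int v; };
static T combine(T a, T b) { return T{a.v + b.v}; }  // assumed user combiner

// Rough shape of the internal, always-inline helper emitted above: omp_out and
// omp_in are privatized to the two pointer parameters (__restrict mirrors the
// withRestrict() qualifier applied to PtrTy above).
static void omp_combiner(T *__restrict omp_out, T *__restrict omp_in) {
  *omp_out = combine(*omp_out, *omp_in);  // the combiner expression
}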
1245
1246void CGOpenMPRuntime::emitUserDefinedReduction(
1247 CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
1248 if (UDRMap.count(D) > 0)
1249 return;
1250 auto &C = CGM.getContext();
1251 if (!In || !Out) {
1252 In = &C.Idents.get("omp_in");
1253 Out = &C.Idents.get("omp_out");
1254 }
1255 llvm::Function *Combiner = emitCombinerOrInitializer(
1256 CGM, D->getType(), D->getCombiner(), cast<VarDecl>(D->lookup(In).front()),
1257 cast<VarDecl>(D->lookup(Out).front()),
1258 /*IsCombiner=*/true);
1259 llvm::Function *Initializer = nullptr;
1260 if (auto *Init = D->getInitializer()) {
1261 if (!Priv || !Orig) {
1262 Priv = &C.Idents.get("omp_priv");
1263 Orig = &C.Idents.get("omp_orig");
1264 }
1265 Initializer = emitCombinerOrInitializer(
1266 CGM, D->getType(),
1267 D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
1268 : nullptr,
1269 cast<VarDecl>(D->lookup(Orig).front()),
1270 cast<VarDecl>(D->lookup(Priv).front()),
1271 /*IsCombiner=*/false);
1272 }
1273 UDRMap.insert(std::make_pair(D, std::make_pair(Combiner, Initializer)));
1274 if (CGF) {
1275 auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
1276 Decls.second.push_back(D);
1277 }
1278}
1279
1280std::pair<llvm::Function *, llvm::Function *>
1281CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
1282 auto I = UDRMap.find(D);
1283 if (I != UDRMap.end())
1284 return I->second;
1285 emitUserDefinedReduction(/*CGF=*/nullptr, D);
1286 return UDRMap.lookup(D);
1287}
1288
1289// Layout information for ident_t.
1290static CharUnits getIdentAlign(CodeGenModule &CGM) {
1291 return CGM.getPointerAlign();
1292}
1293static CharUnits getIdentSize(CodeGenModule &CGM) {
1294 assert((4 * CGM.getPointerSize()).isMultipleOf(CGM.getPointerAlign()));
1295 return CharUnits::fromQuantity(16) + CGM.getPointerSize();
1296}
1297static CharUnits getOffsetOfIdentField(IdentFieldIndex Field) {
1298 // All the fields except the last are i32, so this works beautifully.
1299 return unsigned(Field) * CharUnits::fromQuantity(4);
1300}
1301static Address createIdentFieldGEP(CodeGenFunction &CGF, Address Addr,
1302 IdentFieldIndex Field,
1303 const llvm::Twine &Name = "") {
1304 auto Offset = getOffsetOfIdentField(Field);
1305 return CGF.Builder.CreateStructGEP(Addr, Field, Offset, Name);
1306}
1307
1308static llvm::Value *emitParallelOrTeamsOutlinedFunction(
1309 CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
1310 const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
1311 const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
1312 assert(ThreadIDVar->getType()->isPointerType() &&
1313 "thread id variable must be of type kmp_int32 *");
1314 CodeGenFunction CGF(CGM, true);
1315 bool HasCancel = false;
1316 if (auto *OPD = dyn_cast<OMPParallelDirective>(&D))
1317 HasCancel = OPD->hasCancel();
1318 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
1319 HasCancel = OPSD->hasCancel();
1320 else if (auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
1321 HasCancel = OPFD->hasCancel();
1322 else if (auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
1323 HasCancel = OPFD->hasCancel();
1324 else if (auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
1325 HasCancel = OPFD->hasCancel();
1326 else if (auto *OPFD = dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
1327 HasCancel = OPFD->hasCancel();
1328 else if (auto *OPFD =
1329 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
1330 HasCancel = OPFD->hasCancel();
1331 CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
1332 HasCancel, OutlinedHelperName);
1333 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1334 return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
1335}
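For concreteness, the outlined helper produced here has the kmpc_micro shape modeled further below: the thread id and bound thread id arrive by pointer, followed by the captured variables. A sketch with two hypothetical captures:

using kmp_int32 = int;

// Sketch of an outlined parallel-region body with hypothetical captures 'a'
// and 'b'; the real function is produced by GenerateOpenMPCapturedStmtFunction.
static void omp_outlined(kmp_int32 *global_tid, kmp_int32 *bound_tid,
                         int *a, double *b) {
  (void)global_tid;
  (void)bound_tid;
  *a += static_cast<int>(*b);  // illustrative region body
}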
1336
1337llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
1338 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1339 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1340 const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1341 return emitParallelOrTeamsOutlinedFunction(
1342 CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1343}
1344
1345llvm::Value *CGOpenMPRuntime::emitTeamsOutlinedFunction(
1346 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1347 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1348 const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1349 return emitParallelOrTeamsOutlinedFunction(
1350 CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1351}
1352
1353llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
1354 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1355 const VarDecl *PartIDVar, const VarDecl *TaskTVar,
1356 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1357 bool Tied, unsigned &NumberOfParts) {
1358 auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
1359 PrePostActionTy &) {
1360 auto *ThreadID = getThreadID(CGF, D.getLocStart());
1361 auto *UpLoc = emitUpdateLocation(CGF, D.getLocStart());
1362 llvm::Value *TaskArgs[] = {
1363 UpLoc, ThreadID,
1364 CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
1365 TaskTVar->getType()->castAs<PointerType>())
1366 .getPointer()};
1367 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
1368 };
1369 CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
1370 UntiedCodeGen);
1371 CodeGen.setAction(Action);
1372 assert(!ThreadIDVar->getType()->isPointerType() &&
1373 "thread id variable must be of type kmp_int32 for tasks");
1374 const OpenMPDirectiveKind Region =
1375 isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
1376 : OMPD_task;
1377 auto *CS = D.getCapturedStmt(Region);
1378 auto *TD = dyn_cast<OMPTaskDirective>(&D);
1379 CodeGenFunction CGF(CGM, true);
1380 CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
1381 InnermostKind,
1382 TD ? TD->hasCancel() : false, Action);
1383 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1384 auto *Res = CGF.GenerateCapturedStmtFunction(*CS);
1385 if (!Tied)
1386 NumberOfParts = Action.getNumberOfParts();
1387 return Res;
1388}
1389
1390Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
1391 CharUnits Align = getIdentAlign(CGM);
1392 llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
1393 if (!Entry) {
1394 if (!DefaultOpenMPPSource) {
1395 // Initialize default location for psource field of ident_t structure of
1396 // all ident_t objects. Format is ";file;function;line;column;;".
1397 // Taken from
1398 // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
1399 DefaultOpenMPPSource =
1400 CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
1401 DefaultOpenMPPSource =
1402 llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
1403 }
1404
1405 ConstantInitBuilder builder(CGM);
1406 auto fields = builder.beginStruct(IdentTy);
1407 fields.addInt(CGM.Int32Ty, 0);
1408 fields.addInt(CGM.Int32Ty, Flags);
1409 fields.addInt(CGM.Int32Ty, 0);
1410 fields.addInt(CGM.Int32Ty, 0);
1411 fields.add(DefaultOpenMPPSource);
1412 auto DefaultOpenMPLocation =
1413 fields.finishAndCreateGlobal("", Align, /*isConstant*/ true,
1414 llvm::GlobalValue::PrivateLinkage);
1415 DefaultOpenMPLocation->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1416
1417 OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
1418 }
1419 return Address(Entry, Align);
1420}
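Viewed as data, the default location is simply a private constant ident_t whose psource field points at the placeholder string; a minimal sketch (ident_t repeated so the sketch stands alone):

#include <cstdint>

struct ident_t {
  int32_t reserved_1, flags, reserved_2, reserved_3;
  const char *psource;
};

// ";unknown;unknown;0;0;;" is the placeholder used when no real location is known.
static const char kDefaultPSource[] = ";unknown;unknown;0;0;;";

// One such constant is created (and cached) per distinct Flags value.
static ident_t makeDefaultLocation(int32_t Flags) {
  return ident_t{0, Flags, 0, 0, kDefaultPSource};
}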
1421
1422llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
1423 SourceLocation Loc,
1424 unsigned Flags) {
1425 Flags |= OMP_IDENT_KMPC;
1426 // If no debug info is generated - return global default location.
1427 if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
1428 Loc.isInvalid())
1429 return getOrCreateDefaultLocation(Flags).getPointer();
1430
1431 assert(CGF.CurFn && "No function in current CodeGenFunction.");
1432
1433 Address LocValue = Address::invalid();
1434 auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1435 if (I != OpenMPLocThreadIDMap.end())
1436 LocValue = Address(I->second.DebugLoc, getIdentAlign(CGF.CGM));
1437
1438 // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
1439 // GetOpenMPThreadID was called before this routine.
1440 if (!LocValue.isValid()) {
1441 // Generate "ident_t .kmpc_loc.addr;"
1442 Address AI = CGF.CreateTempAlloca(IdentTy, getIdentAlign(CGF.CGM),
1443 ".kmpc_loc.addr");
1444 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1445 Elem.second.DebugLoc = AI.getPointer();
1446 LocValue = AI;
1447
1448 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1449 CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
1450 CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
1451 CGM.getSize(getIdentSize(CGF.CGM)));
1452 }
1453
1454 // char **psource = &.kmpc_loc_<flags>.addr.psource;
1455 Address PSource = createIdentFieldGEP(CGF, LocValue, IdentField_PSource);
1456
1457 auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
1458 if (OMPDebugLoc == nullptr) {
1459 SmallString<128> Buffer2;
1460 llvm::raw_svector_ostream OS2(Buffer2);
1461 // Build debug location
1462 PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1463 OS2 << ";" << PLoc.getFilename() << ";";
1464 if (const FunctionDecl *FD =
1465 dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl)) {
1466 OS2 << FD->getQualifiedNameAsString();
1467 }
1468 OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
1469 OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
1470 OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
1471 }
1472 // *psource = ";<File>;<Function>;<Line>;<Column>;;";
1473 CGF.Builder.CreateStore(OMPDebugLoc, PSource);
1474
1475 // Our callers always pass this to a runtime function, so for
1476 // convenience, go ahead and return a naked pointer.
1477 return LocValue.getPointer();
1478}
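The psource string assembled above follows the ";<File>;<Function>;<Line>;<Column>;;" layout; a small sketch of that formatting (helper name hypothetical):

#include <string>

// Mirrors the string built into Buffer2 above, e.g.
// formatPSource("foo.cpp", "bar", 10, 3) == ";foo.cpp;bar;10;3;;"
static std::string formatPSource(const std::string &File, const std::string &Func,
                                 unsigned Line, unsigned Column) {
  return ";" + File + ";" + Func + ";" + std::to_string(Line) + ";" +
         std::to_string(Column) + ";;";
}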
1479
1480llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
1481 SourceLocation Loc) {
1482 assert(CGF.CurFn && "No function in current CodeGenFunction.");
1483
1484 llvm::Value *ThreadID = nullptr;
1485 // Check whether we've already cached a load of the thread id in this
1486 // function.
1487 auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1488 if (I != OpenMPLocThreadIDMap.end()) {
1489 ThreadID = I->second.ThreadID;
1490 if (ThreadID != nullptr)
1491 return ThreadID;
1492 }
1493 // If exceptions are enabled, do not use parameter to avoid possible crash.
1494 if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
1495 !CGF.getLangOpts().CXXExceptions ||
1496 CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1497 if (auto *OMPRegionInfo =
1498 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
1499 if (OMPRegionInfo->getThreadIDVariable()) {
1500 // Check if this is an outlined function with thread id passed as argument.
1501 auto LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
1502 ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
1503 // If value loaded in entry block, cache it and use it everywhere in
1504 // function.
1505 if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1506 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1507 Elem.second.ThreadID = ThreadID;
1508 }
1509 return ThreadID;
1510 }
1511 }
1512 }
1513
1514 // This is not an outlined function region - need to call __kmpc_int32
1515 // kmpc_global_thread_num(ident_t *loc).
1516 // Generate thread id value and cache this value for use across the
1517 // function.
1518 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1519 CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
1520 auto *Call = CGF.Builder.CreateCall(
1521 createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
1522 emitUpdateLocation(CGF, Loc));
1523 Call->setCallingConv(CGF.getRuntimeCC());
1524 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1525 Elem.second.ThreadID = Call;
1526 return Call;
1527}
1528
1529void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
1530 assert(CGF.CurFn && "No function in current CodeGenFunction.");
1531 if (OpenMPLocThreadIDMap.count(CGF.CurFn))
1532 OpenMPLocThreadIDMap.erase(CGF.CurFn);
1533 if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1534 for(auto *D : FunctionUDRMap[CGF.CurFn]) {
1535 UDRMap.erase(D);
1536 }
1537 FunctionUDRMap.erase(CGF.CurFn);
1538 }
1539}
1540
1541llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
1542 if (!IdentTy) {
1543 }
1544 return llvm::PointerType::getUnqual(IdentTy);
1545}
1546
1547llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
1548 if (!Kmpc_MicroTy) {
1549 // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1550 llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1551 llvm::PointerType::getUnqual(CGM.Int32Ty)};
1552 Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1553 }
1554 return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1555}
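In C terms, the cached function type corresponds to the kmpc_micro typedef from the comment above; a sketch, modeling kmp_int32 as int:

using kmp_int32 = int;

// Variadic outlined-region entry point: global tid, bound tid, then captures.
typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);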
1556
1557llvm::Constant *
1558CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
1559 llvm::Constant *RTLFn = nullptr;
1560 switch (static_cast<OpenMPRTLFunction>(Function)) {
1561 case OMPRTL__kmpc_fork_call: {
1562 // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
1563 // microtask, ...);
1564 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1565 getKmpc_MicroPointerTy()};
1566 llvm::FunctionType *FnTy =
1567 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1568 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
1569 break;
1570 }
1571 case OMPRTL__kmpc_global_thread_num: {
1572 // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
1573 llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1574 llvm::FunctionType *FnTy =
1575 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1576 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
1577 break;
1578 }
1579 case OMPRTL__kmpc_threadprivate_cached: {
1580 // Build void *__kmpc_threadprivate_cached(ident_t *loc,
1581 // kmp_int32 global_tid, void *data, size_t size, void ***cache);
1582 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1583 CGM.VoidPtrTy, CGM.SizeTy,
1584 CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
1585 llvm::FunctionType *FnTy =
1586 llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
1587 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
1588 break;
1589 }
1590 case OMPRTL__kmpc_critical: {
1591 // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
1592 // kmp_critical_name *crit);
1593 llvm::Type *TypeParams[] = {
1594 getIdentTyPointerTy(), CGM.Int32Ty,
1595 llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1596 llvm::FunctionType *FnTy =
1597 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1598 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
1599 break;
1600 }
1601 case OMPRTL__kmpc_critical_with_hint: {
1602 // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
1603 // kmp_critical_name *crit, uintptr_t hint);
1604 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1605 llvm::PointerType::getUnqual(KmpCriticalNameTy),
1606 CGM.IntPtrTy};
1607 llvm::FunctionType *FnTy =
1608 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1609 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
1610 break;
1611 }
1612 case OMPRTL__kmpc_threadprivate_register: {
1613 // Build void __kmpc_threadprivate_register(ident_t *, void *data,
1614 // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
1615 // typedef void *(*kmpc_ctor)(void *);
1616 auto KmpcCtorTy =
1617 llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
1618 /*isVarArg*/ false)->getPointerTo();
1619 // typedef void *(*kmpc_cctor)(void *, void *);
1620 llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1621 auto KmpcCopyCtorTy =
1622 llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
1623 /*isVarArg*/ false)->getPointerTo();
1624 // typedef void (*kmpc_dtor)(void *);
1625 auto KmpcDtorTy =
1626 llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
1627 ->getPointerTo();
1628 llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
1629 KmpcCopyCtorTy, KmpcDtorTy};
1630 auto FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
1631 /*isVarArg*/ false);
1632 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
1633 break;
1634 }
1635 case OMPRTL__kmpc_end_critical: {
1636 // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
1637 // kmp_critical_name *crit);
1638 llvm::Type *TypeParams[] = {
1639 getIdentTyPointerTy(), CGM.Int32Ty,
1640 llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1641 llvm::FunctionType *FnTy =
1642 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1643 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
1644 break;
1645 }
1646 case OMPRTL__kmpc_cancel_barrier: {
1647 // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
1648 // global_tid);
1649 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1650 llvm::FunctionType *FnTy =
1651 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1652 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
1653 break;
1654 }
1655 case OMPRTL__kmpc_barrier: {
1656 // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1657 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1658 llvm::FunctionType *FnTy =
1659 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1660 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1661 break;
1662 }
1663 case OMPRTL__kmpc_for_static_fini: {
1664 // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
1665 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1666 llvm::FunctionType *FnTy =
1667 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1668 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
1669 break;
1670 }
1671 case OMPRTL__kmpc_push_num_threads: {
1672 // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
1673 // kmp_int32 num_threads)
1674 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1675 CGM.Int32Ty};
1676 llvm::FunctionType *FnTy =
1677 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1678 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
1679 break;
1680 }
1681 case OMPRTL__kmpc_serialized_parallel: {
1682 // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1683 // global_tid);
1684 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1685 llvm::FunctionType *FnTy =
1686 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1687 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1688 break;
1689 }
1690 case OMPRTL__kmpc_end_serialized_parallel: {
1691 // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1692 // global_tid);
1693 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1694 llvm::FunctionType *FnTy =
1695 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1696 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1697 break;
1698 }
1699 case OMPRTL__kmpc_flush: {
1700 // Build void __kmpc_flush(ident_t *loc);
1701 llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1702 llvm::FunctionType *FnTy =
1703 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1704 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
1705 break;
1706 }
1707 case OMPRTL__kmpc_master: {
1708 // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
1709 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1710 llvm::FunctionType *FnTy =
1711 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1712 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
1713 break;
1714 }
1715 case OMPRTL__kmpc_end_master: {
1716 // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
1717 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1718 llvm::FunctionType *FnTy =
1719 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1720 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
1721 break;
1722 }
1723 case OMPRTL__kmpc_omp_taskyield: {
1724 // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
1725 // int end_part);
1726 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1727 llvm::FunctionType *FnTy =
1728 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1729 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
1730 break;
1731 }
1732 case OMPRTL__kmpc_single: {
1733 // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
1734 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1735 llvm::FunctionType *FnTy =
1736 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1737 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
1738 break;
1739 }
1740 case OMPRTL__kmpc_end_single: {
1741 // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
1742 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1743 llvm::FunctionType *FnTy =
1744 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1745 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
1746 break;
1747 }
1748 case OMPRTL__kmpc_omp_task_alloc: {
1749 // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
1750 // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1751 // kmp_routine_entry_t *task_entry);
1752 assert(KmpRoutineEntryPtrTy != nullptr &&
1753 "Type kmp_routine_entry_t must be created.");
1754 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1755 CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
1756 // Return void * and then cast to particular kmp_task_t type.
1757 llvm::FunctionType *FnTy =
1758 llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1759 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
1760 break;
1761 }
1762 case OMPRTL__kmpc_omp_task: {
1763 // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1764 // *new_task);
1765 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1766 CGM.VoidPtrTy};
1767 llvm::FunctionType *FnTy =
1768 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1769 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
1770 break;
1771 }
1772 case OMPRTL__kmpc_copyprivate: {
1773 // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
1774 // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
1775 // kmp_int32 didit);
1776 llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1777 auto *CpyFnTy =
1778 llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
1779 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
1780 CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
1781 CGM.Int32Ty};
1782 llvm::FunctionType *FnTy =
1783 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1784 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
1785 break;
1786 }
1787 case OMPRTL__kmpc_reduce: {
1788 // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
1789 // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
1790 // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
1791 llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1792 auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1793 /*isVarArg=*/false);
1794 llvm::Type *TypeParams[] = {
1795 getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1796 CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1797 llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1798 llvm::FunctionType *FnTy =
1799 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1800 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
1801 break;
1802 }
1803 case OMPRTL__kmpc_reduce_nowait: {
1804 // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
1805 // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
1806 // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
1807 // *lck);
1808 llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1809 auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1810 /*isVarArg=*/false);
1811 llvm::Type *TypeParams[] = {
1812 getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1813 CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1814 llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1815 llvm::FunctionType *FnTy =
1816 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1817 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
1818 break;
1819 }
1820 case OMPRTL__kmpc_end_reduce: {
1821 // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
1822 // kmp_critical_name *lck);
1823 llvm::Type *TypeParams[] = {
1824 getIdentTyPointerTy(), CGM.Int32Ty,
1825 llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1826 llvm::FunctionType *FnTy =
1827 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1828 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
1829 break;
1830 }
1831 case OMPRTL__kmpc_end_reduce_nowait: {
1832 // Build __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
1833 // kmp_critical_name *lck);
1834 llvm::Type *TypeParams[] = {
1835 getIdentTyPointerTy(), CGM.Int32Ty,
1836 llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1837 llvm::FunctionType *FnTy =
1838 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1839 RTLFn =
1840 CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
1841 break;
1842 }
1843 case OMPRTL__kmpc_omp_task_begin_if0: {
1844 // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1845 // *new_task);
1846 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1847 CGM.VoidPtrTy};
1848 llvm::FunctionType *FnTy =
1849 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1850 RTLFn =
1851 CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
1852 break;
1853 }
1854 case OMPRTL__kmpc_omp_task_complete_if0: {
1855 // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1856 // *new_task);
1857 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1858 CGM.VoidPtrTy};
1859 llvm::FunctionType *FnTy =
1860 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1861 RTLFn = CGM.CreateRuntimeFunction(FnTy,
1862 /*Name=*/"__kmpc_omp_task_complete_if0");
1863 break;
1864 }
1865 case OMPRTL__kmpc_ordered: {
1866 // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
1867 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1868 llvm::FunctionType *FnTy =
1869 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1870 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
1871 break;
1872 }
1873 case OMPRTL__kmpc_end_ordered: {
1874 // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
1875 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1876 llvm::FunctionType *FnTy =
1877 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1878 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
1879 break;
1880 }
1881 case OMPRTL__kmpc_omp_taskwait: {
1882 // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
1883 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1884 llvm::FunctionType *FnTy =
1885 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1886 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
1887 break;
1888 }
1889 case OMPRTL__kmpc_taskgroup: {
1890 // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
1891 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1892 llvm::FunctionType *FnTy =
1893 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1894 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
1895 break;
1896 }
1897 case OMPRTL__kmpc_end_taskgroup: {
1898 // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
1899 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1900 llvm::FunctionType *FnTy =
1901 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1902 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
1903 break;
1904 }
1905 case OMPRTL__kmpc_push_proc_bind: {
1906 // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
1907 // int proc_bind)
1908 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1909 llvm::FunctionType *FnTy =
1910 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1911 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
1912 break;
1913 }
1914 case OMPRTL__kmpc_omp_task_with_deps: {
1915 // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
1916 // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
1917 // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
1918 llvm::Type *TypeParams[] = {
1919 getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
1920 CGM.VoidPtrTy, CGM.Int32Ty, CGM.VoidPtrTy};
1921 llvm::FunctionType *FnTy =
1922 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1923 RTLFn =
1924 CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
1925 break;
1926 }
1927 case OMPRTL__kmpc_omp_wait_deps: {
1928 // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
1929 // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
1930 // kmp_depend_info_t *noalias_dep_list);
1931 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1932 CGM.Int32Ty, CGM.VoidPtrTy,
1933 CGM.Int32Ty, CGM.VoidPtrTy};
1934 llvm::FunctionType *FnTy =
1935 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1936 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
1937 break;
1938 }
1939 case OMPRTL__kmpc_cancellationpoint: {
1940 // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
1941 // global_tid, kmp_int32 cncl_kind)
1942 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1943 llvm::FunctionType *FnTy =
1944 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1945 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
1946 break;
1947 }
1948 case OMPRTL__kmpc_cancel: {
1949 // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
1950 // kmp_int32 cncl_kind)
1951 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1952 llvm::FunctionType *FnTy =
1953 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1954 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
1955 break;
1956 }
1957 case OMPRTL__kmpc_push_num_teams: {
1958 // Build void kmpc_push_num_teams (ident_t loc, kmp_int32 global_tid,
1959 // kmp_int32 num_teams, kmp_int32 num_threads)
1960 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1961 CGM.Int32Ty};
1962 llvm::FunctionType *FnTy =
1963 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1964 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
1965 break;
1966 }
1967 case OMPRTL__kmpc_fork_teams: {
1968 // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
1969 // microtask, ...);
1970 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1971 getKmpc_MicroPointerTy()};
1972 llvm::FunctionType *FnTy =
1973 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1974 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
1975 break;
1976 }
1977 case OMPRTL__kmpc_taskloop: {
1978 // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
1979 // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
1980 // sched, kmp_uint64 grainsize, void *task_dup);
1981 llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
1982 CGM.IntTy,
1983 CGM.VoidPtrTy,
1984 CGM.IntTy,
1985 CGM.Int64Ty->getPointerTo(),
1986 CGM.Int64Ty->getPointerTo(),
1987 CGM.Int64Ty,
1988 CGM.IntTy,
1989 CGM.IntTy,
1990 CGM.Int64Ty,
1991 CGM.VoidPtrTy};
1992 llvm::FunctionType *FnTy =
1993 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1994 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
1995 break;
1996 }
1997 case OMPRTL__kmpc_doacross_init: {
1998 // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
1999 // num_dims, struct kmp_dim *dims);
2000 llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2001 CGM.Int32Ty,
2002 CGM.Int32Ty,
2003 CGM.VoidPtrTy};
2004 llvm::FunctionType *FnTy =
2005 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2006 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
2007 break;
2008 }
2009 case OMPRTL__kmpc_doacross_fini: {
2010 // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
2011 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2012 llvm::FunctionType *FnTy =
2013 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2014 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
2015 break;
2016 }
2017 case OMPRTL__kmpc_doacross_post: {
2018 // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
2019 // *vec);
2020 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2021 CGM.Int64Ty->getPointerTo()};
2022 llvm::FunctionType *FnTy =
2023 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2024 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
2025 break;
2026 }
2027 case OMPRTL__kmpc_doacross_wait: {
2028 // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
2029 // *vec);
2030 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2031 CGM.Int64Ty->getPointerTo()};
2032 llvm::FunctionType *FnTy =
2033 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2034 RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
2035 break;
2036 }
2037 case OMPRTL__kmpc_task_reduction_init: {
2038 // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
2039 // *data);
2040 llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
2041 llvm::FunctionType *FnTy =
2042 llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2043 RTLFn =
2044 CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
2045 break;
2046 }
2047 case OMPRTL__kmpc_task_reduction_get_th_data: {
2048 // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
2049 // *d);
2050 llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2051 llvm::FunctionType *FnTy =
2052 llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2053 RTLFn = CGM.CreateRuntimeFunction(
2054 FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
2055 break;
2056 }
2057 case OMPRTL__tgt_target: {
2058 // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
2059 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2060 // *arg_types);
2061 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2062 CGM.VoidPtrTy,
2063 CGM.Int32Ty,
2064 CGM.VoidPtrPtrTy,
2065 CGM.VoidPtrPtrTy,
2066 CGM.SizeTy->getPointerTo(),
2067 CGM.Int64Ty->getPointerTo()};
2068 llvm::FunctionType *FnTy =
2069 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2070 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
2071 break;
2072 }
2073 case OMPRTL__tgt_target_nowait: {
2074 // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
2075 // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2076 // int64_t *arg_types);
2077 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2078 CGM.VoidPtrTy,
2079 CGM.Int32Ty,
2080 CGM.VoidPtrPtrTy,
2081 CGM.VoidPtrPtrTy,
2082 CGM.SizeTy->getPointerTo(),
2083 CGM.Int64Ty->getPointerTo()};
2084 llvm::FunctionType *FnTy =
2085 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2086 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
2087 break;
2088 }
2089 case OMPRTL__tgt_target_teams: {
2090 // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
2091 // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2092 // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2093 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2094 CGM.VoidPtrTy,
2095 CGM.Int32Ty,
2096 CGM.VoidPtrPtrTy,
2097 CGM.VoidPtrPtrTy,
2098 CGM.SizeTy->getPointerTo(),
2099 CGM.Int64Ty->getPointerTo(),
2100 CGM.Int32Ty,
2101 CGM.Int32Ty};
2102 llvm::FunctionType *FnTy =
2103 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2104 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
2105 break;
2106 }
2107 case OMPRTL__tgt_target_teams_nowait: {
2108 // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
2109 // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
2110 // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2111 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2112 CGM.VoidPtrTy,
2113 CGM.Int32Ty,
2114 CGM.VoidPtrPtrTy,
2115 CGM.VoidPtrPtrTy,
2116 CGM.SizeTy->getPointerTo(),
2117 CGM.Int64Ty->getPointerTo(),
2118 CGM.Int32Ty,
2119 CGM.Int32Ty};
2120 llvm::FunctionType *FnTy =
2121 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2122 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
2123 break;
2124 }
2125 case OMPRTL__tgt_register_lib: {
2126 // Build void __tgt_register_lib(__tgt_bin_desc *desc);
2127 QualType ParamTy =
2128 CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2129 llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2130 llvm::FunctionType *FnTy =
2131 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2132 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
2133 break;
2134 }
2135 case OMPRTL__tgt_unregister_lib: {
2136 // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
2137 QualType ParamTy =
2138 CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2139 llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2140 llvm::FunctionType *FnTy =
2141 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2142 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
2143 break;
2144 }
2145 case OMPRTL__tgt_target_data_begin: {
2146 // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
2147 // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2148 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2149 CGM.Int32Ty,
2150 CGM.VoidPtrPtrTy,
2151 CGM.VoidPtrPtrTy,
2152 CGM.SizeTy->getPointerTo(),
2153 CGM.Int64Ty->getPointerTo()};
2154 llvm::FunctionType *FnTy =
2155 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2156 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
2157 break;
2158 }
2159 case OMPRTL__tgt_target_data_begin_nowait: {
2160 // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
2161 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2162 // *arg_types);
2163 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2164 CGM.Int32Ty,
2165 CGM.VoidPtrPtrTy,
2166 CGM.VoidPtrPtrTy,
2167 CGM.SizeTy->getPointerTo(),
2168 CGM.Int64Ty->getPointerTo()};
2169 auto *FnTy =
2170 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2171 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
2172 break;
2173 }
2174 case OMPRTL__tgt_target_data_end: {
2175 // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
2176 // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2177 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2178 CGM.Int32Ty,
2179 CGM.VoidPtrPtrTy,
2180 CGM.VoidPtrPtrTy,
2181 CGM.SizeTy->getPointerTo(),
2182 CGM.Int64Ty->getPointerTo()};
2183 llvm::FunctionType *FnTy =
2184 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2185 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
2186 break;
2187 }
2188 case OMPRTL__tgt_target_data_end_nowait: {
2189 // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
2190 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2191 // *arg_types);
2192 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2193 CGM.Int32Ty,
2194 CGM.VoidPtrPtrTy,
2195 CGM.VoidPtrPtrTy,
2196 CGM.SizeTy->getPointerTo(),
2197 CGM.Int64Ty->getPointerTo()};
2198 auto *FnTy =
2199 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2200 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
2201 break;
2202 }
2203 case OMPRTL__tgt_target_data_update: {
2204 // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
2205 // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2206 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2207 CGM.Int32Ty,
2208 CGM.VoidPtrPtrTy,
2209 CGM.VoidPtrPtrTy,
2210 CGM.SizeTy->getPointerTo(),
2211 CGM.Int64Ty->getPointerTo()};
2212 llvm::FunctionType *FnTy =
2213 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2214 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
2215 break;
2216 }
2217 case OMPRTL__tgt_target_data_update_nowait: {
2218 // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
2219 // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2220 // *arg_types);
2221 llvm::Type *TypeParams[] = {CGM.Int64Ty,
2222 CGM.Int32Ty,
2223 CGM.VoidPtrPtrTy,
2224 CGM.VoidPtrPtrTy,
2225 CGM.SizeTy->getPointerTo(),
2226 CGM.Int64Ty->getPointerTo()};
2227 auto *FnTy =
2228 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2229 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
2230 break;
2231 }
2232 }
2233 assert(RTLFn && "Unable to find OpenMP runtime function");
2234 return RTLFn;
2235}
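For reference, a few of the entry points declared by this switch, rewritten as C declarations assembled from the comments above (a sketch only; opaque runtime types such as kmp_task_t are modeled as void *, exactly as the parameter lists do):

#include <cstddef>

using kmp_int32 = int;
struct ident_t;  // as sketched earlier
typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);

extern "C" {
void      __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...);
kmp_int32 __kmpc_global_thread_num(ident_t *loc);
void     *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
                                      void *data, std::size_t size, void ***cache);
void      __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
kmp_int32 __kmpc_omp_task(ident_t *loc, kmp_int32 gtid, void *new_task);
}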
2236
2237llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
2238 bool IVSigned) {
2239 assert((IVSize == 32 || IVSize == 64) &&
2240 "IV size is not compatible with the omp runtime");
2241 auto Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
2242 : "__kmpc_for_static_init_4u")
2243 : (IVSigned ? "__kmpc_for_static_init_8"
2244 : "__kmpc_for_static_init_8u");
2245 auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2246 auto PtrTy = llvm::PointerType::getUnqual(ITy);
2247 llvm::Type *TypeParams[] = {
2248 getIdentTyPointerTy(), // loc
2249 CGM.Int32Ty, // tid
2250 CGM.Int32Ty, // schedtype
2251 llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2252 PtrTy, // p_lower
2253 PtrTy, // p_upper
2254 PtrTy, // p_stride
2255 ITy, // incr
2256 ITy // chunk
2257 };
2258 llvm::FunctionType *FnTy =
2259 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2260 return CGM.CreateRuntimeFunction(FnTy, Name);
2261}
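Expanded for the 32-bit signed case, the declaration created here amounts to the following (a sketch; parameter names are taken from the comments on TypeParams):

using kmp_int32 = int;
struct ident_t;  // as sketched earlier

extern "C" void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 tid,
                                         kmp_int32 schedtype,
                                         kmp_int32 *p_lastiter, kmp_int32 *p_lower,
                                         kmp_int32 *p_upper, kmp_int32 *p_stride,
                                         kmp_int32 incr, kmp_int32 chunk);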
2262
2263llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
2264 bool IVSigned) {
2265 assert((IVSize == 32 || IVSize == 64) &&
2266 "IV size is not compatible with the omp runtime");
2267 auto Name =
2268 IVSize == 32
2269 ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
2270 : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
2271 auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2272 llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
2273 CGM.Int32Ty, // tid
2274 CGM.Int32Ty, // schedtype
2275 ITy, // lower
2276 ITy, // upper
2277 ITy, // stride
2278 ITy // chunk
2279 };
2280 llvm::FunctionType *FnTy =
2281 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2282 return CGM.CreateRuntimeFunction(FnTy, Name);
2283}
2284
2285llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
2286 bool IVSigned) {
2287 assert((IVSize == 32 || IVSize == 64) &&
2288 "IV size is not compatible with the omp runtime");
2289 auto Name =
2290 IVSize == 32
2291 ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
2292 : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
2293 llvm::Type *TypeParams[] = {
2294 getIdentTyPointerTy(), // loc
2295 CGM.Int32Ty, // tid
2296 };
2297 llvm::FunctionType *FnTy =
2298 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2299 return CGM.CreateRuntimeFunction(FnTy, Name);
2300}
2301
2302llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
2303 bool IVSigned) {
2304 assert((IVSize == 32 || IVSize == 64) &&
2305 "IV size is not compatible with the omp runtime");
2306 auto Name =
2307 IVSize == 32
2308 ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
2309 : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
2310 auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2311 auto PtrTy = llvm::PointerType::getUnqual(ITy);
2312 llvm::Type *TypeParams[] = {
2313 getIdentTyPointerTy(), // loc
2314 CGM.Int32Ty, // tid
2315 llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2316 PtrTy, // p_lower
2317 PtrTy, // p_upper
2318 PtrTy // p_stride
2319 };
2320 llvm::FunctionType *FnTy =
2321 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2322 return CGM.CreateRuntimeFunction(FnTy, Name);
2323}
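The dynamic-scheduling helpers follow the same naming scheme; for the 32-bit signed variants, the declarations built by the last three routines correspond roughly to the following sketch (names from the parameter comments):

using kmp_int32 = int;
struct ident_t;  // as sketched earlier

extern "C" {
void      __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
                                 kmp_int32 lower, kmp_int32 upper,
                                 kmp_int32 stride, kmp_int32 chunk);
void      __kmpc_dispatch_fini_4(ident_t *loc, kmp_int32 tid);
kmp_int32 __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
                                 kmp_int32 *p_lower, kmp_int32 *p_upper,
                                 kmp_int32 *p_stride);
}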
2324
2325llvm::Constant *
2326CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
2327 assert(!CGM.getLangOpts().OpenMPUseTLS ||
2328 !CGM.getContext().getTargetInfo().isTLSSupported());
2329 // Lookup the entry, lazily creating it if necessary.
2330 return getOrCreateInternalVariable(CGM.Int8PtrPtrTy,
2331 Twine(CGM.getMangledName(VD)) + ".cache.");
2332}
2333
2334Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
2335 const VarDecl *VD,
2336 Address VDAddr,
2337 SourceLocation Loc) {
2338 if (CGM.getLangOpts().OpenMPUseTLS &&
2339 CGM.getContext().getTargetInfo().isTLSSupported())
2340 return VDAddr;
2341
2342 auto VarTy = VDAddr.getElementType();
2343 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2344 CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2345 CGM.Int8PtrTy),
2346 CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
2347 getOrCreateThreadPrivateCache(VD)};
2348 return Address(CGF.EmitRuntimeCall(
2349 createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2350 VDAddr.getAlignment());
2351}
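At a use site this lowers to one runtime call per access; the sketch below shows the conceptual equivalent for a threadprivate int (variable and cache names are hypothetical; the real cache is the ".cache." internal global created above):

#include <cstddef>

using kmp_int32 = int;
struct ident_t;  // as sketched earlier
extern "C" void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
                                             void *data, std::size_t size,
                                             void ***cache);

// Sketch of the per-access lowering for a threadprivate 'int var' when TLS is
// not used: the runtime hands back this thread's private copy.
static int var;
static void **var_cache;  // stands in for the ".cache." internal global
static int *getThreadPrivateVar(ident_t *loc, kmp_int32 tid) {
  return static_cast<int *>(
      __kmpc_threadprivate_cached(loc, tid, &var, sizeof(var), &var_cache));
}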
2352
2353void CGOpenMPRuntime::emitThreadPrivateVarInit(
2354 CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
2355 llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
2356 // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
2357 // library.
2358 auto OMPLoc = emitUpdateLocation(CGF, Loc);
2359 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
2360 OMPLoc);
2361 // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
2362 // to register constructor/destructor for variable.
2363 llvm::Value *Args[] = {OMPLoc,
2364 CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2365 CGM.VoidPtrTy),
2366 Ctor, CopyCtor, Dtor};
2367 CGF.EmitRuntimeCall(
2368 createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
2369}
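
// [Editor's example - not part of CGOpenMPRuntime.cpp] Hedged sketch of the
// user-level pattern the init routine above services: for a C++ threadprivate
// variable with a ctor/dtor, the generated init function calls
// __kmpc_global_thread_num(&loc) and then __kmpc_threadprivate_register(&loc,
// &var, ctor, /*cctor=*/NULL, dtor), as the comments above describe. The names
// below (Counter, tp_counter) are illustrative only.

struct Counter {
  int value = 0;
  ~Counter() {}                        // forces emission of a dtor function
};
static Counter tp_counter;             // one copy per thread at runtime
#pragma omp threadprivate(tp_counter)  // handled by emitThreadPrivateVarDefinition
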
2370
2371llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
2372 const VarDecl *VD, Address VDAddr, SourceLocation Loc,
2373 bool PerformInit, CodeGenFunction *CGF) {
2374 if (CGM.getLangOpts().OpenMPUseTLS &&
2375 CGM.getContext().getTargetInfo().isTLSSupported())
2376 return nullptr;
2377
2378 VD = VD->getDefinition(CGM.getContext());
2379 if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
2380 ThreadPrivateWithDefinition.insert(VD);
2381 QualType ASTTy = VD->getType();
2382
2383 llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
2384 auto Init = VD->getAnyInitializer();
2385 if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2386 // Generate function that re-emits the declaration's initializer into the
2387 // threadprivate copy of the variable VD
2388 CodeGenFunction CtorCGF(CGM);
2389 FunctionArgList Args;
2390 ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2391 /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2392 ImplicitParamDecl::Other);
2393 Args.push_back(&Dst);
2394
2395 auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2396 CGM.getContext().VoidPtrTy, Args);
2397 auto FTy = CGM.getTypes().GetFunctionType(FI);
2398 auto Fn = CGM.CreateGlobalInitOrDestructFunction(
2399 FTy, ".__kmpc_global_ctor_.", FI, Loc);
2400 CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
2401 Args, Loc, Loc);
2402 auto ArgVal = CtorCGF.EmitLoadOfScalar(
2403 CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2404 CGM.getContext().VoidPtrTy, Dst.getLocation());
2405 Address Arg = Address(ArgVal, VDAddr.getAlignment());
2406 Arg = CtorCGF.Builder.CreateElementBitCast(
2407 Arg, CtorCGF.ConvertTypeForMem(ASTTy));
2408 CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
2409 /*IsInitializer=*/true);
2410 ArgVal = CtorCGF.EmitLoadOfScalar(
2411 CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2412 CGM.getContext().VoidPtrTy, Dst.getLocation());
2413 CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
2414 CtorCGF.FinishFunction();
2415 Ctor = Fn;
2416 }
2417 if (VD->getType().isDestructedType() != QualType::DK_none) {
2418 // Generate function that emits destructor call for the threadprivate copy
2419 // of the variable VD
2420 CodeGenFunction DtorCGF(CGM);
2421 FunctionArgList Args;
2422 ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2423 /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2424 ImplicitParamDecl::Other);
2425 Args.push_back(&Dst);
2426
2427 auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2428 CGM.getContext().VoidTy, Args);
2429 auto FTy = CGM.getTypes().GetFunctionType(FI);
2430 auto Fn = CGM.CreateGlobalInitOrDestructFunction(
2431 FTy, ".__kmpc_global_dtor_.", FI, Loc);
2432 auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2433 DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
2434 Loc, Loc);
2435 // Create a scope with an artificial location for the body of this function.
2436 auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2437 auto ArgVal = DtorCGF.EmitLoadOfScalar(
2438 DtorCGF.GetAddrOfLocalVar(&Dst),
2439 /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
2440 DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
2441 DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2442 DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2443 DtorCGF.FinishFunction();
2444 Dtor = Fn;
2445 }
2446 // Do not emit init function if it is not required.
2447 if (!Ctor && !Dtor)
2448 return nullptr;
2449
2450 llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
2451 auto CopyCtorTy =
2452 llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
2453 /*isVarArg=*/false)->getPointerTo();
2454 // Copying constructor for the threadprivate variable.
2455  // Must be NULL: this parameter is reserved by the runtime, which currently
2456  // requires it to always be NULL and fires an assertion otherwise.
2457 CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
2458 if (Ctor == nullptr) {
2459 auto CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
2460 /*isVarArg=*/false)->getPointerTo();
2461 Ctor = llvm::Constant::getNullValue(CtorTy);
2462 }
2463 if (Dtor == nullptr) {
2464 auto DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
2465 /*isVarArg=*/false)->getPointerTo();
2466 Dtor = llvm::Constant::getNullValue(DtorTy);
2467 }
2468 if (!CGF) {
2469 auto InitFunctionTy =
2470 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
2471 auto InitFunction = CGM.CreateGlobalInitOrDestructFunction(
2472 InitFunctionTy, ".__omp_threadprivate_init_.",
2473 CGM.getTypes().arrangeNullaryFunction());
2474 CodeGenFunction InitCGF(CGM);
2475 FunctionArgList ArgList;
2476 InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
2477 CGM.getTypes().arrangeNullaryFunction(), ArgList,
2478 Loc, Loc);
2479 emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2480 InitCGF.FinishFunction();
2481 return InitFunction;
2482 }
2483 emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2484 }
2485 return nullptr;
2486}
2487
2488Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
2489 QualType VarType,
2490 StringRef Name) {
2491 llvm::Twine VarName(Name, ".artificial.");
2492 llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
2493 llvm::Value *GAddr = getOrCreateInternalVariable(VarLVType, VarName);
2494 llvm::Value *Args[] = {
2495 emitUpdateLocation(CGF, SourceLocation()),
2496 getThreadID(CGF, SourceLocation()),
2497 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
2498 CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
2499 /*IsSigned=*/false),
2500 getOrCreateInternalVariable(CGM.VoidPtrPtrTy, VarName + ".cache.")};
2501 return Address(
2502 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2503 CGF.EmitRuntimeCall(
2504 createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2505 VarLVType->getPointerTo(/*AddrSpace=*/0)),
2506 CGM.getPointerAlign());
2507}
2508
2509/// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen
2510/// function. Here is the logic:
2511/// if (Cond) {
2512/// ThenGen();
2513/// } else {
2514/// ElseGen();
2515/// }
2516void CGOpenMPRuntime::emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
2517 const RegionCodeGenTy &ThenGen,
2518 const RegionCodeGenTy &ElseGen) {
2519 CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
2520
2521 // If the condition constant folds and can be elided, try to avoid emitting
2522 // the condition and the dead arm of the if/else.
2523 bool CondConstant;
2524 if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
2525 if (CondConstant)
2526 ThenGen(CGF);
2527 else
2528 ElseGen(CGF);
2529 return;
2530 }
2531
2532 // Otherwise, the condition did not fold, or we couldn't elide it. Just
2533 // emit the conditional branch.
2534 auto ThenBlock = CGF.createBasicBlock("omp_if.then");
2535 auto ElseBlock = CGF.createBasicBlock("omp_if.else");
2536 auto ContBlock = CGF.createBasicBlock("omp_if.end");
2537 CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
2538
2539 // Emit the 'then' code.
2540 CGF.EmitBlock(ThenBlock);
2541 ThenGen(CGF);
2542 CGF.EmitBranch(ContBlock);
2543 // Emit the 'else' code if present.
2544 // There is no need to emit line number for unconditional branch.
2545 (void)ApplyDebugLocation::CreateEmpty(CGF);
2546 CGF.EmitBlock(ElseBlock);
2547 ElseGen(CGF);
2548 // There is no need to emit line number for unconditional branch.
2549 (void)ApplyDebugLocation::CreateEmpty(CGF);
2550 CGF.EmitBranch(ContBlock);
2551 // Emit the continuation block for code after the if.
2552 CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
2553}
2554
2555void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
2556 llvm::Value *OutlinedFn,
2557 ArrayRef<llvm::Value *> CapturedVars,
2558 const Expr *IfCond) {
2559 if (!CGF.HaveInsertPoint())
2560 return;
2561 auto *RTLoc = emitUpdateLocation(CGF, Loc);
2562 auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
2563 PrePostActionTy &) {
2564 // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
2565 auto &RT = CGF.CGM.getOpenMPRuntime();
2566 llvm::Value *Args[] = {
2567 RTLoc,
2568 CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
2569 CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
2570 llvm::SmallVector<llvm::Value *, 16> RealArgs;
2571 RealArgs.append(std::begin(Args), std::end(Args));
2572 RealArgs.append(CapturedVars.begin(), CapturedVars.end());
2573
2574 auto RTLFn = RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
2575 CGF.EmitRuntimeCall(RTLFn, RealArgs);
2576 };
2577 auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
2578 PrePostActionTy &) {
2579 auto &RT = CGF.CGM.getOpenMPRuntime();
2580 auto ThreadID = RT.getThreadID(CGF, Loc);
2581 // Build calls:
2582 // __kmpc_serialized_parallel(&Loc, GTid);
2583 llvm::Value *Args[] = {RTLoc, ThreadID};
2584 CGF.EmitRuntimeCall(
2585 RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);
2586
2587 // OutlinedFn(&GTid, &zero, CapturedStruct);
2588 auto ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
2589 Address ZeroAddr =
2590 CGF.CreateTempAlloca(CGF.Int32Ty, CharUnits::fromQuantity(4),
2591 /*Name*/ ".zero.addr");
2592 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2593 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2594 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2595 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2596 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2597 RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2598
2599 // __kmpc_end_serialized_parallel(&Loc, GTid);
2600 llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
2601 CGF.EmitRuntimeCall(
2602 RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
2603 EndArgs);
2604 };
2605 if (IfCond)
2606 emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
2607 else {
2608 RegionCodeGenTy ThenRCG(ThenGen);
2609 ThenRCG(CGF);
2610 }
2611}
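
// [Editor's example - not part of CGOpenMPRuntime.cpp] Hedged sketch of what
// emitParallelCall lowers: with an 'if' clause, ThenGen emits
// __kmpc_fork_call(loc, n, microtask, captured...) while ElseGen emits the
// __kmpc_serialized_parallel / outlined call / __kmpc_end_serialized_parallel
// sequence. Function and variable names below are illustrative only.
// Compile with: clang++ -fopenmp example.cpp

#include <cstdio>

void parallel_if_example(bool run_parallel, int data) {
#pragma omp parallel if (run_parallel) firstprivate(data)
  {
    // The body becomes the outlined microtask; 'data' is one of the
    // CapturedVars appended to the __kmpc_fork_call argument list.
    std::printf("data = %d\n", data);
  }
}
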
2612
2613// If we're inside an (outlined) parallel region, use the region info's
2614// thread-ID variable (it is passed as the first argument of the outlined
2615// function, "kmp_int32 *gtid"). Otherwise, if we're not inside a parallel
2616// region but in regular serial code, get the thread ID by calling kmp_int32
2617// __kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary
2618// and return the address of that temp.
2619Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
2620 SourceLocation Loc) {
2621 if (auto *OMPRegionInfo =
2622 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2623 if (OMPRegionInfo->getThreadIDVariable())
2624 return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
2625
2626 auto ThreadID = getThreadID(CGF, Loc);
2627 auto Int32Ty =
2628 CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
2629 auto ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
2630 CGF.EmitStoreOfScalar(ThreadID,
2631 CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
2632
2633 return ThreadIDTemp;
2634}
2635
2636llvm::Constant *
2637CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
2638 const llvm::Twine &Name) {
2639 SmallString<256> Buffer;
2640 llvm::raw_svector_ostream Out(Buffer);
2641 Out << Name;
2642 auto RuntimeName = Out.str();
2643 auto &Elem = *InternalVars.insert(std::make_pair(RuntimeName, nullptr)).first;
2644 if (Elem.second) {
2645    assert(Elem.second->getType()->getPointerElementType() == Ty &&
2646           "OMP internal variable has different type than requested");
2647 return &*Elem.second;
2648 }
2649
2650 return Elem.second = new llvm::GlobalVariable(
2651 CGM.getModule(), Ty, /*IsConstant*/ false,
2652 llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
2653 Elem.first());
2654}
2655
2656llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
2657 llvm::Twine Name(".gomp_critical_user_", CriticalName);
2658 return getOrCreateInternalVariable(KmpCriticalNameTy, Name.concat(".var"));
2659}
2660
2661namespace {
2662/// Common pre(post)-action for different OpenMP constructs.
2663class CommonActionTy final : public PrePostActionTy {
2664 llvm::Value *EnterCallee;
2665 ArrayRef<llvm::Value *> EnterArgs;
2666 llvm::Value *ExitCallee;
2667 ArrayRef<llvm::Value *> ExitArgs;
2668 bool Conditional;
2669 llvm::BasicBlock *ContBlock = nullptr;
2670
2671public:
2672 CommonActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
2673 llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
2674 bool Conditional = false)
2675 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
2676 ExitArgs(ExitArgs), Conditional(Conditional) {}
2677 void Enter(CodeGenFunction &CGF) override {
2678 llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
2679 if (Conditional) {
2680 llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
2681 auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
2682 ContBlock = CGF.createBasicBlock("omp_if.end");
2683 // Generate the branch (If-stmt)
2684 CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
2685 CGF.EmitBlock(ThenBlock);
2686 }
2687 }
2688 void Done(CodeGenFunction &CGF) {
2689 // Emit the rest of blocks/branches
2690 CGF.EmitBranch(ContBlock);
2691 CGF.EmitBlock(ContBlock, true);
2692 }
2693 void Exit(CodeGenFunction &CGF) override {
2694 CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
2695 }
2696};
2697} // anonymous namespace
2698
2699void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
2700 StringRef CriticalName,
2701 const RegionCodeGenTy &CriticalOpGen,
2702 SourceLocation Loc, const Expr *Hint) {
2703 // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
2704 // CriticalOpGen();
2705 // __kmpc_end_critical(ident_t *, gtid, Lock);
2706 // Prepare arguments and build a call to __kmpc_critical
2707 if (!CGF.HaveInsertPoint())
2708 return;
2709 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2710 getCriticalRegionLock(CriticalName)};
2711 llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
2712 std::end(Args));
2713 if (Hint) {
2714 EnterArgs.push_back(CGF.Builder.CreateIntCast(
2715 CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
2716 }
2717 CommonActionTy Action(
2718 createRuntimeFunction(Hint ? OMPRTL__kmpc_critical_with_hint
2719 : OMPRTL__kmpc_critical),
2720 EnterArgs, createRuntimeFunction(OMPRTL__kmpc_end_critical), Args);
2721 CriticalOpGen.setAction(Action);
2722 emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
2723}
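
// [Editor's example - not part of CGOpenMPRuntime.cpp] Hedged sketch of the
// construct emitCriticalRegion lowers: each named critical gets a
// .gomp_critical_user_<name>.var lock (see getCriticalRegionLock above) and
// the body is bracketed by __kmpc_critical[_with_hint] / __kmpc_end_critical.
// Names below are illustrative only.

void critical_example(long &shared_counter) {
#pragma omp parallel
  {
#pragma omp critical(update) // a hint(...) clause would select the _with_hint entry
    {
      ++shared_counter; // serialized across threads by the named lock
    }
  }
}
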
2724
2725void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
2726 const RegionCodeGenTy &MasterOpGen,
2727 SourceLocation Loc) {
2728 if (!CGF.HaveInsertPoint())
2729 return;
2730 // if(__kmpc_master(ident_t *, gtid)) {
2731 // MasterOpGen();
2732 // __kmpc_end_master(ident_t *, gtid);
2733 // }
2734 // Prepare arguments and build a call to __kmpc_master
2735 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2736 CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
2737 createRuntimeFunction(OMPRTL__kmpc_end_master), Args,
2738 /*Conditional=*/true);
2739 MasterOpGen.setAction(Action);
2740 emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
2741 Action.Done(CGF);
2742}
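
// [Editor's example - not part of CGOpenMPRuntime.cpp] Hedged sketch of the
// master construct handled above: the body runs only when __kmpc_master
// returns nonzero and is followed by __kmpc_end_master, matching the
// conditional CommonActionTy pattern (omp_if.then / omp_if.end blocks).

#include <cstdio>

void master_example() {
#pragma omp parallel
  {
#pragma omp master
    { std::printf("executed by the master thread only\n"); }
    // Note: there is no implicit barrier at the end of 'master'.
  }
}
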
2743
2744void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
2745 SourceLocation Loc) {
2746 if (!CGF.HaveInsertPoint())
2747 return;
2748 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
2749 llvm::Value *Args[] = {
2750 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2751 llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
2752 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args);
2753 if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2754 Region->emitUntiedSwitch(CGF);
2755}
2756
2757void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
2758 const RegionCodeGenTy &TaskgroupOpGen,
2759 SourceLocation Loc) {
2760 if (!CGF.HaveInsertPoint())
2761 return;
2762 // __kmpc_taskgroup(ident_t *, gtid);
2763 // TaskgroupOpGen();
2764 // __kmpc_end_taskgroup(ident_t *, gtid);
2765 // Prepare arguments and build a call to __kmpc_taskgroup
2766 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2767 CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
2768 createRuntimeFunction(OMPRTL__kmpc_end_taskgroup),
2769 Args);
2770 TaskgroupOpGen.setAction(Action);
2771 emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
2772}
2773
2774/// Given an array of pointers to variables, project the address of a
2775/// given variable.
2776static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
2777 unsigned Index, const VarDecl *Var) {
2778 // Pull out the pointer to the variable.
2779 Address PtrAddr =
2780 CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize());
2781 llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
2782
2783 Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
2784 Addr = CGF.Builder.CreateElementBitCast(
2785 Addr, CGF.ConvertTypeForMem(Var->getType()));
2786 return Addr;
2787}
2788
2789static llvm::Value *emitCopyprivateCopyFunction(
2790 CodeGenModule &CGM, llvm::Type *ArgsType,
2791 ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
2792 ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
2793 SourceLocation Loc) {
2794 auto &C = CGM.getContext();
2795 // void copy_func(void *LHSArg, void *RHSArg);
2796 FunctionArgList Args;
2797 ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
2798 ImplicitParamDecl::Other);
2799 ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
2800 ImplicitParamDecl::Other);
2801 Args.push_back(&LHSArg);
2802 Args.push_back(&RHSArg);
2803 auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2804 auto *Fn = llvm::Function::Create(
2805 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2806 ".omp.copyprivate.copy_func", &CGM.getModule());
2807 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
2808 CodeGenFunction CGF(CGM);
2809 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2810 // Dest = (void*[n])(LHSArg);
2811 // Src = (void*[n])(RHSArg);
2812 Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2813 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
2814 ArgsType), CGF.getPointerAlign());
2815 Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2816 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
2817 ArgsType), CGF.getPointerAlign());
2818 // *(Type0*)Dst[0] = *(Type0*)Src[0];
2819 // *(Type1*)Dst[1] = *(Type1*)Src[1];
2820 // ...
2821 // *(Typen*)Dst[n] = *(Typen*)Src[n];
2822 for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
2823 auto DestVar = cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
2824 Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
2825
2826 auto SrcVar = cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
2827 Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
2828
2829 auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
2830 QualType Type = VD->getType();
2831 CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
2832 }
2833 CGF.FinishFunction();
2834 return Fn;
2835}
2836
2837void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
2838 const RegionCodeGenTy &SingleOpGen,
2839 SourceLocation Loc,
2840 ArrayRef<const Expr *> CopyprivateVars,
2841 ArrayRef<const Expr *> SrcExprs,
2842 ArrayRef<const Expr *> DstExprs,
2843 ArrayRef<const Expr *> AssignmentOps) {
2844 if (!CGF.HaveInsertPoint())
2845 return;
2846  assert(CopyprivateVars.size() == SrcExprs.size() &&
2847         CopyprivateVars.size() == DstExprs.size() &&
2848         CopyprivateVars.size() == AssignmentOps.size());
2849 auto &C = CGM.getContext();
2850 // int32 did_it = 0;
2851 // if(__kmpc_single(ident_t *, gtid)) {
2852 // SingleOpGen();
2853 // __kmpc_end_single(ident_t *, gtid);
2854 // did_it = 1;
2855 // }
2856 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2857 // <copy_func>, did_it);
2858
2859 Address DidIt = Address::invalid();
2860 if (!CopyprivateVars.empty()) {
2861 // int32 did_it = 0;
2862 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2863 DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
2864 CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
2865 }
2866 // Prepare arguments and build a call to __kmpc_single
2867 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2868 CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
2869 createRuntimeFunction(OMPRTL__kmpc_end_single), Args,
2870 /*Conditional=*/true);
2871 SingleOpGen.setAction(Action);
2872 emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
2873 if (DidIt.isValid()) {
2874 // did_it = 1;
2875 CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
2876 }
2877 Action.Done(CGF);
2878 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2879 // <copy_func>, did_it);
2880 if (DidIt.isValid()) {
2881 llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
2882 auto CopyprivateArrayTy =
2883 C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
2884 /*IndexTypeQuals=*/0);
2885 // Create a list of all private variables for copyprivate.
2886 Address CopyprivateList =
2887 CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
2888 for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
2889 Address Elem = CGF.Builder.CreateConstArrayGEP(
2890 CopyprivateList, I, CGF.getPointerSize());
2891 CGF.Builder.CreateStore(
2892 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2893 CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
2894 Elem);
2895 }
2896 // Build function that copies private values from single region to all other
2897 // threads in the corresponding parallel region.
2898 auto *CpyFn = emitCopyprivateCopyFunction(
2899 CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
2900 CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
2901 auto *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
2902 Address CL =
2903 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
2904 CGF.VoidPtrTy);
2905 auto *DidItVal = CGF.Builder.CreateLoad(DidIt);
2906 llvm::Value *Args[] = {
2907 emitUpdateLocation(CGF, Loc), // ident_t *<loc>
2908 getThreadID(CGF, Loc), // i32 <gtid>
2909 BufSize, // size_t <buf_size>
2910 CL.getPointer(), // void *<copyprivate list>
2911 CpyFn, // void (*) (void *, void *) <copy_func>
2912 DidItVal // i32 did_it
2913 };
2914 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args);
2915 }
2916}
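
// [Editor's example - not part of CGOpenMPRuntime.cpp] Hedged sketch of a
// single construct with copyprivate, the case that drives the did_it flag,
// the generated .omp.copyprivate.copy_func, and the __kmpc_copyprivate call
// emitted above. Names below are illustrative only.

void single_copyprivate_example() {
  int token = 0;
#pragma omp parallel firstprivate(token)
  {
#pragma omp single copyprivate(token)
    {
      token = 42; // broadcast to every thread's private copy via copy_func
    }
    // After the construct, all threads observe token == 42.
  }
}
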
2917
2918void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
2919 const RegionCodeGenTy &OrderedOpGen,
2920 SourceLocation Loc, bool IsThreads) {
2921 if (!CGF.HaveInsertPoint())
2922 return;
2923 // __kmpc_ordered(ident_t *, gtid);
2924 // OrderedOpGen();
2925 // __kmpc_end_ordered(ident_t *, gtid);
2926 // Prepare arguments and build a call to __kmpc_ordered
2927 if (IsThreads) {
2928 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2929 CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
2930 createRuntimeFunction(OMPRTL__kmpc_end_ordered),
2931 Args);
2932 OrderedOpGen.setAction(Action);
2933 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2934 return;
2935 }
2936 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2937}
2938
2939void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
2940 OpenMPDirectiveKind Kind, bool EmitChecks,
2941 bool ForceSimpleCall) {
2942 if (!CGF.HaveInsertPoint())
2943 return;
2944 // Build call __kmpc_cancel_barrier(loc, thread_id);
2945 // Build call __kmpc_barrier(loc, thread_id);
2946 unsigned Flags;
2947 if (Kind == OMPD_for)
2948 Flags = OMP_IDENT_BARRIER_IMPL_FOR;
2949 else if (Kind == OMPD_sections)
2950 Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
2951 else if (Kind == OMPD_single)
2952 Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
2953 else if (Kind == OMPD_barrier)
2954 Flags = OMP_IDENT_BARRIER_EXPL;
2955 else
2956 Flags = OMP_IDENT_BARRIER_IMPL;
2957 // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
2958 // thread_id);
2959 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2960 getThreadID(CGF, Loc)};
2961 if (auto *OMPRegionInfo =
2962 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
2963 if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
2964 auto *Result = CGF.EmitRuntimeCall(
2965 createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
2966 if (EmitChecks) {
2967 // if (__kmpc_cancel_barrier()) {
2968 // exit from construct;
2969 // }
2970 auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
2971 auto *ContBB = CGF.createBasicBlock(".cancel.continue");
2972 auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
2973 CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
2974 CGF.EmitBlock(ExitBB);
2975 // exit from construct;
2976 auto CancelDestination =
2977 CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
2978 CGF.EmitBranchThroughCleanup(CancelDestination);
2979 CGF.EmitBlock(ContBB, /*IsFinished=*/true);
2980 }
2981 return;
2982 }
2983 }
2984 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
2985}
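
// [Editor's example - not part of CGOpenMPRuntime.cpp] Hedged sketch of an
// explicit barrier: emitBarrierCall lowers it to __kmpc_barrier with the
// OMP_IDENT_BARRIER_EXPL flag, or to __kmpc_cancel_barrier plus the
// .cancel.exit / .cancel.continue check inside a cancellable region.

void barrier_example() {
#pragma omp parallel
  {
    // ... phase 1 work ...
#pragma omp barrier
    // Phase 2 starts only after every thread has finished phase 1.
  }
}
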
2986
2987/// \brief Map the OpenMP loop schedule to the runtime enumeration.
2988static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
2989 bool Chunked, bool Ordered) {
2990 switch (ScheduleKind) {
2991 case OMPC_SCHEDULE_static:
2992 return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
2993 : (Ordered ? OMP_ord_static : OMP_sch_static);
2994 case OMPC_SCHEDULE_dynamic:
2995 return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
2996 case OMPC_SCHEDULE_guided:
2997 return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
2998 case OMPC_SCHEDULE_runtime:
2999 return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
3000 case OMPC_SCHEDULE_auto:
3001 return Ordered ? OMP_ord_auto : OMP_sch_auto;
3002 case OMPC_SCHEDULE_unknown:
3003    assert(!Chunked && "chunk was specified but schedule kind not known");
3004 return Ordered ? OMP_ord_static : OMP_sch_static;
3005 }
3006  llvm_unreachable("Unexpected runtime schedule");
3007}
3008
3009/// \brief Map the OpenMP distribute schedule to the runtime enumeration.
3010static OpenMPSchedType
3011getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
3012 // only static is allowed for dist_schedule
3013 return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
3014}
3015
3016bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
3017 bool Chunked) const {
3018 auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
3019 return Schedule == OMP_sch_static;
3020}
3021
3022bool CGOpenMPRuntime::isStaticNonchunked(
3023 OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
3024 auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
3025 return Schedule == OMP_dist_sch_static;
3026}
3027
3028
3029bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
3030 auto Schedule =
3031 getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
3032  assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
3033 return Schedule != OMP_sch_static;
3034}
3035
3036static int addMonoNonMonoModifier(OpenMPSchedType Schedule,
3037 OpenMPScheduleClauseModifier M1,
3038 OpenMPScheduleClauseModifier M2) {
3039 int Modifier = 0;
3040 switch (M1) {
3041 case OMPC_SCHEDULE_MODIFIER_monotonic:
3042 Modifier = OMP_sch_modifier_monotonic;
3043 break;
3044 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3045 Modifier = OMP_sch_modifier_nonmonotonic;
3046 break;
3047 case OMPC_SCHEDULE_MODIFIER_simd:
3048 if (Schedule == OMP_sch_static_chunked)
3049 Schedule = OMP_sch_static_balanced_chunked;
3050 break;
3051 case OMPC_SCHEDULE_MODIFIER_last:
3052 case OMPC_SCHEDULE_MODIFIER_unknown:
3053 break;
3054 }
3055 switch (M2) {
3056 case OMPC_SCHEDULE_MODIFIER_monotonic:
3057 Modifier = OMP_sch_modifier_monotonic;
3058 break;
3059 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3060 Modifier = OMP_sch_modifier_nonmonotonic;
3061 break;
3062 case OMPC_SCHEDULE_MODIFIER_simd:
3063 if (Schedule == OMP_sch_static_chunked)
3064 Schedule = OMP_sch_static_balanced_chunked;
3065 break;
3066 case OMPC_SCHEDULE_MODIFIER_last:
3067 case OMPC_SCHEDULE_MODIFIER_unknown:
3068 break;
3069 }
3070 return Schedule | Modifier;
3071}
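
// [Editor's example - not part of CGOpenMPRuntime.cpp] Hedged sketch of how a
// schedule clause with a modifier flows through getRuntimeSchedule and
// addMonoNonMonoModifier: schedule(nonmonotonic: dynamic, 8) maps to
// OMP_sch_dynamic_chunked with OMP_sch_modifier_nonmonotonic OR'ed into the
// schedule value passed to __kmpc_dispatch_init. Names below are illustrative.

void schedule_modifier_example(float *a, int n) {
#pragma omp parallel for schedule(nonmonotonic: dynamic, 8)
  for (int i = 0; i < n; ++i)
    a[i] *= 2.0f;
}
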
3072
3073void CGOpenMPRuntime::emitForDispatchInit(
3074 CodeGenFunction &CGF, SourceLocation Loc,
3075 const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
3076 bool Ordered, const DispatchRTInput &DispatchValues) {
3077 if (!CGF.HaveInsertPoint())
3078 return;
3079 OpenMPSchedType Schedule = getRuntimeSchedule(
3080 ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
3081  assert(Ordered ||
3082         (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
3083          Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
3084          Schedule != OMP_sch_static_balanced_chunked));
3085 // Call __kmpc_dispatch_init(
3086 // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
3087 // kmp_int[32|64] lower, kmp_int[32|64] upper,
3088 // kmp_int[32|64] stride, kmp_int[32|64] chunk);
3089
3090 // If the Chunk was not specified in the clause - use default value 1.
3091 llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
3092 : CGF.Builder.getIntN(IVSize, 1);
3093 llvm::Value *Args[] = {
3094 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3095 CGF.Builder.getInt32(addMonoNonMonoModifier(
3096 Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
3097 DispatchValues.LB, // Lower
3098 DispatchValues.UB, // Upper
3099 CGF.Builder.getIntN(IVSize, 1), // Stride
3100 Chunk // Chunk
3101 };
3102 CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
3103}
3104
3105static void emitForStaticInitCall(
3106 CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
3107 llvm::Constant *ForStaticInitFunction, OpenMPSchedType Schedule,
3108 OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
3109 const CGOpenMPRuntime::StaticRTInput &Values) {
3110 if (!CGF.HaveInsertPoint())
3111 return;
3112
3113  assert(!Values.Ordered);
3114  assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
3115         Schedule == OMP_sch_static_balanced_chunked ||
3116         Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
3117         Schedule == OMP_dist_sch_static ||
3118         Schedule == OMP_dist_sch_static_chunked);
3119
3120 // Call __kmpc_for_static_init(
3121 // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
3122 // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
3123 // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
3124 // kmp_int[32|64] incr, kmp_int[32|64] chunk);
3125 llvm::Value *Chunk = Values.Chunk;
3126 if (Chunk == nullptr) {
3127    assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
3128            Schedule == OMP_dist_sch_static) &&
3129           "expected static non-chunked schedule");
3130 // If the Chunk was not specified in the clause - use default value 1.
3131 Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
3132 } else {
3133    assert((Schedule == OMP_sch_static_chunked ||
3134            Schedule == OMP_sch_static_balanced_chunked ||
3135            Schedule == OMP_ord_static_chunked ||
3136            Schedule == OMP_dist_sch_static_chunked) &&
3137           "expected static chunked schedule");
3138 }
3139 llvm::Value *Args[] = {
3140 UpdateLocation,
3141 ThreadId,
3142 CGF.Builder.getInt32(addMonoNonMonoModifier(Schedule, M1,
3143 M2)), // Schedule type
3144 Values.IL.getPointer(), // &isLastIter
3145 Values.LB.getPointer(), // &LB
3146 Values.UB.getPointer(), // &UB
3147 Values.ST.getPointer(), // &Stride
3148 CGF.Builder.getIntN(Values.IVSize, 1), // Incr
3149 Chunk // Chunk
3150 };
3151 CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
3152}
3153
3154void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
3155 SourceLocation Loc,
3156 OpenMPDirectiveKind DKind,
3157 const OpenMPScheduleTy &ScheduleKind,
3158 const StaticRTInput &Values) {
3159 OpenMPSchedType ScheduleNum = getRuntimeSchedule(
3160 ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
3161  assert(isOpenMPWorksharingDirective(DKind) &&
3162         "Expected loop-based or sections-based directive.");
3163 auto *UpdatedLocation = emitUpdateLocation(CGF, Loc,
3164 isOpenMPLoopDirective(DKind)
3165 ? OMP_IDENT_WORK_LOOP
3166 : OMP_IDENT_WORK_SECTIONS);
3167 auto *ThreadId = getThreadID(CGF, Loc);
3168 auto *StaticInitFunction =
3169 createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3170 emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3171 ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
3172}
3173
3174void CGOpenMPRuntime::emitDistributeStaticInit(
3175 CodeGenFunction &CGF, SourceLocation Loc,
3176 OpenMPDistScheduleClauseKind SchedKind,
3177 const CGOpenMPRuntime::StaticRTInput &Values) {
3178 OpenMPSchedType ScheduleNum =
3179 getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
3180 auto *UpdatedLocation =
3181 emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
3182 auto *ThreadId = getThreadID(CGF, Loc);
3183 auto *StaticInitFunction =
3184 createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3185 emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3186 ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
3187 OMPC_SCHEDULE_MODIFIER_unknown, Values);
3188}
3189
3190void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
3191 SourceLocation Loc,
3192 OpenMPDirectiveKind DKind) {
3193 if (!CGF.HaveInsertPoint())
3194 return;
3195 // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
3196 llvm::Value *Args[] = {
3197 emitUpdateLocation(CGF, Loc,
3198 isOpenMPDistributeDirective(DKind)
3199 ? OMP_IDENT_WORK_DISTRIBUTE
3200 : isOpenMPLoopDirective(DKind)
3201 ? OMP_IDENT_WORK_LOOP
3202 : OMP_IDENT_WORK_SECTIONS),
3203 getThreadID(CGF, Loc)};
3204 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
3205 Args);
3206}
3207
3208void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
3209 SourceLocation Loc,
3210 unsigned IVSize,
3211 bool IVSigned) {
3212 if (!CGF.HaveInsertPoint())
3213 return;
3214 // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
3215 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3216 CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
3217}
3218
3219llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
3220 SourceLocation Loc, unsigned IVSize,
3221 bool IVSigned, Address IL,
3222 Address LB, Address UB,
3223 Address ST) {
3224 // Call __kmpc_dispatch_next(
3225 // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
3226 // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
3227 // kmp_int[32|64] *p_stride);
3228 llvm::Value *Args[] = {
3229 emitUpdateLocation(CGF, Loc),
3230 getThreadID(CGF, Loc),
3231 IL.getPointer(), // &isLastIter
3232 LB.getPointer(), // &Lower
3233 UB.getPointer(), // &Upper
3234 ST.getPointer() // &Stride
3235 };
3236 llvm::Value *Call =
3237 CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
3238 return CGF.EmitScalarConversion(
3239 Call, CGF.getContext().getIntTypeForBitwidth(32, /* Signed */ true),
3240 CGF.getContext().BoolTy, Loc);
3241}
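
// [Editor's example - not part of CGOpenMPRuntime.cpp] Hedged sketch of the
// dynamic-loop driver built from emitForDispatchInit and emitForNext above;
// conceptually the emitted code is
//   __kmpc_dispatch_init_4(loc, tid, sched, lb, ub, /*stride=*/1, chunk);
//   while (__kmpc_dispatch_next_4(loc, tid, &last, &lb, &ub, &st))
//     for (iv = lb; iv <= ub; iv += st) body(iv);
// A user-level loop that takes this path (names illustrative only):

void dynamic_loop_example(double *x, int n) {
#pragma omp parallel for schedule(dynamic, 16)
  for (int i = 0; i < n; ++i)
    x[i] = x[i] * x[i];
}
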
3242
3243void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
3244 llvm::Value *NumThreads,
3245 SourceLocation Loc) {
3246 if (!CGF.HaveInsertPoint())
3247 return;
3248 // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
3249 llvm::Value *Args[] = {
3250 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3251 CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
3252 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
3253 Args);
3254}
3255
3256void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
3257 OpenMPProcBindClauseKind ProcBind,
3258 SourceLocation Loc) {
3259 if (!CGF.HaveInsertPoint())
3260 return;
3261 // Constants for proc bind value accepted by the runtime.
3262 enum ProcBindTy {
3263 ProcBindFalse = 0,
3264 ProcBindTrue,
3265 ProcBindMaster,
3266 ProcBindClose,
3267 ProcBindSpread,
3268 ProcBindIntel,
3269 ProcBindDefault
3270 } RuntimeProcBind;
3271 switch (ProcBind) {
3272 case OMPC_PROC_BIND_master:
3273 RuntimeProcBind = ProcBindMaster;
3274 break;
3275 case OMPC_PROC_BIND_close:
3276 RuntimeProcBind = ProcBindClose;
3277 break;
3278 case OMPC_PROC_BIND_spread:
3279 RuntimeProcBind = ProcBindSpread;
3280 break;
3281 case OMPC_PROC_BIND_unknown:
3282    llvm_unreachable("Unsupported proc_bind value.");
3283 }
3284 // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
3285 llvm::Value *Args[] = {
3286 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3287 llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
3288 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
3289}
3290
3291void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
3292 SourceLocation Loc) {
3293 if (!CGF.HaveInsertPoint())
3294 return;
3295 // Build call void __kmpc_flush(ident_t *loc)
3296 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
3297 emitUpdateLocation(CGF, Loc));
3298}
3299
3300namespace {
3301/// \brief Indexes of fields for type kmp_task_t.
3302enum KmpTaskTFields {
3303 /// \brief List of shared variables.
3304 KmpTaskTShareds,
3305 /// \brief Task routine.
3306 KmpTaskTRoutine,
3307 /// \brief Partition id for the untied tasks.
3308 KmpTaskTPartId,
3309 /// Function with call of destructors for private variables.
3310 Data1,
3311 /// Task priority.
3312 Data2,
3313 /// (Taskloops only) Lower bound.
3314 KmpTaskTLowerBound,
3315 /// (Taskloops only) Upper bound.
3316 KmpTaskTUpperBound,
3317 /// (Taskloops only) Stride.
3318 KmpTaskTStride,
3319 /// (Taskloops only) Is last iteration flag.
3320 KmpTaskTLastIter,
3321 /// (Taskloops only) Reduction data.
3322 KmpTaskTReductions,
3323};
3324} // anonymous namespace
3325
3326bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
3327 // FIXME: Add other entries type when they become supported.
3328 return OffloadEntriesTargetRegion.empty();
3329}
3330
3331/// \brief Initialize target region entry.
3332void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3333 initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3334 StringRef ParentName, unsigned LineNum,
3335 unsigned Order) {
3336  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3337                                             "only required for the device "
3338                                             "code generation.");
3339 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
3340 OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
3341 /*Flags=*/0);
3342 ++OffloadingEntriesNum;
3343}
3344
3345void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3346 registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3347 StringRef ParentName, unsigned LineNum,
3348 llvm::Constant *Addr, llvm::Constant *ID,
3349 int32_t Flags) {
3350  // If we are emitting code for a target, the entry is already initialized;
3351  // it only has to be registered.
3352 if (CGM.getLangOpts().OpenMPIsDevice) {
3353    assert(hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum) &&
3354           "Entry must exist.");
3355 auto &Entry =
3356 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
3357    assert(Entry.isValid() && "Entry not initialized!");
3358 Entry.setAddress(Addr);
3359 Entry.setID(ID);
3360 Entry.setFlags(Flags);
3361 return;
3362 } else {
3363 OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum++, Addr, ID, Flags);
3364 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
3365 }
3366}
3367
3368bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
3369 unsigned DeviceID, unsigned FileID, StringRef ParentName,
3370 unsigned LineNum) const {
3371 auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
3372 if (PerDevice == OffloadEntriesTargetRegion.end())
3373 return false;
3374 auto PerFile = PerDevice->second.find(FileID);
3375 if (PerFile == PerDevice->second.end())
3376 return false;
3377 auto PerParentName = PerFile->second.find(ParentName);
3378 if (PerParentName == PerFile->second.end())
3379 return false;
3380 auto PerLine = PerParentName->second.find(LineNum);
3381 if (PerLine == PerParentName->second.end())
3382 return false;
3383 // Fail if this entry is already registered.
3384 if (PerLine->second.getAddress() || PerLine->second.getID())
3385 return false;
3386 return true;
3387}
3388
3389void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
3390 const OffloadTargetRegionEntryInfoActTy &Action) {
3391 // Scan all target region entries and perform the provided action.
3392 for (auto &D : OffloadEntriesTargetRegion)
3393 for (auto &F : D.second)
3394 for (auto &P : F.second)
3395 for (auto &L : P.second)
3396 Action(D.first, F.first, P.first(), L.first, L.second);
3397}
3398
3399/// \brief Create a Ctor/Dtor-like function whose body is emitted through
3400/// \a Codegen. This is used to emit the two functions that register and
3401/// unregister the descriptor of the current compilation unit.
3402static llvm::Function *
3403createOffloadingBinaryDescriptorFunction(CodeGenModule &CGM, StringRef Name,
3404 const RegionCodeGenTy &Codegen) {
3405 auto &C = CGM.getContext();
3406 FunctionArgList Args;
3407 ImplicitParamDecl DummyPtr(C, C.VoidPtrTy, ImplicitParamDecl::Other);
3408 Args.push_back(&DummyPtr);
3409
3410 CodeGenFunction CGF(CGM);
3411 // Disable debug info for global (de-)initializers because they are not part
3412 // of any particular construct.
3413 CGF.disableDebugInfo();
3414 auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3415 auto FTy = CGM.getTypes().GetFunctionType(FI);
3416 auto *Fn = CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI);
3417 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FI, Args);
3418 Codegen(CGF);
3419 CGF.FinishFunction();
3420 return Fn;
3421}
3422
3423llvm::Function *
3424CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
3425 // If we don't have entries or if we are emitting code for the device, we
3426 // don't need to do anything.
3427 if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
[2] Assuming the condition is false
[3] Assuming the condition is false
[4] Taking false branch
3428 return nullptr;
3429
3430 auto &M = CGM.getModule();
3431 auto &C = CGM.getContext();
3432
3433 // Get list of devices we care about
3434 auto &Devices = CGM.getLangOpts().OMPTargetTriples;
3435
3436 // We should be creating an offloading descriptor only if there are devices
3437 // specified.
3438 assert(!Devices.empty() && "No OpenMP offloading devices??");
3439
3440 // Create the external variables that will point to the begin and end of the
3441 // host entries section. These will be defined by the linker.
3442 auto *OffloadEntryTy =
3443 CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
3444 llvm::GlobalVariable *HostEntriesBegin = new llvm::GlobalVariable(
3445 M, OffloadEntryTy, /*isConstant=*/true,
3446 llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
3447 ".omp_offloading.entries_begin");
3448 llvm::GlobalVariable *HostEntriesEnd = new llvm::GlobalVariable(
3449 M, OffloadEntryTy, /*isConstant=*/true,
3450 llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
3451 ".omp_offloading.entries_end");
3452
3453 // Create all device images
3454 auto *DeviceImageTy = cast<llvm::StructType>(
3455 CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
3456 ConstantInitBuilder DeviceImagesBuilder(CGM);
3457 auto DeviceImagesEntries = DeviceImagesBuilder.beginArray(DeviceImageTy);
3458
3459 for (unsigned i = 0; i < Devices.size(); ++i) {
[5] Assuming the condition is false
[6] Loop condition is false. Execution continues on line 3479
3460 StringRef T = Devices[i].getTriple();
3461 auto *ImgBegin = new llvm::GlobalVariable(
3462 M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
3463 /*Initializer=*/nullptr,
3464 Twine(".omp_offloading.img_start.") + Twine(T));
3465 auto *ImgEnd = new llvm::GlobalVariable(
3466 M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
3467 /*Initializer=*/nullptr, Twine(".omp_offloading.img_end.") + Twine(T));
3468
3469 auto Dev = DeviceImagesEntries.beginStruct(DeviceImageTy);
3470 Dev.add(ImgBegin);
3471 Dev.add(ImgEnd);
3472 Dev.add(HostEntriesBegin);
3473 Dev.add(HostEntriesEnd);
3474 Dev.finishAndAddTo(DeviceImagesEntries);
3475 }
3476
3477 // Create device images global array.
3478 llvm::GlobalVariable *DeviceImages =
3479 DeviceImagesEntries.finishAndCreateGlobal(".omp_offloading.device_images",
3480 CGM.getPointerAlign(),
3481 /*isConstant=*/true);
3482 DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3483
3484 // This is a Zero array to be used in the creation of the constant expressions
3485 llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
3486 llvm::Constant::getNullValue(CGM.Int32Ty)};
3487
3488 // Create the target region descriptor.
3489 auto *BinaryDescriptorTy = cast<llvm::StructType>(
3490 CGM.getTypes().ConvertTypeForMem(getTgtBinaryDescriptorQTy()));
[7] Calling 'CGOpenMPRuntime::getTgtBinaryDescriptorQTy'
3491 ConstantInitBuilder DescBuilder(CGM);
3492 auto DescInit = DescBuilder.beginStruct(BinaryDescriptorTy);
3493 DescInit.addInt(CGM.Int32Ty, Devices.size());
3494 DescInit.add(llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
3495 DeviceImages,
3496 Index));
3497 DescInit.add(HostEntriesBegin);
3498 DescInit.add(HostEntriesEnd);
3499
3500 auto *Desc = DescInit.finishAndCreateGlobal(".omp_offloading.descriptor",
3501 CGM.getPointerAlign(),
3502 /*isConstant=*/true);
3503
3504 // Emit code to register or unregister the descriptor at execution
3505 // startup or closing, respectively.
3506
3507 // Create a variable to drive the registration and unregistration of the
3508 // descriptor, so we can reuse the logic that emits Ctors and Dtors.
3509 auto *IdentInfo = &C.Idents.get(".omp_offloading.reg_unreg_var");
3510 ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(), SourceLocation(),
3511 IdentInfo, C.CharTy, ImplicitParamDecl::Other);
3512
3513 auto *UnRegFn = createOffloadingBinaryDescriptorFunction(
3514 CGM, ".omp_offloading.descriptor_unreg",
3515 [&](CodeGenFunction &CGF, PrePostActionTy &) {
3516 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
3517 Desc);
3518 });
3519 auto *RegFn = createOffloadingBinaryDescriptorFunction(
3520 CGM, ".omp_offloading.descriptor_reg",
3521 [&](CodeGenFunction &CGF, PrePostActionTy &) {
3522 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_lib),
3523 Desc);
3524 CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
3525 });
3526 if (CGM.supportsCOMDAT()) {
3527 // It is sufficient to call registration function only once, so create a
3528 // COMDAT group for registration/unregistration functions and associated
3529 // data. That would reduce startup time and code size. Registration
3530 // function serves as a COMDAT group key.
3531 auto ComdatKey = M.getOrInsertComdat(RegFn->getName());
3532 RegFn->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
3533 RegFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
3534 RegFn->setComdat(ComdatKey);
3535 UnRegFn->setComdat(ComdatKey);
3536 DeviceImages->setComdat(ComdatKey);
3537 Desc->setComdat(ComdatKey);
3538 }
3539 return RegFn;
3540}
3541
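The registration and unregistration functions emitted above reduce to a pair of calls into the offloading runtime plus a global-destructor registration. The sketch below is illustrative only and is not part of the analyzed file: the void* parameter types of the runtime entry points are assumptions, and atexit() merely stands in for the CGM.getCXXABI().registerGlobalDtor() call used by the real code.

// Illustrative sketch of what .omp_offloading.descriptor_reg and
// .omp_offloading.descriptor_unreg do at program startup and shutdown.
#include <cstdlib>

extern "C" void __tgt_register_lib(void *Desc);   // runtime entry, assumed signature
extern "C" void __tgt_unregister_lib(void *Desc); // runtime entry, assumed signature

static void *OmpOffloadingDescriptor; // stands in for .omp_offloading.descriptor

static void omp_offloading_descriptor_unreg() {
  __tgt_unregister_lib(OmpOffloadingDescriptor);
}

static void omp_offloading_descriptor_reg() {
  __tgt_register_lib(OmpOffloadingDescriptor);
  // The generated code registers the unregister function as a global
  // destructor through the C++ ABI; atexit() is only a stand-in here.
  atexit(omp_offloading_descriptor_unreg);
}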
3542void CGOpenMPRuntime::createOffloadEntry(llvm::Constant *ID,
3543 llvm::Constant *Addr, uint64_t Size,
3544 int32_t Flags) {
3545 StringRef Name = Addr->getName();
3546 auto *TgtOffloadEntryType = cast<llvm::StructType>(
3547 CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy()));
3548 llvm::LLVMContext &C = CGM.getModule().getContext();
3549 llvm::Module &M = CGM.getModule();
3550
3551 // Make sure the address has the right type.
3552 llvm::Constant *AddrPtr = llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy);
3553
3554 // Create constant string with the name.
3555 llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
3556
3557 llvm::GlobalVariable *Str =
3558 new llvm::GlobalVariable(M, StrPtrInit->getType(), /*isConstant=*/true,
3559 llvm::GlobalValue::InternalLinkage, StrPtrInit,
3560 ".omp_offloading.entry_name");
3561 Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3562 llvm::Constant *StrPtr = llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy);
3563
3564 // We can't have any padding between symbols, so we need to have 1-byte
3565 // alignment.
3566 auto Align = CharUnits::fromQuantity(1);
3567
3568 // Create the entry struct.
3569 ConstantInitBuilder EntryBuilder(CGM);
3570 auto EntryInit = EntryBuilder.beginStruct(TgtOffloadEntryType);
3571 EntryInit.add(AddrPtr);
3572 EntryInit.add(StrPtr);
3573 EntryInit.addInt(CGM.SizeTy, Size);
3574 EntryInit.addInt(CGM.Int32Ty, Flags);
3575 EntryInit.addInt(CGM.Int32Ty, 0);
3576 llvm::GlobalVariable *Entry = EntryInit.finishAndCreateGlobal(
3577 Twine(".omp_offloading.entry.") + Name, Align,
3578 /*constant*/ true, llvm::GlobalValue::ExternalLinkage);
3579
3580 // The entry has to be created in the section the linker expects it to be.
3581 Entry->setSection(".omp_offloading.entries");
3582}
3583
3584void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
3585 // Emit the offloading entries and metadata so that the device codegen side
3586 // can easily figure out what to emit. The produced metadata looks like
3587 // this:
3588 //
3589 // !omp_offload.info = !{!1, ...}
3590 //
3591 // Right now we only generate metadata for functions that contain target
3592 // regions.
3593
3594 // If we do not have entries, we don't need to do anything.
3595 if (OffloadEntriesInfoManager.empty())
3596 return;
3597
3598 llvm::Module &M = CGM.getModule();
3599 llvm::LLVMContext &C = M.getContext();
3600 SmallVector<OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
3601 OrderedEntries(OffloadEntriesInfoManager.size());
3602
3603 // Create the offloading info metadata node.
3604 llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
3605
3606 // Auxiliary methods to create metadata values and strings.
3607 auto getMDInt = [&](unsigned v) {
3608 return llvm::ConstantAsMetadata::get(
3609 llvm::ConstantInt::get(llvm::Type::getInt32Ty(C), v));
3610 };
3611
3612 auto getMDString = [&](StringRef v) { return llvm::MDString::get(C, v); };
3613
3614 // Create a function that emits metadata for each target region entry.
3615 auto &&TargetRegionMetadataEmitter = [&](
3616 unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned Line,
3617 OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
3618 llvm::SmallVector<llvm::Metadata *, 32> Ops;
3619 // Generate metadata for target regions. Each entry of this metadata
3620 // contains:
3621 // - Entry 0 -> Kind of this type of metadata (0).
3622 // - Entry 1 -> Device ID of the file where the entry was identified.
3623 // - Entry 2 -> File ID of the file where the entry was identified.
3624 // - Entry 3 -> Mangled name of the function where the entry was identified.
3625 // - Entry 4 -> Line in the file where the entry was identified.
3626 // - Entry 5 -> Order the entry was created.
3627 // The first element of the metadata node is the kind.
3628 Ops.push_back(getMDInt(E.getKind()));
3629 Ops.push_back(getMDInt(DeviceID));
3630 Ops.push_back(getMDInt(FileID));
3631 Ops.push_back(getMDString(ParentName));
3632 Ops.push_back(getMDInt(Line));
3633 Ops.push_back(getMDInt(E.getOrder()));
3634
3635 // Save this entry in the right position of the ordered entries array.
3636 OrderedEntries[E.getOrder()] = &E;
3637
3638 // Add metadata to the named metadata node.
3639 MD->addOperand(llvm::MDNode::get(C, Ops));
3640 };
3641
3642 OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
3643 TargetRegionMetadataEmitter);
3644
3645 for (auto *E : OrderedEntries) {
3646 assert(E && "All ordered entries must exist!");
3647 if (auto *CE =
3648 dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
3649 E)) {
3650 assert(CE->getID() && CE->getAddress() &&
3651 "Entry ID and Addr are invalid!");
3652 createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0);
3653 } else
3654 llvm_unreachable("Unsupported entry kind.");
3655 }
3656}
3657
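For reference, the TargetRegionMetadataEmitter lambda above produces one !omp_offload.info operand per target region. The standalone sketch below builds one such node with the same LLVM metadata APIs; the device ID, file ID, parent name, line, and order values are hypothetical and chosen only for illustration.

// Standalone sketch (not from the analyzed file): construct one
// omp_offload.info operand the way the lambda above does.
#include "llvm/IR/Constants.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"

static void addExampleTargetRegionMD(llvm::Module &M) {
  llvm::LLVMContext &Ctx = M.getContext();
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
  auto GetMDInt = [&](unsigned V) {
    return llvm::ConstantAsMetadata::get(
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), V));
  };
  llvm::Metadata *Ops[] = {
      GetMDInt(0),                         // kind: target region entry
      GetMDInt(1),                         // device ID (hypothetical)
      GetMDInt(2),                         // file ID (hypothetical)
      llvm::MDString::get(Ctx, "_Z3foov"), // mangled parent name (hypothetical)
      GetMDInt(42),                        // line number (hypothetical)
      GetMDInt(0)};                        // creation order
  MD->addOperand(llvm::MDNode::get(Ctx, Ops));
}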
3658/// \brief Loads all the offload entries information from the host IR
3659/// metadata.
3660void CGOpenMPRuntime::loadOffloadInfoMetadata() {
3661 // If we are in target mode, load the metadata from the host IR. This code has
3662 // to match the metadata creation in createOffloadEntriesAndInfoMetadata().
3663
3664 if (!CGM.getLangOpts().OpenMPIsDevice)
3665 return;
3666
3667 if (CGM.getLangOpts().OMPHostIRFile.empty())
3668 return;
3669
3670 auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
3671 if (Buf.getError())
3672 return;
3673
3674 llvm::LLVMContext C;
3675 auto ME = expectedToErrorOrAndEmitErrors(
3676 C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
3677
3678 if (ME.getError())
3679 return;
3680
3681 llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
3682 if (!MD)
3683 return;
3684
3685 for (auto I : MD->operands()) {
3686 llvm::MDNode *MN = cast<llvm::MDNode>(I);
3687
3688 auto getMDInt = [&](unsigned Idx) {
3689 llvm::ConstantAsMetadata *V =
3690 cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
3691 return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
3692 };
3693
3694 auto getMDString = [&](unsigned Idx) {
3695 llvm::MDString *V = cast<llvm::MDString>(MN->getOperand(Idx));
3696 return V->getString();
3697 };
3698
3699 switch (getMDInt(0)) {
3700 default:
3701 llvm_unreachable("Unexpected metadata!");
3702 break;
3703 case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
3704 OFFLOAD_ENTRY_INFO_TARGET_REGION:
3705 OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
3706 /*DeviceID=*/getMDInt(1), /*FileID=*/getMDInt(2),
3707 /*ParentName=*/getMDString(3), /*Line=*/getMDInt(4),
3708 /*Order=*/getMDInt(5));
3709 break;
3710 }
3711 }
3712}
3713
3714void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
3715 if (!KmpRoutineEntryPtrTy) {
3716 // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
3717 auto &C = CGM.getContext();
3718 QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
3719 FunctionProtoType::ExtProtoInfo EPI;
3720 KmpRoutineEntryPtrQTy = C.getPointerType(
3721 C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
3722 KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
3723 }
3724}
3725
3726static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
3727 QualType FieldTy) {
3728 auto *Field = FieldDecl::Create(
3729 C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
3730 C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
3731 /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
3732 Field->setAccess(AS_public);
3733 DC->addDecl(Field);
3734 return Field;
3735}
3736
3737QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
3738
3739 // Make sure the type of the entry is already created. This is the type we
3740 // have to create:
3741 // struct __tgt_offload_entry{
3742 // void *addr; // Pointer to the offload entry info.
3743 // // (function or global)
3744 // char *name; // Name of the function or global.
3745 // size_t size; // Size of the entry info (0 if it is a function).
3746 // int32_t flags; // Flags associated with the entry, e.g. 'link'.
3747 // int32_t reserved; // Reserved, to use by the runtime library.
3748 // };
3749 if (TgtOffloadEntryQTy.isNull()) {
[10] Taking true branch
3750 ASTContext &C = CGM.getContext();
3751 auto *RD = C.buildImplicitRecord("__tgt_offload_entry");
3752 RD->startDefinition();
3753 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3754 addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
3755 addFieldToRecordDecl(C, RD, C.getSizeType());
3756 addFieldToRecordDecl(
3757 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3758 addFieldToRecordDecl(
3759 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3760 RD->completeDefinition();
3761 RD->addAttr(PackedAttr::CreateImplicit(C));
[11] Calling 'PackedAttr::CreateImplicit'
3762 TgtOffloadEntryQTy = C.getRecordType(RD);
3763 }
3764 return TgtOffloadEntryQTy;
3765}
3766
3767QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
3768 // These are the types we need to build:
3769 // struct __tgt_device_image{
3770 // void *ImageStart; // Pointer to the target code start.
3771 // void *ImageEnd; // Pointer to the target code end.
3772 // // We also add the host entries to the device image, as it may be useful
3773 // // for the target runtime to have access to that information.
3774 // __tgt_offload_entry *EntriesBegin; // Begin of the table with all
3775 // // the entries.
3776 // __tgt_offload_entry *EntriesEnd; // End of the table with all the
3777 // // entries (non inclusive).
3778 // };
3779 if (TgtDeviceImageQTy.isNull()) {
3780 ASTContext &C = CGM.getContext();
3781 auto *RD = C.buildImplicitRecord("__tgt_device_image");
3782 RD->startDefinition();
3783 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3784 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3785 addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
3786 addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
3787 RD->completeDefinition();
3788 TgtDeviceImageQTy = C.getRecordType(RD);
3789 }
3790 return TgtDeviceImageQTy;
3791}
3792
3793QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
3794 // struct __tgt_bin_desc{
3795 // int32_t NumDevices; // Number of devices supported.
3796 // __tgt_device_image *DeviceImages; // Arrays of device images
3797 // // (one per device).
3798 // __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
3799 // // entries.
3800 // __tgt_offload_entry *EntriesEnd; // End of the table with all the
3801 // // entries (non inclusive).
3802 // };
3803 if (TgtBinaryDescriptorQTy.isNull()) {
[8] Taking true branch
3804 ASTContext &C = CGM.getContext();
3805 auto *RD = C.buildImplicitRecord("__tgt_bin_desc");
3806 RD->startDefinition();
3807 addFieldToRecordDecl(
3808 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3809 addFieldToRecordDecl(C, RD, C.getPointerType(getTgtDeviceImageQTy()));
3810 addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
[9] Calling 'CGOpenMPRuntime::getTgtOffloadEntryQTy'
3811 addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
3812 RD->completeDefinition();
3813 TgtBinaryDescriptorQTy = C.getRecordType(RD);
3814 }
3815 return TgtBinaryDescriptorQTy;
3816}
3817
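Taken together, getTgtOffloadEntryQTy, getTgtDeviceImageQTy, and getTgtBinaryDescriptorQTy model the host-side descriptor layout summarized below. This C++ sketch is only a restatement of the comments in those three builders, not a runtime header; the packed attribute mirrors the PackedAttr added to __tgt_offload_entry.

// Orientation sketch only; field names follow the record builders above.
#include <cstddef>
#include <cstdint>

struct __tgt_offload_entry {
  void *addr;       // Pointer to the offload entry info (function or global).
  char *name;       // Name of the function or global.
  size_t size;      // Size of the entry info (0 if it is a function).
  int32_t flags;    // Flags associated with the entry, e.g. 'link'.
  int32_t reserved; // Reserved, to be used by the runtime library.
} __attribute__((packed));

struct __tgt_device_image {
  void *ImageStart;                  // Pointer to the target code start.
  void *ImageEnd;                    // Pointer to the target code end.
  __tgt_offload_entry *EntriesBegin; // Begin of the host entry table.
  __tgt_offload_entry *EntriesEnd;   // End of the table (non-inclusive).
};

struct __tgt_bin_desc {
  int32_t NumDevices;                // Number of devices supported.
  __tgt_device_image *DeviceImages;  // Array of device images (one per device).
  __tgt_offload_entry *EntriesBegin; // Begin of the host entry table.
  __tgt_offload_entry *EntriesEnd;   // End of the table (non-inclusive).
};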
3818namespace {
3819struct PrivateHelpersTy {
3820 PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
3821 const VarDecl *PrivateElemInit)
3822 : Original(Original), PrivateCopy(PrivateCopy),
3823 PrivateElemInit(PrivateElemInit) {}
3824 const VarDecl *Original;
3825 const VarDecl *PrivateCopy;
3826 const VarDecl *PrivateElemInit;
3827};
3828typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
3829} // anonymous namespace
3830
3831static RecordDecl *
3832createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
3833 if (!Privates.empty()) {
3834 auto &C = CGM.getContext();
3835 // Build struct .kmp_privates_t. {
3836 // /* private vars */
3837 // };
3838 auto *RD = C.buildImplicitRecord(".kmp_privates.t");
3839 RD->startDefinition();
3840 for (auto &&Pair : Privates) {
3841 auto *VD = Pair.second.Original;
3842 auto Type = VD->getType();
3843 Type = Type.getNonReferenceType();
3844 auto *FD = addFieldToRecordDecl(C, RD, Type);
3845 if (VD->hasAttrs()) {
3846 for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
3847 E(VD->getAttrs().end());
3848 I != E; ++I)
3849 FD->addAttr(*I);
3850 }
3851 }
3852 RD->completeDefinition();
3853 return RD;
3854 }
3855 return nullptr;
3856}
3857
3858static RecordDecl *
3859createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
3860 QualType KmpInt32Ty,
3861 QualType KmpRoutineEntryPointerQTy) {
3862 auto &C = CGM.getContext();
3863 // Build struct kmp_task_t {
3864 // void * shareds;
3865 // kmp_routine_entry_t routine;
3866 // kmp_int32 part_id;
3867 // kmp_cmplrdata_t data1;
3868 // kmp_cmplrdata_t data2;
3869 // For taskloops additional fields:
3870 // kmp_uint64 lb;
3871 // kmp_uint64 ub;
3872 // kmp_int64 st;
3873 // kmp_int32 liter;
3874 // void * reductions;
3875 // };
3876 auto *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
3877 UD->startDefinition();
3878 addFieldToRecordDecl(C, UD, KmpInt32Ty);
3879 addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
3880 UD->completeDefinition();
3881 QualType KmpCmplrdataTy = C.getRecordType(UD);
3882 auto *RD = C.buildImplicitRecord("kmp_task_t");
3883 RD->startDefinition();
3884 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3885 addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
3886 addFieldToRecordDecl(C, RD, KmpInt32Ty);
3887 addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
3888 addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
3889 if (isOpenMPTaskLoopDirective(Kind)) {
3890 QualType KmpUInt64Ty =
3891 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
3892 QualType KmpInt64Ty =
3893 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
3894 addFieldToRecordDecl(C, RD, KmpUInt64Ty);
3895 addFieldToRecordDecl(C, RD, KmpUInt64Ty);
3896 addFieldToRecordDecl(C, RD, KmpInt64Ty);
3897 addFieldToRecordDecl(C, RD, KmpInt32Ty);
3898 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3899 }
3900 RD->completeDefinition();
3901 return RD;
3902}
3903
3904static RecordDecl *
3905createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
3906 ArrayRef<PrivateDataTy> Privates) {
3907 auto &C = CGM.getContext();
3908 // Build struct kmp_task_t_with_privates {
3909 // kmp_task_t task_data;
3910 // .kmp_privates_t. privates;
3911 // };
3912 auto *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
3913 RD->startDefinition();
3914 addFieldToRecordDecl(C, RD, KmpTaskTQTy);
3915 if (auto *PrivateRD = createPrivatesRecordDecl(CGM, Privates)) {
3916 addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
3917 }
3918 RD->completeDefinition();
3919 return RD;
3920}
3921
3922/// \brief Emit a proxy function which accepts kmp_task_t as the second
3923/// argument.
3924/// \code
3925/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
3926/// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
3927/// For taskloops:
3928/// tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3929/// tt->reductions, tt->shareds);
3930/// return 0;
3931/// }
3932/// \endcode
3933static llvm::Value *
3934emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
3935 OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
3936 QualType KmpTaskTWithPrivatesPtrQTy,
3937 QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
3938 QualType SharedsPtrTy, llvm::Value *TaskFunction,
3939 llvm::Value *TaskPrivatesMap) {
3940 auto &C = CGM.getContext();
3941 FunctionArgList Args;
3942 ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
3943 ImplicitParamDecl::Other);
3944 ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3945 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
3946 ImplicitParamDecl::Other);
3947 Args.push_back(&GtidArg);
3948 Args.push_back(&TaskTypeArg);
3949 auto &TaskEntryFnInfo =
3950 CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
3951 auto *TaskEntryTy = CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
3952 auto *TaskEntry =
3953 llvm::Function::Create(TaskEntryTy, llvm::GlobalValue::InternalLinkage,
3954 ".omp_task_entry.", &CGM.getModule());
3955 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskEntry, TaskEntryFnInfo);
3956 CodeGenFunction CGF(CGM);
3957 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
3958 Loc, Loc);
3959
3960 // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
3961 // tt,
3962 // For taskloops:
3963 // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3964 // tt->task_data.shareds);
3965 auto *GtidParam = CGF.EmitLoadOfScalar(
3966 CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
3967 LValue TDBase = CGF.EmitLoadOfPointerLValue(
3968 CGF.GetAddrOfLocalVar(&TaskTypeArg),
3969 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3970 auto *KmpTaskTWithPrivatesQTyRD =
3971 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
3972 LValue Base =
3973 CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
3974 auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
3975 auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
3976 auto PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
3977 auto *PartidParam = PartIdLVal.getPointer();
3978
3979 auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
3980 auto SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
3981 auto *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3982 CGF.EmitLoadOfScalar(SharedsLVal, Loc),
3983 CGF.ConvertTypeForMem(SharedsPtrTy));
3984
3985 auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
3986 llvm::Value *PrivatesParam;
3987 if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
3988 auto PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
3989 PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3990 PrivatesLVal.getPointer(), CGF.VoidPtrTy);
3991 } else
3992 PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
3993
3994 llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
3995 TaskPrivatesMap,
3996 CGF.Builder
3997 .CreatePointerBitCastOrAddrSpaceCast(
3998 TDBase.getAddress(), CGF.VoidPtrTy)
3999 .getPointer()};
4000 SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
4001 std::end(CommonArgs));
4002 if (isOpenMPTaskLoopDirective(Kind)) {
4003 auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
4004 auto LBLVal = CGF.EmitLValueForField(Base, *LBFI);
4005 auto *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
4006 auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
4007 auto UBLVal = CGF.EmitLValueForField(Base, *UBFI);
4008 auto *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
4009 auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
4010 auto StLVal = CGF.EmitLValueForField(Base, *StFI);
4011 auto *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
4012 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4013 auto LILVal = CGF.EmitLValueForField(Base, *LIFI);
4014 auto *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
4015 auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
4016 auto RLVal = CGF.EmitLValueForField(Base, *RFI);
4017 auto *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
4018 CallArgs.push_back(LBParam);
4019 CallArgs.push_back(UBParam);
4020 CallArgs.push_back(StParam);
4021 CallArgs.push_back(LIParam);
4022 CallArgs.push_back(RParam);
4023 }
4024 CallArgs.push_back(SharedsParam);
4025
4026 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
4027 CallArgs);
4028 CGF.EmitStoreThroughLValue(
4029 RValue::get(CGF.Builder.getInt32(/*C=*/0)),
4030 CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
4031 CGF.FinishFunction();
4032 return TaskEntry;
4033}
4034
4035static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
4036 SourceLocation Loc,
4037 QualType KmpInt32Ty,
4038 QualType KmpTaskTWithPrivatesPtrQTy,
4039 QualType KmpTaskTWithPrivatesQTy) {
4040 auto &C = CGM.getContext();
4041 FunctionArgList Args;
4042 ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
4043 ImplicitParamDecl::Other);
4044 ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4045 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
4046 ImplicitParamDecl::Other);
4047 Args.push_back(&GtidArg);
4048 Args.push_back(&TaskTypeArg);
4049 auto &DestructorFnInfo =
4050 CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
4051 auto *DestructorFnTy = CGM.getTypes().GetFunctionType(DestructorFnInfo);
4052 auto *DestructorFn =
4053 llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
4054 ".omp_task_destructor.", &CGM.getModule());
4055 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, DestructorFn,
4056 DestructorFnInfo);
4057 CodeGenFunction CGF(CGM);
4058 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
4059 Args, Loc, Loc);
4060
4061 LValue Base = CGF.EmitLoadOfPointerLValue(
4062 CGF.GetAddrOfLocalVar(&TaskTypeArg),
4063 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4064 auto *KmpTaskTWithPrivatesQTyRD =
4065 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
4066 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4067 Base = CGF.EmitLValueForField(Base, *FI);
4068 for (auto *Field :
4069 cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
4070 if (auto DtorKind = Field->getType().isDestructedType()) {
4071 auto FieldLValue = CGF.EmitLValueForField(Base, Field);
4072 CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
4073 }
4074 }
4075 CGF.FinishFunction();
4076 return DestructorFn;
4077}
4078
4079/// \brief Emit a privates mapping function for correct handling of private and
4080/// firstprivate variables.
4081/// \code
4082/// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
4083/// **noalias priv1,..., <tyn> **noalias privn) {
4084/// *priv1 = &.privates.priv1;
4085/// ...;
4086/// *privn = &.privates.privn;
4087/// }
4088/// \endcode
4089static llvm::Value *
4090emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
4091 ArrayRef<const Expr *> PrivateVars,
4092 ArrayRef<const Expr *> FirstprivateVars,
4093 ArrayRef<const Expr *> LastprivateVars,
4094 QualType PrivatesQTy,
4095 ArrayRef<PrivateDataTy> Privates) {
4096 auto &C = CGM.getContext();
4097 FunctionArgList Args;
4098 ImplicitParamDecl TaskPrivatesArg(
4099 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4100 C.getPointerType(PrivatesQTy).withConst().withRestrict(),
4101 ImplicitParamDecl::Other);
4102 Args.push_back(&TaskPrivatesArg);
4103 llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos;
4104 unsigned Counter = 1;
4105 for (auto *E: PrivateVars) {
4106 Args.push_back(ImplicitParamDecl::Create(
4107 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4108 C.getPointerType(C.getPointerType(E->getType()))
4109 .withConst()
4110 .withRestrict(),
4111 ImplicitParamDecl::Other));
4112 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4113 PrivateVarsPos[VD] = Counter;
4114 ++Counter;
4115 }
4116 for (auto *E : FirstprivateVars) {
4117 Args.push_back(ImplicitParamDecl::Create(
4118 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4119 C.getPointerType(C.getPointerType(E->getType()))
4120 .withConst()
4121 .withRestrict(),
4122 ImplicitParamDecl::Other));
4123 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4124 PrivateVarsPos[VD] = Counter;
4125 ++Counter;
4126 }
4127 for (auto *E: LastprivateVars) {
4128 Args.push_back(ImplicitParamDecl::Create(
4129 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4130 C.getPointerType(C.getPointerType(E->getType()))
4131 .withConst()
4132 .withRestrict(),
4133 ImplicitParamDecl::Other));
4134 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4135 PrivateVarsPos[VD] = Counter;
4136 ++Counter;
4137 }
4138 auto &TaskPrivatesMapFnInfo =
4139 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4140 auto *TaskPrivatesMapTy =
4141 CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
4142 auto *TaskPrivatesMap = llvm::Function::Create(
4143 TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage,
4144 ".omp_task_privates_map.", &CGM.getModule());
4145 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskPrivatesMap,
4146 TaskPrivatesMapFnInfo);
4147 TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
4148 TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
4149 TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
4150 CodeGenFunction CGF(CGM);
4151 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
4152 TaskPrivatesMapFnInfo, Args, Loc, Loc);
4153
4154 // *privi = &.privates.privi;
4155 LValue Base = CGF.EmitLoadOfPointerLValue(
4156 CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
4157 TaskPrivatesArg.getType()->castAs<PointerType>());
4158 auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
4159 Counter = 0;
4160 for (auto *Field : PrivatesQTyRD->fields()) {
4161 auto FieldLVal = CGF.EmitLValueForField(Base, Field);
4162 auto *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
4163 auto RefLVal = CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
4164 auto RefLoadLVal = CGF.EmitLoadOfPointerLValue(
4165 RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
4166 CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal);
4167 ++Counter;
4168 }
4169 CGF.FinishFunction();
4170 return TaskPrivatesMap;
4171}
4172
4173static bool stable_sort_comparator(const PrivateDataTy P1,
4174 const PrivateDataTy P2) {
4175 return P1.first > P2.first;
4176}
4177
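emitTaskInit below feeds this comparator to std::stable_sort, so task privates end up ordered by decreasing alignment while entries with equal alignment keep their original order. A minimal illustration, with plain integers standing in for the CharUnits alignments:

// Minimal illustration only (integers stand in for CharUnits alignments).
#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<int, char>> Privates = {
      {4, 'a'}, {16, 'b'}, {8, 'c'}, {16, 'd'}};
  std::stable_sort(Privates.begin(), Privates.end(),
                   [](const std::pair<int, char> &P1,
                      const std::pair<int, char> &P2) {
                     return P1.first > P2.first; // same test as above
                   });
  // Highest alignment first; 'b' stays ahead of 'd' because the sort is stable.
  assert(Privates[0].second == 'b' && Privates[1].second == 'd' &&
         Privates[2].second == 'c' && Privates[3].second == 'a');
  return 0;
}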
4178/// Emit initialization for private variables in task-based directives.
4179static void emitPrivatesInit(CodeGenFunction &CGF,
4180 const OMPExecutableDirective &D,
4181 Address KmpTaskSharedsPtr, LValue TDBase,
4182 const RecordDecl *KmpTaskTWithPrivatesQTyRD,
4183 QualType SharedsTy, QualType SharedsPtrTy,
4184 const OMPTaskDataTy &Data,
4185 ArrayRef<PrivateDataTy> Privates, bool ForDup) {
4186 auto &C = CGF.getContext();
4187 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4188 LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
4189 OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
4190 ? OMPD_taskloop
4191 : OMPD_task;
4192 const CapturedStmt &CS = *D.getCapturedStmt(Kind);
4193 CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
4194 LValue SrcBase;
4195 bool IsTargetTask =
4196 isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
4197 isOpenMPTargetExecutionDirective(D.getDirectiveKind());
4198 // For target-based directives skip 3 firstprivate arrays BasePointersArray,
4199 // PointersArray and SizesArray. The original variables for these arrays are
4200 // not captured and we get their addresses explicitly.
4201 if ((!IsTargetTask && !Data.FirstprivateVars.empty()) ||
4202 (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
4203 SrcBase = CGF.MakeAddrLValue(
4204 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4205 KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
4206 SharedsTy);
4207 }
4208 FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
4209 for (auto &&Pair : Privates) {
4210 auto *VD = Pair.second.PrivateCopy;
4211 auto *Init = VD->getAnyInitializer();
4212 if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
4213 !CGF.isTrivialInitializer(Init)))) {
4214 LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
4215 if (auto *Elem = Pair.second.PrivateElemInit) {
4216 auto *OriginalVD = Pair.second.Original;
4217 // Check if the variable is the target-based BasePointersArray,
4218 // PointersArray or SizesArray.
4219 LValue SharedRefLValue;
4220 QualType Type = OriginalVD->getType();
4221 auto *SharedField = CapturesInfo.lookup(OriginalVD);
4222 if (IsTargetTask && !SharedField) {
4223 assert(isa<ImplicitParamDecl>(OriginalVD) &&
4224 isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
4225 cast<CapturedDecl>(OriginalVD->getDeclContext())
4226 ->getNumParams() == 0 &&
4227 isa<TranslationUnitDecl>(
4228 cast<CapturedDecl>(OriginalVD->getDeclContext())
4229 ->getDeclContext()) &&
4230 "Expected artificial target data variable.");
4231 SharedRefLValue =
4232 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
4233 } else {
4234 SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
4235 SharedRefLValue = CGF.MakeAddrLValue(
4236 Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
4237 SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
4238 SharedRefLValue.getTBAAInfo());
4239 }
4240 if (Type->isArrayType()) {
4241 // Initialize firstprivate array.
4242 if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
4243 // Perform simple memcpy.
4244 CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
4245 } else {
4246 // Initialize firstprivate array using element-by-element
4247 // initialization.
4248 CGF.EmitOMPAggregateAssign(
4249 PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
4250 [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
4251 Address SrcElement) {
4252 // Clean up any temporaries needed by the initialization.
4253 CodeGenFunction::OMPPrivateScope InitScope(CGF);
4254 InitScope.addPrivate(
4255 Elem, [SrcElement]() -> Address { return SrcElement; });
4256 (void)InitScope.Privatize();
4257 // Emit initialization for single element.
4258 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
4259 CGF, &CapturesInfo);
4260 CGF.EmitAnyExprToMem(Init, DestElement,
4261 Init->getType().getQualifiers(),
4262 /*IsInitializer=*/false);
4263 });
4264 }
4265 } else {
4266 CodeGenFunction::OMPPrivateScope InitScope(CGF);
4267 InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
4268 return SharedRefLValue.getAddress();
4269 });
4270 (void)InitScope.Privatize();
4271 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
4272 CGF.EmitExprAsInit(Init, VD, PrivateLValue,
4273 /*capturedByInit=*/false);
4274 }
4275 } else
4276 CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
4277 }
4278 ++FI;
4279 }
4280}
4281
4282/// Check if duplication function is required for taskloops.
4283static bool checkInitIsRequired(CodeGenFunction &CGF,
4284 ArrayRef<PrivateDataTy> Privates) {
4285 bool InitRequired = false;
4286 for (auto &&Pair : Privates) {
4287 auto *VD = Pair.second.PrivateCopy;
4288 auto *Init = VD->getAnyInitializer();
4289 InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
4290 !CGF.isTrivialInitializer(Init));
4291 }
4292 return InitRequired;
4293}
4294
4295
4296/// Emit task_dup function (for initialization of
4297/// private/firstprivate/lastprivate vars and last_iter flag)
4298/// \code
4299/// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
4300/// lastpriv) {
4301/// // setup lastprivate flag
4302/// task_dst->last = lastpriv;
4303/// // could be constructor calls here...
4304/// }
4305/// \endcode
4306static llvm::Value *
4307emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
4308 const OMPExecutableDirective &D,
4309 QualType KmpTaskTWithPrivatesPtrQTy,
4310 const RecordDecl *KmpTaskTWithPrivatesQTyRD,
4311 const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
4312 QualType SharedsPtrTy, const OMPTaskDataTy &Data,
4313 ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
4314 auto &C = CGM.getContext();
4315 FunctionArgList Args;
4316 ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4317 KmpTaskTWithPrivatesPtrQTy,
4318 ImplicitParamDecl::Other);
4319 ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4320 KmpTaskTWithPrivatesPtrQTy,
4321 ImplicitParamDecl::Other);
4322 ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
4323 ImplicitParamDecl::Other);
4324 Args.push_back(&DstArg);
4325 Args.push_back(&SrcArg);
4326 Args.push_back(&LastprivArg);
4327 auto &TaskDupFnInfo =
4328 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4329 auto *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
4330 auto *TaskDup =
4331 llvm::Function::Create(TaskDupTy, llvm::GlobalValue::InternalLinkage,
4332 ".omp_task_dup.", &CGM.getModule());
4333 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskDup, TaskDupFnInfo);
4334 CodeGenFunction CGF(CGM);
4335 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
4336 Loc);
4337
4338 LValue TDBase = CGF.EmitLoadOfPointerLValue(
4339 CGF.GetAddrOfLocalVar(&DstArg),
4340 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4341 // task_dst->liter = lastpriv;
4342 if (WithLastIter) {
4343 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4344 LValue Base = CGF.EmitLValueForField(
4345 TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4346 LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
4347 llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
4348 CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
4349 CGF.EmitStoreOfScalar(Lastpriv, LILVal);
4350 }
4351
4352 // Emit initial values for private copies (if any).
4353 assert(!Privates.empty());
4354 Address KmpTaskSharedsPtr = Address::invalid();
4355 if (!Data.FirstprivateVars.empty()) {
4356 LValue TDBase = CGF.EmitLoadOfPointerLValue(
4357 CGF.GetAddrOfLocalVar(&SrcArg),
4358 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4359 LValue Base = CGF.EmitLValueForField(
4360 TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4361 KmpTaskSharedsPtr = Address(
4362 CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
4363 Base, *std::next(KmpTaskTQTyRD->field_begin(),
4364 KmpTaskTShareds)),
4365 Loc),
4366 CGF.getNaturalTypeAlignment(SharedsTy));
4367 }
4368 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
4369 SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
4370 CGF.FinishFunction();
4371 return TaskDup;
4372}
4373
4374/// Checks if destructor function is required to be generated.
4375/// \return true if cleanups are required, false otherwise.
4376static bool
4377checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
4378 bool NeedsCleanup = false;
4379 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4380 auto *PrivateRD = cast<RecordDecl>(FI->getType()->getAsTagDecl());
4381 for (auto *FD : PrivateRD->fields()) {
4382 NeedsCleanup = NeedsCleanup || FD->getType().isDestructedType();
4383 if (NeedsCleanup)
4384 break;
4385 }
4386 return NeedsCleanup;
4387}
4388
4389CGOpenMPRuntime::TaskResultTy
4390CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
4391 const OMPExecutableDirective &D,
4392 llvm::Value *TaskFunction, QualType SharedsTy,
4393 Address Shareds, const OMPTaskDataTy &Data) {
4394 auto &C = CGM.getContext();
4395 llvm::SmallVector<PrivateDataTy, 4> Privates;
4396 // Aggregate privates and sort them by the alignment.
4397 auto I = Data.PrivateCopies.begin();
4398 for (auto *E : Data.PrivateVars) {
4399 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4400 Privates.push_back(std::make_pair(
4401 C.getDeclAlign(VD),
4402 PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4403 /*PrivateElemInit=*/nullptr)));
4404 ++I;
4405 }
4406 I = Data.FirstprivateCopies.begin();
4407 auto IElemInitRef = Data.FirstprivateInits.begin();
4408 for (auto *E : Data.FirstprivateVars) {
4409 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4410 Privates.push_back(std::make_pair(
4411 C.getDeclAlign(VD),
4412 PrivateHelpersTy(
4413 VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4414 cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl()))));
4415 ++I;
4416 ++IElemInitRef;
4417 }
4418 I = Data.LastprivateCopies.begin();
4419 for (auto *E : Data.LastprivateVars) {
4420 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4421 Privates.push_back(std::make_pair(
4422 C.getDeclAlign(VD),
4423 PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4424 /*PrivateElemInit=*/nullptr)));
4425 ++I;
4426 }
4427 std::stable_sort(Privates.begin(), Privates.end(), stable_sort_comparator);
4428 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
4429 // Build type kmp_routine_entry_t (if not built yet).
4430 emitKmpRoutineEntryT(KmpInt32Ty);
4431 // Build type kmp_task_t (if not built yet).
4432 if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
4433 if (SavedKmpTaskloopTQTy.isNull()) {
4434 SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4435 CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4436 }
4437 KmpTaskTQTy = SavedKmpTaskloopTQTy;
4438 } else {
4439 assert((D.getDirectiveKind() == OMPD_task ||
4440 isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
4441 isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
4442 "Expected taskloop, task or target directive");
4443 if (SavedKmpTaskTQTy.isNull()) {
4444 SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4445 CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4446 }
4447 KmpTaskTQTy = SavedKmpTaskTQTy;
4448 }
4449 auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
4450 // Build particular struct kmp_task_t for the given task.
4451 auto *KmpTaskTWithPrivatesQTyRD =
4452 createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
4453 auto KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
4454 QualType KmpTaskTWithPrivatesPtrQTy =
4455 C.getPointerType(KmpTaskTWithPrivatesQTy);
4456 auto *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
4457 auto *KmpTaskTWithPrivatesPtrTy = KmpTaskTWithPrivatesTy->getPointerTo();
4458 auto *KmpTaskTWithPrivatesTySize = CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
4459 QualType SharedsPtrTy = C.getPointerType(SharedsTy);
4460
4461 // Emit initial values for private copies (if any).
4462 llvm::Value *TaskPrivatesMap = nullptr;
4463 auto *TaskPrivatesMapTy =
4464 std::next(cast<llvm::Function>(TaskFunction)->arg_begin(), 3)->getType();
4465 if (!Privates.empty()) {
4466 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4467 TaskPrivatesMap = emitTaskPrivateMappingFunction(
4468 CGM, Loc, Data.PrivateVars, Data.FirstprivateVars, Data.LastprivateVars,
4469 FI->getType(), Privates);
4470 TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4471 TaskPrivatesMap, TaskPrivatesMapTy);
4472 } else {
4473 TaskPrivatesMap = llvm::ConstantPointerNull::get(
4474 cast<llvm::PointerType>(TaskPrivatesMapTy));
4475 }
4476 // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
4477 // kmp_task_t *tt);
4478 auto *TaskEntry = emitProxyTaskFunction(
4479 CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4480 KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
4481 TaskPrivatesMap);
4482
4483 // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
4484 // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
4485 // kmp_routine_entry_t *task_entry);
4486 // Task flags. Format is taken from
4487 // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h,
4488 // description of kmp_tasking_flags struct.
4489 enum {
4490 TiedFlag = 0x1,
4491 FinalFlag = 0x2,
4492 DestructorsFlag = 0x8,
4493 PriorityFlag = 0x20
4494 };
4495 unsigned Flags = Data.Tied ? TiedFlag : 0;
4496 bool NeedsCleanup = false;
4497 if (!Privates.empty()) {
4498 NeedsCleanup = checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD);
4499 if (NeedsCleanup)
4500 Flags = Flags | DestructorsFlag;
4501 }
4502 if (Data.Priority.getInt())
4503 Flags = Flags | PriorityFlag;
4504 auto *TaskFlags =
4505 Data.Final.getPointer()
4506 ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
4507 CGF.Builder.getInt32(FinalFlag),
4508 CGF.Builder.getInt32(/*C=*/0))
4509 : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
4510 TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
4511 auto *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
4512 llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
4513 getThreadID(CGF, Loc), TaskFlags,
4514 KmpTaskTWithPrivatesTySize, SharedsSize,
4515 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4516 TaskEntry, KmpRoutineEntryPtrTy)};
4517 auto *NewTask = CGF.EmitRuntimeCall(
4518 createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
4519 auto *NewTaskNewTaskTTy = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4520 NewTask, KmpTaskTWithPrivatesPtrTy);
4521 LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
4522 KmpTaskTWithPrivatesQTy);
4523 LValue TDBase =
4524 CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
4525 // Fill the data in the resulting kmp_task_t record.
4526 // Copy shareds if there are any.
4527 Address KmpTaskSharedsPtr = Address::invalid();
4528 if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
4529 KmpTaskSharedsPtr =
4530 Address(CGF.EmitLoadOfScalar(
4531 CGF.EmitLValueForField(
4532 TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
4533 KmpTaskTShareds)),
4534 Loc),
4535 CGF.getNaturalTypeAlignment(SharedsTy));
4536 LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
4537 LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
4538 CGF.EmitAggregateCopy(Dest, Src, SharedsTy);
4539 }
4540 // Emit initial values for private copies (if any).
4541 TaskResultTy Result;
4542 if (!Privates.empty()) {
4543 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
4544 SharedsTy, SharedsPtrTy, Data, Privates,
4545 /*ForDup=*/false);
4546 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
4547 (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
4548 Result.TaskDupFn = emitTaskDupFunction(
4549 CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
4550 KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
4551 /*WithLastIter=*/!Data.LastprivateVars.empty());
4552 }
4553 }
4554 // Fields of union "kmp_cmplrdata_t" for destructors and priority.
4555 enum { Priority = 0, Destructors = 1 };
4556 // Provide pointer to function with destructors for privates.
4557 auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
4558 auto *KmpCmplrdataUD = (*FI)->getType()->getAsUnionType()->getDecl();
4559 if (NeedsCleanup) {
4560 llvm::Value *DestructorFn = emitDestructorsFunction(
4561 CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4562 KmpTaskTWithPrivatesQTy);
4563 LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
4564 LValue DestructorsLV = CGF.EmitLValueForField(
4565 Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
4566 CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4567 DestructorFn, KmpRoutineEntryPtrTy),
4568 DestructorsLV);
4569 }
4570 // Set priority.
4571 if (Data.Priority.getInt()) {
4572 LValue Data2LV = CGF.EmitLValueForField(
4573 TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
4574 LValue PriorityLV = CGF.EmitLValueForField(
4575 Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
4576 CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
4577 }
4578 Result.NewTask = NewTask;
4579 Result.TaskEntry = TaskEntry;
4580 Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
4581 Result.TDBase = TDBase;
4582 Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
4583 return Result;
4584}
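// A hedged sketch (illustrative names only) of the task-allocation sequence the
// code above emits, written as runtime-level pseudo-code:
//
//   kmp_int32 flags = (tied ? 0x1 : 0) | (final ? 0x2 : 0) |
//                     (needs_destructors ? 0x8 : 0) | (has_priority ? 0x20 : 0);
//   kmp_task_t *new_task = __kmpc_omp_task_alloc(
//       &loc, gtid, flags, sizeof(kmp_task_t_with_privates), sizeof(shareds),
//       (kmp_routine_entry_t *).omp_task_entry.);
//   // copy shareds into new_task, then fill destructors/priority if required:
//   //   data1.destructors = .omp_task_destructor.;  data2.priority = prio;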
4585
4586void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
4587 const OMPExecutableDirective &D,
4588 llvm::Value *TaskFunction,
4589 QualType SharedsTy, Address Shareds,
4590 const Expr *IfCond,
4591 const OMPTaskDataTy &Data) {
4592 if (!CGF.HaveInsertPoint())
4593 return;
4594
4595 TaskResultTy Result =
4596 emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
4597 llvm::Value *NewTask = Result.NewTask;
4598 llvm::Value *TaskEntry = Result.TaskEntry;
4599 llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
4600 LValue TDBase = Result.TDBase;
4601 RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
4602 auto &C = CGM.getContext();
4603 // Process list of dependences.
4604 Address DependenciesArray = Address::invalid();
4605 unsigned NumDependencies = Data.Dependences.size();
4606 if (NumDependencies) {
4607 // Dependence kind for RTL.
4608 enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3 };
4609 enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
4610 RecordDecl *KmpDependInfoRD;
4611 QualType FlagsTy =
4612 C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
4613 llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
4614 if (KmpDependInfoTy.isNull()) {
4615 KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
4616 KmpDependInfoRD->startDefinition();
4617 addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
4618 addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
4619 addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
4620 KmpDependInfoRD->completeDefinition();
4621 KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
4622 } else
4623 KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4624 CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
4625 // Define type kmp_depend_info[<Dependences.size()>];
4626 QualType KmpDependInfoArrayTy = C.getConstantArrayType(
4627 KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
4628 ArrayType::Normal, /*IndexTypeQuals=*/0);
4629 // kmp_depend_info[<Dependences.size()>] deps;
4630 DependenciesArray =
4631 CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
4632 for (unsigned i = 0; i < NumDependencies; ++i) {
4633 const Expr *E = Data.Dependences[i].second;
4634 auto Addr = CGF.EmitLValue(E);
4635 llvm::Value *Size;
4636 QualType Ty = E->getType();
4637 if (auto *ASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
4638 LValue UpAddrLVal =
4639 CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
4640 llvm::Value *UpAddr =
4641 CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
4642 llvm::Value *LowIntPtr =
4643 CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
4644 llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
4645 Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
4646 } else
4647 Size = CGF.getTypeSize(Ty);
4648 auto Base = CGF.MakeAddrLValue(
4649 CGF.Builder.CreateConstArrayGEP(DependenciesArray, i, DependencySize),
4650 KmpDependInfoTy);
4651 // deps[i].base_addr = &<Dependences[i].second>;
4652 auto BaseAddrLVal = CGF.EmitLValueForField(
4653 Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
4654 CGF.EmitStoreOfScalar(
4655 CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
4656 BaseAddrLVal);
4657 // deps[i].len = sizeof(<Dependences[i].second>);
4658 auto LenLVal = CGF.EmitLValueForField(
4659 Base, *std::next(KmpDependInfoRD->field_begin(), Len));
4660 CGF.EmitStoreOfScalar(Size, LenLVal);
4661 // deps[i].flags = <Dependences[i].first>;
4662 RTLDependenceKindTy DepKind;
4663 switch (Data.Dependences[i].first) {
4664 case OMPC_DEPEND_in:
4665 DepKind = DepIn;
4666 break;
4667 // Out and InOut dependencies must use the same code.
4668 case OMPC_DEPEND_out:
4669 case OMPC_DEPEND_inout:
4670 DepKind = DepInOut;
4671 break;
4672 case OMPC_DEPEND_source:
4673 case OMPC_DEPEND_sink:
4674 case OMPC_DEPEND_unknown:
4675        llvm_unreachable("Unknown task dependence type");
4676 }
4677 auto FlagsLVal = CGF.EmitLValueForField(
4678 Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
4679 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
4680 FlagsLVal);
4681 }
4682 DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4683 CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
4684 CGF.VoidPtrTy);
4685 }
4686
4687  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
4688 // libcall.
4689 // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
4690 // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
4691  // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if the
4692  // dependence list is not empty.
4693 auto *ThreadID = getThreadID(CGF, Loc);
4694 auto *UpLoc = emitUpdateLocation(CGF, Loc);
4695 llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
4696 llvm::Value *DepTaskArgs[7];
4697 if (NumDependencies) {
4698 DepTaskArgs[0] = UpLoc;
4699 DepTaskArgs[1] = ThreadID;
4700 DepTaskArgs[2] = NewTask;
4701 DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
4702 DepTaskArgs[4] = DependenciesArray.getPointer();
4703 DepTaskArgs[5] = CGF.Builder.getInt32(0);
4704 DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4705 }
4706 auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, NumDependencies,
4707 &TaskArgs,
4708 &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
4709 if (!Data.Tied) {
4710 auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
4711 auto PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
4712 CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
4713 }
4714 if (NumDependencies) {
4715 CGF.EmitRuntimeCall(
4716 createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps), DepTaskArgs);
4717 } else {
4718 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
4719 TaskArgs);
4720 }
4721    // Check if the parent region is untied and build a return for the untied task.
4722 if (auto *Region =
4723 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
4724 Region->emitUntiedSwitch(CGF);
4725 };
4726
4727 llvm::Value *DepWaitTaskArgs[6];
4728 if (NumDependencies) {
4729 DepWaitTaskArgs[0] = UpLoc;
4730 DepWaitTaskArgs[1] = ThreadID;
4731 DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
4732 DepWaitTaskArgs[3] = DependenciesArray.getPointer();
4733 DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
4734 DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4735 }
4736 auto &&ElseCodeGen = [&TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
4737 NumDependencies, &DepWaitTaskArgs,
4738 Loc](CodeGenFunction &CGF, PrePostActionTy &) {
4739 auto &RT = CGF.CGM.getOpenMPRuntime();
4740 CodeGenFunction::RunCleanupsScope LocalScope(CGF);
4741 // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
4742 // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
4743 // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
4744 // is specified.
4745 if (NumDependencies)
4746 CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
4747 DepWaitTaskArgs);
4748 // Call proxy_task_entry(gtid, new_task);
4749 auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
4750 Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
4751 Action.Enter(CGF);
4752 llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
4753 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
4754 OutlinedFnArgs);
4755 };
4756
4757 // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
4758 // kmp_task_t *new_task);
4759 // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
4760 // kmp_task_t *new_task);
4761 RegionCodeGenTy RCG(CodeGen);
4762 CommonActionTy Action(
4763 RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0), TaskArgs,
4764 RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0), TaskArgs);
4765 RCG.setAction(Action);
4766 RCG(CGF);
4767 };
4768
4769 if (IfCond)
4770 emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
4771 else {
4772 RegionCodeGenTy ThenRCG(ThenCodeGen);
4773 ThenRCG(CGF);
4774 }
4775}
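// A hedged sketch (pseudo-code, illustrative names) of the control flow emitted
// by emitTaskCall above for a task with an if clause and a dependence list:
//
//   if (if_cond) {
//     __kmpc_omp_task_with_deps(&loc, gtid, new_task, ndeps, dep_list,
//                               /*ndeps_noalias=*/0, /*noalias_dep_list=*/NULL);
//   } else {
//     __kmpc_omp_wait_deps(&loc, gtid, ndeps, dep_list, 0, NULL);
//     __kmpc_omp_task_begin_if0(&loc, gtid, new_task);
//     .omp_task_entry.(gtid, new_task);          // direct, serialized execution
//     __kmpc_omp_task_complete_if0(&loc, gtid, new_task);
//   }
//
// Without dependences the "then" branch degenerates to __kmpc_omp_task().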
4776
4777void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
4778 const OMPLoopDirective &D,
4779 llvm::Value *TaskFunction,
4780 QualType SharedsTy, Address Shareds,
4781 const Expr *IfCond,
4782 const OMPTaskDataTy &Data) {
4783 if (!CGF.HaveInsertPoint())
4784 return;
4785 TaskResultTy Result =
4786 emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
4787  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
4788 // libcall.
4789 // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
4790 // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
4791 // sched, kmp_uint64 grainsize, void *task_dup);
4792 llvm::Value *ThreadID = getThreadID(CGF, Loc);
4793 llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
4794 llvm::Value *IfVal;
4795 if (IfCond) {
4796 IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
4797 /*isSigned=*/true);
4798 } else
4799 IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
4800
4801 LValue LBLVal = CGF.EmitLValueForField(
4802 Result.TDBase,
4803 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
4804 auto *LBVar =
4805 cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
4806 CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
4807 /*IsInitializer=*/true);
4808 LValue UBLVal = CGF.EmitLValueForField(
4809 Result.TDBase,
4810 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
4811 auto *UBVar =
4812 cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
4813 CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
4814 /*IsInitializer=*/true);
4815 LValue StLVal = CGF.EmitLValueForField(
4816 Result.TDBase,
4817 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
4818 auto *StVar =
4819 cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
4820 CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
4821 /*IsInitializer=*/true);
4822 // Store reductions address.
4823 LValue RedLVal = CGF.EmitLValueForField(
4824 Result.TDBase,
4825 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
4826 if (Data.Reductions)
4827 CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
4828 else {
4829 CGF.EmitNullInitialization(RedLVal.getAddress(),
4830 CGF.getContext().VoidPtrTy);
4831 }
4832 enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
4833 llvm::Value *TaskArgs[] = {
4834 UpLoc,
4835 ThreadID,
4836 Result.NewTask,
4837 IfVal,
4838 LBLVal.getPointer(),
4839 UBLVal.getPointer(),
4840 CGF.EmitLoadOfScalar(StLVal, Loc),
4841 llvm::ConstantInt::getNullValue(
4842 CGF.IntTy), // Always 0 because taskgroup emitted by the compiler
4843 llvm::ConstantInt::getSigned(
4844 CGF.IntTy, Data.Schedule.getPointer()
4845 ? Data.Schedule.getInt() ? NumTasks : Grainsize
4846 : NoSchedule),
4847 Data.Schedule.getPointer()
4848 ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
4849 /*isSigned=*/false)
4850 : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
4851 Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4852 Result.TaskDupFn, CGF.VoidPtrTy)
4853 : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
4854 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskloop), TaskArgs);
4855}
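// A hedged sketch of the single runtime call built above (argument names are
// illustrative; the signature follows the comment at the top of the function):
//
//   __kmpc_taskloop(&loc, gtid, new_task, if_val,
//                   &task->lb, &task->ub, task->st,
//                   /*nogroup=*/0,        // taskgroup is emitted by the compiler
//                   /*sched=*/sched_kind, // 0 none, 1 grainsize, 2 num_tasks
//                   /*grainsize=*/sched_val_or_0,
//                   /*task_dup=*/task_dup_fn_or_null);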
4856
4857/// \brief Emit reduction operation for each element of array (required for
4858/// array sections) LHS op = RHS.
4859/// \param Type Type of array.
4860/// \param LHSVar Variable on the left side of the reduction operation
4861/// (references element of array in original variable).
4862/// \param RHSVar Variable on the right side of the reduction operation
4863/// (references element of array in original variable).
4864/// \param RedOpGen Generator of reduction operation with use of LHSVar and
4865/// RHSVar.
4866static void EmitOMPAggregateReduction(
4867 CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
4868 const VarDecl *RHSVar,
4869 const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
4870 const Expr *, const Expr *)> &RedOpGen,
4871 const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
4872 const Expr *UpExpr = nullptr) {
4873 // Perform element-by-element initialization.
4874 QualType ElementTy;
4875 Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
4876 Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
4877
4878 // Drill down to the base element type on both arrays.
4879 auto ArrayTy = Type->getAsArrayTypeUnsafe();
4880 auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
4881
4882 auto RHSBegin = RHSAddr.getPointer();
4883 auto LHSBegin = LHSAddr.getPointer();
4884 // Cast from pointer to array type to pointer to single element.
4885 auto LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
4886 // The basic structure here is a while-do loop.
4887 auto BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
4888 auto DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
4889 auto IsEmpty =
4890 CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
4891 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
4892
4893 // Enter the loop body, making that address the current address.
4894 auto EntryBB = CGF.Builder.GetInsertBlock();
4895 CGF.EmitBlock(BodyBB);
4896
4897 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
4898
4899 llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
4900 RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
4901 RHSElementPHI->addIncoming(RHSBegin, EntryBB);
4902 Address RHSElementCurrent =
4903 Address(RHSElementPHI,
4904 RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
4905
4906 llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
4907 LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
4908 LHSElementPHI->addIncoming(LHSBegin, EntryBB);
4909 Address LHSElementCurrent =
4910 Address(LHSElementPHI,
4911 LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
4912
4913 // Emit copy.
4914 CodeGenFunction::OMPPrivateScope Scope(CGF);
4915 Scope.addPrivate(LHSVar, [=]() -> Address { return LHSElementCurrent; });
4916 Scope.addPrivate(RHSVar, [=]() -> Address { return RHSElementCurrent; });
4917 Scope.Privatize();
4918 RedOpGen(CGF, XExpr, EExpr, UpExpr);
4919 Scope.ForceCleanup();
4920
4921 // Shift the address forward by one element.
4922 auto LHSElementNext = CGF.Builder.CreateConstGEP1_32(
4923 LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
4924 auto RHSElementNext = CGF.Builder.CreateConstGEP1_32(
4925 RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
4926 // Check whether we've reached the end.
4927 auto Done =
4928 CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
4929 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
4930 LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
4931 RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());
4932
4933 // Done.
4934 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
4935}
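// Conceptually (a sketch only; the element type and reduction operation come
// from the caller), the loop emitted above corresponds to:
//
//   T *lhs = lhs_begin, *rhs = rhs_begin, *lhs_end = lhs_begin + num_elements;
//   if (lhs != lhs_end) {
//     do {
//       *lhs = RedOp(*lhs, *rhs);  // RedOpGen with LHSVar/RHSVar privatized
//       ++lhs; ++rhs;
//     } while (lhs != lhs_end);
//   }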
4936
4937/// Emit the reduction combiner. If the combiner is a simple expression, emit it
4938/// as is; otherwise treat it as the combiner of a UDR declaration and emit it as
4939/// a call to the UDR combiner function.
4940static void emitReductionCombiner(CodeGenFunction &CGF,
4941 const Expr *ReductionOp) {
4942 if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
4943 if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
4944 if (auto *DRE =
4945 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
4946 if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
4947 std::pair<llvm::Function *, llvm::Function *> Reduction =
4948 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
4949 RValue Func = RValue::get(Reduction.first);
4950 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
4951 CGF.EmitIgnoredExpr(ReductionOp);
4952 return;
4953 }
4954 CGF.EmitIgnoredExpr(ReductionOp);
4955}
4956
4957llvm::Value *CGOpenMPRuntime::emitReductionFunction(
4958 CodeGenModule &CGM, SourceLocation Loc, llvm::Type *ArgsType,
4959 ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
4960 ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
4961 auto &C = CGM.getContext();
4962
4963 // void reduction_func(void *LHSArg, void *RHSArg);
4964 FunctionArgList Args;
4965 ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
4966 ImplicitParamDecl::Other);
4967 ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
4968 ImplicitParamDecl::Other);
4969 Args.push_back(&LHSArg);
4970 Args.push_back(&RHSArg);
4971 auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4972 auto *Fn = llvm::Function::Create(
4973 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
4974 ".omp.reduction.reduction_func", &CGM.getModule());
4975 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
4976 CodeGenFunction CGF(CGM);
4977 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
4978
4979 // Dst = (void*[n])(LHSArg);
4980 // Src = (void*[n])(RHSArg);
4981 Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4982 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
4983 ArgsType), CGF.getPointerAlign());
4984 Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4985 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
4986 ArgsType), CGF.getPointerAlign());
4987
4988 // ...
4989 // *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
4990 // ...
4991 CodeGenFunction::OMPPrivateScope Scope(CGF);
4992 auto IPriv = Privates.begin();
4993 unsigned Idx = 0;
4994 for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
4995 auto RHSVar = cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
4996 Scope.addPrivate(RHSVar, [&]() -> Address {
4997 return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
4998 });
4999 auto LHSVar = cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
5000 Scope.addPrivate(LHSVar, [&]() -> Address {
5001 return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
5002 });
5003 QualType PrivTy = (*IPriv)->getType();
5004 if (PrivTy->isVariablyModifiedType()) {
5005 // Get array size and emit VLA type.
5006 ++Idx;
5007 Address Elem =
5008 CGF.Builder.CreateConstArrayGEP(LHS, Idx, CGF.getPointerSize());
5009 llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
5010 auto *VLA = CGF.getContext().getAsVariableArrayType(PrivTy);
5011 auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
5012 CodeGenFunction::OpaqueValueMapping OpaqueMap(
5013 CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
5014 CGF.EmitVariablyModifiedType(PrivTy);
5015 }
5016 }
5017 Scope.Privatize();
5018 IPriv = Privates.begin();
5019 auto ILHS = LHSExprs.begin();
5020 auto IRHS = RHSExprs.begin();
5021 for (auto *E : ReductionOps) {
5022 if ((*IPriv)->getType()->isArrayType()) {
5023 // Emit reduction for array section.
5024 auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5025 auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5026 EmitOMPAggregateReduction(
5027 CGF, (*IPriv)->getType(), LHSVar, RHSVar,
5028 [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5029 emitReductionCombiner(CGF, E);
5030 });
5031 } else
5032 // Emit reduction for array subscript or single variable.
5033 emitReductionCombiner(CGF, E);
5034 ++IPriv;
5035 ++ILHS;
5036 ++IRHS;
5037 }
5038 Scope.ForceCleanup();
5039 CGF.FinishFunction();
5040 return Fn;
5041}
5042
5043void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
5044 const Expr *ReductionOp,
5045 const Expr *PrivateRef,
5046 const DeclRefExpr *LHS,
5047 const DeclRefExpr *RHS) {
5048 if (PrivateRef->getType()->isArrayType()) {
5049 // Emit reduction for array section.
5050 auto *LHSVar = cast<VarDecl>(LHS->getDecl());
5051 auto *RHSVar = cast<VarDecl>(RHS->getDecl());
5052 EmitOMPAggregateReduction(
5053 CGF, PrivateRef->getType(), LHSVar, RHSVar,
5054 [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5055 emitReductionCombiner(CGF, ReductionOp);
5056 });
5057 } else
5058 // Emit reduction for array subscript or single variable.
5059 emitReductionCombiner(CGF, ReductionOp);
5060}
5061
5062void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
5063 ArrayRef<const Expr *> Privates,
5064 ArrayRef<const Expr *> LHSExprs,
5065 ArrayRef<const Expr *> RHSExprs,
5066 ArrayRef<const Expr *> ReductionOps,
5067 ReductionOptionsTy Options) {
5068 if (!CGF.HaveInsertPoint())
5069 return;
5070
5071 bool WithNowait = Options.WithNowait;
5072 bool SimpleReduction = Options.SimpleReduction;
5073
5074  // The following code should be emitted for the reduction:
5075 //
5076 // static kmp_critical_name lock = { 0 };
5077 //
5078 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
5079 // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
5080 // ...
5081 // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
5082 // *(Type<n>-1*)rhs[<n>-1]);
5083 // }
5084 //
5085 // ...
5086 // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
5087 // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
5088 // RedList, reduce_func, &<lock>)) {
5089 // case 1:
5090 // ...
5091 // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5092 // ...
5093 // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5094 // break;
5095 // case 2:
5096 // ...
5097 // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
5098 // ...
5099 // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
5100 // break;
5101 // default:;
5102 // }
5103 //
5104  // If SimpleReduction is true, only the following code is generated:
5105 // ...
5106 // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5107 // ...
5108
5109 auto &C = CGM.getContext();
5110
5111 if (SimpleReduction) {
5112 CodeGenFunction::RunCleanupsScope Scope(CGF);
5113 auto IPriv = Privates.begin();
5114 auto ILHS = LHSExprs.begin();
5115 auto IRHS = RHSExprs.begin();
5116 for (auto *E : ReductionOps) {
5117 emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
5118 cast<DeclRefExpr>(*IRHS));
5119 ++IPriv;
5120 ++ILHS;
5121 ++IRHS;
5122 }
5123 return;
5124 }
5125
5126 // 1. Build a list of reduction variables.
5127 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
5128 auto Size = RHSExprs.size();
5129 for (auto *E : Privates) {
5130 if (E->getType()->isVariablyModifiedType())
5131 // Reserve place for array size.
5132 ++Size;
5133 }
5134 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
5135 QualType ReductionArrayTy =
5136 C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
5137 /*IndexTypeQuals=*/0);
5138 Address ReductionList =
5139 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
5140 auto IPriv = Privates.begin();
5141 unsigned Idx = 0;
5142 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
5143 Address Elem =
5144 CGF.Builder.CreateConstArrayGEP(ReductionList, Idx, CGF.getPointerSize());
5145 CGF.Builder.CreateStore(
5146 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5147 CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
5148 Elem);
5149 if ((*IPriv)->getType()->isVariablyModifiedType()) {
5150 // Store array size.
5151 ++Idx;
5152 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
5153 CGF.getPointerSize());
5154 llvm::Value *Size = CGF.Builder.CreateIntCast(
5155 CGF.getVLASize(
5156 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
5157 .NumElts,
5158 CGF.SizeTy, /*isSigned=*/false);
5159 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
5160 Elem);
5161 }
5162 }
5163
5164 // 2. Emit reduce_func().
5165 auto *ReductionFn = emitReductionFunction(
5166 CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
5167 Privates, LHSExprs, RHSExprs, ReductionOps);
5168
5169 // 3. Create static kmp_critical_name lock = { 0 };
5170 auto *Lock = getCriticalRegionLock(".reduction");
5171
5172 // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
5173 // RedList, reduce_func, &<lock>);
5174 auto *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
5175 auto *ThreadId = getThreadID(CGF, Loc);
5176 auto *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
5177 auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5178 ReductionList.getPointer(), CGF.VoidPtrTy);
5179 llvm::Value *Args[] = {
5180 IdentTLoc, // ident_t *<loc>
5181 ThreadId, // i32 <gtid>
5182 CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
5183 ReductionArrayTySize, // size_type sizeof(RedList)
5184 RL, // void *RedList
5185 ReductionFn, // void (*) (void *, void *) <reduce_func>
5186 Lock // kmp_critical_name *&<lock>
5187 };
5188 auto Res = CGF.EmitRuntimeCall(
5189 createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
5190 : OMPRTL__kmpc_reduce),
5191 Args);
5192
5193 // 5. Build switch(res)
5194 auto *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
5195 auto *SwInst = CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);
5196
5197 // 6. Build case 1:
5198 // ...
5199 // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5200 // ...
5201 // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5202 // break;
5203 auto *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
5204 SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
5205 CGF.EmitBlock(Case1BB);
5206
5207 // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5208 llvm::Value *EndArgs[] = {
5209 IdentTLoc, // ident_t *<loc>
5210 ThreadId, // i32 <gtid>
5211 Lock // kmp_critical_name *&<lock>
5212 };
5213 auto &&CodeGen = [&Privates, &LHSExprs, &RHSExprs, &ReductionOps](
5214 CodeGenFunction &CGF, PrePostActionTy &Action) {
5215 auto &RT = CGF.CGM.getOpenMPRuntime();
5216 auto IPriv = Privates.begin();
5217 auto ILHS = LHSExprs.begin();
5218 auto IRHS = RHSExprs.begin();
5219 for (auto *E : ReductionOps) {
5220 RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
5221 cast<DeclRefExpr>(*IRHS));
5222 ++IPriv;
5223 ++ILHS;
5224 ++IRHS;
5225 }
5226 };
5227 RegionCodeGenTy RCG(CodeGen);
5228 CommonActionTy Action(
5229 nullptr, llvm::None,
5230 createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
5231 : OMPRTL__kmpc_end_reduce),
5232 EndArgs);
5233 RCG.setAction(Action);
5234 RCG(CGF);
5235
5236 CGF.EmitBranch(DefaultBB);
5237
5238 // 7. Build case 2:
5239 // ...
5240 // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
5241 // ...
5242 // break;
5243 auto *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
5244 SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
5245 CGF.EmitBlock(Case2BB);
5246
5247 auto &&AtomicCodeGen = [Loc, &Privates, &LHSExprs, &RHSExprs, &ReductionOps](
5248 CodeGenFunction &CGF, PrePostActionTy &Action) {
5249 auto ILHS = LHSExprs.begin();
5250 auto IRHS = RHSExprs.begin();
5251 auto IPriv = Privates.begin();
5252 for (auto *E : ReductionOps) {
5253 const Expr *XExpr = nullptr;
5254 const Expr *EExpr = nullptr;
5255 const Expr *UpExpr = nullptr;
5256 BinaryOperatorKind BO = BO_Comma;
5257 if (auto *BO = dyn_cast<BinaryOperator>(E)) {
5258 if (BO->getOpcode() == BO_Assign) {
5259 XExpr = BO->getLHS();
5260 UpExpr = BO->getRHS();
5261 }
5262 }
5263 // Try to emit update expression as a simple atomic.
5264 auto *RHSExpr = UpExpr;
5265 if (RHSExpr) {
5266 // Analyze RHS part of the whole expression.
5267 if (auto *ACO = dyn_cast<AbstractConditionalOperator>(
5268 RHSExpr->IgnoreParenImpCasts())) {
5269 // If this is a conditional operator, analyze its condition for
5270 // min/max reduction operator.
5271 RHSExpr = ACO->getCond();
5272 }
5273 if (auto *BORHS =
5274 dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
5275 EExpr = BORHS->getRHS();
5276 BO = BORHS->getOpcode();
5277 }
5278 }
5279 if (XExpr) {
5280 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5281 auto &&AtomicRedGen = [BO, VD,
5282 Loc](CodeGenFunction &CGF, const Expr *XExpr,
5283 const Expr *EExpr, const Expr *UpExpr) {
5284 LValue X = CGF.EmitLValue(XExpr);
5285 RValue E;
5286 if (EExpr)
5287 E = CGF.EmitAnyExpr(EExpr);
5288 CGF.EmitOMPAtomicSimpleUpdateExpr(
5289 X, E, BO, /*IsXLHSInRHSPart=*/true,
5290 llvm::AtomicOrdering::Monotonic, Loc,
5291 [&CGF, UpExpr, VD, Loc](RValue XRValue) {
5292 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
5293 PrivateScope.addPrivate(
5294 VD, [&CGF, VD, XRValue, Loc]() -> Address {
5295 Address LHSTemp = CGF.CreateMemTemp(VD->getType());
5296 CGF.emitOMPSimpleStore(
5297 CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
5298 VD->getType().getNonReferenceType(), Loc);
5299 return LHSTemp;
5300 });
5301 (void)PrivateScope.Privatize();
5302 return CGF.EmitAnyExpr(UpExpr);
5303 });
5304 };
5305 if ((*IPriv)->getType()->isArrayType()) {
5306 // Emit atomic reduction for array section.
5307 auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5308 EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
5309 AtomicRedGen, XExpr, EExpr, UpExpr);
5310 } else
5311 // Emit atomic reduction for array subscript or single variable.
5312 AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
5313 } else {
5314 // Emit as a critical region.
5315 auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
5316 const Expr *, const Expr *) {
5317 auto &RT = CGF.CGM.getOpenMPRuntime();
5318 RT.emitCriticalRegion(
5319 CGF, ".atomic_reduction",
5320 [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
5321 Action.Enter(CGF);
5322 emitReductionCombiner(CGF, E);
5323 },
5324 Loc);
5325 };
5326 if ((*IPriv)->getType()->isArrayType()) {
5327 auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5328 auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5329 EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
5330 CritRedGen);
5331 } else
5332 CritRedGen(CGF, nullptr, nullptr, nullptr);
5333 }
5334 ++ILHS;
5335 ++IRHS;
5336 ++IPriv;
5337 }
5338 };
5339 RegionCodeGenTy AtomicRCG(AtomicCodeGen);
5340 if (!WithNowait) {
5341 // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
5342 llvm::Value *EndArgs[] = {
5343 IdentTLoc, // ident_t *<loc>
5344 ThreadId, // i32 <gtid>
5345 Lock // kmp_critical_name *&<lock>
5346 };
5347 CommonActionTy Action(nullptr, llvm::None,
5348 createRuntimeFunction(OMPRTL__kmpc_end_reduce),
5349 EndArgs);
5350 AtomicRCG.setAction(Action);
5351 AtomicRCG(CGF);
5352 } else
5353 AtomicRCG(CGF);
5354
5355 CGF.EmitBranch(DefaultBB);
5356 CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
5357}
5358
5359/// Generates a unique name for artificial threadprivate variables.
5360/// Format is: <Prefix> "." <Loc_raw_encoding> "_" <N>
5361static std::string generateUniqueName(StringRef Prefix, SourceLocation Loc,
5362 unsigned N) {
5363 SmallString<256> Buffer;
5364 llvm::raw_svector_ostream Out(Buffer);
5365 Out << Prefix << "." << Loc.getRawEncoding() << "_" << N;
5366 return Out.str();
5367}
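// For example (illustrative raw source-location encoding), a call such as
// generateUniqueName("reduction_size", Loc, 2) yields a name of the form
// "reduction_size.123456_2".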
5368
5369/// Emits reduction initializer function:
5370/// \code
5371/// void @.red_init(void* %arg) {
5372/// %0 = bitcast void* %arg to <type>*
5373/// store <type> <init>, <type>* %0
5374/// ret void
5375/// }
5376/// \endcode
5377static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
5378 SourceLocation Loc,
5379 ReductionCodeGen &RCG, unsigned N) {
5380 auto &C = CGM.getContext();
5381 FunctionArgList Args;
5382 ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5383 ImplicitParamDecl::Other);
5384 Args.emplace_back(&Param);
5385 auto &FnInfo =
5386 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5387 auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5388 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5389 ".red_init.", &CGM.getModule());
5390 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
5391 CodeGenFunction CGF(CGM);
5392 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
5393 Address PrivateAddr = CGF.EmitLoadOfPointer(
5394 CGF.GetAddrOfLocalVar(&Param),
5395 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5396 llvm::Value *Size = nullptr;
5397 // If the size of the reduction item is non-constant, load it from global
5398 // threadprivate variable.
5399 if (RCG.getSizes(N).second) {
5400 Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5401 CGF, CGM.getContext().getSizeType(),
5402 generateUniqueName("reduction_size", Loc, N));
5403 Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5404 CGM.getContext().getSizeType(), Loc);
5405 }
5406 RCG.emitAggregateType(CGF, N, Size);
5407 LValue SharedLVal;
5408  // If the initializer uses the initializer from a declare reduction construct,
5409  // emit a pointer to the address of the original reduction item (required by
5410  // the reduction initializer).
5411 if (RCG.usesReductionInitializer(N)) {
5412 Address SharedAddr =
5413 CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5414 CGF, CGM.getContext().VoidPtrTy,
5415 generateUniqueName("reduction", Loc, N));
5416 SharedLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
5417 } else {
5418 SharedLVal = CGF.MakeNaturalAlignAddrLValue(
5419 llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
5420 CGM.getContext().VoidPtrTy);
5421 }
5422 // Emit the initializer:
5423 // %0 = bitcast void* %arg to <type>*
5424 // store <type> <init>, <type>* %0
5425 RCG.emitInitialization(CGF, N, PrivateAddr, SharedLVal,
5426 [](CodeGenFunction &) { return false; });
5427 CGF.FinishFunction();
5428 return Fn;
5429}
5430
5431/// Emits reduction combiner function:
5432/// \code
5433/// void @.red_comb(void* %arg0, void* %arg1) {
5434/// %lhs = bitcast void* %arg0 to <type>*
5435/// %rhs = bitcast void* %arg1 to <type>*
5436/// %2 = <ReductionOp>(<type>* %lhs, <type>* %rhs)
5437/// store <type> %2, <type>* %lhs
5438/// ret void
5439/// }
5440/// \endcode
5441static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
5442 SourceLocation Loc,
5443 ReductionCodeGen &RCG, unsigned N,
5444 const Expr *ReductionOp,
5445 const Expr *LHS, const Expr *RHS,
5446 const Expr *PrivateRef) {
5447 auto &C = CGM.getContext();
5448 auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
5449 auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
5450 FunctionArgList Args;
5451 ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
5452 C.VoidPtrTy, ImplicitParamDecl::Other);
5453 ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5454 ImplicitParamDecl::Other);
5455 Args.emplace_back(&ParamInOut);
5456 Args.emplace_back(&ParamIn);
5457 auto &FnInfo =
5458 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5459 auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5460 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5461 ".red_comb.", &CGM.getModule());
5462 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
5463 CodeGenFunction CGF(CGM);
5464 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
5465 llvm::Value *Size = nullptr;
5466 // If the size of the reduction item is non-constant, load it from global
5467 // threadprivate variable.
5468 if (RCG.getSizes(N).second) {
5469 Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5470 CGF, CGM.getContext().getSizeType(),
5471 generateUniqueName("reduction_size", Loc, N));
5472 Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5473 CGM.getContext().getSizeType(), Loc);
5474 }
5475 RCG.emitAggregateType(CGF, N, Size);
5476 // Remap lhs and rhs variables to the addresses of the function arguments.
5477 // %lhs = bitcast void* %arg0 to <type>*
5478 // %rhs = bitcast void* %arg1 to <type>*
5479 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
5480 PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() -> Address {
5481 // Pull out the pointer to the variable.
5482 Address PtrAddr = CGF.EmitLoadOfPointer(
5483 CGF.GetAddrOfLocalVar(&ParamInOut),
5484 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5485 return CGF.Builder.CreateElementBitCast(
5486 PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
5487 });
5488 PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() -> Address {
5489 // Pull out the pointer to the variable.
5490 Address PtrAddr = CGF.EmitLoadOfPointer(
5491 CGF.GetAddrOfLocalVar(&ParamIn),
5492 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5493 return CGF.Builder.CreateElementBitCast(
5494 PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
5495 });
5496 PrivateScope.Privatize();
5497 // Emit the combiner body:
5498 // %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
5499 // store <type> %2, <type>* %lhs
5500 CGM.getOpenMPRuntime().emitSingleReductionCombiner(
5501 CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
5502 cast<DeclRefExpr>(RHS));
5503 CGF.FinishFunction();
5504 return Fn;
5505}
5506
5507/// Emits reduction finalizer function:
5508/// \code
5509/// void @.red_fini(void* %arg) {
5510/// %0 = bitcast void* %arg to <type>*
5511/// <destroy>(<type>* %0)
5512/// ret void
5513/// }
5514/// \endcode
5515static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
5516 SourceLocation Loc,
5517 ReductionCodeGen &RCG, unsigned N) {
5518 if (!RCG.needCleanups(N))
5519 return nullptr;
5520 auto &C = CGM.getContext();
5521 FunctionArgList Args;
5522 ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5523 ImplicitParamDecl::Other);
5524 Args.emplace_back(&Param);
5525 auto &FnInfo =
5526 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5527 auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5528 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5529 ".red_fini.", &CGM.getModule());
5530 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
5531 CodeGenFunction CGF(CGM);
5532 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
5533 Address PrivateAddr = CGF.EmitLoadOfPointer(
5534 CGF.GetAddrOfLocalVar(&Param),
5535 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5536 llvm::Value *Size = nullptr;
5537 // If the size of the reduction item is non-constant, load it from global
5538 // threadprivate variable.
5539 if (RCG.getSizes(N).second) {
5540 Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5541 CGF, CGM.getContext().getSizeType(),
5542 generateUniqueName("reduction_size", Loc, N));
5543 Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5544 CGM.getContext().getSizeType(), Loc);
5545 }
5546 RCG.emitAggregateType(CGF, N, Size);
5547 // Emit the finalizer body:
5548 // <destroy>(<type>* %0)
5549 RCG.emitCleanups(CGF, N, PrivateAddr);
5550 CGF.FinishFunction();
5551 return Fn;
5552}
5553
5554llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
5555 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
5556 ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
5557 if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
5558 return nullptr;
5559
5560 // Build typedef struct:
5561 // kmp_task_red_input {
5562 // void *reduce_shar; // shared reduction item
5563 // size_t reduce_size; // size of data item
5564 // void *reduce_init; // data initialization routine
5565 // void *reduce_fini; // data finalization routine
5566 // void *reduce_comb; // data combiner routine
5567 // kmp_task_red_flags_t flags; // flags for additional info from compiler
5568 // } kmp_task_red_input_t;
5569 ASTContext &C = CGM.getContext();
5570 auto *RD = C.buildImplicitRecord("kmp_task_red_input_t");
5571 RD->startDefinition();
5572 const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5573 const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
5574 const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5575 const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5576 const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5577 const FieldDecl *FlagsFD = addFieldToRecordDecl(
5578 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
5579 RD->completeDefinition();
5580 QualType RDType = C.getRecordType(RD);
5581 unsigned Size = Data.ReductionVars.size();
5582 llvm::APInt ArraySize(/*numBits=*/64, Size);
5583 QualType ArrayRDType = C.getConstantArrayType(
5584 RDType, ArraySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
5585 // kmp_task_red_input_t .rd_input.[Size];
5586 Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
5587 ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionCopies,
5588 Data.ReductionOps);
5589 for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
5590 // kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
5591 llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
5592 llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
5593 llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
5594 TaskRedInput.getPointer(), Idxs,
5595 /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
5596 ".rd_input.gep.");
5597 LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
5598 // ElemLVal.reduce_shar = &Shareds[Cnt];
5599 LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
5600 RCG.emitSharedLValue(CGF, Cnt);
5601 llvm::Value *CastedShared =
5602 CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer());
5603 CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
5604 RCG.emitAggregateType(CGF, Cnt);
5605 llvm::Value *SizeValInChars;
5606 llvm::Value *SizeVal;
5607 std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
5608    // We use delayed creation/initialization for VLAs, array sections and custom
5609    // reduction initializations. This is required because the runtime does not
5610    // provide a way to pass the sizes of VLAs/array sections to the
5611    // initializer/combiner/finalizer functions and does not pass the pointer to
5612    // the original reduction item to the initializer. Instead, threadprivate
5613    // global variables are used to store these values and use them in the functions.
5614 bool DelayedCreation = !!SizeVal;
5615 SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
5616 /*isSigned=*/false);
5617 LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
5618 CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
5619 // ElemLVal.reduce_init = init;
5620 LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
5621 llvm::Value *InitAddr =
5622 CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
5623 CGF.EmitStoreOfScalar(InitAddr, InitLVal);
5624 DelayedCreation = DelayedCreation || RCG.usesReductionInitializer(Cnt);
5625 // ElemLVal.reduce_fini = fini;
5626 LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
5627 llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
5628 llvm::Value *FiniAddr = Fini
5629 ? CGF.EmitCastToVoidPtr(Fini)
5630 : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
5631 CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
5632 // ElemLVal.reduce_comb = comb;
5633 LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
5634 llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
5635 CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
5636 RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
5637 CGF.EmitStoreOfScalar(CombAddr, CombLVal);
5638 // ElemLVal.flags = 0;
5639 LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
5640 if (DelayedCreation) {
5641 CGF.EmitStoreOfScalar(
5642 llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*IsSigned=*/true),
5643 FlagsLVal);
5644 } else
5645 CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
5646 }
5647 // Build call void *__kmpc_task_reduction_init(int gtid, int num_data, void
5648 // *data);
5649 llvm::Value *Args[] = {
5650 CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
5651 /*isSigned=*/true),
5652 llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
5653 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
5654 CGM.VoidPtrTy)};
5655 return CGF.EmitRuntimeCall(
5656 createRuntimeFunction(OMPRTL__kmpc_task_reduction_init), Args);
5657}
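// A hedged sketch (illustrative names) of the per-item setup performed above,
// expressed over the kmp_task_red_input_t record described at the top:
//
//   kmp_task_red_input_t rd_input[n];
//   for (unsigned i = 0; i < n; ++i) {
//     rd_input[i].reduce_shar = &shareds[i];
//     rd_input[i].reduce_size = size_in_chars[i];
//     rd_input[i].reduce_init = .red_init.;
//     rd_input[i].reduce_fini = needs_cleanup ? .red_fini. : NULL;
//     rd_input[i].reduce_comb = .red_comb.;
//     rd_input[i].flags       = delayed_creation ? 1 : 0;
//   }
//   void *tg = __kmpc_task_reduction_init(gtid, n, rd_input);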
5658
5659void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
5660 SourceLocation Loc,
5661 ReductionCodeGen &RCG,
5662 unsigned N) {
5663 auto Sizes = RCG.getSizes(N);
5664  // Emit the threadprivate global variable if the size is non-constant
5665  // (Sizes.second != nullptr).
5666 if (Sizes.second) {
5667 llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy,
5668 /*isSigned=*/false);
5669 Address SizeAddr = getAddrOfArtificialThreadPrivate(
5670 CGF, CGM.getContext().getSizeType(),
5671 generateUniqueName("reduction_size", Loc, N));
5672 CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
5673 }
5674 // Store address of the original reduction item if custom initializer is used.
5675 if (RCG.usesReductionInitializer(N)) {
5676 Address SharedAddr = getAddrOfArtificialThreadPrivate(
5677 CGF, CGM.getContext().VoidPtrTy,
5678 generateUniqueName("reduction", Loc, N));
5679 CGF.Builder.CreateStore(
5680 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5681 RCG.getSharedLValue(N).getPointer(), CGM.VoidPtrTy),
5682 SharedAddr, /*IsVolatile=*/false);
5683 }
5684}
5685
5686Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
5687 SourceLocation Loc,
5688 llvm::Value *ReductionsPtr,
5689 LValue SharedLVal) {
5690 // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
5691 // *d);
5692 llvm::Value *Args[] = {
5693 CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
5694 /*isSigned=*/true),
5695 ReductionsPtr,
5696 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(SharedLVal.getPointer(),
5697 CGM.VoidPtrTy)};
5698 return Address(
5699 CGF.EmitRuntimeCall(
5700 createRuntimeFunction(OMPRTL__kmpc_task_reduction_get_th_data), Args),
5701 SharedLVal.getAlignment());
5702}
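// Roughly (a sketch, names illustrative): the returned address is the
// task-private copy of the reduction item obtained via
//   void *priv = __kmpc_task_reduction_get_th_data(gtid, reductions_ptr, &item);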
5703
5704void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
5705 SourceLocation Loc) {
5706 if (!CGF.HaveInsertPoint())
5707 return;
5708 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
5709 // global_tid);
5710 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
5711 // Ignore return result until untied tasks are supported.
5712 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
5713 if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
5714 Region->emitUntiedSwitch(CGF);
5715}
5716
5717void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
5718 OpenMPDirectiveKind InnerKind,
5719 const RegionCodeGenTy &CodeGen,
5720 bool HasCancel) {
5721 if (!CGF.HaveInsertPoint())
5722 return;
5723 InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel);
5724 CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
5725}
5726
5727namespace {
5728enum RTCancelKind {
5729 CancelNoreq = 0,
5730 CancelParallel = 1,
5731 CancelLoop = 2,
5732 CancelSections = 3,
5733 CancelTaskgroup = 4
5734};
5735} // anonymous namespace
5736
5737static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
5738 RTCancelKind CancelKind = CancelNoreq;
5739 if (CancelRegion == OMPD_parallel)
5740 CancelKind = CancelParallel;
5741 else if (CancelRegion == OMPD_for)
5742 CancelKind = CancelLoop;
5743 else if (CancelRegion == OMPD_sections)
5744 CancelKind = CancelSections;
5745 else {
5746 assert(CancelRegion == OMPD_taskgroup)(static_cast <bool> (CancelRegion == OMPD_taskgroup) ? void
(0) : __assert_fail ("CancelRegion == OMPD_taskgroup", "/build/llvm-toolchain-snapshot-7~svn325118/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 5746, __extension__ __PRETTY_FUNCTION__))
;
5747 CancelKind = CancelTaskgroup;
5748 }
5749 return CancelKind;
5750}
5751
5752void CGOpenMPRuntime::emitCancellationPointCall(
5753 CodeGenFunction &CGF, SourceLocation Loc,
5754 OpenMPDirectiveKind CancelRegion) {
5755 if (!CGF.HaveInsertPoint())
5756 return;
5757 // Build call kmp_int32 __kmpc_cancellationpoint(ident_t