Bug Summary

File: clang/lib/CodeGen/CGOpenMPRuntime.cpp
Warning: line 8541, column 15
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGOpenMPRuntime.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D CLANG_VENDOR="Debian " -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/include -I /build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem 
/usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-09-15-222444-33637-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp

/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp

1//===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides a class for OpenMP runtime code generation.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGOpenMPRuntime.h"
14#include "CGCXXABI.h"
15#include "CGCleanup.h"
16#include "CGRecordLayout.h"
17#include "CodeGenFunction.h"
18#include "clang/AST/Attr.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/OpenMPClause.h"
21#include "clang/AST/StmtOpenMP.h"
22#include "clang/AST/StmtVisitor.h"
23#include "clang/Basic/BitmaskEnum.h"
24#include "clang/Basic/FileManager.h"
25#include "clang/Basic/OpenMPKinds.h"
26#include "clang/Basic/SourceManager.h"
27#include "clang/CodeGen/ConstantInitBuilder.h"
28#include "llvm/ADT/ArrayRef.h"
29#include "llvm/ADT/SetOperations.h"
30#include "llvm/ADT/StringExtras.h"
31#include "llvm/Bitcode/BitcodeReader.h"
32#include "llvm/IR/Constants.h"
33#include "llvm/IR/DerivedTypes.h"
34#include "llvm/IR/GlobalValue.h"
35#include "llvm/IR/Value.h"
36#include "llvm/Support/AtomicOrdering.h"
37#include "llvm/Support/Format.h"
38#include "llvm/Support/raw_ostream.h"
39#include <cassert>
40#include <numeric>
41
42using namespace clang;
43using namespace CodeGen;
44using namespace llvm::omp;
45
46namespace {
/// Base class for handling code generation inside OpenMP regions.
class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
public:
  /// Kinds of OpenMP regions used in codegen.
  enum CGOpenMPRegionKind {
    /// Region with outlined function for standalone 'parallel'
    /// directive.
    ParallelOutlinedRegion,
    /// Region with outlined function for standalone 'task' directive.
    TaskOutlinedRegion,
    /// Region for constructs that do not require function outlining,
    /// like 'for', 'sections', 'atomic' etc. directives.
    InlinedRegion,
    /// Region with outlined function for standalone 'target' directive.
    TargetRegion,
  };

  /// Build region info for a region with an associated captured statement.
  CGOpenMPRegionInfo(const CapturedStmt &CS,
                     const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
        CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}

  /// Build region info with no captured statement (used by inlined regions).
  CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
        Kind(Kind), HasCancel(HasCancel) {}

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  virtual const VarDecl *getThreadIDVariable() const = 0;

  /// Emit the captured statement body.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;

  /// Get an LValue for the current ThreadID variable.
  /// \return LValue for thread id variable. This LValue always has type int32*.
  virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);

  /// Emit the dispatch switch for an untied task; no-op by default.
  virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}

  CGOpenMPRegionKind getRegionKind() const { return RegionKind; }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  bool hasCancel() const { return HasCancel; }

  // Support for llvm::cast/dyn_cast over CGCapturedStmtInfo.
  static bool classof(const CGCapturedStmtInfo *Info) {
    return Info->getKind() == CR_OpenMP;
  }

  ~CGOpenMPRegionInfo() override = default;

protected:
  CGOpenMPRegionKind RegionKind;
  RegionCodeGenTy CodeGen;
  OpenMPDirectiveKind Kind;
  bool HasCancel;
};
108
/// API for captured statement code generation in OpenMP constructs.
class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
  CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
                             const RegionCodeGenTy &CodeGen,
                             OpenMPDirectiveKind Kind, bool HasCancel,
                             StringRef HelperName)
      : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
                           HasCancel),
        ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
    assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return HelperName; }

  // Support for llvm::cast/dyn_cast: matches only parallel-outlined regions.
  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
               ParallelOutlinedRegion;
  }

private:
  /// A variable or parameter storing global thread id for OpenMP
  /// constructs.
  const VarDecl *ThreadIDVar;
  /// Name of the outlined helper function for this region.
  StringRef HelperName;
};
141
/// API for captured statement code generation in OpenMP constructs.
class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
  /// Pre/post action that emits the part-id driven switch used to resume an
  /// untied task at the point where it previously yielded.
  class UntiedTaskActionTy final : public PrePostActionTy {
    bool Untied;
    /// Variable holding the current part id of the untied task.
    const VarDecl *PartIDVar;
    const RegionCodeGenTy UntiedCodeGen;
    /// Switch over the part id; populated lazily in Enter() when Untied.
    llvm::SwitchInst *UntiedSwitch = nullptr;

  public:
    UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
                       const RegionCodeGenTy &UntiedCodeGen)
        : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
    void Enter(CodeGenFunction &CGF) override {
      if (Untied) {
        // Emit task switching point.
        LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        llvm::Value *Res =
            CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
        llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
        UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
        CGF.EmitBlock(DoneBB);
        CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        // Part id 0 resumes at the very beginning of the task body.
        UntiedSwitch->addCase(CGF.Builder.getInt32(0),
                              CGF.Builder.GetInsertBlock());
        emitUntiedSwitch(CGF);
      }
    }
    void emitUntiedSwitch(CodeGenFunction &CGF) const {
      if (Untied) {
        // Store the next part id so a re-entry resumes after this point.
        LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              PartIdLVal);
        UntiedCodeGen(CGF);
        CodeGenFunction::JumpDest CurPoint =
            CGF.getJumpDestInCurrentScope(".untied.next.");
        CGF.EmitBranch(CGF.ReturnBlock.getBlock());
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        // Register the resumption block as a new case of the switch.
        UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              CGF.Builder.GetInsertBlock());
        CGF.EmitBranchThroughCleanup(CurPoint);
        CGF.EmitBlock(CurPoint.getBlock());
      }
    }
    // NOTE(review): dereferences UntiedSwitch unconditionally — presumably
    // only called for untied tasks after Enter() has run; confirm at callers.
    unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
  };
  CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
                                 const VarDecl *ThreadIDVar,
                                 const RegionCodeGenTy &CodeGen,
                                 OpenMPDirectiveKind Kind, bool HasCancel,
                                 const UntiedTaskActionTy &Action)
      : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
        ThreadIDVar(ThreadIDVar), Action(Action) {
    assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }

  /// Get an LValue for the current ThreadID variable.
  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return ".omp_outlined."; }

  void emitUntiedSwitch(CodeGenFunction &CGF) override {
    Action.emitUntiedSwitch(CGF);
  }

  // Support for llvm::cast/dyn_cast: matches only task-outlined regions.
  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
               TaskOutlinedRegion;
  }

private:
  /// A variable or parameter storing global thread id for OpenMP
  /// constructs.
  const VarDecl *ThreadIDVar;
  /// Action for emitting code for untied tasks.
  const UntiedTaskActionTy &Action;
};
230
/// API for inlined captured statement code generation in OpenMP
/// constructs.
class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
public:
  CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
                            const RegionCodeGenTy &CodeGen,
                            OpenMPDirectiveKind Kind, bool HasCancel)
      : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
        OldCSI(OldCSI),
        // OuterRegionInfo is null when the enclosing CSI is absent or is not
        // an OpenMP region info.
        OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}

  // Retrieve the value of the context parameter.
  llvm::Value *getContextValue() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getContextValue();
    llvm_unreachable("No context value for inlined OpenMP region");
  }

  void setContextValue(llvm::Value *V) override {
    if (OuterRegionInfo) {
      OuterRegionInfo->setContextValue(V);
      return;
    }
    llvm_unreachable("No context value for inlined OpenMP region");
  }

  /// Lookup the captured field decl for a variable.
  const FieldDecl *lookup(const VarDecl *VD) const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->lookup(VD);
    // If there is no outer outlined region,no need to lookup in a list of
    // captured variables, we can use the original one.
    return nullptr;
  }

  FieldDecl *getThisFieldDecl() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThisFieldDecl();
    return nullptr;
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThreadIDVariable();
    return nullptr;
  }

  /// Get an LValue for the current ThreadID variable.
  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThreadIDVariableLValue(CGF);
    llvm_unreachable("No LValue for inlined OpenMP construct");
  }

  /// Get the name of the capture helper.
  // NOTE(review): unlike the other overrides this delegates through
  // getOldCSI() (the raw enclosing CSI, which may not be an OpenMP region
  // info), not through the OuterRegionInfo member — presumably intentional so
  // a non-OpenMP captured region's helper name is still found; confirm.
  StringRef getHelperName() const override {
    if (auto *OuterRegionInfo = getOldCSI())
      return OuterRegionInfo->getHelperName();
    llvm_unreachable("No helper name for inlined OpenMP construct");
  }

  void emitUntiedSwitch(CodeGenFunction &CGF) override {
    if (OuterRegionInfo)
      OuterRegionInfo->emitUntiedSwitch(CGF);
  }

  CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }

  // Support for llvm::cast/dyn_cast: matches only inlined regions.
  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
  }

  ~CGOpenMPInlinedRegionInfo() override = default;

private:
  /// CodeGen info about outer OpenMP region.
  CodeGenFunction::CGCapturedStmtInfo *OldCSI;
  /// OldCSI downcast to an OpenMP region info, or null.
  CGOpenMPRegionInfo *OuterRegionInfo;
};
313
/// API for captured statement code generation in OpenMP target
/// constructs. For this captures, implicit parameters are used instead of the
/// captured fields. The name of the target region has to be unique in a given
/// application so it is provided by the client, because only the client has
/// the information to generate that.
class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
public:
  CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
                           const RegionCodeGenTy &CodeGen, StringRef HelperName)
      : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
                           /*HasCancel=*/false),
        HelperName(HelperName) {}

  /// This is unused for target regions because each starts executing
  /// with a single thread.
  const VarDecl *getThreadIDVariable() const override { return nullptr; }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override { return HelperName; }

  // Support for llvm::cast/dyn_cast: matches only target regions.
  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
  }

private:
  /// Client-provided, application-unique name of the target region.
  StringRef HelperName;
};
342
/// Placeholder RegionCodeGenTy callback for regions that must never emit a
/// body (used by CGOpenMPInnerExprInfo, which handles expressions only).
static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
  llvm_unreachable("No codegen for expressions");
}
/// API for generation of expressions captured in a innermost OpenMP
/// region.
class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
public:
  CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
      : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
                                  OMPD_unknown,
                                  /*HasCancel=*/false),
        PrivScope(CGF) {
    // Make sure the globals captured in the provided statement are local by
    // using the privatization logic. We assume the same variable is not
    // captured more than once.
    for (const auto &C : CS.captures()) {
      if (!C.capturesVariable() && !C.capturesVariableByCopy())
        continue;

      const VarDecl *VD = C.getCapturedVar();
      // Locals and parameters are already local; only globals need
      // privatization.
      if (VD->isLocalVarDeclOrParm())
        continue;

      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                      /*RefersToEnclosingVariableOrCapture=*/false,
                      VD->getType().getNonReferenceType(), VK_LValue,
                      C.getLocation());
      PrivScope.addPrivate(
          VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(CGF); });
    }
    (void)PrivScope.Privatize();
  }

  /// Lookup the captured field decl for a variable.
  const FieldDecl *lookup(const VarDecl *VD) const override {
    if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
      return FD;
    return nullptr;
  }

  /// Emit the captured statement body.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
    llvm_unreachable("No body for expressions");
  }

  /// Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override {
    llvm_unreachable("No thread id for expressions");
  }

  /// Get the name of the capture helper.
  StringRef getHelperName() const override {
    llvm_unreachable("No helper name for expressions");
  }

  // Never matched by llvm::cast/dyn_cast — this info is used only locally.
  static bool classof(const CGCapturedStmtInfo *Info) { return false; }

private:
  /// Private scope to capture global variables.
  CodeGenFunction::OMPPrivateScope PrivScope;
};
405
/// RAII for emitting code of OpenMP constructs.
/// Installs a CGOpenMPInlinedRegionInfo on the CodeGenFunction and stashes
/// the lambda-capture and block state, restoring everything on destruction.
class InlinedOpenMPRegionRAII {
  CodeGenFunction &CGF;
  // Saved lambda capture map, swapped out for the duration of the region.
  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
  FieldDecl *LambdaThisCaptureField = nullptr;
  const CodeGen::CGBlockInfo *BlockInfo = nullptr;

public:
  /// Constructs region for combined constructs.
  /// \param CodeGen Code generation sequence for combined directives. Includes
  /// a list of functions used for code generation of implicitly inlined
  /// regions.
  InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
                          OpenMPDirectiveKind Kind, bool HasCancel)
      : CGF(CGF) {
    // Start emission for the construct.
    CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
        CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
    std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
    LambdaThisCaptureField = CGF.LambdaThisCaptureField;
    CGF.LambdaThisCaptureField = nullptr;
    BlockInfo = CGF.BlockInfo;
    CGF.BlockInfo = nullptr;
  }

  ~InlinedOpenMPRegionRAII() {
    // Restore original CapturedStmtInfo only if we're done with code emission.
    // The cast asserts that the info installed by the constructor is still
    // current.
    auto *OldCSI =
        cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
    delete CGF.CapturedStmtInfo;
    CGF.CapturedStmtInfo = OldCSI;
    std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
    CGF.LambdaThisCaptureField = LambdaThisCaptureField;
    CGF.BlockInfo = BlockInfo;
  }
};
442
/// Values for bit flags used in the ident_t to describe the fields.
/// All enumeric elements are named and described in accordance with the code
/// from https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h
enum OpenMPLocationFlags : unsigned {
  /// Use trampoline for internal microtask.
  OMP_IDENT_IMD = 0x01,
  /// Use c-style ident structure.
  OMP_IDENT_KMPC = 0x02,
  /// Atomic reduction option for kmpc_reduce.
  OMP_ATOMIC_REDUCE = 0x10,
  /// Explicit 'barrier' directive.
  OMP_IDENT_BARRIER_EXPL = 0x20,
  /// Implicit barrier in code.
  OMP_IDENT_BARRIER_IMPL = 0x40,
  /// Implicit barrier in 'for' directive.
  OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
  /// Implicit barrier in 'sections' directive.
  OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
  /// Implicit barrier in 'single' directive.
  OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
  /// Call of __kmp_for_static_init for static loop.
  OMP_IDENT_WORK_LOOP = 0x200,
  /// Call of __kmp_for_static_init for sections.
  OMP_IDENT_WORK_SECTIONS = 0x400,
  /// Call of __kmp_for_static_init for distribute.
  OMP_IDENT_WORK_DISTRIBUTE = 0x800,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
};
471
namespace {
// Pull the bitmask-enum operators (|, &, ^, ...) into this namespace.
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE()
/// Values for bit flags for marking which requires clauses have been used.
enum OpenMPOffloadingRequiresDirFlags : int64_t {
  /// flag undefined.
  OMP_REQ_UNDEFINED = 0x000,
  /// no requires clause present.
  OMP_REQ_NONE = 0x001,
  /// reverse_offload clause.
  OMP_REQ_REVERSE_OFFLOAD = 0x002,
  /// unified_address clause.
  OMP_REQ_UNIFIED_ADDRESS = 0x004,
  /// unified_shared_memory clause.
  OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
  /// dynamic_allocators clause.
  OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
};

enum OpenMPOffloadingReservedDeviceIDs {
  /// Device ID if the device was not defined, runtime should get it
  /// from environment variables in the spec.
  OMP_DEVICEID_UNDEF = -1,
};
} // anonymous namespace
497
/// Describes ident structure that describes a source location.
/// All descriptions are taken from
/// https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h
/// Original structure:
/// typedef struct ident {
///    kmp_int32 reserved_1;   /**<  might be used in Fortran;
///                                  see above  */
///    kmp_int32 flags;        /**<  also f.flags; KMP_IDENT_xxx flags;
///                                  KMP_IDENT_KMPC identifies this union
///                                  member  */
///    kmp_int32 reserved_2;   /**<  not really used in Fortran any more;
///                                  see above */
///#if USE_ITT_BUILD
///                            /*  but currently used for storing
///                                region-specific ITT */
///                            /*  contextual information. */
///#endif /* USE_ITT_BUILD */
///    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for
///                                 C++  */
///    char const *psource;    /**< String describing the source location.
///                            The string is composed of semi-colon separated
//                             fields which describe the source file,
///                            the function and a pair of line numbers that
///                            delimit the construct.
///                             */
/// } ident_t;
enum IdentFieldIndex {
  /// might be used in Fortran
  IdentField_Reserved_1,
  /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
  IdentField_Flags,
  /// Not really used in Fortran any more
  IdentField_Reserved_2,
  /// Source[4] in Fortran, do not use for C++
  IdentField_Reserved_3,
  /// String describing the source location. The string is composed of
  /// semi-colon separated fields which describe the source file, the function
  /// and a pair of line numbers that delimit the construct.
  IdentField_PSource
};
538
/// Schedule types for 'omp for' loops (these enumerators are taken from
/// the enum sched_type in kmp.h).
enum OpenMPSchedType {
  /// Lower bound for default (unordered) versions.
  OMP_sch_lower = 32,
  OMP_sch_static_chunked = 33,
  OMP_sch_static = 34,
  OMP_sch_dynamic_chunked = 35,
  OMP_sch_guided_chunked = 36,
  OMP_sch_runtime = 37,
  OMP_sch_auto = 38,
  /// static with chunk adjustment (e.g., simd)
  OMP_sch_static_balanced_chunked = 45,
  /// Lower bound for 'ordered' versions.
  OMP_ord_lower = 64,
  OMP_ord_static_chunked = 65,
  OMP_ord_static = 66,
  OMP_ord_dynamic_chunked = 67,
  OMP_ord_guided_chunked = 68,
  OMP_ord_runtime = 69,
  OMP_ord_auto = 70,
  OMP_sch_default = OMP_sch_static,
  /// dist_schedule types
  OMP_dist_sch_static_chunked = 91,
  OMP_dist_sch_static = 92,
  /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
  /// Set if the monotonic schedule modifier was present.
  OMP_sch_modifier_monotonic = (1 << 29),
  /// Set if the nonmonotonic schedule modifier was present.
  OMP_sch_modifier_nonmonotonic = (1 << 30),
};
570
571/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
572/// region.
573class CleanupTy final : public EHScopeStack::Cleanup {
574 PrePostActionTy *Action;
575
576public:
577 explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
578 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
579 if (!CGF.HaveInsertPoint())
580 return;
581 Action->Exit(CGF);
582 }
583};
584
585} // anonymous namespace
586
/// Run the stored codegen callback inside a fresh cleanups scope.
/// If a PrePostActionTy was attached, its Exit() is registered as a cleanup
/// and the action is passed to the callback; otherwise a default (no-op)
/// action is supplied.
void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
  CodeGenFunction::RunCleanupsScope Scope(CGF);
  if (PrePostAction) {
    CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
    Callback(CodeGen, CGF, *PrePostAction);
  } else {
    PrePostActionTy Action;
    Callback(CodeGen, CGF, Action);
  }
}
597
598/// Check if the combiner is a call to UDR combiner and if it is so return the
599/// UDR decl used for reduction.
600static const OMPDeclareReductionDecl *
601getReductionInit(const Expr *ReductionOp) {
602 if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
603 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
604 if (const auto *DRE =
605 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
606 if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
607 return DRD;
608 return nullptr;
609}
610
/// Emit initialization of \p Private from the initializer of the
/// user-defined reduction \p DRD (or zero-initialize when the UDR has no
/// explicit initializer).
// NOTE(review): DRD is dereferenced unconditionally below — callers must
// guarantee it is non-null; confirm at call sites.
static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
                                             const OMPDeclareReductionDecl *DRD,
                                             const Expr *InitOp,
                                             Address Private, Address Original,
                                             QualType Ty) {
  if (DRD->getInitializer()) {
    // UDR with an explicit initializer: emit a call to the generated
    // initializer function with LHS/RHS privatized to Private/Original.
    std::pair<llvm::Function *, llvm::Function *> Reduction =
        CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
    const auto *CE = cast<CallExpr>(InitOp);
    const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
    const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
    const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
    const auto *LHSDRE =
        cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
    const auto *RHSDRE =
        cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
                            [=]() { return Private; });
    PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
                            [=]() { return Original; });
    (void)PrivateScope.Privatize();
    RValue Func = RValue::get(Reduction.second);
    CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
    CGF.EmitIgnoredExpr(InitOp);
  } else {
    // No initializer: materialize a private-linkage null constant and copy it
    // into Private with the evaluation kind appropriate for Ty.
    llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
    std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
    auto *GV = new llvm::GlobalVariable(
        CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage, Init, Name);
    LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
    RValue InitRVal;
    switch (CGF.getEvaluationKind(Ty)) {
    case TEK_Scalar:
      InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
      break;
    case TEK_Complex:
      InitRVal =
          RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
      break;
    case TEK_Aggregate:
      InitRVal = RValue::getAggregate(LV.getAddress(CGF));
      break;
    }
    OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
    CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
    CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
                         /*IsInitializer=*/false);
  }
}
662
/// Emit initialization of arrays of complex types.
/// \param DestAddr Address of the array.
/// \param Type Type of array.
/// \param Init Initial expression of array.
/// \param SrcAddr Address of the original array; only meaningful (and only
///        dereferenced) when \p DRD is non-null.
static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
                                 QualType Type, bool EmitDeclareReductionInit,
                                 const Expr *Init,
                                 const OMPDeclareReductionDecl *DRD,
                                 Address SrcAddr = Address::invalid()) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
  DestAddr =
      CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
  if (DRD)
    SrcAddr =
        CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  // SrcBegin is only needed for declare-reduction initializers, which read
  // the original (shared) element while initializing the private copy.
  llvm::Value *SrcBegin = nullptr;
  if (DRD)
    SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);

  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);

  // PHI nodes track the current source/destination element across loop
  // iterations.
  llvm::PHINode *SrcElementPHI = nullptr;
  Address SrcElementCurrent = Address::invalid();
  if (DRD) {
    SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
                                          "omp.arraycpy.srcElementPast");
    SrcElementPHI->addIncoming(SrcBegin, EntryBB);
    SrcElementCurrent =
        Address(SrcElementPHI,
                SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  }
  llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  {
    // Cleanups from the per-element initializer must not leak across
    // iterations.
    CodeGenFunction::RunCleanupsScope InitScope(CGF);
    if (EmitDeclareReductionInit) {
      emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
                                       SrcElementCurrent, ElementTy);
    } else
      CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
                           /*IsInitializer=*/false);
  }

  if (DRD) {
    // Shift the address forward by one element.
    llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
        SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
    SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
  }

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());

  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
751
752LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
753 return CGF.EmitOMPSharedLValue(E);
754}
755
756LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
757 const Expr *E) {
758 if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
759 return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
760 return LValue();
761}
762
763void ReductionCodeGen::emitAggregateInitialization(
764 CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
765 const OMPDeclareReductionDecl *DRD) {
766 // Emit VarDecl with copy init for arrays.
767 // Get the address of the original variable captured in current
768 // captured region.
769 const auto *PrivateVD =
770 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
771 bool EmitDeclareReductionInit =
772 DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
773 EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
774 EmitDeclareReductionInit,
775 EmitDeclareReductionInit ? ClausesData[N].ReductionOp
776 : PrivateVD->getInit(),
777 DRD, SharedLVal.getAddress(CGF));
778}
779
780ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
781 ArrayRef<const Expr *> Origs,
782 ArrayRef<const Expr *> Privates,
783 ArrayRef<const Expr *> ReductionOps) {
784 ClausesData.reserve(Shareds.size());
785 SharedAddresses.reserve(Shareds.size());
786 Sizes.reserve(Shareds.size());
787 BaseDecls.reserve(Shareds.size());
788 const auto *IOrig = Origs.begin();
789 const auto *IPriv = Privates.begin();
790 const auto *IRed = ReductionOps.begin();
791 for (const Expr *Ref : Shareds) {
792 ClausesData.emplace_back(Ref, *IOrig, *IPriv, *IRed);
793 std::advance(IOrig, 1);
794 std::advance(IPriv, 1);
795 std::advance(IRed, 1);
796 }
797}
798
799void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) {
800 assert(SharedAddresses.size() == N && OrigAddresses.size() == N &&((SharedAddresses.size() == N && OrigAddresses.size()
== N && "Number of generated lvalues must be exactly N."
) ? static_cast<void> (0) : __assert_fail ("SharedAddresses.size() == N && OrigAddresses.size() == N && \"Number of generated lvalues must be exactly N.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 801, __PRETTY_FUNCTION__))
801 "Number of generated lvalues must be exactly N.")((SharedAddresses.size() == N && OrigAddresses.size()
== N && "Number of generated lvalues must be exactly N."
) ? static_cast<void> (0) : __assert_fail ("SharedAddresses.size() == N && OrigAddresses.size() == N && \"Number of generated lvalues must be exactly N.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 801, __PRETTY_FUNCTION__))
;
802 LValue First = emitSharedLValue(CGF, ClausesData[N].Shared);
803 LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Shared);
804 SharedAddresses.emplace_back(First, Second);
805 if (ClausesData[N].Shared == ClausesData[N].Ref) {
806 OrigAddresses.emplace_back(First, Second);
807 } else {
808 LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
809 LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
810 OrigAddresses.emplace_back(First, Second);
811 }
812}
813
// Compute and record the size (in chars and in elements) of reduction item
// \p N, and re-emit its variably-modified private type if needed so that the
// VLA size expression is bound to the computed element count.
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
  const auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
  if (!PrivateType->isVariablyModifiedType()) {
    // Fixed-size item: only the byte size is needed; element count stays null.
    Sizes.emplace_back(
        CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()),
        nullptr);
    return;
  }
  llvm::Value *Size;
  llvm::Value *SizeInChars;
  auto *ElemType =
      cast<llvm::PointerType>(OrigAddresses[N].first.getPointer(CGF)->getType())
          ->getElementType();
  auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
  if (AsArraySection) {
    // Element count = (UB - LB) + 1, derived from the section bounds.
    Size = CGF.Builder.CreatePtrDiff(OrigAddresses[N].second.getPointer(CGF),
                                     OrigAddresses[N].first.getPointer(CGF));
    Size = CGF.Builder.CreateNUWAdd(
        Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
    SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
  } else {
    // Whole-object item: derive the element count from the byte size.
    SizeInChars =
        CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType());
    Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
  }
  Sizes.emplace_back(SizeInChars, Size);
  // Bind the VLA size expression to the computed count while re-emitting the
  // variably-modified type.
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}
850
851void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
852 llvm::Value *Size) {
853 const auto *PrivateVD =
854 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
855 QualType PrivateType = PrivateVD->getType();
856 if (!PrivateType->isVariablyModifiedType()) {
857 assert(!Size && !Sizes[N].second &&((!Size && !Sizes[N].second && "Size should be nullptr for non-variably modified reduction "
"items.") ? static_cast<void> (0) : __assert_fail ("!Size && !Sizes[N].second && \"Size should be nullptr for non-variably modified reduction \" \"items.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 859, __PRETTY_FUNCTION__))
858 "Size should be nullptr for non-variably modified reduction "((!Size && !Sizes[N].second && "Size should be nullptr for non-variably modified reduction "
"items.") ? static_cast<void> (0) : __assert_fail ("!Size && !Sizes[N].second && \"Size should be nullptr for non-variably modified reduction \" \"items.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 859, __PRETTY_FUNCTION__))
859 "items.")((!Size && !Sizes[N].second && "Size should be nullptr for non-variably modified reduction "
"items.") ? static_cast<void> (0) : __assert_fail ("!Size && !Sizes[N].second && \"Size should be nullptr for non-variably modified reduction \" \"items.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 859, __PRETTY_FUNCTION__))
;
860 return;
861 }
862 CodeGenFunction::OpaqueValueMapping OpaqueMap(
863 CGF,
864 cast<OpaqueValueExpr>(
865 CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
866 RValue::get(Size));
867 CGF.EmitVariablyModifiedType(PrivateType);
868}
869
870void ReductionCodeGen::emitInitialization(
871 CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
872 llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
873 assert(SharedAddresses.size() > N && "No variable was generated")((SharedAddresses.size() > N && "No variable was generated"
) ? static_cast<void> (0) : __assert_fail ("SharedAddresses.size() > N && \"No variable was generated\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 873, __PRETTY_FUNCTION__))
;
874 const auto *PrivateVD =
875 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
876 const OMPDeclareReductionDecl *DRD =
877 getReductionInit(ClausesData[N].ReductionOp);
878 QualType PrivateType = PrivateVD->getType();
879 PrivateAddr = CGF.Builder.CreateElementBitCast(
880 PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
881 QualType SharedType = SharedAddresses[N].first.getType();
882 SharedLVal = CGF.MakeAddrLValue(
883 CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(CGF),
884 CGF.ConvertTypeForMem(SharedType)),
885 SharedType, SharedAddresses[N].first.getBaseInfo(),
886 CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
887 if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
888 if (DRD && DRD->getInitializer())
889 (void)DefaultInit(CGF);
890 emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
891 } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
892 (void)DefaultInit(CGF);
893 emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
894 PrivateAddr, SharedLVal.getAddress(CGF),
895 SharedLVal.getType());
896 } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
897 !CGF.isTrivialInitializer(PrivateVD->getInit())) {
898 CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
899 PrivateVD->getType().getQualifiers(),
900 /*IsInitializer=*/false);
901 }
902}
903
904bool ReductionCodeGen::needCleanups(unsigned N) {
905 const auto *PrivateVD =
906 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
907 QualType PrivateType = PrivateVD->getType();
908 QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
909 return DTorKind != QualType::DK_none;
910}
911
912void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
913 Address PrivateAddr) {
914 const auto *PrivateVD =
915 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
916 QualType PrivateType = PrivateVD->getType();
917 QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
918 if (needCleanups(N)) {
919 PrivateAddr = CGF.Builder.CreateElementBitCast(
920 PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
921 CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
922 }
923}
924
// Dereference pointer/reference layers of \p BaseLV until its type matches
// \p ElTy, then return an lvalue for the result cast to ElTy's memory type.
static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          LValue BaseLV) {
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
      BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(CGF), PtrTy);
    } else {
      // Reference layer: load through it as a reference lvalue.
      LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(CGF), BaseTy);
      BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
    }
    BaseTy = BaseTy->getPointeeType();
  }
  // Preserve the lvalue's base info and TBAA while retyping the address.
  return CGF.MakeAddrLValue(
      CGF.Builder.CreateElementBitCast(BaseLV.getAddress(CGF),
                                       CGF.ConvertTypeForMem(ElTy)),
      BaseLV.getType(), BaseLV.getBaseInfo(),
      CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}
944
// Rebuild the pointer/reference indirection chain of \p BaseTy around raw
// address \p Addr: for each pointer/reference layer (until \p ElTy is
// reached) a temporary is allocated and chained to the next one, with \p Addr
// stored at the innermost level. Returns the outermost temporary, or \p Addr
// itself (with \p BaseLVAlignment) when no indirection layers exist.
static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
                          llvm::Value *Addr) {
  Address Tmp = Address::invalid();
  Address TopTmp = Address::invalid();
  Address MostTopTmp = Address::invalid();
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    Tmp = CGF.CreateMemTemp(BaseTy);
    if (TopTmp.isValid())
      // Link the previous level to this new temporary.
      CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
    else
      // First temporary becomes the outermost (returned) one.
      MostTopTmp = Tmp;
    TopTmp = Tmp;
    BaseTy = BaseTy->getPointeeType();
  }
  llvm::Type *Ty = BaseLVType;
  if (Tmp.isValid())
    Ty = Tmp.getElementType();
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
  if (Tmp.isValid()) {
    // Store the adjusted address at the innermost level of the chain.
    CGF.Builder.CreateStore(Addr, Tmp);
    return MostTopTmp;
  }
  return Address(Addr, BaseLVAlignment);
}
972
973static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
974 const VarDecl *OrigVD = nullptr;
975 if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
976 const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
977 while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
978 Base = TempOASE->getBase()->IgnoreParenImpCasts();
979 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
980 Base = TempASE->getBase()->IgnoreParenImpCasts();
981 DE = cast<DeclRefExpr>(Base);
982 OrigVD = cast<VarDecl>(DE->getDecl());
983 } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
984 const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
985 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
986 Base = TempASE->getBase()->IgnoreParenImpCasts();
987 DE = cast<DeclRefExpr>(Base);
988 OrigVD = cast<VarDecl>(DE->getDecl());
989 }
990 return OrigVD;
991}
992
993Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
994 Address PrivateAddr) {
995 const DeclRefExpr *DE;
996 if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
997 BaseDecls.emplace_back(OrigVD);
998 LValue OriginalBaseLValue = CGF.EmitLValue(DE);
999 LValue BaseLValue =
1000 loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
1001 OriginalBaseLValue);
1002 llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
1003 BaseLValue.getPointer(CGF), SharedAddresses[N].first.getPointer(CGF));
1004 llvm::Value *PrivatePointer =
1005 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1006 PrivateAddr.getPointer(),
1007 SharedAddresses[N].first.getAddress(CGF).getType());
1008 llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
1009 return castToBase(CGF, OrigVD->getType(),
1010 SharedAddresses[N].first.getType(),
1011 OriginalBaseLValue.getAddress(CGF).getType(),
1012 OriginalBaseLValue.getAlignment(), Ptr);
1013 }
1014 BaseDecls.emplace_back(
1015 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
1016 return PrivateAddr;
1017}
1018
1019bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
1020 const OMPDeclareReductionDecl *DRD =
1021 getReductionInit(ClausesData[N].ReductionOp);
1022 return DRD && DRD->getInitializer();
1023}
1024
1025LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
1026 return CGF.EmitLoadOfPointerLValue(
1027 CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1028 getThreadIDVariable()->getType()->castAs<PointerType>());
1029}
1030
// Emit the region body inside a terminate scope so exceptions cannot escape
// the structured block.
void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
  if (!CGF.HaveInsertPoint())
    return;
  // 1.2.2 OpenMP Language Terminology
  // Structured block - An executable statement with a single entry at the
  // top and a single exit at the bottom.
  // The point of exit cannot be a branch out of the structured block.
  // longjmp() and throw() must not violate the entry/exit criteria.
  CGF.EHStack.pushTerminate();
  CodeGen(CGF);
  CGF.EHStack.popTerminate();
}
1043
1044LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1045 CodeGenFunction &CGF) {
1046 return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1047 getThreadIDVariable()->getType(),
1048 AlignmentSource::Decl);
1049}
1050
1051static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
1052 QualType FieldTy) {
1053 auto *Field = FieldDecl::Create(
1054 C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
1055 C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
1056 /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
1057 Field->setAccess(AS_public);
1058 DC->addDecl(Field);
1059 return Field;
1060}
1061
// Construct the OpenMP runtime support object; separators are used by
// getName() to build runtime entity names.
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
                                 StringRef Separator)
    : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
      OMPBuilder(CGM.getModule()), OffloadEntriesInfoManager(CGM) {
  // Type for kmp_critical_name-style names: an array of 8 i32.
  KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);

  // Initialize Types used in OpenMPIRBuilder from OMPKinds.def
  OMPBuilder.initialize();
  loadOffloadInfoMetadata();
}
1072
1073void CGOpenMPRuntime::clear() {
1074 InternalVars.clear();
1075 // Clean non-target variable declarations possibly used only in debug info.
1076 for (const auto &Data : EmittedNonTargetVariables) {
1077 if (!Data.getValue().pointsToAliveValue())
1078 continue;
1079 auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
1080 if (!GV)
1081 continue;
1082 if (!GV->isDeclaration() || GV->getNumUses() > 0)
1083 continue;
1084 GV->eraseFromParent();
1085 }
1086}
1087
1088std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
1089 SmallString<128> Buffer;
1090 llvm::raw_svector_ostream OS(Buffer);
1091 StringRef Sep = FirstSeparator;
1092 for (StringRef Part : Parts) {
1093 OS << Sep << Part;
1094 Sep = Separator;
1095 }
1096 return std::string(OS.str());
1097}
1098
// Emit the outlined combiner (or initializer) helper for a declare-reduction:
//   void .omp_combiner.(Ty *omp_out, Ty *omp_in);
// \param CombinerInitializer The combiner expression, or (for initializers)
//        the init expression when call-style init is used — may be null.
// \param In/Out The omp_in/omp_out (or omp_orig/omp_priv) variables whose
//        uses are remapped onto the helper's parameters.
// \param IsCombiner True for the combiner, false for the initializer.
static llvm::Function *
emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
                          const Expr *CombinerInitializer, const VarDecl *In,
                          const VarDecl *Out, bool IsCombiner) {
  // void .omp_combiner.(Ty *in, Ty *out);
  ASTContext &C = CGM.getContext();
  QualType PtrTy = C.getPointerType(Ty).withRestrict();
  FunctionArgList Args;
  ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
                               /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
                              /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  Args.push_back(&OmpOutParm);
  Args.push_back(&OmpInParm);
  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName(
      {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  if (CGM.getLangOpts().Optimize) {
    // Helpers are tiny; force inlining when optimizing.
    Fn->removeFnAttr(llvm::Attribute::NoInline);
    Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
    Fn->addFnAttr(llvm::Attribute::AlwaysInline);
  }
  CodeGenFunction CGF(CGM);
  // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
  // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
                    Out->getLocation());
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
  Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
    return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
        .getAddress(CGF);
  });
  Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
  Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
    return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
        .getAddress(CGF);
  });
  (void)Scope.Privatize();
  // For initializers without call-style init, emit Out's own initializer
  // (if non-trivial) before evaluating the expression, if any.
  if (!IsCombiner && Out->hasInit() &&
      !CGF.isTrivialInitializer(Out->getInit())) {
    CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
                         Out->getType().getQualifiers(),
                         /*IsInitializer=*/true);
  }
  if (CombinerInitializer)
    CGF.EmitIgnoredExpr(CombinerInitializer);
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}
1155
// Emit (once per decl) the combiner and, if present, initializer helpers for
// a user-defined reduction and cache them in UDRMap. When \p CGF is given,
// the decl is also associated with the current function for bookkeeping.
void CGOpenMPRuntime::emitUserDefinedReduction(
    CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
  if (UDRMap.count(D) > 0)
    return;
  llvm::Function *Combiner = emitCombinerOrInitializer(
      CGM, D->getType(), D->getCombiner(),
      cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerIn())->getDecl()),
      cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerOut())->getDecl()),
      /*IsCombiner=*/true);
  llvm::Function *Initializer = nullptr;
  if (const Expr *Init = D->getInitializer()) {
    // Only call-style init passes the expression through; direct init is
    // handled inside the helper via the priv variable's own initializer.
    Initializer = emitCombinerOrInitializer(
        CGM, D->getType(),
        D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
                                                                     : nullptr,
        cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
        cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
        /*IsCombiner=*/false);
  }
  UDRMap.try_emplace(D, Combiner, Initializer);
  if (CGF) {
    auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
    Decls.second.push_back(D);
  }
}
1181
1182std::pair<llvm::Function *, llvm::Function *>
1183CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
1184 auto I = UDRMap.find(D);
1185 if (I != UDRMap.end())
1186 return I->second;
1187 emitUserDefinedReduction(/*CGF=*/nullptr, D);
1188 return UDRMap.lookup(D);
1189}
1190
1191namespace {
1192// Temporary RAII solution to perform a push/pop stack event on the OpenMP IR
1193// Builder if one is present.
1194struct PushAndPopStackRAII {
1195 PushAndPopStackRAII(llvm::OpenMPIRBuilder *OMPBuilder, CodeGenFunction &CGF,
1196 bool HasCancel)
1197 : OMPBuilder(OMPBuilder) {
1198 if (!OMPBuilder)
1199 return;
1200
1201 // The following callback is the crucial part of clangs cleanup process.
1202 //
1203 // NOTE:
1204 // Once the OpenMPIRBuilder is used to create parallel regions (and
1205 // similar), the cancellation destination (Dest below) is determined via
1206 // IP. That means if we have variables to finalize we split the block at IP,
1207 // use the new block (=BB) as destination to build a JumpDest (via
1208 // getJumpDestInCurrentScope(BB)) which then is fed to
1209 // EmitBranchThroughCleanup. Furthermore, there will not be the need
1210 // to push & pop an FinalizationInfo object.
1211 // The FiniCB will still be needed but at the point where the
1212 // OpenMPIRBuilder is asked to construct a parallel (or similar) construct.
1213 auto FiniCB = [&CGF](llvm::OpenMPIRBuilder::InsertPointTy IP) {
1214 assert(IP.getBlock()->end() == IP.getPoint() &&((IP.getBlock()->end() == IP.getPoint() && "Clang CG should cause non-terminated block!"
) ? static_cast<void> (0) : __assert_fail ("IP.getBlock()->end() == IP.getPoint() && \"Clang CG should cause non-terminated block!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1215, __PRETTY_FUNCTION__))
1215 "Clang CG should cause non-terminated block!")((IP.getBlock()->end() == IP.getPoint() && "Clang CG should cause non-terminated block!"
) ? static_cast<void> (0) : __assert_fail ("IP.getBlock()->end() == IP.getPoint() && \"Clang CG should cause non-terminated block!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1215, __PRETTY_FUNCTION__))
;
1216 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1217 CGF.Builder.restoreIP(IP);
1218 CodeGenFunction::JumpDest Dest =
1219 CGF.getOMPCancelDestination(OMPD_parallel);
1220 CGF.EmitBranchThroughCleanup(Dest);
1221 };
1222
1223 // TODO: Remove this once we emit parallel regions through the
1224 // OpenMPIRBuilder as it can do this setup internally.
1225 llvm::OpenMPIRBuilder::FinalizationInfo FI(
1226 {FiniCB, OMPD_parallel, HasCancel});
1227 OMPBuilder->pushFinalizationCB(std::move(FI));
1228 }
1229 ~PushAndPopStackRAII() {
1230 if (OMPBuilder)
1231 OMPBuilder->popFinalizationCB();
1232 }
1233 llvm::OpenMPIRBuilder *OMPBuilder;
1234};
1235} // namespace
1236
1237static llvm::Function *emitParallelOrTeamsOutlinedFunction(
1238 CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
1239 const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
1240 const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
1241 assert(ThreadIDVar->getType()->isPointerType() &&((ThreadIDVar->getType()->isPointerType() && "thread id variable must be of type kmp_int32 *"
) ? static_cast<void> (0) : __assert_fail ("ThreadIDVar->getType()->isPointerType() && \"thread id variable must be of type kmp_int32 *\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1242, __PRETTY_FUNCTION__))
1242 "thread id variable must be of type kmp_int32 *")((ThreadIDVar->getType()->isPointerType() && "thread id variable must be of type kmp_int32 *"
) ? static_cast<void> (0) : __assert_fail ("ThreadIDVar->getType()->isPointerType() && \"thread id variable must be of type kmp_int32 *\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1242, __PRETTY_FUNCTION__))
;
1243 CodeGenFunction CGF(CGM, true);
1244 bool HasCancel = false;
1245 if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
1246 HasCancel = OPD->hasCancel();
1247 else if (const auto *OPD = dyn_cast<OMPTargetParallelDirective>(&D))
1248 HasCancel = OPD->hasCancel();
1249 else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
1250 HasCancel = OPSD->hasCancel();
1251 else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
1252 HasCancel = OPFD->hasCancel();
1253 else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
1254 HasCancel = OPFD->hasCancel();
1255 else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
1256 HasCancel = OPFD->hasCancel();
1257 else if (const auto *OPFD =
1258 dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
1259 HasCancel = OPFD->hasCancel();
1260 else if (const auto *OPFD =
1261 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
1262 HasCancel = OPFD->hasCancel();
1263
1264 // TODO: Temporarily inform the OpenMPIRBuilder, if any, about the new
1265 // parallel region to make cancellation barriers work properly.
1266 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1267 PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel);
1268 CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
1269 HasCancel, OutlinedHelperName);
1270 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1271 return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc());
1272}
1273
1274llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
1275 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1276 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1277 const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1278 return emitParallelOrTeamsOutlinedFunction(
1279 CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1280}
1281
1282llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction(
1283 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1284 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1285 const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1286 return emitParallelOrTeamsOutlinedFunction(
1287 CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1288}
1289
1290llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
1291 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1292 const VarDecl *PartIDVar, const VarDecl *TaskTVar,
1293 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1294 bool Tied, unsigned &NumberOfParts) {
1295 auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
1296 PrePostActionTy &) {
1297 llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc());
1298 llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc());
1299 llvm::Value *TaskArgs[] = {
1300 UpLoc, ThreadID,
1301 CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
1302 TaskTVar->getType()->castAs<PointerType>())
1303 .getPointer(CGF)};
1304 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1305 CGM.getModule(), OMPRTL___kmpc_omp_task),
1306 TaskArgs);
1307 };
1308 CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
1309 UntiedCodeGen);
1310 CodeGen.setAction(Action);
1311 assert(!ThreadIDVar->getType()->isPointerType() &&((!ThreadIDVar->getType()->isPointerType() && "thread id variable must be of type kmp_int32 for tasks"
) ? static_cast<void> (0) : __assert_fail ("!ThreadIDVar->getType()->isPointerType() && \"thread id variable must be of type kmp_int32 for tasks\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1312, __PRETTY_FUNCTION__))
1312 "thread id variable must be of type kmp_int32 for tasks")((!ThreadIDVar->getType()->isPointerType() && "thread id variable must be of type kmp_int32 for tasks"
) ? static_cast<void> (0) : __assert_fail ("!ThreadIDVar->getType()->isPointerType() && \"thread id variable must be of type kmp_int32 for tasks\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1312, __PRETTY_FUNCTION__))
;
1313 const OpenMPDirectiveKind Region =
1314 isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
1315 : OMPD_task;
1316 const CapturedStmt *CS = D.getCapturedStmt(Region);
1317 bool HasCancel = false;
1318 if (const auto *TD = dyn_cast<OMPTaskDirective>(&D))
1319 HasCancel = TD->hasCancel();
1320 else if (const auto *TD = dyn_cast<OMPTaskLoopDirective>(&D))
1321 HasCancel = TD->hasCancel();
1322 else if (const auto *TD = dyn_cast<OMPMasterTaskLoopDirective>(&D))
1323 HasCancel = TD->hasCancel();
1324 else if (const auto *TD = dyn_cast<OMPParallelMasterTaskLoopDirective>(&D))
1325 HasCancel = TD->hasCancel();
1326
1327 CodeGenFunction CGF(CGM, true);
1328 CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
1329 InnermostKind, HasCancel, Action);
1330 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1331 llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
1332 if (!Tied)
1333 NumberOfParts = Action.getNumberOfParts();
1334 return Res;
1335}
1336
1337static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
1338 const RecordDecl *RD, const CGRecordLayout &RL,
1339 ArrayRef<llvm::Constant *> Data) {
1340 llvm::StructType *StructTy = RL.getLLVMType();
1341 unsigned PrevIdx = 0;
1342 ConstantInitBuilder CIBuilder(CGM);
1343 auto DI = Data.begin();
1344 for (const FieldDecl *FD : RD->fields()) {
1345 unsigned Idx = RL.getLLVMFieldNo(FD);
1346 // Fill the alignment.
1347 for (unsigned I = PrevIdx; I < Idx; ++I)
1348 Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
1349 PrevIdx = Idx + 1;
1350 Fields.add(*DI);
1351 ++DI;
1352 }
1353}
1354
1355template <class... As>
1356static llvm::GlobalVariable *
1357createGlobalStruct(CodeGenModule &CGM, QualType Ty, bool IsConstant,
1358 ArrayRef<llvm::Constant *> Data, const Twine &Name,
1359 As &&... Args) {
1360 const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1361 const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1362 ConstantInitBuilder CIBuilder(CGM);
1363 ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
1364 buildStructValue(Fields, CGM, RD, RL, Data);
1365 return Fields.finishAndCreateGlobal(
1366 Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty), IsConstant,
1367 std::forward<As>(Args)...);
1368}
1369
1370template <typename T>
1371static void
1372createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
1373 ArrayRef<llvm::Constant *> Data,
1374 T &Parent) {
1375 const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1376 const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1377 ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
1378 buildStructValue(Fields, CGM, RD, RL, Data);
1379 Fields.finishAndAddTo(Parent);
1380}
1381
1382void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
1383 bool AtCurrentPoint) {
1384 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1385 assert(!Elem.second.ServiceInsertPt && "Insert point is set already.")((!Elem.second.ServiceInsertPt && "Insert point is set already."
) ? static_cast<void> (0) : __assert_fail ("!Elem.second.ServiceInsertPt && \"Insert point is set already.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1385, __PRETTY_FUNCTION__))
;
1386
1387 llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
1388 if (AtCurrentPoint) {
1389 Elem.second.ServiceInsertPt = new llvm::BitCastInst(
1390 Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock());
1391 } else {
1392 Elem.second.ServiceInsertPt =
1393 new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
1394 Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
1395 }
1396}
1397
1398void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
1399 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1400 if (Elem.second.ServiceInsertPt) {
1401 llvm::Instruction *Ptr = Elem.second.ServiceInsertPt;
1402 Elem.second.ServiceInsertPt = nullptr;
1403 Ptr->eraseFromParent();
1404 }
1405}
1406
1407static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF,
1408 SourceLocation Loc,
1409 SmallString<128> &Buffer) {
1410 llvm::raw_svector_ostream OS(Buffer);
1411 // Build debug location
1412 PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1413 OS << ";" << PLoc.getFilename() << ";";
1414 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
1415 OS << FD->getQualifiedNameAsString();
1416 OS << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
1417 return OS.str();
1418}
1419
1420llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
1421 SourceLocation Loc,
1422 unsigned Flags) {
1423 llvm::Constant *SrcLocStr;
1424 if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
1425 Loc.isInvalid()) {
1426 SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr();
1427 } else {
1428 std::string FunctionName = "";
1429 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
1430 FunctionName = FD->getQualifiedNameAsString();
1431 PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1432 const char *FileName = PLoc.getFilename();
1433 unsigned Line = PLoc.getLine();
1434 unsigned Column = PLoc.getColumn();
1435 SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName.c_str(), FileName,
1436 Line, Column);
1437 }
1438 unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
1439 return OMPBuilder.getOrCreateIdent(SrcLocStr, llvm::omp::IdentFlag(Flags),
1440 Reserved2Flags);
1441}
1442
/// Return the OpenMP thread id (kmp_int32) for the current function,
/// emitting a __kmpc_global_thread_num call only when it cannot be obtained
/// more cheaply (from a cached load or an outlined region's thread-id
/// argument).
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
                                          SourceLocation Loc) {
  assert(CGF.CurFn && "No function in current CodeGenFunction.");
  // If the OpenMPIRBuilder is used we need to use it for all thread id calls as
  // the clang invariants used below might be broken.
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    SmallString<128> Buffer;
    OMPBuilder.updateToLocation(CGF.Builder.saveIP());
    auto *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(
        getIdentStringFromSourceLocation(CGF, Loc, Buffer));
    return OMPBuilder.getOrCreateThreadID(
        OMPBuilder.getOrCreateIdent(SrcLocStr));
  }

  llvm::Value *ThreadID = nullptr;
  // Check whether we've already cached a load of the thread id in this
  // function.
  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
  if (I != OpenMPLocThreadIDMap.end()) {
    ThreadID = I->second.ThreadID;
    if (ThreadID != nullptr)
      return ThreadID;
  }
  // If exceptions are enabled, do not use parameter to avoid possible crash.
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    if (OMPRegionInfo->getThreadIDVariable()) {
      // Check if this an outlined function with thread id passed as argument.
      LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
      llvm::BasicBlock *TopBlock = CGF.AllocaInsertPt->getParent();
      // Only reuse the argument when doing so cannot interact badly with EH:
      // either no landing pads / exceptions are involved, or the load happens
      // in the entry block (or the same block as the address computation).
      if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
          !CGF.getLangOpts().CXXExceptions ||
          CGF.Builder.GetInsertBlock() == TopBlock ||
          !isa<llvm::Instruction>(LVal.getPointer(CGF)) ||
          cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
              TopBlock ||
          cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
              CGF.Builder.GetInsertBlock()) {
        ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
        // If value loaded in entry block, cache it and use it everywhere in
        // function.
        if (CGF.Builder.GetInsertBlock() == TopBlock) {
          auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
          Elem.second.ThreadID = ThreadID;
        }
        return ThreadID;
      }
    }
  }

  // This is not an outlined function region - need to call __kmpc_int32
  // kmpc_global_thread_num(ident_t *loc).
  // Generate thread id value and cache this value for use across the
  // function.
  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
  if (!Elem.second.ServiceInsertPt)
    setLocThreadIdInsertPt(CGF);
  // Emit the runtime call at the service insert point so it dominates all
  // uses, then restore the builder's previous position.
  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
  CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
  llvm::CallInst *Call = CGF.Builder.CreateCall(
      OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
                                            OMPRTL___kmpc_global_thread_num),
      emitUpdateLocation(CGF, Loc));
  Call->setCallingConv(CGF.getRuntimeCC());
  Elem.second.ThreadID = Call;
  return Call;
}
1510
1511void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
1512 assert(CGF.CurFn && "No function in current CodeGenFunction.")((CGF.CurFn && "No function in current CodeGenFunction."
) ? static_cast<void> (0) : __assert_fail ("CGF.CurFn && \"No function in current CodeGenFunction.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1512, __PRETTY_FUNCTION__))
;
1513 if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
1514 clearLocThreadIdInsertPt(CGF);
1515 OpenMPLocThreadIDMap.erase(CGF.CurFn);
1516 }
1517 if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1518 for(const auto *D : FunctionUDRMap[CGF.CurFn])
1519 UDRMap.erase(D);
1520 FunctionUDRMap.erase(CGF.CurFn);
1521 }
1522 auto I = FunctionUDMMap.find(CGF.CurFn);
1523 if (I != FunctionUDMMap.end()) {
1524 for(const auto *D : I->second)
1525 UDMMap.erase(D);
1526 FunctionUDMMap.erase(I);
1527 }
1528 LastprivateConditionalToTypes.erase(CGF.CurFn);
1529 FunctionToUntiedTaskStackMap.erase(CGF.CurFn);
1530}
1531
/// Return the ident_t* type; the type itself is owned by the shared
/// OpenMPIRBuilder.
llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
  return OMPBuilder.IdentPtr;
}
1535
1536llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
1537 if (!Kmpc_MicroTy) {
1538 // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1539 llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1540 llvm::PointerType::getUnqual(CGM.Int32Ty)};
1541 Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1542 }
1543 return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1544}
1545
1546llvm::FunctionCallee
1547CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned) {
1548 assert((IVSize == 32 || IVSize == 64) &&(((IVSize == 32 || IVSize == 64) && "IV size is not compatible with the omp runtime"
) ? static_cast<void> (0) : __assert_fail ("(IVSize == 32 || IVSize == 64) && \"IV size is not compatible with the omp runtime\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1549, __PRETTY_FUNCTION__))
1549 "IV size is not compatible with the omp runtime")(((IVSize == 32 || IVSize == 64) && "IV size is not compatible with the omp runtime"
) ? static_cast<void> (0) : __assert_fail ("(IVSize == 32 || IVSize == 64) && \"IV size is not compatible with the omp runtime\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1549, __PRETTY_FUNCTION__))
;
1550 StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
1551 : "__kmpc_for_static_init_4u")
1552 : (IVSigned ? "__kmpc_for_static_init_8"
1553 : "__kmpc_for_static_init_8u");
1554 llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
1555 auto *PtrTy = llvm::PointerType::getUnqual(ITy);
1556 llvm::Type *TypeParams[] = {
1557 getIdentTyPointerTy(), // loc
1558 CGM.Int32Ty, // tid
1559 CGM.Int32Ty, // schedtype
1560 llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
1561 PtrTy, // p_lower
1562 PtrTy, // p_upper
1563 PtrTy, // p_stride
1564 ITy, // incr
1565 ITy // chunk
1566 };
1567 auto *FnTy =
1568 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1569 return CGM.CreateRuntimeFunction(FnTy, Name);
1570}
1571
1572llvm::FunctionCallee
1573CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, bool IVSigned) {
1574 assert((IVSize == 32 || IVSize == 64) &&(((IVSize == 32 || IVSize == 64) && "IV size is not compatible with the omp runtime"
) ? static_cast<void> (0) : __assert_fail ("(IVSize == 32 || IVSize == 64) && \"IV size is not compatible with the omp runtime\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1575, __PRETTY_FUNCTION__))
1575 "IV size is not compatible with the omp runtime")(((IVSize == 32 || IVSize == 64) && "IV size is not compatible with the omp runtime"
) ? static_cast<void> (0) : __assert_fail ("(IVSize == 32 || IVSize == 64) && \"IV size is not compatible with the omp runtime\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1575, __PRETTY_FUNCTION__))
;
1576 StringRef Name =
1577 IVSize == 32
1578 ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
1579 : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
1580 llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
1581 llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
1582 CGM.Int32Ty, // tid
1583 CGM.Int32Ty, // schedtype
1584 ITy, // lower
1585 ITy, // upper
1586 ITy, // stride
1587 ITy // chunk
1588 };
1589 auto *FnTy =
1590 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1591 return CGM.CreateRuntimeFunction(FnTy, Name);
1592}
1593
1594llvm::FunctionCallee
1595CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, bool IVSigned) {
1596 assert((IVSize == 32 || IVSize == 64) &&(((IVSize == 32 || IVSize == 64) && "IV size is not compatible with the omp runtime"
) ? static_cast<void> (0) : __assert_fail ("(IVSize == 32 || IVSize == 64) && \"IV size is not compatible with the omp runtime\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1597, __PRETTY_FUNCTION__))
1597 "IV size is not compatible with the omp runtime")(((IVSize == 32 || IVSize == 64) && "IV size is not compatible with the omp runtime"
) ? static_cast<void> (0) : __assert_fail ("(IVSize == 32 || IVSize == 64) && \"IV size is not compatible with the omp runtime\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1597, __PRETTY_FUNCTION__))
;
1598 StringRef Name =
1599 IVSize == 32
1600 ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
1601 : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
1602 llvm::Type *TypeParams[] = {
1603 getIdentTyPointerTy(), // loc
1604 CGM.Int32Ty, // tid
1605 };
1606 auto *FnTy =
1607 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1608 return CGM.CreateRuntimeFunction(FnTy, Name);
1609}
1610
1611llvm::FunctionCallee
1612CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) {
1613 assert((IVSize == 32 || IVSize == 64) &&(((IVSize == 32 || IVSize == 64) && "IV size is not compatible with the omp runtime"
) ? static_cast<void> (0) : __assert_fail ("(IVSize == 32 || IVSize == 64) && \"IV size is not compatible with the omp runtime\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1614, __PRETTY_FUNCTION__))
1614 "IV size is not compatible with the omp runtime")(((IVSize == 32 || IVSize == 64) && "IV size is not compatible with the omp runtime"
) ? static_cast<void> (0) : __assert_fail ("(IVSize == 32 || IVSize == 64) && \"IV size is not compatible with the omp runtime\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1614, __PRETTY_FUNCTION__))
;
1615 StringRef Name =
1616 IVSize == 32
1617 ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
1618 : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
1619 llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
1620 auto *PtrTy = llvm::PointerType::getUnqual(ITy);
1621 llvm::Type *TypeParams[] = {
1622 getIdentTyPointerTy(), // loc
1623 CGM.Int32Ty, // tid
1624 llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
1625 PtrTy, // p_lower
1626 PtrTy, // p_upper
1627 PtrTy // p_stride
1628 };
1629 auto *FnTy =
1630 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1631 return CGM.CreateRuntimeFunction(FnTy, Name);
1632}
1633
/// Obtain information that uniquely identifies a target entry. This
/// consists of the file and device IDs as well as line number associated with
/// the relevant entry source location.
static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
                                     unsigned &DeviceID, unsigned &FileID,
                                     unsigned &LineNum) {
  SourceManager &SM = C.getSourceManager();

  // The loc should be always valid and have a file ID (the user cannot use
  // #pragma directives in macros)

  assert(Loc.isValid() && "Source location is expected to be always valid.");

  PresumedLoc PLoc = SM.getPresumedLoc(Loc);
  assert(PLoc.isValid() && "Source location is expected to be always valid.");

  // Derive the device/file component of the unique id from the filesystem
  // identity of the presumed file; failure to stat it is diagnosed but the
  // (zero-initialized) ID is still used.
  llvm::sys::fs::UniqueID ID;
  if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
    SM.getDiagnostics().Report(diag::err_cannot_open_file)
        << PLoc.getFilename() << EC.message();

  DeviceID = ID.getDevice();
  FileID = ID.getFile();
  LineNum = PLoc.getLine();
}
1659
/// For a "declare target link" variable (or a "to" variable under unified
/// shared memory), return the address of the generated reference pointer
/// through which the device accesses the host variable.  Returns an invalid
/// address when no such indirection applies.
Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
  // In simd-only mode no offloading machinery is generated.
  if (CGM.getLangOpts().OpenMPSimd)
    return Address::invalid();
  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link ||
              (*Res == OMPDeclareTargetDeclAttr::MT_To &&
               HasRequiresUnifiedSharedMemory))) {
    // Build the name of the reference pointer: mangled name, plus a
    // file-unique suffix for internal-linkage variables, plus
    // "_decl_tgt_ref_ptr".
    SmallString<64> PtrName;
    {
      llvm::raw_svector_ostream OS(PtrName);
      OS << CGM.getMangledName(GlobalDecl(VD));
      if (!VD->isExternallyVisible()) {
        unsigned DeviceID, FileID, Line;
        getTargetEntryUniqueInfo(CGM.getContext(),
                                 VD->getCanonicalDecl()->getBeginLoc(),
                                 DeviceID, FileID, Line);
        OS << llvm::format("_%x", FileID);
      }
      OS << "_decl_tgt_ref_ptr";
    }
    llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
    if (!Ptr) {
      // First use: create the pointer global and register it with the
      // offload-entry machinery.
      QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
      Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
                                        PtrName);

      auto *GV = cast<llvm::GlobalVariable>(Ptr);
      GV->setLinkage(llvm::GlobalValue::WeakAnyLinkage);

      // On the host the pointer is initialized to the variable's address; on
      // the device the runtime fills it in.
      if (!CGM.getLangOpts().OpenMPIsDevice)
        GV->setInitializer(CGM.GetAddrOfGlobal(VD));
      registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
    }
    return Address(Ptr, CGM.getContext().getDeclAlign(VD));
  }
  return Address::invalid();
}
1698
1699llvm::Constant *
1700CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
1701 assert(!CGM.getLangOpts().OpenMPUseTLS ||((!CGM.getLangOpts().OpenMPUseTLS || !CGM.getContext().getTargetInfo
().isTLSSupported()) ? static_cast<void> (0) : __assert_fail
("!CGM.getLangOpts().OpenMPUseTLS || !CGM.getContext().getTargetInfo().isTLSSupported()"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1702, __PRETTY_FUNCTION__))
1702 !CGM.getContext().getTargetInfo().isTLSSupported())((!CGM.getLangOpts().OpenMPUseTLS || !CGM.getContext().getTargetInfo
().isTLSSupported()) ? static_cast<void> (0) : __assert_fail
("!CGM.getLangOpts().OpenMPUseTLS || !CGM.getContext().getTargetInfo().isTLSSupported()"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 1702, __PRETTY_FUNCTION__))
;
1703 // Lookup the entry, lazily creating it if necessary.
1704 std::string Suffix = getName({"cache", ""});
1705 return getOrCreateInternalVariable(
1706 CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
1707}
1708
1709Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
1710 const VarDecl *VD,
1711 Address VDAddr,
1712 SourceLocation Loc) {
1713 if (CGM.getLangOpts().OpenMPUseTLS &&
1714 CGM.getContext().getTargetInfo().isTLSSupported())
1715 return VDAddr;
1716
1717 llvm::Type *VarTy = VDAddr.getElementType();
1718 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
1719 CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
1720 CGM.Int8PtrTy),
1721 CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
1722 getOrCreateThreadPrivateCache(VD)};
1723 return Address(CGF.EmitRuntimeCall(
1724 OMPBuilder.getOrCreateRuntimeFunction(
1725 CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
1726 Args),
1727 VDAddr.getAlignment());
1728}
1729
1730void CGOpenMPRuntime::emitThreadPrivateVarInit(
1731 CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
1732 llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
1733 // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
1734 // library.
1735 llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
1736 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1737 CGM.getModule(), OMPRTL___kmpc_global_thread_num),
1738 OMPLoc);
1739 // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
1740 // to register constructor/destructor for variable.
1741 llvm::Value *Args[] = {
1742 OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
1743 Ctor, CopyCtor, Dtor};
1744 CGF.EmitRuntimeCall(
1745 OMPBuilder.getOrCreateRuntimeFunction(
1746 CGM.getModule(), OMPRTL___kmpc_threadprivate_register),
1747 Args);
1748}
1749
/// Emit (at most once per mangled name) the ctor/dtor machinery for a
/// threadprivate variable definition and register it with the runtime.
/// Returns the standalone global initializer function when \p CGF is null
/// (so the caller can add it to the global init list), nullptr otherwise.
llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
    const VarDecl *VD, Address VDAddr, SourceLocation Loc,
    bool PerformInit, CodeGenFunction *CGF) {
  // With native TLS support no runtime registration is needed.
  if (CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getContext().getTargetInfo().isTLSSupported())
    return nullptr;

  VD = VD->getDefinition(CGM.getContext());
  // Only emit once per variable (insert().second is false on repeats).
  if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
    QualType ASTTy = VD->getType();

    llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
    const Expr *Init = VD->getAnyInitializer();
    if (CGM.getLangOpts().CPlusPlus && PerformInit) {
      // Generate function that re-emits the declaration's initializer into the
      // threadprivate copy of the variable VD
      CodeGenFunction CtorCGF(CGM);
      FunctionArgList Args;
      // The ctor takes the address of the threadprivate copy as a void*.
      ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
                            /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
      Args.push_back(&Dst);

      const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
          CGM.getContext().VoidPtrTy, Args);
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      std::string Name = getName({"__kmpc_global_ctor_", ""});
      llvm::Function *Fn =
          CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
      CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
                            Args, Loc, Loc);
      llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
          CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      Address Arg = Address(ArgVal, VDAddr.getAlignment());
      Arg = CtorCGF.Builder.CreateElementBitCast(
          Arg, CtorCGF.ConvertTypeForMem(ASTTy));
      CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
                               /*IsInitializer=*/true);
      // The runtime expects the ctor to return the (same) address it was
      // given, so reload the argument and return it.
      ArgVal = CtorCGF.EmitLoadOfScalar(
          CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
          CGM.getContext().VoidPtrTy, Dst.getLocation());
      CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
      CtorCGF.FinishFunction();
      Ctor = Fn;
    }
    if (VD->getType().isDestructedType() != QualType::DK_none) {
      // Generate function that emits destructor call for the threadprivate copy
      // of the variable VD
      CodeGenFunction DtorCGF(CGM);
      FunctionArgList Args;
      ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
                            /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
      Args.push_back(&Dst);

      const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
          CGM.getContext().VoidTy, Args);
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      std::string Name = getName({"__kmpc_global_dtor_", ""});
      llvm::Function *Fn =
          CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
      auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
      DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
                            Loc, Loc);
      // Create a scope with an artificial location for the body of this function.
      auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
      llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
          DtorCGF.GetAddrOfLocalVar(&Dst),
          /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
      DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
                          DtorCGF.getDestroyer(ASTTy.isDestructedType()),
                          DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
      DtorCGF.FinishFunction();
      Dtor = Fn;
    }
    // Do not emit init function if it is not required.
    if (!Ctor && !Dtor)
      return nullptr;

    llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
                                               /*isVarArg=*/false)
                           ->getPointerTo();
    // Copying constructor for the threadprivate variable.
    // Must be NULL - reserved by runtime, but currently it requires that this
    // parameter is always NULL. Otherwise it fires assertion.
    CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
    if (Ctor == nullptr) {
      // Runtime expects a typed null pointer when no ctor is needed.
      auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
                                             /*isVarArg=*/false)
                         ->getPointerTo();
      Ctor = llvm::Constant::getNullValue(CtorTy);
    }
    if (Dtor == nullptr) {
      // Likewise for the dtor slot.
      auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
                                             /*isVarArg=*/false)
                         ->getPointerTo();
      Dtor = llvm::Constant::getNullValue(DtorTy);
    }
    if (!CGF) {
      // No current function: synthesize a standalone global initializer that
      // performs the registration, and return it to the caller.
      auto *InitFunctionTy =
          llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
      std::string Name = getName({"__omp_threadprivate_init_", ""});
      llvm::Function *InitFunction = CGM.CreateGlobalInitOrCleanUpFunction(
          InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
      CodeGenFunction InitCGF(CGM);
      FunctionArgList ArgList;
      InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
                            CGM.getTypes().arrangeNullaryFunction(), ArgList,
                            Loc, Loc);
      emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
      InitCGF.FinishFunction();
      return InitFunction;
    }
    // Otherwise emit the registration inline in the current function.
    emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
  }
  return nullptr;
}
1869
/// Emit the device-side ctor/dtor offload entries for a "declare target"
/// variable definition.  Returns true when emitting for a device (telling
/// the caller the host definition must be suppressed), false otherwise.
bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
                                                     llvm::GlobalVariable *Addr,
                                                     bool PerformInit) {
  // Nothing to do without offload targets (host-only compilation).
  if (CGM.getLangOpts().OMPTargetTriples.empty() &&
      !CGM.getLangOpts().OpenMPIsDevice)
    return false;
  Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  // "link" variables (and "to" under unified shared memory) are accessed via
  // the reference pointer instead; no ctor/dtor entries here.
  if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
      (*Res == OMPDeclareTargetDeclAttr::MT_To &&
       HasRequiresUnifiedSharedMemory))
    return CGM.getLangOpts().OpenMPIsDevice;
  VD = VD->getDefinition(CGM.getContext());
  assert(VD && "Unknown VarDecl");

  // Only emit once per mangled name.
  if (!DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
    return CGM.getLangOpts().OpenMPIsDevice;

  QualType ASTTy = VD->getType();
  SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();

  // Produce the unique prefix to identify the new target regions. We use
  // the source location of the variable declaration which we know to not
  // conflict with any target region.
  unsigned DeviceID;
  unsigned FileID;
  unsigned Line;
  getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
  SmallString<128> Buffer, Out;
  {
    llvm::raw_svector_ostream OS(Buffer);
    OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
       << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
  }

  const Expr *Init = VD->getAnyInitializer();
  if (CGM.getLangOpts().CPlusPlus && PerformInit) {
    llvm::Constant *Ctor;
    llvm::Constant *ID;
    if (CGM.getLangOpts().OpenMPIsDevice) {
      // Generate function that re-emits the declaration's initializer into
      // the threadprivate copy of the variable VD
      CodeGenFunction CtorCGF(CGM);

      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
          FTy, Twine(Buffer, "_ctor"), FI, Loc);
      auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
      CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
                            FunctionArgList(), Loc, Loc);
      auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
      CtorCGF.EmitAnyExprToMem(Init,
                               Address(Addr, CGM.getContext().getDeclAlign(VD)),
                               Init->getType().getQualifiers(),
                               /*IsInitializer=*/true);
      CtorCGF.FinishFunction();
      Ctor = Fn;
      ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
      // Keep the ctor alive even without visible uses.
      CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
    } else {
      // On the host, only a placeholder global is needed to serve as the
      // unique entry ID matching the device-side ctor.
      Ctor = new llvm::GlobalVariable(
          CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
          llvm::GlobalValue::PrivateLinkage,
          llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
      ID = Ctor;
    }

    // Register the information for the entry associated with the constructor.
    Out.clear();
    OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
        DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
        ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
  }
  if (VD->getType().isDestructedType() != QualType::DK_none) {
    llvm::Constant *Dtor;
    llvm::Constant *ID;
    if (CGM.getLangOpts().OpenMPIsDevice) {
      // Generate function that emits destructor call for the threadprivate
      // copy of the variable VD
      CodeGenFunction DtorCGF(CGM);

      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
      llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
          FTy, Twine(Buffer, "_dtor"), FI, Loc);
      auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
      DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
                            FunctionArgList(), Loc, Loc);
      // Create a scope with an artificial location for the body of this
      // function.
      auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
      DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
                          ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
                          DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
      DtorCGF.FinishFunction();
      Dtor = Fn;
      ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
      CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
    } else {
      // Host-side placeholder serving as the matching entry ID.
      Dtor = new llvm::GlobalVariable(
          CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
          llvm::GlobalValue::PrivateLinkage,
          llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
      ID = Dtor;
    }
    // Register the information for the entry associated with the destructor.
    Out.clear();
    OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
        DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
        ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
  }
  return CGM.getLangOpts().OpenMPIsDevice;
}
1984
/// Return the address of an artificial (compiler-generated) threadprivate
/// variable named \p Name of type \p VarType.  Uses a real TLS global when
/// the target supports it, otherwise falls back to
/// __kmpc_threadprivate_cached.
Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
                                                          QualType VarType,
                                                          StringRef Name) {
  std::string Suffix = getName({"artificial", ""});
  llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
  llvm::Value *GAddr =
      getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
  if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getTarget().isTLSSupported()) {
    // Native TLS: mark the global thread-local and return it directly.
    cast<llvm::GlobalVariable>(GAddr)->setThreadLocal(/*Val=*/true);
    return Address(GAddr, CGM.getContext().getTypeAlignInChars(VarType));
  }
  // Runtime-managed path: query the per-thread copy via
  // __kmpc_threadprivate_cached, using a dedicated cache global.
  std::string CacheSuffix = getName({"cache", ""});
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, SourceLocation()),
      getThreadID(CGF, SourceLocation()),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
      CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
                                /*isSigned=*/false),
      getOrCreateInternalVariable(
          CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
  return Address(
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          CGF.EmitRuntimeCall(
              OMPBuilder.getOrCreateRuntimeFunction(
                  CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
              Args),
          VarLVType->getPointerTo(/*AddrSpace=*/0)),
      CGM.getContext().getTypeAlignInChars(VarType));
}
2015
2016void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
2017 const RegionCodeGenTy &ThenGen,
2018 const RegionCodeGenTy &ElseGen) {
2019 CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
2020
2021 // If the condition constant folds and can be elided, try to avoid emitting
2022 // the condition and the dead arm of the if/else.
2023 bool CondConstant;
2024 if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
9
Assuming the condition is true
10
Taking true branch
2025 if (CondConstant)
11
Assuming 'CondConstant' is true
12
Taking true branch
2026 ThenGen(CGF);
13
Calling 'RegionCodeGenTy::operator()'
2027 else
2028 ElseGen(CGF);
2029 return;
2030 }
2031
2032 // Otherwise, the condition did not fold, or we couldn't elide it. Just
2033 // emit the conditional branch.
2034 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
2035 llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
2036 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
2037 CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
2038
2039 // Emit the 'then' code.
2040 CGF.EmitBlock(ThenBlock);
2041 ThenGen(CGF);
2042 CGF.EmitBranch(ContBlock);
2043 // Emit the 'else' code if present.
2044 // There is no need to emit line number for unconditional branch.
2045 (void)ApplyDebugLocation::CreateEmpty(CGF);
2046 CGF.EmitBlock(ElseBlock);
2047 ElseGen(CGF);
2048 // There is no need to emit line number for unconditional branch.
2049 (void)ApplyDebugLocation::CreateEmpty(CGF);
2050 CGF.EmitBranch(ContBlock);
2051 // Emit the continuation block for code after the if.
2052 CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
2053}
2054
2055void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
2056 llvm::Function *OutlinedFn,
2057 ArrayRef<llvm::Value *> CapturedVars,
2058 const Expr *IfCond) {
2059 if (!CGF.HaveInsertPoint())
2060 return;
2061 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2062 auto &M = CGM.getModule();
2063 auto &&ThenGen = [&M, OutlinedFn, CapturedVars, RTLoc,
2064 this](CodeGenFunction &CGF, PrePostActionTy &) {
2065 // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
2066 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2067 llvm::Value *Args[] = {
2068 RTLoc,
2069 CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
2070 CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
2071 llvm::SmallVector<llvm::Value *, 16> RealArgs;
2072 RealArgs.append(std::begin(Args), std::end(Args));
2073 RealArgs.append(CapturedVars.begin(), CapturedVars.end());
2074
2075 llvm::FunctionCallee RTLFn =
2076 OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_fork_call);
2077 CGF.EmitRuntimeCall(RTLFn, RealArgs);
2078 };
2079 auto &&ElseGen = [&M, OutlinedFn, CapturedVars, RTLoc, Loc,
2080 this](CodeGenFunction &CGF, PrePostActionTy &) {
2081 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2082 llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
2083 // Build calls:
2084 // __kmpc_serialized_parallel(&Loc, GTid);
2085 llvm::Value *Args[] = {RTLoc, ThreadID};
2086 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2087 M, OMPRTL___kmpc_serialized_parallel),
2088 Args);
2089
2090 // OutlinedFn(&GTid, &zero_bound, CapturedStruct);
2091 Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
2092 Address ZeroAddrBound =
2093 CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2094 /*Name=*/".bound.zero.addr");
2095 CGF.InitTempAlloca(ZeroAddrBound, CGF.Builder.getInt32(/*C*/ 0));
2096 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2097 // ThreadId for serialized parallels is 0.
2098 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2099 OutlinedFnArgs.push_back(ZeroAddrBound.getPointer());
2100 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2101 RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2102
2103 // __kmpc_end_serialized_parallel(&Loc, GTid);
2104 llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
2105 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2106 M, OMPRTL___kmpc_end_serialized_parallel),
2107 EndArgs);
2108 };
2109 if (IfCond) {
2110 emitIfClause(CGF, IfCond, ThenGen, ElseGen);
2111 } else {
2112 RegionCodeGenTy ThenRCG(ThenGen);
2113 ThenRCG(CGF);
2114 }
2115}
2116
2117// If we're inside an (outlined) parallel region, use the region info's
2118// thread-ID variable (it is passed in a first argument of the outlined function
2119// as "kmp_int32 *gtid"). Otherwise, if we're not inside parallel region, but in
2120// regular serial code region, get thread ID by calling kmp_int32
2121// kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and
2122// return the address of that temp.
2123Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
2124 SourceLocation Loc) {
2125 if (auto *OMPRegionInfo =
2126 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2127 if (OMPRegionInfo->getThreadIDVariable())
2128 return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(CGF);
2129
2130 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2131 QualType Int32Ty =
2132 CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
2133 Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
2134 CGF.EmitStoreOfScalar(ThreadID,
2135 CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
2136
2137 return ThreadIDTemp;
2138}
2139
2140llvm::Constant *CGOpenMPRuntime::getOrCreateInternalVariable(
2141 llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
2142 SmallString<256> Buffer;
2143 llvm::raw_svector_ostream Out(Buffer);
2144 Out << Name;
2145 StringRef RuntimeName = Out.str();
2146 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
2147 if (Elem.second) {
2148 assert(Elem.second->getType()->getPointerElementType() == Ty &&((Elem.second->getType()->getPointerElementType() == Ty
&& "OMP internal variable has different type than requested"
) ? static_cast<void> (0) : __assert_fail ("Elem.second->getType()->getPointerElementType() == Ty && \"OMP internal variable has different type than requested\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 2149, __PRETTY_FUNCTION__))
2149 "OMP internal variable has different type than requested")((Elem.second->getType()->getPointerElementType() == Ty
&& "OMP internal variable has different type than requested"
) ? static_cast<void> (0) : __assert_fail ("Elem.second->getType()->getPointerElementType() == Ty && \"OMP internal variable has different type than requested\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 2149, __PRETTY_FUNCTION__))
;
2150 return &*Elem.second;
2151 }
2152
2153 return Elem.second = new llvm::GlobalVariable(
2154 CGM.getModule(), Ty, /*IsConstant*/ false,
2155 llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
2156 Elem.first(), /*InsertBefore=*/nullptr,
2157 llvm::GlobalValue::NotThreadLocal, AddressSpace);
2158}
2159
2160llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
2161 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
2162 std::string Name = getName({Prefix, "var"});
2163 return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
2164}
2165
2166namespace {
2167/// Common pre(post)-action for different OpenMP constructs.
2168class CommonActionTy final : public PrePostActionTy {
2169 llvm::FunctionCallee EnterCallee;
2170 ArrayRef<llvm::Value *> EnterArgs;
2171 llvm::FunctionCallee ExitCallee;
2172 ArrayRef<llvm::Value *> ExitArgs;
2173 bool Conditional;
2174 llvm::BasicBlock *ContBlock = nullptr;
2175
2176public:
2177 CommonActionTy(llvm::FunctionCallee EnterCallee,
2178 ArrayRef<llvm::Value *> EnterArgs,
2179 llvm::FunctionCallee ExitCallee,
2180 ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
2181 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
2182 ExitArgs(ExitArgs), Conditional(Conditional) {}
2183 void Enter(CodeGenFunction &CGF) override {
2184 llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
2185 if (Conditional) {
2186 llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
2187 auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
2188 ContBlock = CGF.createBasicBlock("omp_if.end");
2189 // Generate the branch (If-stmt)
2190 CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
2191 CGF.EmitBlock(ThenBlock);
2192 }
2193 }
2194 void Done(CodeGenFunction &CGF) {
2195 // Emit the rest of blocks/branches
2196 CGF.EmitBranch(ContBlock);
2197 CGF.EmitBlock(ContBlock, true);
2198 }
2199 void Exit(CodeGenFunction &CGF) override {
2200 CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
2201 }
2202};
2203} // anonymous namespace
2204
2205void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
2206 StringRef CriticalName,
2207 const RegionCodeGenTy &CriticalOpGen,
2208 SourceLocation Loc, const Expr *Hint) {
2209 // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
2210 // CriticalOpGen();
2211 // __kmpc_end_critical(ident_t *, gtid, Lock);
2212 // Prepare arguments and build a call to __kmpc_critical
2213 if (!CGF.HaveInsertPoint())
2214 return;
2215 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2216 getCriticalRegionLock(CriticalName)};
2217 llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
2218 std::end(Args));
2219 if (Hint) {
2220 EnterArgs.push_back(CGF.Builder.CreateIntCast(
2221 CGF.EmitScalarExpr(Hint), CGM.Int32Ty, /*isSigned=*/false));
2222 }
2223 CommonActionTy Action(
2224 OMPBuilder.getOrCreateRuntimeFunction(
2225 CGM.getModule(),
2226 Hint ? OMPRTL___kmpc_critical_with_hint : OMPRTL___kmpc_critical),
2227 EnterArgs,
2228 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
2229 OMPRTL___kmpc_end_critical),
2230 Args);
2231 CriticalOpGen.setAction(Action);
2232 emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
2233}
2234
2235void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
2236 const RegionCodeGenTy &MasterOpGen,
2237 SourceLocation Loc) {
2238 if (!CGF.HaveInsertPoint())
2239 return;
2240 // if(__kmpc_master(ident_t *, gtid)) {
2241 // MasterOpGen();
2242 // __kmpc_end_master(ident_t *, gtid);
2243 // }
2244 // Prepare arguments and build a call to __kmpc_master
2245 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2246 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2247 CGM.getModule(), OMPRTL___kmpc_master),
2248 Args,
2249 OMPBuilder.getOrCreateRuntimeFunction(
2250 CGM.getModule(), OMPRTL___kmpc_end_master),
2251 Args,
2252 /*Conditional=*/true);
2253 MasterOpGen.setAction(Action);
2254 emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
2255 Action.Done(CGF);
2256}
2257
2258void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
2259 SourceLocation Loc) {
2260 if (!CGF.HaveInsertPoint())
2261 return;
2262 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
2263 OMPBuilder.CreateTaskyield(CGF.Builder);
2264 } else {
2265 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
2266 llvm::Value *Args[] = {
2267 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2268 llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
2269 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2270 CGM.getModule(), OMPRTL___kmpc_omp_taskyield),
2271 Args);
2272 }
2273
2274 if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2275 Region->emitUntiedSwitch(CGF);
2276}
2277
2278void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
2279 const RegionCodeGenTy &TaskgroupOpGen,
2280 SourceLocation Loc) {
2281 if (!CGF.HaveInsertPoint())
2282 return;
2283 // __kmpc_taskgroup(ident_t *, gtid);
2284 // TaskgroupOpGen();
2285 // __kmpc_end_taskgroup(ident_t *, gtid);
2286 // Prepare arguments and build a call to __kmpc_taskgroup
2287 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2288 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2289 CGM.getModule(), OMPRTL___kmpc_taskgroup),
2290 Args,
2291 OMPBuilder.getOrCreateRuntimeFunction(
2292 CGM.getModule(), OMPRTL___kmpc_end_taskgroup),
2293 Args);
2294 TaskgroupOpGen.setAction(Action);
2295 emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
2296}
2297
2298/// Given an array of pointers to variables, project the address of a
2299/// given variable.
2300static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
2301 unsigned Index, const VarDecl *Var) {
2302 // Pull out the pointer to the variable.
2303 Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
2304 llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
2305
2306 Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
2307 Addr = CGF.Builder.CreateElementBitCast(
2308 Addr, CGF.ConvertTypeForMem(Var->getType()));
2309 return Addr;
2310}
2311
2312static llvm::Value *emitCopyprivateCopyFunction(
2313 CodeGenModule &CGM, llvm::Type *ArgsType,
2314 ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
2315 ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
2316 SourceLocation Loc) {
2317 ASTContext &C = CGM.getContext();
2318 // void copy_func(void *LHSArg, void *RHSArg);
2319 FunctionArgList Args;
2320 ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
2321 ImplicitParamDecl::Other);
2322 ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
2323 ImplicitParamDecl::Other);
2324 Args.push_back(&LHSArg);
2325 Args.push_back(&RHSArg);
2326 const auto &CGFI =
2327 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2328 std::string Name =
2329 CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
2330 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
2331 llvm::GlobalValue::InternalLinkage, Name,
2332 &CGM.getModule());
2333 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2334 Fn->setDoesNotRecurse();
2335 CodeGenFunction CGF(CGM);
2336 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2337 // Dest = (void*[n])(LHSArg);
2338 // Src = (void*[n])(RHSArg);
2339 Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2340 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
2341 ArgsType), CGF.getPointerAlign());
2342 Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2343 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
2344 ArgsType), CGF.getPointerAlign());
2345 // *(Type0*)Dst[0] = *(Type0*)Src[0];
2346 // *(Type1*)Dst[1] = *(Type1*)Src[1];
2347 // ...
2348 // *(Typen*)Dst[n] = *(Typen*)Src[n];
2349 for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
2350 const auto *DestVar =
2351 cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
2352 Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
2353
2354 const auto *SrcVar =
2355 cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
2356 Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
2357
2358 const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
2359 QualType Type = VD->getType();
2360 CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
2361 }
2362 CGF.FinishFunction();
2363 return Fn;
2364}
2365
2366void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
2367 const RegionCodeGenTy &SingleOpGen,
2368 SourceLocation Loc,
2369 ArrayRef<const Expr *> CopyprivateVars,
2370 ArrayRef<const Expr *> SrcExprs,
2371 ArrayRef<const Expr *> DstExprs,
2372 ArrayRef<const Expr *> AssignmentOps) {
2373 if (!CGF.HaveInsertPoint())
2374 return;
2375 assert(CopyprivateVars.size() == SrcExprs.size() &&((CopyprivateVars.size() == SrcExprs.size() && CopyprivateVars
.size() == DstExprs.size() && CopyprivateVars.size() ==
AssignmentOps.size()) ? static_cast<void> (0) : __assert_fail
("CopyprivateVars.size() == SrcExprs.size() && CopyprivateVars.size() == DstExprs.size() && CopyprivateVars.size() == AssignmentOps.size()"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 2377, __PRETTY_FUNCTION__))
2376 CopyprivateVars.size() == DstExprs.size() &&((CopyprivateVars.size() == SrcExprs.size() && CopyprivateVars
.size() == DstExprs.size() && CopyprivateVars.size() ==
AssignmentOps.size()) ? static_cast<void> (0) : __assert_fail
("CopyprivateVars.size() == SrcExprs.size() && CopyprivateVars.size() == DstExprs.size() && CopyprivateVars.size() == AssignmentOps.size()"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 2377, __PRETTY_FUNCTION__))
2377 CopyprivateVars.size() == AssignmentOps.size())((CopyprivateVars.size() == SrcExprs.size() && CopyprivateVars
.size() == DstExprs.size() && CopyprivateVars.size() ==
AssignmentOps.size()) ? static_cast<void> (0) : __assert_fail
("CopyprivateVars.size() == SrcExprs.size() && CopyprivateVars.size() == DstExprs.size() && CopyprivateVars.size() == AssignmentOps.size()"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 2377, __PRETTY_FUNCTION__))
;
2378 ASTContext &C = CGM.getContext();
2379 // int32 did_it = 0;
2380 // if(__kmpc_single(ident_t *, gtid)) {
2381 // SingleOpGen();
2382 // __kmpc_end_single(ident_t *, gtid);
2383 // did_it = 1;
2384 // }
2385 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2386 // <copy_func>, did_it);
2387
2388 Address DidIt = Address::invalid();
2389 if (!CopyprivateVars.empty()) {
2390 // int32 did_it = 0;
2391 QualType KmpInt32Ty =
2392 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2393 DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
2394 CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
2395 }
2396 // Prepare arguments and build a call to __kmpc_single
2397 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2398 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2399 CGM.getModule(), OMPRTL___kmpc_single),
2400 Args,
2401 OMPBuilder.getOrCreateRuntimeFunction(
2402 CGM.getModule(), OMPRTL___kmpc_end_single),
2403 Args,
2404 /*Conditional=*/true);
2405 SingleOpGen.setAction(Action);
2406 emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
2407 if (DidIt.isValid()) {
2408 // did_it = 1;
2409 CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
2410 }
2411 Action.Done(CGF);
2412 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2413 // <copy_func>, did_it);
2414 if (DidIt.isValid()) {
2415 llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
2416 QualType CopyprivateArrayTy = C.getConstantArrayType(
2417 C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
2418 /*IndexTypeQuals=*/0);
2419 // Create a list of all private variables for copyprivate.
2420 Address CopyprivateList =
2421 CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
2422 for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
2423 Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I);
2424 CGF.Builder.CreateStore(
2425 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2426 CGF.EmitLValue(CopyprivateVars[I]).getPointer(CGF),
2427 CGF.VoidPtrTy),
2428 Elem);
2429 }
2430 // Build function that copies private values from single region to all other
2431 // threads in the corresponding parallel region.
2432 llvm::Value *CpyFn = emitCopyprivateCopyFunction(
2433 CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
2434 CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
2435 llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
2436 Address CL =
2437 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
2438 CGF.VoidPtrTy);
2439 llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
2440 llvm::Value *Args[] = {
2441 emitUpdateLocation(CGF, Loc), // ident_t *<loc>
2442 getThreadID(CGF, Loc), // i32 <gtid>
2443 BufSize, // size_t <buf_size>
2444 CL.getPointer(), // void *<copyprivate list>
2445 CpyFn, // void (*) (void *, void *) <copy_func>
2446 DidItVal // i32 did_it
2447 };
2448 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2449 CGM.getModule(), OMPRTL___kmpc_copyprivate),
2450 Args);
2451 }
2452}
2453
2454void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
2455 const RegionCodeGenTy &OrderedOpGen,
2456 SourceLocation Loc, bool IsThreads) {
2457 if (!CGF.HaveInsertPoint())
2458 return;
2459 // __kmpc_ordered(ident_t *, gtid);
2460 // OrderedOpGen();
2461 // __kmpc_end_ordered(ident_t *, gtid);
2462 // Prepare arguments and build a call to __kmpc_ordered
2463 if (IsThreads) {
2464 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2465 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2466 CGM.getModule(), OMPRTL___kmpc_ordered),
2467 Args,
2468 OMPBuilder.getOrCreateRuntimeFunction(
2469 CGM.getModule(), OMPRTL___kmpc_end_ordered),
2470 Args);
2471 OrderedOpGen.setAction(Action);
2472 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2473 return;
2474 }
2475 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2476}
2477
2478unsigned CGOpenMPRuntime::getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind) {
2479 unsigned Flags;
2480 if (Kind == OMPD_for)
2481 Flags = OMP_IDENT_BARRIER_IMPL_FOR;
2482 else if (Kind == OMPD_sections)
2483 Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
2484 else if (Kind == OMPD_single)
2485 Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
2486 else if (Kind == OMPD_barrier)
2487 Flags = OMP_IDENT_BARRIER_EXPL;
2488 else
2489 Flags = OMP_IDENT_BARRIER_IMPL;
2490 return Flags;
2491}
2492
2493void CGOpenMPRuntime::getDefaultScheduleAndChunk(
2494 CodeGenFunction &CGF, const OMPLoopDirective &S,
2495 OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const {
2496 // Check if the loop directive is actually a doacross loop directive. In this
2497 // case choose static, 1 schedule.
2498 if (llvm::any_of(
2499 S.getClausesOfKind<OMPOrderedClause>(),
2500 [](const OMPOrderedClause *C) { return C->getNumForLoops(); })) {
2501 ScheduleKind = OMPC_SCHEDULE_static;
2502 // Chunk size is 1 in this case.
2503 llvm::APInt ChunkSize(32, 1);
2504 ChunkExpr = IntegerLiteral::Create(
2505 CGF.getContext(), ChunkSize,
2506 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
2507 SourceLocation());
2508 }
2509}
2510
2511void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
2512 OpenMPDirectiveKind Kind, bool EmitChecks,
2513 bool ForceSimpleCall) {
2514 // Check if we should use the OMPBuilder
2515 auto *OMPRegionInfo =
2516 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
2517 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
2518 CGF.Builder.restoreIP(OMPBuilder.CreateBarrier(
2519 CGF.Builder, Kind, ForceSimpleCall, EmitChecks));
2520 return;
2521 }
2522
2523 if (!CGF.HaveInsertPoint())
2524 return;
2525 // Build call __kmpc_cancel_barrier(loc, thread_id);
2526 // Build call __kmpc_barrier(loc, thread_id);
2527 unsigned Flags = getDefaultFlagsForBarriers(Kind);
2528 // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
2529 // thread_id);
2530 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2531 getThreadID(CGF, Loc)};
2532 if (OMPRegionInfo) {
2533 if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
2534 llvm::Value *Result = CGF.EmitRuntimeCall(
2535 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
2536 OMPRTL___kmpc_cancel_barrier),
2537 Args);
2538 if (EmitChecks) {
2539 // if (__kmpc_cancel_barrier()) {
2540 // exit from construct;
2541 // }
2542 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
2543 llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
2544 llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
2545 CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
2546 CGF.EmitBlock(ExitBB);
2547 // exit from construct;
2548 CodeGenFunction::JumpDest CancelDestination =
2549 CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
2550 CGF.EmitBranchThroughCleanup(CancelDestination);
2551 CGF.EmitBlock(ContBB, /*IsFinished=*/true);
2552 }
2553 return;
2554 }
2555 }
2556 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2557 CGM.getModule(), OMPRTL___kmpc_barrier),
2558 Args);
2559}
2560
2561/// Map the OpenMP loop schedule to the runtime enumeration.
2562static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
2563 bool Chunked, bool Ordered) {
2564 switch (ScheduleKind) {
2565 case OMPC_SCHEDULE_static:
2566 return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
2567 : (Ordered ? OMP_ord_static : OMP_sch_static);
2568 case OMPC_SCHEDULE_dynamic:
2569 return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
2570 case OMPC_SCHEDULE_guided:
2571 return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
2572 case OMPC_SCHEDULE_runtime:
2573 return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
2574 case OMPC_SCHEDULE_auto:
2575 return Ordered ? OMP_ord_auto : OMP_sch_auto;
2576 case OMPC_SCHEDULE_unknown:
2577 assert(!Chunked && "chunk was specified but schedule kind not known")((!Chunked && "chunk was specified but schedule kind not known"
) ? static_cast<void> (0) : __assert_fail ("!Chunked && \"chunk was specified but schedule kind not known\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 2577, __PRETTY_FUNCTION__))
;
2578 return Ordered ? OMP_ord_static : OMP_sch_static;
2579 }
2580 llvm_unreachable("Unexpected runtime schedule")::llvm::llvm_unreachable_internal("Unexpected runtime schedule"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 2580)
;
2581}
2582
2583/// Map the OpenMP distribute schedule to the runtime enumeration.
2584static OpenMPSchedType
2585getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
2586 // only static is allowed for dist_schedule
2587 return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
2588}
2589
2590bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
2591 bool Chunked) const {
2592 OpenMPSchedType Schedule =
2593 getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
2594 return Schedule == OMP_sch_static;
2595}
2596
2597bool CGOpenMPRuntime::isStaticNonchunked(
2598 OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
2599 OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
2600 return Schedule == OMP_dist_sch_static;
2601}
2602
2603bool CGOpenMPRuntime::isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
2604 bool Chunked) const {
2605 OpenMPSchedType Schedule =
2606 getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
2607 return Schedule == OMP_sch_static_chunked;
2608}
2609
2610bool CGOpenMPRuntime::isStaticChunked(
2611 OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
2612 OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
2613 return Schedule == OMP_dist_sch_static_chunked;
2614}
2615
2616bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
2617 OpenMPSchedType Schedule =
2618 getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
2619 assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here")((Schedule != OMP_sch_static_chunked && "cannot be chunked here"
) ? static_cast<void> (0) : __assert_fail ("Schedule != OMP_sch_static_chunked && \"cannot be chunked here\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 2619, __PRETTY_FUNCTION__))
;
2620 return Schedule != OMP_sch_static;
2621}
2622
2623static int addMonoNonMonoModifier(CodeGenModule &CGM, OpenMPSchedType Schedule,
2624 OpenMPScheduleClauseModifier M1,
2625 OpenMPScheduleClauseModifier M2) {
2626 int Modifier = 0;
2627 switch (M1) {
2628 case OMPC_SCHEDULE_MODIFIER_monotonic:
2629 Modifier = OMP_sch_modifier_monotonic;
2630 break;
2631 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
2632 Modifier = OMP_sch_modifier_nonmonotonic;
2633 break;
2634 case OMPC_SCHEDULE_MODIFIER_simd:
2635 if (Schedule == OMP_sch_static_chunked)
2636 Schedule = OMP_sch_static_balanced_chunked;
2637 break;
2638 case OMPC_SCHEDULE_MODIFIER_last:
2639 case OMPC_SCHEDULE_MODIFIER_unknown:
2640 break;
2641 }
2642 switch (M2) {
2643 case OMPC_SCHEDULE_MODIFIER_monotonic:
2644 Modifier = OMP_sch_modifier_monotonic;
2645 break;
2646 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
2647 Modifier = OMP_sch_modifier_nonmonotonic;
2648 break;
2649 case OMPC_SCHEDULE_MODIFIER_simd:
2650 if (Schedule == OMP_sch_static_chunked)
2651 Schedule = OMP_sch_static_balanced_chunked;
2652 break;
2653 case OMPC_SCHEDULE_MODIFIER_last:
2654 case OMPC_SCHEDULE_MODIFIER_unknown:
2655 break;
2656 }
2657 // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Desription.
2658 // If the static schedule kind is specified or if the ordered clause is
2659 // specified, and if the nonmonotonic modifier is not specified, the effect is
2660 // as if the monotonic modifier is specified. Otherwise, unless the monotonic
2661 // modifier is specified, the effect is as if the nonmonotonic modifier is
2662 // specified.
2663 if (CGM.getLangOpts().OpenMP >= 50 && Modifier == 0) {
2664 if (!(Schedule == OMP_sch_static_chunked || Schedule == OMP_sch_static ||
2665 Schedule == OMP_sch_static_balanced_chunked ||
2666 Schedule == OMP_ord_static_chunked || Schedule == OMP_ord_static ||
2667 Schedule == OMP_dist_sch_static_chunked ||
2668 Schedule == OMP_dist_sch_static))
2669 Modifier = OMP_sch_modifier_nonmonotonic;
2670 }
2671 return Schedule | Modifier;
2672}
2673
/// Emit the runtime initialization for a dynamically scheduled (dispatch)
/// worksharing loop: a call to __kmpc_dispatch_init_<size>[u]().
///
/// \param CGF            Function being emitted into.
/// \param Loc            Source location used for the ident_t argument.
/// \param ScheduleKind   schedule() clause kind plus its two modifiers.
/// \param IVSize         Bit width (32 or 64) of the iteration variable.
/// \param IVSigned       Whether the iteration variable is signed.
/// \param Ordered        Whether the loop carries an ordered clause.
/// \param DispatchValues Lower/upper bounds and optional chunk expression.
void CGOpenMPRuntime::emitForDispatchInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
    bool Ordered, const DispatchRTInput &DispatchValues) {
  // Nothing to emit if the current insertion point is dead code.
  if (!CGF.HaveInsertPoint())
    return;
  OpenMPSchedType Schedule = getRuntimeSchedule(
      ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
  // Static schedules must go through the static-init path instead, unless the
  // loop is ordered (ordered static loops still use the dispatch machinery).
  assert(Ordered ||
         (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
          Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
          Schedule != OMP_sch_static_balanced_chunked));
  // Call __kmpc_dispatch_init(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
  //          kmp_int[32|64] lower, kmp_int[32|64] upper,
  //          kmp_int[32|64] stride, kmp_int[32|64] chunk);

  // If the Chunk was not specified in the clause - use default value 1.
  llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
                                            : CGF.Builder.getIntN(IVSize, 1);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc),
      getThreadID(CGF, Loc),
      // Schedule type, with the OpenMP 5.0 monotonic/nonmonotonic modifier
      // bits folded in.
      CGF.Builder.getInt32(addMonoNonMonoModifier(
          CGM, Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
      DispatchValues.LB,                                     // Lower
      DispatchValues.UB,                                     // Upper
      CGF.Builder.getIntN(IVSize, 1),                        // Stride
      Chunk                                                  // Chunk
  };
  CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
}
2706
/// Shared helper that emits the __kmpc_for_static_init_<size>[u]() call for
/// both worksharing loops/sections (emitForStaticInit) and distribute
/// constructs (emitDistributeStaticInit).
///
/// \param UpdateLocation       Pre-built ident_t* for this construct.
/// \param ThreadId             Pre-computed global thread id.
/// \param ForStaticInitFunction Runtime entry matching the IV size/signedness.
/// \param Schedule             Already-lowered runtime schedule constant.
/// \param M1, M2               schedule() clause modifiers.
/// \param Values               Addresses of isLastIter/LB/UB/ST plus chunk.
static void emitForStaticInitCall(
    CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
    llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
    OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
    const CGOpenMPRuntime::StaticRTInput &Values) {
  // Nothing to emit if the current insertion point is dead code.
  if (!CGF.HaveInsertPoint())
    return;

  // Ordered loops never take the static-init fast path.
  assert(!Values.Ordered);
  // Only the statically scheduled variants may reach this helper.
  assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
         Schedule == OMP_sch_static_balanced_chunked ||
         Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
         Schedule == OMP_dist_sch_static ||
         Schedule == OMP_dist_sch_static_chunked);

  // Call __kmpc_for_static_init(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
  //          kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
  //          kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
  //          kmp_int[32|64] incr, kmp_int[32|64] chunk);
  llvm::Value *Chunk = Values.Chunk;
  if (Chunk == nullptr) {
    // A missing chunk expression must correspond to a non-chunked schedule.
    assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
            Schedule == OMP_dist_sch_static) &&
           "expected static non-chunked schedule");
    // If the Chunk was not specified in the clause - use default value 1.
    Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
  } else {
    // Conversely, an explicit chunk implies a chunked schedule variant.
    assert((Schedule == OMP_sch_static_chunked ||
            Schedule == OMP_sch_static_balanced_chunked ||
            Schedule == OMP_ord_static_chunked ||
            Schedule == OMP_dist_sch_static_chunked) &&
           "expected static chunked schedule");
  }
  llvm::Value *Args[] = {
      UpdateLocation,
      ThreadId,
      CGF.Builder.getInt32(addMonoNonMonoModifier(CGF.CGM, Schedule, M1,
                                                  M2)), // Schedule type
      Values.IL.getPointer(),                           // &isLastIter
      Values.LB.getPointer(),                           // &LB
      Values.UB.getPointer(),                           // &UB
      Values.ST.getPointer(),                           // &Stride
      CGF.Builder.getIntN(Values.IVSize, 1),            // Incr
      Chunk                                             // Chunk
  };
  CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
}
2755
/// Emit the static-schedule initialization call for a worksharing loop or
/// sections directive. Lowers the clause-level schedule to a runtime schedule
/// constant and delegates the actual call to emitForStaticInitCall().
void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
                                        SourceLocation Loc,
                                        OpenMPDirectiveKind DKind,
                                        const OpenMPScheduleTy &ScheduleKind,
                                        const StaticRTInput &Values) {
  OpenMPSchedType ScheduleNum = getRuntimeSchedule(
      ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
  assert(isOpenMPWorksharingDirective(DKind) &&
         "Expected loop-based or sections-based directive.");
  // Tag the ident_t with the kind of work region so the runtime/tools can
  // distinguish loops from sections.
  llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
                                                    isOpenMPLoopDirective(DKind)
                                                        ? OMP_IDENT_WORK_LOOP
                                                        : OMP_IDENT_WORK_SECTIONS);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::FunctionCallee StaticInitFunction =
      createForStaticInitFunction(Values.IVSize, Values.IVSigned);
  // Give the runtime call an artificial debug location for this construct.
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
                        ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
}
2776
/// Emit the static-schedule initialization call for a distribute directive.
/// Unlike emitForStaticInit(), distribute has no schedule() modifiers, so the
/// modifiers passed down are 'unknown'.
void CGOpenMPRuntime::emitDistributeStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDistScheduleClauseKind SchedKind,
    const CGOpenMPRuntime::StaticRTInput &Values) {
  OpenMPSchedType ScheduleNum =
      getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
  // Tag the ident_t as a distribute work region.
  llvm::Value *UpdatedLocation =
      emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::FunctionCallee StaticInitFunction =
      createForStaticInitFunction(Values.IVSize, Values.IVSigned);
  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
                        ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
                        OMPC_SCHEDULE_MODIFIER_unknown, Values);
}
2792
2793void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
2794 SourceLocation Loc,
2795 OpenMPDirectiveKind DKind) {
2796 if (!CGF.HaveInsertPoint())
2797 return;
2798 // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
2799 llvm::Value *Args[] = {
2800 emitUpdateLocation(CGF, Loc,
2801 isOpenMPDistributeDirective(DKind)
2802 ? OMP_IDENT_WORK_DISTRIBUTE
2803 : isOpenMPLoopDirective(DKind)
2804 ? OMP_IDENT_WORK_LOOP
2805 : OMP_IDENT_WORK_SECTIONS),
2806 getThreadID(CGF, Loc)};
2807 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
2808 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2809 CGM.getModule(), OMPRTL___kmpc_for_static_fini),
2810 Args);
2811}
2812
2813void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
2814 SourceLocation Loc,
2815 unsigned IVSize,
2816 bool IVSigned) {
2817 if (!CGF.HaveInsertPoint())
2818 return;
2819 // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
2820 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2821 CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
2822}
2823
2824llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
2825 SourceLocation Loc, unsigned IVSize,
2826 bool IVSigned, Address IL,
2827 Address LB, Address UB,
2828 Address ST) {
2829 // Call __kmpc_dispatch_next(
2830 // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
2831 // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
2832 // kmp_int[32|64] *p_stride);
2833 llvm::Value *Args[] = {
2834 emitUpdateLocation(CGF, Loc),
2835 getThreadID(CGF, Loc),
2836 IL.getPointer(), // &isLastIter
2837 LB.getPointer(), // &Lower
2838 UB.getPointer(), // &Upper
2839 ST.getPointer() // &Stride
2840 };
2841 llvm::Value *Call =
2842 CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
2843 return CGF.EmitScalarConversion(
2844 Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
2845 CGF.getContext().BoolTy, Loc);
2846}
2847
2848void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
2849 llvm::Value *NumThreads,
2850 SourceLocation Loc) {
2851 if (!CGF.HaveInsertPoint())
2852 return;
2853 // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
2854 llvm::Value *Args[] = {
2855 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2856 CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
2857 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2858 CGM.getModule(), OMPRTL___kmpc_push_num_threads),
2859 Args);
2860}
2861
/// Emit the proc_bind clause lowering:
/// __kmpc_push_proc_bind(&loc, global_tid, proc_bind).
void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
                                         ProcBindKind ProcBind,
                                         SourceLocation Loc) {
  // Nothing to emit if the current insertion point is dead code.
  if (!CGF.HaveInsertPoint())
    return;
  assert(ProcBind != OMP_PROC_BIND_unknown && "Unsupported proc_bind value.");
  // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      llvm::ConstantInt::get(CGM.IntTy, unsigned(ProcBind), /*isSigned=*/true)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_push_proc_bind),
                      Args);
}
2876
2877void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
2878 SourceLocation Loc, llvm::AtomicOrdering AO) {
2879 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
2880 OMPBuilder.CreateFlush(CGF.Builder);
2881 } else {
2882 if (!CGF.HaveInsertPoint())
2883 return;
2884 // Build call void __kmpc_flush(ident_t *loc)
2885 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2886 CGM.getModule(), OMPRTL___kmpc_flush),
2887 emitUpdateLocation(CGF, Loc));
2888 }
2889}
2890
namespace {
/// Indexes of fields for type kmp_task_t.
/// These must stay in sync with the kmp_task_t layout built elsewhere in this
/// file; they are used as GEP indices into the task record.
enum KmpTaskTFields {
  /// List of shared variables.
  KmpTaskTShareds,
  /// Task routine.
  KmpTaskTRoutine,
  /// Partition id for the untied tasks.
  KmpTaskTPartId,
  /// Function with call of destructors for private variables.
  Data1,
  /// Task priority.
  Data2,
  /// (Taskloops only) Lower bound.
  KmpTaskTLowerBound,
  /// (Taskloops only) Upper bound.
  KmpTaskTUpperBound,
  /// (Taskloops only) Stride.
  KmpTaskTStride,
  /// (Taskloops only) Is last iteration flag.
  KmpTaskTLastIter,
  /// (Taskloops only) Reduction data.
  KmpTaskTReductions,
};
} // anonymous namespace
2916
2917bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
2918 return OffloadEntriesTargetRegion.empty() &&
2919 OffloadEntriesDeviceGlobalVar.empty();
2920}
2921
/// Initialize target region entry.
/// Creates a placeholder entry (no address, no ID) at the given
/// device/file/parent/line coordinates so the later device-side registration
/// can find and complete it. Only meaningful when compiling device code.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                    StringRef ParentName, unsigned LineNum,
                                    unsigned Order) {
  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
                                             "only required for the device "
                                             "code generation.");
  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
      OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
                                   OMPTargetRegionEntryTargetRegion);
  // Keep the global entry counter in sync; it determines emission order.
  ++OffloadingEntriesNum;
}
2935
/// Register a target region entry with its outlined-function address and ID.
/// On the device, the entry must already have been initialized (host metadata
/// drives initialization); a missing entry is reported as an error. On the
/// host, a fresh, fully populated entry is created.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                  StringRef ParentName, unsigned LineNum,
                                  llvm::Constant *Addr, llvm::Constant *ID,
                                  OMPTargetRegionEntryKind Flags) {
  // If we are emitting code for a target, the entry is already initialized,
  // only has to be registered.
  if (CGM.getLangOpts().OpenMPIsDevice) {
    if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum)) {
      // Device code does not match the host's view of the target regions:
      // diagnose rather than silently emitting a broken image.
      unsigned DiagID = CGM.getDiags().getCustomDiagID(
          DiagnosticsEngine::Error,
          "Unable to find target region on line '%0' in the device code.");
      CGM.getDiags().Report(DiagID) << LineNum;
      return;
    }
    auto &Entry =
        OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
    assert(Entry.isValid() && "Entry not initialized!");
    Entry.setAddress(Addr);
    Entry.setID(ID);
    Entry.setFlags(Flags);
  } else {
    // Host side: create the entry directly with the next ordinal.
    OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
    OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
    ++OffloadingEntriesNum;
  }
}
2963
2964bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
2965 unsigned DeviceID, unsigned FileID, StringRef ParentName,
2966 unsigned LineNum) const {
2967 auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
2968 if (PerDevice == OffloadEntriesTargetRegion.end())
2969 return false;
2970 auto PerFile = PerDevice->second.find(FileID);
2971 if (PerFile == PerDevice->second.end())
2972 return false;
2973 auto PerParentName = PerFile->second.find(ParentName);
2974 if (PerParentName == PerFile->second.end())
2975 return false;
2976 auto PerLine = PerParentName->second.find(LineNum);
2977 if (PerLine == PerParentName->second.end())
2978 return false;
2979 // Fail if this entry is already registered.
2980 if (PerLine->second.getAddress() || PerLine->second.getID())
2981 return false;
2982 return true;
2983}
2984
/// Apply \p Action to every recorded target region entry.
/// The callback receives the full coordinates (device id, file id, parent
/// function name, line number) along with the entry itself.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
    const OffloadTargetRegionEntryInfoActTy &Action) {
  // Scan all target region entries and perform the provided action.
  for (const auto &D : OffloadEntriesTargetRegion)
    for (const auto &F : D.second)
      for (const auto &P : F.second)
        for (const auto &L : P.second)
          Action(D.first, F.first, P.first(), L.first, L.second);
}
2994
/// Initialize a device global variable entry (placeholder with no address or
/// size yet); registration fills in the rest later. Only meaningful when
/// compiling device code.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    initializeDeviceGlobalVarEntryInfo(StringRef Name,
                                       OMPTargetGlobalVarEntryKind Flags,
                                       unsigned Order) {
  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
                                             "only required for the device "
                                             "code generation.");
  OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
  // Keep the global entry counter in sync; it determines emission order.
  ++OffloadingEntriesNum;
}
3005
/// Register a device global variable entry with its address, size, flags and
/// linkage. On the device the entry was pre-initialized and is completed here;
/// on the host a new entry is created unless one already exists, in which case
/// only a zero size (declaration seen before definition) may be updated.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
                                     CharUnits VarSize,
                                     OMPTargetGlobalVarEntryKind Flags,
                                     llvm::GlobalValue::LinkageTypes Linkage) {
  if (CGM.getLangOpts().OpenMPIsDevice) {
    auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
    assert(Entry.isValid() && Entry.getFlags() == Flags &&
           "Entry not initialized!");
    assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
           "Resetting with the new address.");
    if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) {
      // Entry already registered: only upgrade a previously unknown (zero)
      // size, e.g. when the definition follows a declaration.
      if (Entry.getVarSize().isZero()) {
        Entry.setVarSize(VarSize);
        Entry.setLinkage(Linkage);
      }
      return;
    }
    Entry.setVarSize(VarSize);
    Entry.setLinkage(Linkage);
    Entry.setAddress(Addr);
  } else {
    if (hasDeviceGlobalVarEntryInfo(VarName)) {
      auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
      assert(Entry.isValid() && Entry.getFlags() == Flags &&
             "Entry not initialized!");
      assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
             "Resetting with the new address.");
      // Same zero-size upgrade rule as on the device side.
      if (Entry.getVarSize().isZero()) {
        Entry.setVarSize(VarSize);
        Entry.setLinkage(Linkage);
      }
      return;
    }
    // Host side: create a fully populated entry with the next ordinal.
    OffloadEntriesDeviceGlobalVar.try_emplace(
        VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
    ++OffloadingEntriesNum;
  }
}
3045
3046void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3047 actOnDeviceGlobalVarEntriesInfo(
3048 const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
3049 // Scan all target region entries and perform the provided action.
3050 for (const auto &E : OffloadEntriesDeviceGlobalVar)
3051 Action(E.getKey(), E.getValue());
3052}
3053
/// Materialize one __tgt_offload_entry global for the given offload entity.
/// Builds an internal constant string with the entity's name, then a constant
/// struct {addr, name, size, flags, reserved} placed in the
/// "omp_offloading_entries" section where the offload linker script collects
/// the entry table.
void CGOpenMPRuntime::createOffloadEntry(
    llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
    llvm::GlobalValue::LinkageTypes Linkage) {
  StringRef Name = Addr->getName();
  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &C = M.getContext();

  // Create constant string with the name.
  llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);

  std::string StringName = getName({"omp_offloading", "entry_name"});
  auto *Str = new llvm::GlobalVariable(
      M, StrPtrInit->getType(), /*isConstant=*/true,
      llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
  // The string is only referenced through this entry; address is not
  // significant.
  Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Field order must match the __tgt_offload_entry type built by
  // getTgtOffloadEntryQTy().
  llvm::Constant *Data[] = {llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy),
                            llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy),
                            llvm::ConstantInt::get(CGM.SizeTy, Size),
                            llvm::ConstantInt::get(CGM.Int32Ty, Flags),
                            llvm::ConstantInt::get(CGM.Int32Ty, 0)};
  std::string EntryName = getName({"omp_offloading", "entry", ""});
  llvm::GlobalVariable *Entry = createGlobalStruct(
      CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data,
      Twine(EntryName).concat(Name), llvm::GlobalValue::WeakAnyLinkage);

  // The entry has to be created in the section the linker expects it to be.
  Entry->setSection("omp_offloading_entries");
}
3083
3084void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
3085 // Emit the offloading entries and metadata so that the device codegen side
3086 // can easily figure out what to emit. The produced metadata looks like
3087 // this:
3088 //
3089 // !omp_offload.info = !{!1, ...}
3090 //
3091 // Right now we only generate metadata for function that contain target
3092 // regions.
3093
3094 // If we are in simd mode or there are no entries, we don't need to do
3095 // anything.
3096 if (CGM.getLangOpts().OpenMPSimd || OffloadEntriesInfoManager.empty())
3097 return;
3098
3099 llvm::Module &M = CGM.getModule();
3100 llvm::LLVMContext &C = M.getContext();
3101 SmallVector<std::tuple<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *,
3102 SourceLocation, StringRef>,
3103 16>
3104 OrderedEntries(OffloadEntriesInfoManager.size());
3105 llvm::SmallVector<StringRef, 16> ParentFunctions(
3106 OffloadEntriesInfoManager.size());
3107
3108 // Auxiliary methods to create metadata values and strings.
3109 auto &&GetMDInt = [this](unsigned V) {
3110 return llvm::ConstantAsMetadata::get(
3111 llvm::ConstantInt::get(CGM.Int32Ty, V));
3112 };
3113
3114 auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
3115
3116 // Create the offloading info metadata node.
3117 llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
3118
3119 // Create function that emits metadata for each target region entry;
3120 auto &&TargetRegionMetadataEmitter =
3121 [this, &C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt,
3122 &GetMDString](
3123 unsigned DeviceID, unsigned FileID, StringRef ParentName,
3124 unsigned Line,
3125 const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
3126 // Generate metadata for target regions. Each entry of this metadata
3127 // contains:
3128 // - Entry 0 -> Kind of this type of metadata (0).
3129 // - Entry 1 -> Device ID of the file where the entry was identified.
3130 // - Entry 2 -> File ID of the file where the entry was identified.
3131 // - Entry 3 -> Mangled name of the function where the entry was
3132 // identified.
3133 // - Entry 4 -> Line in the file where the entry was identified.
3134 // - Entry 5 -> Order the entry was created.
3135 // The first element of the metadata node is the kind.
3136 llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
3137 GetMDInt(FileID), GetMDString(ParentName),
3138 GetMDInt(Line), GetMDInt(E.getOrder())};
3139
3140 SourceLocation Loc;
3141 for (auto I = CGM.getContext().getSourceManager().fileinfo_begin(),
3142 E = CGM.getContext().getSourceManager().fileinfo_end();
3143 I != E; ++I) {
3144 if (I->getFirst()->getUniqueID().getDevice() == DeviceID &&
3145 I->getFirst()->getUniqueID().getFile() == FileID) {
3146 Loc = CGM.getContext().getSourceManager().translateFileLineCol(
3147 I->getFirst(), Line, 1);
3148 break;
3149 }
3150 }
3151 // Save this entry in the right position of the ordered entries array.
3152 OrderedEntries[E.getOrder()] = std::make_tuple(&E, Loc, ParentName);
3153 ParentFunctions[E.getOrder()] = ParentName;
3154
3155 // Add metadata to the named metadata node.
3156 MD->addOperand(llvm::MDNode::get(C, Ops));
3157 };
3158
3159 OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
3160 TargetRegionMetadataEmitter);
3161
3162 // Create function that emits metadata for each device global variable entry;
3163 auto &&DeviceGlobalVarMetadataEmitter =
3164 [&C, &OrderedEntries, &GetMDInt, &GetMDString,
3165 MD](StringRef MangledName,
3166 const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar
3167 &E) {
3168 // Generate metadata for global variables. Each entry of this metadata
3169 // contains:
3170 // - Entry 0 -> Kind of this type of metadata (1).
3171 // - Entry 1 -> Mangled name of the variable.
3172 // - Entry 2 -> Declare target kind.
3173 // - Entry 3 -> Order the entry was created.
3174 // The first element of the metadata node is the kind.
3175 llvm::Metadata *Ops[] = {
3176 GetMDInt(E.getKind()), GetMDString(MangledName),
3177 GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
3178
3179 // Save this entry in the right position of the ordered entries array.
3180 OrderedEntries[E.getOrder()] =
3181 std::make_tuple(&E, SourceLocation(), MangledName);
3182
3183 // Add metadata to the named metadata node.
3184 MD->addOperand(llvm::MDNode::get(C, Ops));
3185 };
3186
3187 OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
3188 DeviceGlobalVarMetadataEmitter);
3189
3190 for (const auto &E : OrderedEntries) {
3191 assert(std::get<0>(E) && "All ordered entries must exist!")((std::get<0>(E) && "All ordered entries must exist!"
) ? static_cast<void> (0) : __assert_fail ("std::get<0>(E) && \"All ordered entries must exist!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3191, __PRETTY_FUNCTION__))
;
3192 if (const auto *CE =
3193 dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
3194 std::get<0>(E))) {
3195 if (!CE->getID() || !CE->getAddress()) {
3196 // Do not blame the entry if the parent funtion is not emitted.
3197 StringRef FnName = ParentFunctions[CE->getOrder()];
3198 if (!CGM.GetGlobalValue(FnName))
3199 continue;
3200 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3201 DiagnosticsEngine::Error,
3202 "Offloading entry for target region in %0 is incorrect: either the "
3203 "address or the ID is invalid.");
3204 CGM.getDiags().Report(std::get<1>(E), DiagID) << FnName;
3205 continue;
3206 }
3207 createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
3208 CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
3209 } else if (const auto *CE = dyn_cast<OffloadEntriesInfoManagerTy::
3210 OffloadEntryInfoDeviceGlobalVar>(
3211 std::get<0>(E))) {
3212 OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags =
3213 static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
3214 CE->getFlags());
3215 switch (Flags) {
3216 case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: {
3217 if (CGM.getLangOpts().OpenMPIsDevice &&
3218 CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())
3219 continue;
3220 if (!CE->getAddress()) {
3221 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3222 DiagnosticsEngine::Error, "Offloading entry for declare target "
3223 "variable %0 is incorrect: the "
3224 "address is invalid.");
3225 CGM.getDiags().Report(std::get<1>(E), DiagID) << std::get<2>(E);
3226 continue;
3227 }
3228 // The vaiable has no definition - no need to add the entry.
3229 if (CE->getVarSize().isZero())
3230 continue;
3231 break;
3232 }
3233 case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink:
3234 assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||((((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress
()) || (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress
())) && "Declaret target link address is set.") ? static_cast
<void> (0) : __assert_fail ("((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) || (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) && \"Declaret target link address is set.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3236, __PRETTY_FUNCTION__))
3235 (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&((((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress
()) || (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress
())) && "Declaret target link address is set.") ? static_cast
<void> (0) : __assert_fail ("((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) || (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) && \"Declaret target link address is set.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3236, __PRETTY_FUNCTION__))
3236 "Declaret target link address is set.")((((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress
()) || (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress
())) && "Declaret target link address is set.") ? static_cast
<void> (0) : __assert_fail ("((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) || (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) && \"Declaret target link address is set.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3236, __PRETTY_FUNCTION__))
;
3237 if (CGM.getLangOpts().OpenMPIsDevice)
3238 continue;
3239 if (!CE->getAddress()) {
3240 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3241 DiagnosticsEngine::Error,
3242 "Offloading entry for declare target variable is incorrect: the "
3243 "address is invalid.");
3244 CGM.getDiags().Report(DiagID);
3245 continue;
3246 }
3247 break;
3248 }
3249 createOffloadEntry(CE->getAddress(), CE->getAddress(),
3250 CE->getVarSize().getQuantity(), Flags,
3251 CE->getLinkage());
3252 } else {
3253 llvm_unreachable("Unsupported entry kind.")::llvm::llvm_unreachable_internal("Unsupported entry kind.", "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3253)
;
3254 }
3255 }
3256}
3257
3258/// Loads all the offload entries information from the host IR
3259/// metadata.
3260void CGOpenMPRuntime::loadOffloadInfoMetadata() {
3261 // If we are in target mode, load the metadata from the host IR. This code has
3262 // to match the metadaata creation in createOffloadEntriesAndInfoMetadata().
3263
3264 if (!CGM.getLangOpts().OpenMPIsDevice)
3265 return;
3266
3267 if (CGM.getLangOpts().OMPHostIRFile.empty())
3268 return;
3269
3270 auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
3271 if (auto EC = Buf.getError()) {
3272 CGM.getDiags().Report(diag::err_cannot_open_file)
3273 << CGM.getLangOpts().OMPHostIRFile << EC.message();
3274 return;
3275 }
3276
3277 llvm::LLVMContext C;
3278 auto ME = expectedToErrorOrAndEmitErrors(
3279 C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
3280
3281 if (auto EC = ME.getError()) {
3282 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3283 DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
3284 CGM.getDiags().Report(DiagID)
3285 << CGM.getLangOpts().OMPHostIRFile << EC.message();
3286 return;
3287 }
3288
3289 llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
3290 if (!MD)
3291 return;
3292
3293 for (llvm::MDNode *MN : MD->operands()) {
3294 auto &&GetMDInt = [MN](unsigned Idx) {
3295 auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
3296 return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
3297 };
3298
3299 auto &&GetMDString = [MN](unsigned Idx) {
3300 auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
3301 return V->getString();
3302 };
3303
3304 switch (GetMDInt(0)) {
3305 default:
3306 llvm_unreachable("Unexpected metadata!")::llvm::llvm_unreachable_internal("Unexpected metadata!", "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3306)
;
3307 break;
3308 case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
3309 OffloadingEntryInfoTargetRegion:
3310 OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
3311 /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
3312 /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
3313 /*Order=*/GetMDInt(5));
3314 break;
3315 case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
3316 OffloadingEntryInfoDeviceGlobalVar:
3317 OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo(
3318 /*MangledName=*/GetMDString(1),
3319 static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
3320 /*Flags=*/GetMDInt(2)),
3321 /*Order=*/GetMDInt(3));
3322 break;
3323 }
3324 }
3325}
3326
3327void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
3328 if (!KmpRoutineEntryPtrTy) {
3329 // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
3330 ASTContext &C = CGM.getContext();
3331 QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
3332 FunctionProtoType::ExtProtoInfo EPI;
3333 KmpRoutineEntryPtrQTy = C.getPointerType(
3334 C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
3335 KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
3336 }
3337}
3338
3339QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
3340 // Make sure the type of the entry is already created. This is the type we
3341 // have to create:
3342 // struct __tgt_offload_entry{
3343 // void *addr; // Pointer to the offload entry info.
3344 // // (function or global)
3345 // char *name; // Name of the function or global.
3346 // size_t size; // Size of the entry info (0 if it a function).
3347 // int32_t flags; // Flags associated with the entry, e.g. 'link'.
3348 // int32_t reserved; // Reserved, to use by the runtime library.
3349 // };
3350 if (TgtOffloadEntryQTy.isNull()) {
3351 ASTContext &C = CGM.getContext();
3352 RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
3353 RD->startDefinition();
3354 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3355 addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
3356 addFieldToRecordDecl(C, RD, C.getSizeType());
3357 addFieldToRecordDecl(
3358 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3359 addFieldToRecordDecl(
3360 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3361 RD->completeDefinition();
3362 RD->addAttr(PackedAttr::CreateImplicit(C));
3363 TgtOffloadEntryQTy = C.getRecordType(RD);
3364 }
3365 return TgtOffloadEntryQTy;
3366}
3367
3368namespace {
3369struct PrivateHelpersTy {
3370 PrivateHelpersTy(const Expr *OriginalRef, const VarDecl *Original,
3371 const VarDecl *PrivateCopy, const VarDecl *PrivateElemInit)
3372 : OriginalRef(OriginalRef), Original(Original), PrivateCopy(PrivateCopy),
3373 PrivateElemInit(PrivateElemInit) {}
3374 PrivateHelpersTy(const VarDecl *Original) : Original(Original) {}
3375 const Expr *OriginalRef = nullptr;
3376 const VarDecl *Original = nullptr;
3377 const VarDecl *PrivateCopy = nullptr;
3378 const VarDecl *PrivateElemInit = nullptr;
3379 bool isLocalPrivate() const {
3380 return !OriginalRef && !PrivateCopy && !PrivateElemInit;
3381 }
3382};
3383typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
3384} // anonymous namespace
3385
3386static bool isAllocatableDecl(const VarDecl *VD) {
3387 const VarDecl *CVD = VD->getCanonicalDecl();
3388 if (!CVD->hasAttr<OMPAllocateDeclAttr>())
3389 return false;
3390 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
3391 // Use the default allocation.
3392 return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
3393 AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
3394 !AA->getAllocator());
3395}
3396
3397static RecordDecl *
3398createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
3399 if (!Privates.empty()) {
3400 ASTContext &C = CGM.getContext();
3401 // Build struct .kmp_privates_t. {
3402 // /* private vars */
3403 // };
3404 RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
3405 RD->startDefinition();
3406 for (const auto &Pair : Privates) {
3407 const VarDecl *VD = Pair.second.Original;
3408 QualType Type = VD->getType().getNonReferenceType();
3409 // If the private variable is a local variable with lvalue ref type,
3410 // allocate the pointer instead of the pointee type.
3411 if (Pair.second.isLocalPrivate()) {
3412 if (VD->getType()->isLValueReferenceType())
3413 Type = C.getPointerType(Type);
3414 if (isAllocatableDecl(VD))
3415 Type = C.getPointerType(Type);
3416 }
3417 FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
3418 if (VD->hasAttrs()) {
3419 for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
3420 E(VD->getAttrs().end());
3421 I != E; ++I)
3422 FD->addAttr(*I);
3423 }
3424 }
3425 RD->completeDefinition();
3426 return RD;
3427 }
3428 return nullptr;
3429}
3430
3431static RecordDecl *
3432createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
3433 QualType KmpInt32Ty,
3434 QualType KmpRoutineEntryPointerQTy) {
3435 ASTContext &C = CGM.getContext();
3436 // Build struct kmp_task_t {
3437 // void * shareds;
3438 // kmp_routine_entry_t routine;
3439 // kmp_int32 part_id;
3440 // kmp_cmplrdata_t data1;
3441 // kmp_cmplrdata_t data2;
3442 // For taskloops additional fields:
3443 // kmp_uint64 lb;
3444 // kmp_uint64 ub;
3445 // kmp_int64 st;
3446 // kmp_int32 liter;
3447 // void * reductions;
3448 // };
3449 RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
3450 UD->startDefinition();
3451 addFieldToRecordDecl(C, UD, KmpInt32Ty);
3452 addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
3453 UD->completeDefinition();
3454 QualType KmpCmplrdataTy = C.getRecordType(UD);
3455 RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
3456 RD->startDefinition();
3457 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3458 addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
3459 addFieldToRecordDecl(C, RD, KmpInt32Ty);
3460 addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
3461 addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
3462 if (isOpenMPTaskLoopDirective(Kind)) {
3463 QualType KmpUInt64Ty =
3464 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
3465 QualType KmpInt64Ty =
3466 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
3467 addFieldToRecordDecl(C, RD, KmpUInt64Ty);
3468 addFieldToRecordDecl(C, RD, KmpUInt64Ty);
3469 addFieldToRecordDecl(C, RD, KmpInt64Ty);
3470 addFieldToRecordDecl(C, RD, KmpInt32Ty);
3471 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3472 }
3473 RD->completeDefinition();
3474 return RD;
3475}
3476
3477static RecordDecl *
3478createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
3479 ArrayRef<PrivateDataTy> Privates) {
3480 ASTContext &C = CGM.getContext();
3481 // Build struct kmp_task_t_with_privates {
3482 // kmp_task_t task_data;
3483 // .kmp_privates_t. privates;
3484 // };
3485 RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
3486 RD->startDefinition();
3487 addFieldToRecordDecl(C, RD, KmpTaskTQTy);
3488 if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
3489 addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
3490 RD->completeDefinition();
3491 return RD;
3492}
3493
3494/// Emit a proxy function which accepts kmp_task_t as the second
3495/// argument.
3496/// \code
3497/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
3498/// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
3499/// For taskloops:
3500/// tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3501/// tt->reductions, tt->shareds);
3502/// return 0;
3503/// }
3504/// \endcode
3505static llvm::Function *
3506emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
3507 OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
3508 QualType KmpTaskTWithPrivatesPtrQTy,
3509 QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
3510 QualType SharedsPtrTy, llvm::Function *TaskFunction,
3511 llvm::Value *TaskPrivatesMap) {
3512 ASTContext &C = CGM.getContext();
3513 FunctionArgList Args;
3514 ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
3515 ImplicitParamDecl::Other);
3516 ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3517 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
3518 ImplicitParamDecl::Other);
3519 Args.push_back(&GtidArg);
3520 Args.push_back(&TaskTypeArg);
3521 const auto &TaskEntryFnInfo =
3522 CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
3523 llvm::FunctionType *TaskEntryTy =
3524 CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
3525 std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
3526 auto *TaskEntry = llvm::Function::Create(
3527 TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
3528 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
3529 TaskEntry->setDoesNotRecurse();
3530 CodeGenFunction CGF(CGM);
3531 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
3532 Loc, Loc);
3533
3534 // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
3535 // tt,
3536 // For taskloops:
3537 // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3538 // tt->task_data.shareds);
3539 llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
3540 CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
3541 LValue TDBase = CGF.EmitLoadOfPointerLValue(
3542 CGF.GetAddrOfLocalVar(&TaskTypeArg),
3543 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3544 const auto *KmpTaskTWithPrivatesQTyRD =
3545 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
3546 LValue Base =
3547 CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
3548 const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
3549 auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
3550 LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
3551 llvm::Value *PartidParam = PartIdLVal.getPointer(CGF);
3552
3553 auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
3554 LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
3555 llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3556 CGF.EmitLoadOfScalar(SharedsLVal, Loc),
3557 CGF.ConvertTypeForMem(SharedsPtrTy));
3558
3559 auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
3560 llvm::Value *PrivatesParam;
3561 if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
3562 LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
3563 PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3564 PrivatesLVal.getPointer(CGF), CGF.VoidPtrTy);
3565 } else {
3566 PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
3567 }
3568
3569 llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
3570 TaskPrivatesMap,
3571 CGF.Builder
3572 .CreatePointerBitCastOrAddrSpaceCast(
3573 TDBase.getAddress(CGF), CGF.VoidPtrTy)
3574 .getPointer()};
3575 SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
3576 std::end(CommonArgs));
3577 if (isOpenMPTaskLoopDirective(Kind)) {
3578 auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
3579 LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
3580 llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
3581 auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
3582 LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
3583 llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
3584 auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
3585 LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
3586 llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
3587 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
3588 LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
3589 llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
3590 auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
3591 LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
3592 llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
3593 CallArgs.push_back(LBParam);
3594 CallArgs.push_back(UBParam);
3595 CallArgs.push_back(StParam);
3596 CallArgs.push_back(LIParam);
3597 CallArgs.push_back(RParam);
3598 }
3599 CallArgs.push_back(SharedsParam);
3600
3601 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
3602 CallArgs);
3603 CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
3604 CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
3605 CGF.FinishFunction();
3606 return TaskEntry;
3607}
3608
3609static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
3610 SourceLocation Loc,
3611 QualType KmpInt32Ty,
3612 QualType KmpTaskTWithPrivatesPtrQTy,
3613 QualType KmpTaskTWithPrivatesQTy) {
3614 ASTContext &C = CGM.getContext();
3615 FunctionArgList Args;
3616 ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
3617 ImplicitParamDecl::Other);
3618 ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3619 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
3620 ImplicitParamDecl::Other);
3621 Args.push_back(&GtidArg);
3622 Args.push_back(&TaskTypeArg);
3623 const auto &DestructorFnInfo =
3624 CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
3625 llvm::FunctionType *DestructorFnTy =
3626 CGM.getTypes().GetFunctionType(DestructorFnInfo);
3627 std::string Name =
3628 CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
3629 auto *DestructorFn =
3630 llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
3631 Name, &CGM.getModule());
3632 CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
3633 DestructorFnInfo);
3634 DestructorFn->setDoesNotRecurse();
3635 CodeGenFunction CGF(CGM);
3636 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
3637 Args, Loc, Loc);
3638
3639 LValue Base = CGF.EmitLoadOfPointerLValue(
3640 CGF.GetAddrOfLocalVar(&TaskTypeArg),
3641 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3642 const auto *KmpTaskTWithPrivatesQTyRD =
3643 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
3644 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
3645 Base = CGF.EmitLValueForField(Base, *FI);
3646 for (const auto *Field :
3647 cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
3648 if (QualType::DestructionKind DtorKind =
3649 Field->getType().isDestructedType()) {
3650 LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
3651 CGF.pushDestroy(DtorKind, FieldLValue.getAddress(CGF), Field->getType());
3652 }
3653 }
3654 CGF.FinishFunction();
3655 return DestructorFn;
3656}
3657
3658/// Emit a privates mapping function for correct handling of private and
3659/// firstprivate variables.
3660/// \code
3661/// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
3662/// **noalias priv1,..., <tyn> **noalias privn) {
3663/// *priv1 = &.privates.priv1;
3664/// ...;
3665/// *privn = &.privates.privn;
3666/// }
3667/// \endcode
3668static llvm::Value *
3669emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
3670 const OMPTaskDataTy &Data, QualType PrivatesQTy,
3671 ArrayRef<PrivateDataTy> Privates) {
3672 ASTContext &C = CGM.getContext();
3673 FunctionArgList Args;
3674 ImplicitParamDecl TaskPrivatesArg(
3675 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3676 C.getPointerType(PrivatesQTy).withConst().withRestrict(),
3677 ImplicitParamDecl::Other);
3678 Args.push_back(&TaskPrivatesArg);
3679 llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, unsigned> PrivateVarsPos;
3680 unsigned Counter = 1;
3681 for (const Expr *E : Data.PrivateVars) {
3682 Args.push_back(ImplicitParamDecl::Create(
3683 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3684 C.getPointerType(C.getPointerType(E->getType()))
3685 .withConst()
3686 .withRestrict(),
3687 ImplicitParamDecl::Other));
3688 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3689 PrivateVarsPos[VD] = Counter;
3690 ++Counter;
3691 }
3692 for (const Expr *E : Data.FirstprivateVars) {
3693 Args.push_back(ImplicitParamDecl::Create(
3694 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3695 C.getPointerType(C.getPointerType(E->getType()))
3696 .withConst()
3697 .withRestrict(),
3698 ImplicitParamDecl::Other));
3699 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3700 PrivateVarsPos[VD] = Counter;
3701 ++Counter;
3702 }
3703 for (const Expr *E : Data.LastprivateVars) {
3704 Args.push_back(ImplicitParamDecl::Create(
3705 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3706 C.getPointerType(C.getPointerType(E->getType()))
3707 .withConst()
3708 .withRestrict(),
3709 ImplicitParamDecl::Other));
3710 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3711 PrivateVarsPos[VD] = Counter;
3712 ++Counter;
3713 }
3714 for (const VarDecl *VD : Data.PrivateLocals) {
3715 QualType Ty = VD->getType().getNonReferenceType();
3716 if (VD->getType()->isLValueReferenceType())
3717 Ty = C.getPointerType(Ty);
3718 if (isAllocatableDecl(VD))
3719 Ty = C.getPointerType(Ty);
3720 Args.push_back(ImplicitParamDecl::Create(
3721 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3722 C.getPointerType(C.getPointerType(Ty)).withConst().withRestrict(),
3723 ImplicitParamDecl::Other));
3724 PrivateVarsPos[VD] = Counter;
3725 ++Counter;
3726 }
3727 const auto &TaskPrivatesMapFnInfo =
3728 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3729 llvm::FunctionType *TaskPrivatesMapTy =
3730 CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
3731 std::string Name =
3732 CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
3733 auto *TaskPrivatesMap = llvm::Function::Create(
3734 TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
3735 &CGM.getModule());
3736 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
3737 TaskPrivatesMapFnInfo);
3738 if (CGM.getLangOpts().Optimize) {
3739 TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
3740 TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
3741 TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
3742 }
3743 CodeGenFunction CGF(CGM);
3744 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
3745 TaskPrivatesMapFnInfo, Args, Loc, Loc);
3746
3747 // *privi = &.privates.privi;
3748 LValue Base = CGF.EmitLoadOfPointerLValue(
3749 CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
3750 TaskPrivatesArg.getType()->castAs<PointerType>());
3751 const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
3752 Counter = 0;
3753 for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
3754 LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
3755 const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
3756 LValue RefLVal =
3757 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
3758 LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
3759 RefLVal.getAddress(CGF), RefLVal.getType()->castAs<PointerType>());
3760 CGF.EmitStoreOfScalar(FieldLVal.getPointer(CGF), RefLoadLVal);
3761 ++Counter;
3762 }
3763 CGF.FinishFunction();
3764 return TaskPrivatesMap;
3765}
3766
3767/// Emit initialization for private variables in task-based directives.
3768static void emitPrivatesInit(CodeGenFunction &CGF,
3769 const OMPExecutableDirective &D,
3770 Address KmpTaskSharedsPtr, LValue TDBase,
3771 const RecordDecl *KmpTaskTWithPrivatesQTyRD,
3772 QualType SharedsTy, QualType SharedsPtrTy,
3773 const OMPTaskDataTy &Data,
3774 ArrayRef<PrivateDataTy> Privates, bool ForDup) {
3775 ASTContext &C = CGF.getContext();
3776 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
3777 LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
3778 OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
3779 ? OMPD_taskloop
3780 : OMPD_task;
3781 const CapturedStmt &CS = *D.getCapturedStmt(Kind);
3782 CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
3783 LValue SrcBase;
3784 bool IsTargetTask =
3785 isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
3786 isOpenMPTargetExecutionDirective(D.getDirectiveKind());
3787 // For target-based directives skip 3 firstprivate arrays BasePointersArray,
3788 // PointersArray and SizesArray. The original variables for these arrays are
3789 // not captured and we get their addresses explicitly.
3790 if ((!IsTargetTask && !Data.FirstprivateVars.empty() && ForDup) ||
3791 (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
3792 SrcBase = CGF.MakeAddrLValue(
3793 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3794 KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
3795 SharedsTy);
3796 }
3797 FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
3798 for (const PrivateDataTy &Pair : Privates) {
3799 // Do not initialize private locals.
3800 if (Pair.second.isLocalPrivate()) {
3801 ++FI;
3802 continue;
3803 }
3804 const VarDecl *VD = Pair.second.PrivateCopy;
3805 const Expr *Init = VD->getAnyInitializer();
3806 if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
3807 !CGF.isTrivialInitializer(Init)))) {
3808 LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
3809 if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
3810 const VarDecl *OriginalVD = Pair.second.Original;
3811 // Check if the variable is the target-based BasePointersArray,
3812 // PointersArray or SizesArray.
3813 LValue SharedRefLValue;
3814 QualType Type = PrivateLValue.getType();
3815 const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
3816 if (IsTargetTask && !SharedField) {
3817 assert(isa<ImplicitParamDecl>(OriginalVD) &&((isa<ImplicitParamDecl>(OriginalVD) && isa<
CapturedDecl>(OriginalVD->getDeclContext()) && cast
<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams
() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl
>(OriginalVD->getDeclContext()) ->getDeclContext()) &&
"Expected artificial target data variable.") ? static_cast<
void> (0) : __assert_fail ("isa<ImplicitParamDecl>(OriginalVD) && isa<CapturedDecl>(OriginalVD->getDeclContext()) && cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getDeclContext()) && \"Expected artificial target data variable.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3824, __PRETTY_FUNCTION__))
3818 isa<CapturedDecl>(OriginalVD->getDeclContext()) &&((isa<ImplicitParamDecl>(OriginalVD) && isa<
CapturedDecl>(OriginalVD->getDeclContext()) && cast
<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams
() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl
>(OriginalVD->getDeclContext()) ->getDeclContext()) &&
"Expected artificial target data variable.") ? static_cast<
void> (0) : __assert_fail ("isa<ImplicitParamDecl>(OriginalVD) && isa<CapturedDecl>(OriginalVD->getDeclContext()) && cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getDeclContext()) && \"Expected artificial target data variable.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3824, __PRETTY_FUNCTION__))
3819 cast<CapturedDecl>(OriginalVD->getDeclContext())((isa<ImplicitParamDecl>(OriginalVD) && isa<
CapturedDecl>(OriginalVD->getDeclContext()) && cast
<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams
() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl
>(OriginalVD->getDeclContext()) ->getDeclContext()) &&
"Expected artificial target data variable.") ? static_cast<
void> (0) : __assert_fail ("isa<ImplicitParamDecl>(OriginalVD) && isa<CapturedDecl>(OriginalVD->getDeclContext()) && cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getDeclContext()) && \"Expected artificial target data variable.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3824, __PRETTY_FUNCTION__))
3820 ->getNumParams() == 0 &&((isa<ImplicitParamDecl>(OriginalVD) && isa<
CapturedDecl>(OriginalVD->getDeclContext()) && cast
<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams
() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl
>(OriginalVD->getDeclContext()) ->getDeclContext()) &&
"Expected artificial target data variable.") ? static_cast<
void> (0) : __assert_fail ("isa<ImplicitParamDecl>(OriginalVD) && isa<CapturedDecl>(OriginalVD->getDeclContext()) && cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getDeclContext()) && \"Expected artificial target data variable.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3824, __PRETTY_FUNCTION__))
3821 isa<TranslationUnitDecl>(((isa<ImplicitParamDecl>(OriginalVD) && isa<
CapturedDecl>(OriginalVD->getDeclContext()) && cast
<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams
() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl
>(OriginalVD->getDeclContext()) ->getDeclContext()) &&
"Expected artificial target data variable.") ? static_cast<
void> (0) : __assert_fail ("isa<ImplicitParamDecl>(OriginalVD) && isa<CapturedDecl>(OriginalVD->getDeclContext()) && cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getDeclContext()) && \"Expected artificial target data variable.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3824, __PRETTY_FUNCTION__))
3822 cast<CapturedDecl>(OriginalVD->getDeclContext())((isa<ImplicitParamDecl>(OriginalVD) && isa<
CapturedDecl>(OriginalVD->getDeclContext()) && cast
<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams
() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl
>(OriginalVD->getDeclContext()) ->getDeclContext()) &&
"Expected artificial target data variable.") ? static_cast<
void> (0) : __assert_fail ("isa<ImplicitParamDecl>(OriginalVD) && isa<CapturedDecl>(OriginalVD->getDeclContext()) && cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getDeclContext()) && \"Expected artificial target data variable.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3824, __PRETTY_FUNCTION__))
3823 ->getDeclContext()) &&((isa<ImplicitParamDecl>(OriginalVD) && isa<
CapturedDecl>(OriginalVD->getDeclContext()) && cast
<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams
() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl
>(OriginalVD->getDeclContext()) ->getDeclContext()) &&
"Expected artificial target data variable.") ? static_cast<
void> (0) : __assert_fail ("isa<ImplicitParamDecl>(OriginalVD) && isa<CapturedDecl>(OriginalVD->getDeclContext()) && cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getDeclContext()) && \"Expected artificial target data variable.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3824, __PRETTY_FUNCTION__))
3824 "Expected artificial target data variable.")((isa<ImplicitParamDecl>(OriginalVD) && isa<
CapturedDecl>(OriginalVD->getDeclContext()) && cast
<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams
() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl
>(OriginalVD->getDeclContext()) ->getDeclContext()) &&
"Expected artificial target data variable.") ? static_cast<
void> (0) : __assert_fail ("isa<ImplicitParamDecl>(OriginalVD) && isa<CapturedDecl>(OriginalVD->getDeclContext()) && cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getNumParams() == 0 && isa<TranslationUnitDecl>( cast<CapturedDecl>(OriginalVD->getDeclContext()) ->getDeclContext()) && \"Expected artificial target data variable.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3824, __PRETTY_FUNCTION__))
;
3825 SharedRefLValue =
3826 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
3827 } else if (ForDup) {
3828 SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
3829 SharedRefLValue = CGF.MakeAddrLValue(
3830 Address(SharedRefLValue.getPointer(CGF),
3831 C.getDeclAlign(OriginalVD)),
3832 SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
3833 SharedRefLValue.getTBAAInfo());
3834 } else if (CGF.LambdaCaptureFields.count(
3835 Pair.second.Original->getCanonicalDecl()) > 0 ||
3836 dyn_cast_or_null<BlockDecl>(CGF.CurCodeDecl)) {
3837 SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
3838 } else {
3839 // Processing for implicitly captured variables.
3840 InlinedOpenMPRegionRAII Region(
3841 CGF, [](CodeGenFunction &, PrePostActionTy &) {}, OMPD_unknown,
3842 /*HasCancel=*/false);
3843 SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
3844 }
3845 if (Type->isArrayType()) {
3846 // Initialize firstprivate array.
3847 if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
3848 // Perform simple memcpy.
3849 CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
3850 } else {
3851 // Initialize firstprivate array using element-by-element
3852 // initialization.
3853 CGF.EmitOMPAggregateAssign(
3854 PrivateLValue.getAddress(CGF), SharedRefLValue.getAddress(CGF),
3855 Type,
3856 [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
3857 Address SrcElement) {
3858 // Clean up any temporaries needed by the initialization.
3859 CodeGenFunction::OMPPrivateScope InitScope(CGF);
3860 InitScope.addPrivate(
3861 Elem, [SrcElement]() -> Address { return SrcElement; });
3862 (void)InitScope.Privatize();
3863 // Emit initialization for single element.
3864 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
3865 CGF, &CapturesInfo);
3866 CGF.EmitAnyExprToMem(Init, DestElement,
3867 Init->getType().getQualifiers(),
3868 /*IsInitializer=*/false);
3869 });
3870 }
3871 } else {
3872 CodeGenFunction::OMPPrivateScope InitScope(CGF);
3873 InitScope.addPrivate(Elem, [SharedRefLValue, &CGF]() -> Address {
3874 return SharedRefLValue.getAddress(CGF);
3875 });
3876 (void)InitScope.Privatize();
3877 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
3878 CGF.EmitExprAsInit(Init, VD, PrivateLValue,
3879 /*capturedByInit=*/false);
3880 }
3881 } else {
3882 CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
3883 }
3884 }
3885 ++FI;
3886 }
3887}
3888
3889/// Check if duplication function is required for taskloops.
3890static bool checkInitIsRequired(CodeGenFunction &CGF,
3891 ArrayRef<PrivateDataTy> Privates) {
3892 bool InitRequired = false;
3893 for (const PrivateDataTy &Pair : Privates) {
3894 if (Pair.second.isLocalPrivate())
3895 continue;
3896 const VarDecl *VD = Pair.second.PrivateCopy;
3897 const Expr *Init = VD->getAnyInitializer();
3898 InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
3899 !CGF.isTrivialInitializer(Init));
3900 if (InitRequired)
3901 break;
3902 }
3903 return InitRequired;
3904}
3905
3906
3907/// Emit task_dup function (for initialization of
3908/// private/firstprivate/lastprivate vars and last_iter flag)
3909/// \code
3910/// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
3911/// lastpriv) {
3912/// // setup lastprivate flag
3913/// task_dst->last = lastpriv;
3914/// // could be constructor calls here...
3915/// }
3916/// \endcode
3917static llvm::Value *
3918emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
3919 const OMPExecutableDirective &D,
3920 QualType KmpTaskTWithPrivatesPtrQTy,
3921 const RecordDecl *KmpTaskTWithPrivatesQTyRD,
3922 const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
3923 QualType SharedsPtrTy, const OMPTaskDataTy &Data,
3924 ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
3925 ASTContext &C = CGM.getContext();
3926 FunctionArgList Args;
3927 ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3928 KmpTaskTWithPrivatesPtrQTy,
3929 ImplicitParamDecl::Other);
3930 ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3931 KmpTaskTWithPrivatesPtrQTy,
3932 ImplicitParamDecl::Other);
3933 ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3934 ImplicitParamDecl::Other);
3935 Args.push_back(&DstArg);
3936 Args.push_back(&SrcArg);
3937 Args.push_back(&LastprivArg);
3938 const auto &TaskDupFnInfo =
3939 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3940 llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
3941 std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
3942 auto *TaskDup = llvm::Function::Create(
3943 TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
3944 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
3945 TaskDup->setDoesNotRecurse();
3946 CodeGenFunction CGF(CGM);
3947 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
3948 Loc);
3949
3950 LValue TDBase = CGF.EmitLoadOfPointerLValue(
3951 CGF.GetAddrOfLocalVar(&DstArg),
3952 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3953 // task_dst->liter = lastpriv;
3954 if (WithLastIter) {
3955 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
3956 LValue Base = CGF.EmitLValueForField(
3957 TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
3958 LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
3959 llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
3960 CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
3961 CGF.EmitStoreOfScalar(Lastpriv, LILVal);
3962 }
3963
3964 // Emit initial values for private copies (if any).
3965 assert(!Privates.empty())((!Privates.empty()) ? static_cast<void> (0) : __assert_fail
("!Privates.empty()", "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 3965, __PRETTY_FUNCTION__))
;
3966 Address KmpTaskSharedsPtr = Address::invalid();
3967 if (!Data.FirstprivateVars.empty()) {
3968 LValue TDBase = CGF.EmitLoadOfPointerLValue(
3969 CGF.GetAddrOfLocalVar(&SrcArg),
3970 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3971 LValue Base = CGF.EmitLValueForField(
3972 TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
3973 KmpTaskSharedsPtr = Address(
3974 CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
3975 Base, *std::next(KmpTaskTQTyRD->field_begin(),
3976 KmpTaskTShareds)),
3977 Loc),
3978 CGM.getNaturalTypeAlignment(SharedsTy));
3979 }
3980 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
3981 SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
3982 CGF.FinishFunction();
3983 return TaskDup;
3984}
3985
3986/// Checks if destructor function is required to be generated.
3987/// \return true if cleanups are required, false otherwise.
3988static bool
3989checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD,
3990 ArrayRef<PrivateDataTy> Privates) {
3991 for (const PrivateDataTy &P : Privates) {
3992 if (P.second.isLocalPrivate())
3993 continue;
3994 QualType Ty = P.second.Original->getType().getNonReferenceType();
3995 if (Ty.isDestructedType())
3996 return true;
3997 }
3998 return false;
3999}
4000
4001namespace {
4002/// Loop generator for OpenMP iterator expression.
4003class OMPIteratorGeneratorScope final
4004 : public CodeGenFunction::OMPPrivateScope {
4005 CodeGenFunction &CGF;
4006 const OMPIteratorExpr *E = nullptr;
4007 SmallVector<CodeGenFunction::JumpDest, 4> ContDests;
4008 SmallVector<CodeGenFunction::JumpDest, 4> ExitDests;
4009 OMPIteratorGeneratorScope() = delete;
4010 OMPIteratorGeneratorScope(OMPIteratorGeneratorScope &) = delete;
4011
4012public:
4013 OMPIteratorGeneratorScope(CodeGenFunction &CGF, const OMPIteratorExpr *E)
4014 : CodeGenFunction::OMPPrivateScope(CGF), CGF(CGF), E(E) {
4015 if (!E)
4016 return;
4017 SmallVector<llvm::Value *, 4> Uppers;
4018 for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
4019 Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
4020 const auto *VD = cast<VarDecl>(E->getIteratorDecl(I));
4021 addPrivate(VD, [&CGF, VD]() {
4022 return CGF.CreateMemTemp(VD->getType(), VD->getName());
4023 });
4024 const OMPIteratorHelperData &HelperData = E->getHelper(I);
4025 addPrivate(HelperData.CounterVD, [&CGF, &HelperData]() {
4026 return CGF.CreateMemTemp(HelperData.CounterVD->getType(),
4027 "counter.addr");
4028 });
4029 }
4030 Privatize();
4031
4032 for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
4033 const OMPIteratorHelperData &HelperData = E->getHelper(I);
4034 LValue CLVal =
4035 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(HelperData.CounterVD),
4036 HelperData.CounterVD->getType());
4037 // Counter = 0;
4038 CGF.EmitStoreOfScalar(
4039 llvm::ConstantInt::get(CLVal.getAddress(CGF).getElementType(), 0),
4040 CLVal);
4041 CodeGenFunction::JumpDest &ContDest =
4042 ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
4043 CodeGenFunction::JumpDest &ExitDest =
4044 ExitDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.exit"));
4045 // N = <number-of_iterations>;
4046 llvm::Value *N = Uppers[I];
4047 // cont:
4048 // if (Counter < N) goto body; else goto exit;
4049 CGF.EmitBlock(ContDest.getBlock());
4050 auto *CVal =
4051 CGF.EmitLoadOfScalar(CLVal, HelperData.CounterVD->getLocation());
4052 llvm::Value *Cmp =
4053 HelperData.CounterVD->getType()->isSignedIntegerOrEnumerationType()
4054 ? CGF.Builder.CreateICmpSLT(CVal, N)
4055 : CGF.Builder.CreateICmpULT(CVal, N);
4056 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("iter.body");
4057 CGF.Builder.CreateCondBr(Cmp, BodyBB, ExitDest.getBlock());
4058 // body:
4059 CGF.EmitBlock(BodyBB);
4060 // Iteri = Begini + Counter * Stepi;
4061 CGF.EmitIgnoredExpr(HelperData.Update);
4062 }
4063 }
4064 ~OMPIteratorGeneratorScope() {
4065 if (!E)
4066 return;
4067 for (unsigned I = E->numOfIterators(); I > 0; --I) {
4068 // Counter = Counter + 1;
4069 const OMPIteratorHelperData &HelperData = E->getHelper(I - 1);
4070 CGF.EmitIgnoredExpr(HelperData.CounterUpdate);
4071 // goto cont;
4072 CGF.EmitBranchThroughCleanup(ContDests[I - 1]);
4073 // exit:
4074 CGF.EmitBlock(ExitDests[I - 1].getBlock(), /*IsFinished=*/I == 1);
4075 }
4076 }
4077};
4078} // namespace
4079
4080static std::pair<llvm::Value *, llvm::Value *>
4081getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
4082 const auto *OASE = dyn_cast<OMPArrayShapingExpr>(E);
4083 llvm::Value *Addr;
4084 if (OASE) {
4085 const Expr *Base = OASE->getBase();
4086 Addr = CGF.EmitScalarExpr(Base);
4087 } else {
4088 Addr = CGF.EmitLValue(E).getPointer(CGF);
4089 }
4090 llvm::Value *SizeVal;
4091 QualType Ty = E->getType();
4092 if (OASE) {
4093 SizeVal = CGF.getTypeSize(OASE->getBase()->getType()->getPointeeType());
4094 for (const Expr *SE : OASE->getDimensions()) {
4095 llvm::Value *Sz = CGF.EmitScalarExpr(SE);
4096 Sz = CGF.EmitScalarConversion(
4097 Sz, SE->getType(), CGF.getContext().getSizeType(), SE->getExprLoc());
4098 SizeVal = CGF.Builder.CreateNUWMul(SizeVal, Sz);
4099 }
4100 } else if (const auto *ASE =
4101 dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
4102 LValue UpAddrLVal =
4103 CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
4104 llvm::Value *UpAddr =
4105 CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(CGF), /*Idx0=*/1);
4106 llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
4107 llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
4108 SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
4109 } else {
4110 SizeVal = CGF.getTypeSize(Ty);
4111 }
4112 return std::make_pair(Addr, SizeVal);
4113}
4114
4115/// Builds kmp_depend_info, if it is not built yet, and builds flags type.
4116static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
4117 QualType FlagsTy = C.getIntTypeForBitwidth(32, /*Signed=*/false);
4118 if (KmpTaskAffinityInfoTy.isNull()) {
4119 RecordDecl *KmpAffinityInfoRD =
4120 C.buildImplicitRecord("kmp_task_affinity_info_t");
4121 KmpAffinityInfoRD->startDefinition();
4122 addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getIntPtrType());
4123 addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
4124 addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
4125 KmpAffinityInfoRD->completeDefinition();
4126 KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
4127 }
4128}
4129
4130CGOpenMPRuntime::TaskResultTy
4131CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
4132 const OMPExecutableDirective &D,
4133 llvm::Function *TaskFunction, QualType SharedsTy,
4134 Address Shareds, const OMPTaskDataTy &Data) {
4135 ASTContext &C = CGM.getContext();
4136 llvm::SmallVector<PrivateDataTy, 4> Privates;
4137 // Aggregate privates and sort them by the alignment.
4138 const auto *I = Data.PrivateCopies.begin();
4139 for (const Expr *E : Data.PrivateVars) {
4140 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4141 Privates.emplace_back(
4142 C.getDeclAlign(VD),
4143 PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4144 /*PrivateElemInit=*/nullptr));
4145 ++I;
4146 }
4147 I = Data.FirstprivateCopies.begin();
4148 const auto *IElemInitRef = Data.FirstprivateInits.begin();
4149 for (const Expr *E : Data.FirstprivateVars) {
4150 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4151 Privates.emplace_back(
4152 C.getDeclAlign(VD),
4153 PrivateHelpersTy(
4154 E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4155 cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
4156 ++I;
4157 ++IElemInitRef;
4158 }
4159 I = Data.LastprivateCopies.begin();
4160 for (const Expr *E : Data.LastprivateVars) {
4161 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4162 Privates.emplace_back(
4163 C.getDeclAlign(VD),
4164 PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4165 /*PrivateElemInit=*/nullptr));
4166 ++I;
4167 }
4168 for (const VarDecl *VD : Data.PrivateLocals) {
4169 if (isAllocatableDecl(VD))
4170 Privates.emplace_back(CGM.getPointerAlign(), PrivateHelpersTy(VD));
4171 else
4172 Privates.emplace_back(C.getDeclAlign(VD), PrivateHelpersTy(VD));
4173 }
4174 llvm::stable_sort(Privates,
4175 [](const PrivateDataTy &L, const PrivateDataTy &R) {
4176 return L.first > R.first;
4177 });
4178 QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
4179 // Build type kmp_routine_entry_t (if not built yet).
4180 emitKmpRoutineEntryT(KmpInt32Ty);
4181 // Build type kmp_task_t (if not built yet).
4182 if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
4183 if (SavedKmpTaskloopTQTy.isNull()) {
4184 SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4185 CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4186 }
4187 KmpTaskTQTy = SavedKmpTaskloopTQTy;
4188 } else {
4189 assert((D.getDirectiveKind() == OMPD_task ||(((D.getDirectiveKind() == OMPD_task || isOpenMPTargetExecutionDirective
(D.getDirectiveKind()) || isOpenMPTargetDataManagementDirective
(D.getDirectiveKind())) && "Expected taskloop, task or target directive"
) ? static_cast<void> (0) : __assert_fail ("(D.getDirectiveKind() == OMPD_task || isOpenMPTargetExecutionDirective(D.getDirectiveKind()) || isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) && \"Expected taskloop, task or target directive\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 4192, __PRETTY_FUNCTION__))
4190 isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||(((D.getDirectiveKind() == OMPD_task || isOpenMPTargetExecutionDirective
(D.getDirectiveKind()) || isOpenMPTargetDataManagementDirective
(D.getDirectiveKind())) && "Expected taskloop, task or target directive"
) ? static_cast<void> (0) : __assert_fail ("(D.getDirectiveKind() == OMPD_task || isOpenMPTargetExecutionDirective(D.getDirectiveKind()) || isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) && \"Expected taskloop, task or target directive\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 4192, __PRETTY_FUNCTION__))
4191 isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&(((D.getDirectiveKind() == OMPD_task || isOpenMPTargetExecutionDirective
(D.getDirectiveKind()) || isOpenMPTargetDataManagementDirective
(D.getDirectiveKind())) && "Expected taskloop, task or target directive"
) ? static_cast<void> (0) : __assert_fail ("(D.getDirectiveKind() == OMPD_task || isOpenMPTargetExecutionDirective(D.getDirectiveKind()) || isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) && \"Expected taskloop, task or target directive\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 4192, __PRETTY_FUNCTION__))
4192 "Expected taskloop, task or target directive")(((D.getDirectiveKind() == OMPD_task || isOpenMPTargetExecutionDirective
(D.getDirectiveKind()) || isOpenMPTargetDataManagementDirective
(D.getDirectiveKind())) && "Expected taskloop, task or target directive"
) ? static_cast<void> (0) : __assert_fail ("(D.getDirectiveKind() == OMPD_task || isOpenMPTargetExecutionDirective(D.getDirectiveKind()) || isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) && \"Expected taskloop, task or target directive\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 4192, __PRETTY_FUNCTION__))
;
4193 if (SavedKmpTaskTQTy.isNull()) {
4194 SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4195 CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4196 }
4197 KmpTaskTQTy = SavedKmpTaskTQTy;
4198 }
4199 const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
4200 // Build particular struct kmp_task_t for the given task.
4201 const RecordDecl *KmpTaskTWithPrivatesQTyRD =
4202 createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
4203 QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
4204 QualType KmpTaskTWithPrivatesPtrQTy =
4205 C.getPointerType(KmpTaskTWithPrivatesQTy);
4206 llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
4207 llvm::Type *KmpTaskTWithPrivatesPtrTy =
4208 KmpTaskTWithPrivatesTy->getPointerTo();
4209 llvm::Value *KmpTaskTWithPrivatesTySize =
4210 CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
4211 QualType SharedsPtrTy = C.getPointerType(SharedsTy);
4212
4213 // Emit initial values for private copies (if any).
4214 llvm::Value *TaskPrivatesMap = nullptr;
4215 llvm::Type *TaskPrivatesMapTy =
4216 std::next(TaskFunction->arg_begin(), 3)->getType();
4217 if (!Privates.empty()) {
4218 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4219 TaskPrivatesMap =
4220 emitTaskPrivateMappingFunction(CGM, Loc, Data, FI->getType(), Privates);
4221 TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4222 TaskPrivatesMap, TaskPrivatesMapTy);
4223 } else {
4224 TaskPrivatesMap = llvm::ConstantPointerNull::get(
4225 cast<llvm::PointerType>(TaskPrivatesMapTy));
4226 }
4227 // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
4228 // kmp_task_t *tt);
4229 llvm::Function *TaskEntry = emitProxyTaskFunction(
4230 CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4231 KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
4232 TaskPrivatesMap);
4233
4234 // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
4235 // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
4236 // kmp_routine_entry_t *task_entry);
4237 // Task flags. Format is taken from
4238 // https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h,
4239 // description of kmp_tasking_flags struct.
4240 enum {
4241 TiedFlag = 0x1,
4242 FinalFlag = 0x2,
4243 DestructorsFlag = 0x8,
4244 PriorityFlag = 0x20,
4245 DetachableFlag = 0x40,
4246 };
4247 unsigned Flags = Data.Tied ? TiedFlag : 0;
4248 bool NeedsCleanup = false;
4249 if (!Privates.empty()) {
4250 NeedsCleanup =
4251 checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD, Privates);
4252 if (NeedsCleanup)
4253 Flags = Flags | DestructorsFlag;
4254 }
4255 if (Data.Priority.getInt())
4256 Flags = Flags | PriorityFlag;
4257 if (D.hasClausesOfKind<OMPDetachClause>())
4258 Flags = Flags | DetachableFlag;
4259 llvm::Value *TaskFlags =
4260 Data.Final.getPointer()
4261 ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
4262 CGF.Builder.getInt32(FinalFlag),
4263 CGF.Builder.getInt32(/*C=*/0))
4264 : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
4265 TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
4266 llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
4267 SmallVector<llvm::Value *, 8> AllocArgs = {emitUpdateLocation(CGF, Loc),
4268 getThreadID(CGF, Loc), TaskFlags, KmpTaskTWithPrivatesTySize,
4269 SharedsSize, CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4270 TaskEntry, KmpRoutineEntryPtrTy)};
4271 llvm::Value *NewTask;
4272 if (D.hasClausesOfKind<OMPNowaitClause>()) {
4273 // Check if we have any device clause associated with the directive.
4274 const Expr *Device = nullptr;
4275 if (auto *C = D.getSingleClause<OMPDeviceClause>())
4276 Device = C->getDevice();
4277 // Emit device ID if any otherwise use default value.
4278 llvm::Value *DeviceID;
4279 if (Device)
4280 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
4281 CGF.Int64Ty, /*isSigned=*/true);
4282 else
4283 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
4284 AllocArgs.push_back(DeviceID);
4285 NewTask = CGF.EmitRuntimeCall(
4286 OMPBuilder.getOrCreateRuntimeFunction(
4287 CGM.getModule(), OMPRTL___kmpc_omp_target_task_alloc),
4288 AllocArgs);
4289 } else {
4290 NewTask =
4291 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
4292 CGM.getModule(), OMPRTL___kmpc_omp_task_alloc),
4293 AllocArgs);
4294 }
4295 // Emit detach clause initialization.
4296 // evt = (typeof(evt))__kmpc_task_allow_completion_event(loc, tid,
4297 // task_descriptor);
4298 if (const auto *DC = D.getSingleClause<OMPDetachClause>()) {
4299 const Expr *Evt = DC->getEventHandler()->IgnoreParenImpCasts();
4300 LValue EvtLVal = CGF.EmitLValue(Evt);
4301
4302 // Build kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
4303 // int gtid, kmp_task_t *task);
4304 llvm::Value *Loc = emitUpdateLocation(CGF, DC->getBeginLoc());
4305 llvm::Value *Tid = getThreadID(CGF, DC->getBeginLoc());
4306 Tid = CGF.Builder.CreateIntCast(Tid, CGF.IntTy, /*isSigned=*/false);
4307 llvm::Value *EvtVal = CGF.EmitRuntimeCall(
4308 OMPBuilder.getOrCreateRuntimeFunction(
4309 CGM.getModule(), OMPRTL___kmpc_task_allow_completion_event),
4310 {Loc, Tid, NewTask});
4311 EvtVal = CGF.EmitScalarConversion(EvtVal, C.VoidPtrTy, Evt->getType(),
4312 Evt->getExprLoc());
4313 CGF.EmitStoreOfScalar(EvtVal, EvtLVal);
4314 }
4315 // Process affinity clauses.
4316 if (D.hasClausesOfKind<OMPAffinityClause>()) {
4317 // Process list of affinity data.
4318 ASTContext &C = CGM.getContext();
4319 Address AffinitiesArray = Address::invalid();
4320 // Calculate number of elements to form the array of affinity data.
4321 llvm::Value *NumOfElements = nullptr;
4322 unsigned NumAffinities = 0;
4323 for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
4324 if (const Expr *Modifier = C->getModifier()) {
4325 const auto *IE = cast<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts());
4326 for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
4327 llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
4328 Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
4329 NumOfElements =
4330 NumOfElements ? CGF.Builder.CreateNUWMul(NumOfElements, Sz) : Sz;
4331 }
4332 } else {
4333 NumAffinities += C->varlist_size();
4334 }
4335 }
4336 getKmpAffinityType(CGM.getContext(), KmpTaskAffinityInfoTy);
4337 // Fields ids in kmp_task_affinity_info record.
4338 enum RTLAffinityInfoFieldsTy { BaseAddr, Len, Flags };
4339
4340 QualType KmpTaskAffinityInfoArrayTy;
4341 if (NumOfElements) {
4342 NumOfElements = CGF.Builder.CreateNUWAdd(
4343 llvm::ConstantInt::get(CGF.SizeTy, NumAffinities), NumOfElements);
4344 OpaqueValueExpr OVE(
4345 Loc,
4346 C.getIntTypeForBitwidth(C.getTypeSize(C.getSizeType()), /*Signed=*/0),
4347 VK_RValue);
4348 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
4349 RValue::get(NumOfElements));
4350 KmpTaskAffinityInfoArrayTy =
4351 C.getVariableArrayType(KmpTaskAffinityInfoTy, &OVE, ArrayType::Normal,
4352 /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
4353 // Properly emit variable-sized array.
4354 auto *PD = ImplicitParamDecl::Create(C, KmpTaskAffinityInfoArrayTy,
4355 ImplicitParamDecl::Other);
4356 CGF.EmitVarDecl(*PD);
4357 AffinitiesArray = CGF.GetAddrOfLocalVar(PD);
4358 NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
4359 /*isSigned=*/false);
4360 } else {
4361 KmpTaskAffinityInfoArrayTy = C.getConstantArrayType(
4362 KmpTaskAffinityInfoTy,
4363 llvm::APInt(C.getTypeSize(C.getSizeType()), NumAffinities), nullptr,
4364 ArrayType::Normal, /*IndexTypeQuals=*/0);
4365 AffinitiesArray =
4366 CGF.CreateMemTemp(KmpTaskAffinityInfoArrayTy, ".affs.arr.addr");
4367 AffinitiesArray = CGF.Builder.CreateConstArrayGEP(AffinitiesArray, 0);
4368 NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumAffinities,
4369 /*isSigned=*/false);
4370 }
4371
4372 const auto *KmpAffinityInfoRD = KmpTaskAffinityInfoTy->getAsRecordDecl();
4373 // Fill array by elements without iterators.
4374 unsigned Pos = 0;
4375 bool HasIterator = false;
4376 for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
4377 if (C->getModifier()) {
4378 HasIterator = true;
4379 continue;
4380 }
4381 for (const Expr *E : C->varlists()) {
4382 llvm::Value *Addr;
4383 llvm::Value *Size;
4384 std::tie(Addr, Size) = getPointerAndSize(CGF, E);
4385 LValue Base =
4386 CGF.MakeAddrLValue(CGF.Builder.CreateConstGEP(AffinitiesArray, Pos),
4387 KmpTaskAffinityInfoTy);
4388 // affs[i].base_addr = &<Affinities[i].second>;
4389 LValue BaseAddrLVal = CGF.EmitLValueForField(
4390 Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
4391 CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
4392 BaseAddrLVal);
4393 // affs[i].len = sizeof(<Affinities[i].second>);
4394 LValue LenLVal = CGF.EmitLValueForField(
4395 Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
4396 CGF.EmitStoreOfScalar(Size, LenLVal);
4397 ++Pos;
4398 }
4399 }
4400 LValue PosLVal;
4401 if (HasIterator) {
4402 PosLVal = CGF.MakeAddrLValue(
4403 CGF.CreateMemTemp(C.getSizeType(), "affs.counter.addr"),
4404 C.getSizeType());
4405 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
4406 }
4407 // Process elements with iterators.
4408 for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
4409 const Expr *Modifier = C->getModifier();
4410 if (!Modifier)
4411 continue;
4412 OMPIteratorGeneratorScope IteratorScope(
4413 CGF, cast_or_null<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts()));
4414 for (const Expr *E : C->varlists()) {
4415 llvm::Value *Addr;
4416 llvm::Value *Size;
4417 std::tie(Addr, Size) = getPointerAndSize(CGF, E);
4418 llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
4419 LValue Base = CGF.MakeAddrLValue(
4420 Address(CGF.Builder.CreateGEP(AffinitiesArray.getPointer(), Idx),
4421 AffinitiesArray.getAlignment()),
4422 KmpTaskAffinityInfoTy);
4423 // affs[i].base_addr = &<Affinities[i].second>;
4424 LValue BaseAddrLVal = CGF.EmitLValueForField(
4425 Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
4426 CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
4427 BaseAddrLVal);
4428 // affs[i].len = sizeof(<Affinities[i].second>);
4429 LValue LenLVal = CGF.EmitLValueForField(
4430 Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
4431 CGF.EmitStoreOfScalar(Size, LenLVal);
4432 Idx = CGF.Builder.CreateNUWAdd(
4433 Idx, llvm::ConstantInt::get(Idx->getType(), 1));
4434 CGF.EmitStoreOfScalar(Idx, PosLVal);
4435 }
4436 }
4437 // Call to kmp_int32 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref,
4438 // kmp_int32 gtid, kmp_task_t *new_task, kmp_int32
4439 // naffins, kmp_task_affinity_info_t *affin_list);
4440 llvm::Value *LocRef = emitUpdateLocation(CGF, Loc);
4441 llvm::Value *GTid = getThreadID(CGF, Loc);
4442 llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4443 AffinitiesArray.getPointer(), CGM.VoidPtrTy);
4444 // FIXME: Emit the function and ignore its result for now unless the
4445 // runtime function is properly implemented.
4446 (void)CGF.EmitRuntimeCall(
4447 OMPBuilder.getOrCreateRuntimeFunction(
4448 CGM.getModule(), OMPRTL___kmpc_omp_reg_task_with_affinity),
4449 {LocRef, GTid, NewTask, NumOfElements, AffinListPtr});
4450 }
4451 llvm::Value *NewTaskNewTaskTTy =
4452 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4453 NewTask, KmpTaskTWithPrivatesPtrTy);
4454 LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
4455 KmpTaskTWithPrivatesQTy);
4456 LValue TDBase =
4457 CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
4458 // Fill the data in the resulting kmp_task_t record.
4459 // Copy shareds if there are any.
4460 Address KmpTaskSharedsPtr = Address::invalid();
4461 if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
4462 KmpTaskSharedsPtr =
4463 Address(CGF.EmitLoadOfScalar(
4464 CGF.EmitLValueForField(
4465 TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
4466 KmpTaskTShareds)),
4467 Loc),
4468 CGM.getNaturalTypeAlignment(SharedsTy));
4469 LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
4470 LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
4471 CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
4472 }
4473 // Emit initial values for private copies (if any).
4474 TaskResultTy Result;
4475 if (!Privates.empty()) {
4476 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
4477 SharedsTy, SharedsPtrTy, Data, Privates,
4478 /*ForDup=*/false);
4479 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
4480 (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
4481 Result.TaskDupFn = emitTaskDupFunction(
4482 CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
4483 KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
4484 /*WithLastIter=*/!Data.LastprivateVars.empty());
4485 }
4486 }
4487 // Fields of union "kmp_cmplrdata_t" for destructors and priority.
4488 enum { Priority = 0, Destructors = 1 };
4489 // Provide pointer to function with destructors for privates.
4490 auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
4491 const RecordDecl *KmpCmplrdataUD =
4492 (*FI)->getType()->getAsUnionType()->getDecl();
4493 if (NeedsCleanup) {
4494 llvm::Value *DestructorFn = emitDestructorsFunction(
4495 CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4496 KmpTaskTWithPrivatesQTy);
4497 LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
4498 LValue DestructorsLV = CGF.EmitLValueForField(
4499 Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
4500 CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4501 DestructorFn, KmpRoutineEntryPtrTy),
4502 DestructorsLV);
4503 }
4504 // Set priority.
4505 if (Data.Priority.getInt()) {
4506 LValue Data2LV = CGF.EmitLValueForField(
4507 TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
4508 LValue PriorityLV = CGF.EmitLValueForField(
4509 Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
4510 CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
4511 }
4512 Result.NewTask = NewTask;
4513 Result.TaskEntry = TaskEntry;
4514 Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
4515 Result.TDBase = TDBase;
4516 Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
4517 return Result;
4518}
4519
namespace {
/// Dependence kind for RTL.
/// NOTE(review): the numeric values are an ABI contract with the OpenMP
/// runtime's kmp_depend_info flags field — do not renumber. (Values presumed
/// to mirror kmp.h; verify against the runtime before changing.)
enum RTLDependenceKindTy {
  DepIn = 0x01,
  DepInOut = 0x3,
  DepMutexInOutSet = 0x4
};
/// Fields ids in kmp_depend_info record.
/// Must match the field order built in getDependTypes(): base address,
/// length in bytes, dependence flags.
enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
} // namespace
4530
4531/// Translates internal dependency kind into the runtime kind.
4532static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K) {
4533 RTLDependenceKindTy DepKind;
4534 switch (K) {
4535 case OMPC_DEPEND_in:
4536 DepKind = DepIn;
4537 break;
4538 // Out and InOut dependencies must use the same code.
4539 case OMPC_DEPEND_out:
4540 case OMPC_DEPEND_inout:
4541 DepKind = DepInOut;
4542 break;
4543 case OMPC_DEPEND_mutexinoutset:
4544 DepKind = DepMutexInOutSet;
4545 break;
4546 case OMPC_DEPEND_source:
4547 case OMPC_DEPEND_sink:
4548 case OMPC_DEPEND_depobj:
4549 case OMPC_DEPEND_unknown:
4550 llvm_unreachable("Unknown task dependence type")::llvm::llvm_unreachable_internal("Unknown task dependence type"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 4550)
;
4551 }
4552 return DepKind;
4553}
4554
4555/// Builds kmp_depend_info, if it is not built yet, and builds flags type.
4556static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy,
4557 QualType &FlagsTy) {
4558 FlagsTy = C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
4559 if (KmpDependInfoTy.isNull()) {
4560 RecordDecl *KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
4561 KmpDependInfoRD->startDefinition();
4562 addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
4563 addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
4564 addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
4565 KmpDependInfoRD->completeDefinition();
4566 KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
4567 }
4568}
4569
/// Returns the number of dependencies stored in a depobj together with an
/// LValue for the first kmp_depend_info element of its array.
///
/// A depobj points one element *past* a hidden header record: the element at
/// index -1 stores the dependency count in its base_addr field (see
/// emitDepobjDependClause, which writes it).
std::pair<llvm::Value *, LValue>
CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
                                   SourceLocation Loc) {
  ASTContext &C = CGM.getContext();
  QualType FlagsTy;
  getDependTypes(C, KmpDependInfoTy, FlagsTy);
  RecordDecl *KmpDependInfoRD =
      cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  // Load the opaque pointer stored in the depobj variable.
  LValue Base = CGF.EmitLoadOfPointerLValue(
      DepobjLVal.getAddress(CGF),
      C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
  // Reinterpret it as a pointer to the kmp_depend_info array.
  Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
  Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
                            Base.getTBAAInfo());
  // Step back one element to reach the hidden header record.
  llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
      Addr.getPointer(),
      llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
  LValue NumDepsBase = CGF.MakeAddrLValue(
      Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
      Base.getBaseInfo(), Base.getTBAAInfo());
  // NumDeps = deps[i].base_addr;
  LValue BaseAddrLVal = CGF.EmitLValueForField(
      NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
  llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, Loc);
  return std::make_pair(NumDeps, Base);
}
4598
/// Fills kmp_depend_info records in DependenciesArray for one depend clause.
///
/// \param Pos Either a compile-time index (unsigned*) advanced in place when
///        the number of entries is statically known, or an LValue* holding a
///        runtime counter when an iterator modifier makes the count dynamic.
/// \param Data The dependency expressions (and optional iterator) to emit.
///
/// For iterator-modified clauses, OMPIteratorGeneratorScope emits the
/// surrounding iterator loops so this body runs once per iteration.
static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
                           llvm::PointerUnion<unsigned *, LValue *> Pos,
                           const OMPTaskDataTy::DependData &Data,
                           Address DependenciesArray) {
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &C = CGM.getContext();
  QualType FlagsTy;
  getDependTypes(C, KmpDependInfoTy, FlagsTy);
  RecordDecl *KmpDependInfoRD =
      cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);

  OMPIteratorGeneratorScope IteratorScope(
      CGF, cast_or_null<OMPIteratorExpr>(
               Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
                                 : nullptr));
  for (const Expr *E : Data.DepExprs) {
    llvm::Value *Addr;
    llvm::Value *Size;
    std::tie(Addr, Size) = getPointerAndSize(CGF, E);
    LValue Base;
    if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
      // Static position: constant GEP into the array.
      Base = CGF.MakeAddrLValue(
          CGF.Builder.CreateConstGEP(DependenciesArray, *P), KmpDependInfoTy);
    } else {
      // Dynamic position: load the runtime counter and index with it.
      LValue &PosLVal = *Pos.get<LValue *>();
      llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
      Base = CGF.MakeAddrLValue(
          Address(CGF.Builder.CreateGEP(DependenciesArray.getPointer(), Idx),
                  DependenciesArray.getAlignment()),
          KmpDependInfoTy);
    }
    // deps[i].base_addr = &<Dependencies[i].second>;
    LValue BaseAddrLVal = CGF.EmitLValueForField(
        Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
    CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
                          BaseAddrLVal);
    // deps[i].len = sizeof(<Dependencies[i].second>);
    LValue LenLVal = CGF.EmitLValueForField(
        Base, *std::next(KmpDependInfoRD->field_begin(), Len));
    CGF.EmitStoreOfScalar(Size, LenLVal);
    // deps[i].flags = <Dependencies[i].first>;
    RTLDependenceKindTy DepKind = translateDependencyKind(Data.DepKind);
    LValue FlagsLVal = CGF.EmitLValueForField(
        Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
    CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
                          FlagsLVal);
    // Advance the position: increment the compile-time counter, or emit an
    // increment of the runtime counter.
    if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
      ++(*P);
    } else {
      LValue &PosLVal = *Pos.get<LValue *>();
      llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
      Idx = CGF.Builder.CreateNUWAdd(Idx,
                                     llvm::ConstantInt::get(Idx->getType(), 1));
      CGF.EmitStoreOfScalar(Idx, PosLVal);
    }
  }
}
4657
/// Emits, for every depobj expression in a depend(depobj:...) clause, the
/// runtime value holding the number of dependencies stored in that depobj.
///
/// Each size is accumulated in a zero-initialized stack temporary inside the
/// (optional) iterator loops, then loaded after the loops so the returned
/// values are the totals across all iterations.
static SmallVector<llvm::Value *, 4>
emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
                        const OMPTaskDataTy::DependData &Data) {
  assert(Data.DepKind == OMPC_DEPEND_depobj &&
         "Expected depobj dependecy kind.");
  SmallVector<llvm::Value *, 4> Sizes;
  SmallVector<LValue, 4> SizeLVals;
  ASTContext &C = CGF.getContext();
  QualType FlagsTy;
  getDependTypes(C, KmpDependInfoTy, FlagsTy);
  RecordDecl *KmpDependInfoRD =
      cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
  llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
  {
    // Scope emits the iterator loops (if any) around the per-depobj reads.
    OMPIteratorGeneratorScope IteratorScope(
        CGF, cast_or_null<OMPIteratorExpr>(
                 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
                                   : nullptr));
    for (const Expr *E : Data.DepExprs) {
      LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
      LValue Base = CGF.EmitLoadOfPointerLValue(
          DepobjLVal.getAddress(CGF),
          C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
      Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          Base.getAddress(CGF), KmpDependInfoPtrT);
      Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
                                Base.getTBAAInfo());
      // The element at index -1 is a header whose base_addr field holds the
      // dependency count (see emitDepobjDependClause / getDepobjElements).
      llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
          Addr.getPointer(),
          llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
      LValue NumDepsBase = CGF.MakeAddrLValue(
          Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
          Base.getBaseInfo(), Base.getTBAAInfo());
      // NumDeps = deps[i].base_addr;
      LValue BaseAddrLVal = CGF.EmitLValueForField(
          NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
      llvm::Value *NumDeps =
          CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
      // Accumulate into a zeroed temporary: size += NumDeps per iteration.
      LValue NumLVal = CGF.MakeAddrLValue(
          CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
          C.getUIntPtrType());
      CGF.InitTempAlloca(NumLVal.getAddress(CGF),
                         llvm::ConstantInt::get(CGF.IntPtrTy, 0));
      llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
      llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
      CGF.EmitStoreOfScalar(Add, NumLVal);
      SizeLVals.push_back(NumLVal);
    }
  }
  // Load the accumulated totals outside the iterator loops.
  for (unsigned I = 0, E = SizeLVals.size(); I < E; ++I) {
    llvm::Value *Size =
        CGF.EmitLoadOfScalar(SizeLVals[I], Data.DepExprs[I]->getExprLoc());
    Sizes.push_back(Size);
  }
  return Sizes;
}
4715
/// Copies the kmp_depend_info records stored in each depobj of a
/// depend(depobj:...) clause into DependenciesArray via memcpy, advancing the
/// runtime position counter PosLVal by the number of elements copied.
static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
                               LValue PosLVal,
                               const OMPTaskDataTy::DependData &Data,
                               Address DependenciesArray) {
  assert(Data.DepKind == OMPC_DEPEND_depobj &&
         "Expected depobj dependecy kind.");
  ASTContext &C = CGF.getContext();
  QualType FlagsTy;
  getDependTypes(C, KmpDependInfoTy, FlagsTy);
  RecordDecl *KmpDependInfoRD =
      cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
  llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
  llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy);
  {
    // Scope emits the iterator loops (if any) around the per-depobj copies.
    OMPIteratorGeneratorScope IteratorScope(
        CGF, cast_or_null<OMPIteratorExpr>(
                 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
                                   : nullptr));
    for (unsigned I = 0, End = Data.DepExprs.size(); I < End; ++I) {
      const Expr *E = Data.DepExprs[I];
      LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
      LValue Base = CGF.EmitLoadOfPointerLValue(
          DepobjLVal.getAddress(CGF),
          C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
      Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          Base.getAddress(CGF), KmpDependInfoPtrT);
      Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
                                Base.getTBAAInfo());

      // Get number of elements in a single depobj.
      // The element at index -1 is a header whose base_addr field stores the
      // count (written by emitDepobjDependClause).
      llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
          Addr.getPointer(),
          llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
      LValue NumDepsBase = CGF.MakeAddrLValue(
          Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
          Base.getBaseInfo(), Base.getTBAAInfo());
      // NumDeps = deps[i].base_addr;
      LValue BaseAddrLVal = CGF.EmitLValueForField(
          NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
      llvm::Value *NumDeps =
          CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());

      // memcopy dependency data.
      llvm::Value *Size = CGF.Builder.CreateNUWMul(
          ElSize,
          CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
      llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
      Address DepAddr =
          Address(CGF.Builder.CreateGEP(DependenciesArray.getPointer(), Pos),
                  DependenciesArray.getAlignment());
      CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);

      // Increase pos.
      // pos += size;
      llvm::Value *Add = CGF.Builder.CreateNUWAdd(Pos, NumDeps);
      CGF.EmitStoreOfScalar(Add, PosLVal);
    }
  }
}
4776
4777std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
4778 CodeGenFunction &CGF, ArrayRef<OMPTaskDataTy::DependData> Dependencies,
4779 SourceLocation Loc) {
4780 if (llvm::all_of(Dependencies, [](const OMPTaskDataTy::DependData &D) {
4781 return D.DepExprs.empty();
4782 }))
4783 return std::make_pair(nullptr, Address::invalid());
4784 // Process list of dependencies.
4785 ASTContext &C = CGM.getContext();
4786 Address DependenciesArray = Address::invalid();
4787 llvm::Value *NumOfElements = nullptr;
4788 unsigned NumDependencies = std::accumulate(
4789 Dependencies.begin(), Dependencies.end(), 0,
4790 [](unsigned V, const OMPTaskDataTy::DependData &D) {
4791 return D.DepKind == OMPC_DEPEND_depobj
4792 ? V
4793 : (V + (D.IteratorExpr ? 0 : D.DepExprs.size()));
4794 });
4795 QualType FlagsTy;
4796 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4797 bool HasDepobjDeps = false;
4798 bool HasRegularWithIterators = false;
4799 llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
4800 llvm::Value *NumOfRegularWithIterators =
4801 llvm::ConstantInt::get(CGF.IntPtrTy, 1);
4802 // Calculate number of depobj dependecies and regular deps with the iterators.
4803 for (const OMPTaskDataTy::DependData &D : Dependencies) {
4804 if (D.DepKind == OMPC_DEPEND_depobj) {
4805 SmallVector<llvm::Value *, 4> Sizes =
4806 emitDepobjElementsSizes(CGF, KmpDependInfoTy, D);
4807 for (llvm::Value *Size : Sizes) {
4808 NumOfDepobjElements =
4809 CGF.Builder.CreateNUWAdd(NumOfDepobjElements, Size);
4810 }
4811 HasDepobjDeps = true;
4812 continue;
4813 }
4814 // Include number of iterations, if any.
4815 if (const auto *IE = cast_or_null<OMPIteratorExpr>(D.IteratorExpr)) {
4816 for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
4817 llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
4818 Sz = CGF.Builder.CreateIntCast(Sz, CGF.IntPtrTy, /*isSigned=*/false);
4819 NumOfRegularWithIterators =
4820 CGF.Builder.CreateNUWMul(NumOfRegularWithIterators, Sz);
4821 }
4822 HasRegularWithIterators = true;
4823 continue;
4824 }
4825 }
4826
4827 QualType KmpDependInfoArrayTy;
4828 if (HasDepobjDeps || HasRegularWithIterators) {
4829 NumOfElements = llvm::ConstantInt::get(CGM.IntPtrTy, NumDependencies,
4830 /*isSigned=*/false);
4831 if (HasDepobjDeps) {
4832 NumOfElements =
4833 CGF.Builder.CreateNUWAdd(NumOfDepobjElements, NumOfElements);
4834 }
4835 if (HasRegularWithIterators) {
4836 NumOfElements =
4837 CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumOfElements);
4838 }
4839 OpaqueValueExpr OVE(Loc,
4840 C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0),
4841 VK_RValue);
4842 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
4843 RValue::get(NumOfElements));
4844 KmpDependInfoArrayTy =
4845 C.getVariableArrayType(KmpDependInfoTy, &OVE, ArrayType::Normal,
4846 /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
4847 // CGF.EmitVariablyModifiedType(KmpDependInfoArrayTy);
4848 // Properly emit variable-sized array.
4849 auto *PD = ImplicitParamDecl::Create(C, KmpDependInfoArrayTy,
4850 ImplicitParamDecl::Other);
4851 CGF.EmitVarDecl(*PD);
4852 DependenciesArray = CGF.GetAddrOfLocalVar(PD);
4853 NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
4854 /*isSigned=*/false);
4855 } else {
4856 KmpDependInfoArrayTy = C.getConstantArrayType(
4857 KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies), nullptr,
4858 ArrayType::Normal, /*IndexTypeQuals=*/0);
4859 DependenciesArray =
4860 CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
4861 DependenciesArray = CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0);
4862 NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumDependencies,
4863 /*isSigned=*/false);
4864 }
4865 unsigned Pos = 0;
4866 for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
4867 if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
4868 Dependencies[I].IteratorExpr)
4869 continue;
4870 emitDependData(CGF, KmpDependInfoTy, &Pos, Dependencies[I],
4871 DependenciesArray);
4872 }
4873 // Copy regular dependecies with iterators.
4874 LValue PosLVal = CGF.MakeAddrLValue(
4875 CGF.CreateMemTemp(C.getSizeType(), "dep.counter.addr"), C.getSizeType());
4876 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
4877 for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
4878 if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
4879 !Dependencies[I].IteratorExpr)
4880 continue;
4881 emitDependData(CGF, KmpDependInfoTy, &PosLVal, Dependencies[I],
4882 DependenciesArray);
4883 }
4884 // Copy final depobj arrays without iterators.
4885 if (HasDepobjDeps) {
4886 for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
4887 if (Dependencies[I].DepKind != OMPC_DEPEND_depobj)
4888 continue;
4889 emitDepobjElements(CGF, KmpDependInfoTy, PosLVal, Dependencies[I],
4890 DependenciesArray);
4891 }
4892 }
4893 DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4894 DependenciesArray, CGF.VoidPtrTy);
4895 return std::make_pair(NumOfElements, DependenciesArray);
4896}
4897
/// Emits the heap-allocated kmp_depend_info array backing an 'omp depobj'
/// construct and returns the address of its first real element.
///
/// Layout: one hidden header element (its base_addr field stores the element
/// count, consumed later by getDepobjElements / emitDepobjElementsSizes)
/// followed by the dependency records. The returned address points past the
/// header. Memory comes from __kmpc_alloc and is released by
/// emitDestroyClause.
Address CGOpenMPRuntime::emitDepobjDependClause(
    CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies,
    SourceLocation Loc) {
  if (Dependencies.DepExprs.empty())
    return Address::invalid();
  // Process list of dependencies.
  ASTContext &C = CGM.getContext();
  Address DependenciesArray = Address::invalid();
  unsigned NumDependencies = Dependencies.DepExprs.size();
  QualType FlagsTy;
  getDependTypes(C, KmpDependInfoTy, FlagsTy);
  RecordDecl *KmpDependInfoRD =
      cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());

  llvm::Value *Size;
  // Define type kmp_depend_info[<Dependencies.size()>];
  // For depobj reserve one extra element to store the number of elements.
  // It is required to handle depobj(x) update(in) construct.
  // kmp_depend_info[<Dependencies.size()>] deps;
  llvm::Value *NumDepsVal;
  CharUnits Align = C.getTypeAlignInChars(KmpDependInfoTy);
  if (const auto *IE =
          cast_or_null<OMPIteratorExpr>(Dependencies.IteratorExpr)) {
    // Runtime count: product of all iterator trip counts.
    NumDepsVal = llvm::ConstantInt::get(CGF.SizeTy, 1);
    for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
      llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
      Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
      NumDepsVal = CGF.Builder.CreateNUWMul(NumDepsVal, Sz);
    }
    // +1 for the hidden header element.
    Size = CGF.Builder.CreateNUWAdd(llvm::ConstantInt::get(CGF.SizeTy, 1),
                                    NumDepsVal);
    CharUnits SizeInBytes =
        C.getTypeSizeInChars(KmpDependInfoTy).alignTo(Align);
    llvm::Value *RecSize = CGM.getSize(SizeInBytes);
    Size = CGF.Builder.CreateNUWMul(Size, RecSize);
    NumDepsVal =
        CGF.Builder.CreateIntCast(NumDepsVal, CGF.IntPtrTy, /*isSigned=*/false);
  } else {
    // Static count: size of a constant array with one extra header element.
    QualType KmpDependInfoArrayTy = C.getConstantArrayType(
        KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies + 1),
        nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
    CharUnits Sz = C.getTypeSizeInChars(KmpDependInfoArrayTy);
    Size = CGM.getSize(Sz.alignTo(Align));
    NumDepsVal = llvm::ConstantInt::get(CGF.IntPtrTy, NumDependencies);
  }
  // Need to allocate on the dynamic memory.
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  // Use default allocator.
  llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  llvm::Value *Args[] = {ThreadID, Size, Allocator};

  llvm::Value *Addr =
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_alloc),
                          Args, ".dep.arr.addr");
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Addr, CGF.ConvertTypeForMem(KmpDependInfoTy)->getPointerTo());
  DependenciesArray = Address(Addr, Align);
  // Write number of elements in the first element of array for depobj.
  LValue Base = CGF.MakeAddrLValue(DependenciesArray, KmpDependInfoTy);
  // deps[i].base_addr = NumDependencies;
  LValue BaseAddrLVal = CGF.EmitLValueForField(
      Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
  CGF.EmitStoreOfScalar(NumDepsVal, BaseAddrLVal);
  // Position starts at 1 to skip the header element; use a runtime counter
  // only when iterator loops make the index dynamic.
  llvm::PointerUnion<unsigned *, LValue *> Pos;
  unsigned Idx = 1;
  LValue PosLVal;
  if (Dependencies.IteratorExpr) {
    PosLVal = CGF.MakeAddrLValue(
        CGF.CreateMemTemp(C.getSizeType(), "iterator.counter.addr"),
        C.getSizeType());
    CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Idx), PosLVal,
                          /*IsInit=*/true);
    Pos = &PosLVal;
  } else {
    Pos = &Idx;
  }
  emitDependData(CGF, KmpDependInfoTy, Pos, Dependencies, DependenciesArray);
  // Return the address of the first real element (past the header).
  DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy);
  return DependenciesArray;
}
4980
/// Emits code destroying a depobj: frees the heap block allocated by
/// emitDepobjDependClause via __kmpc_free.
///
/// The depobj variable points one element past the hidden header, so the
/// address passed to the runtime is stepped back by one element to recover
/// the start of the original allocation.
void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
                                        SourceLocation Loc) {
  ASTContext &C = CGM.getContext();
  QualType FlagsTy;
  getDependTypes(C, KmpDependInfoTy, FlagsTy);
  // Load the pointer stored in the depobj variable.
  LValue Base = CGF.EmitLoadOfPointerLValue(
      DepobjLVal.getAddress(CGF),
      C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
  Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
  // Step back over the header element to get the allocation's base address.
  llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
      Addr.getPointer(),
      llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
  DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr,
                                                               CGF.VoidPtrTy);
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  // Use default allocator.
  llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  llvm::Value *Args[] = {ThreadID, DepObjAddr, Allocator};

  // _kmpc_free(gtid, addr, nullptr);
  (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                                CGM.getModule(), OMPRTL___kmpc_free),
                            Args);
}
5007
/// Emits code for 'depobj(x) update(kind)': rewrites the flags field of every
/// kmp_depend_info record stored in the depobj to the new dependence kind.
///
/// Generates a do-while style IR loop over [Begin, Begin + NumDeps) using a
/// PHI node for the current element pointer.
/// NOTE(review): with NumDeps == 0 the loop body still runs once before the
/// exit compare (do-while shape); presumably a depobj always holds at least
/// one record — confirm against emitDepobjDependClause.
void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
                                       OpenMPDependClauseKind NewDepKind,
                                       SourceLocation Loc) {
  ASTContext &C = CGM.getContext();
  QualType FlagsTy;
  getDependTypes(C, KmpDependInfoTy, FlagsTy);
  RecordDecl *KmpDependInfoRD =
      cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
  llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
  llvm::Value *NumDeps;
  LValue Base;
  // Fetch the element count and the first element from the depobj.
  std::tie(NumDeps, Base) = getDepobjElements(CGF, DepobjLVal, Loc);

  Address Begin = Base.getAddress(CGF);
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *End = CGF.Builder.CreateGEP(Begin.getPointer(), NumDeps);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done");
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);
  // PHI merges the initial pointer (from entry) with the advanced pointer
  // (from the previous body iteration).
  llvm::PHINode *ElementPHI =
      CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
  ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
  Begin = Address(ElementPHI, Begin.getAlignment());
  Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
                            Base.getTBAAInfo());
  // deps[i].flags = NewDepKind;
  RTLDependenceKindTy DepKind = translateDependencyKind(NewDepKind);
  LValue FlagsLVal = CGF.EmitLValueForField(
      Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
  CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
                        FlagsLVal);

  // Shift the address forward by one element.
  Address ElementNext =
      CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext");
  ElementPHI->addIncoming(ElementNext.getPointer(),
                          CGF.Builder.GetInsertBlock());
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(ElementNext.getPointer(), End, "omp.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
5053
// Emits a task construct: initializes the task object via emitTaskInit, then
// either enqueues it through the OpenMP runtime (then-branch) or, when the
// 'if' clause evaluates to false, executes it immediately and synchronously
// between __kmpc_omp_task_begin_if0/__kmpc_omp_task_complete_if0
// (else-branch).
void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                                   const OMPExecutableDirective &D,
                                   llvm::Function *TaskFunction,
                                   QualType SharedsTy, Address Shareds,
                                   const Expr *IfCond,
                                   const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;

  // Allocate and initialize the kmp_task_t object and the outlined task entry.
  TaskResultTy Result =
      emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  llvm::Value *NewTask = Result.NewTask;
  llvm::Function *TaskEntry = Result.TaskEntry;
  llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
  LValue TDBase = Result.TDBase;
  const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
  // Process list of dependences.
  Address DependenciesArray = Address::invalid();
  llvm::Value *NumOfElements;
  std::tie(NumOfElements, DependenciesArray) =
      emitDependClause(CGF, Data.Dependences, Loc);

  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  // libcall.
  // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
  // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
  // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence
  // list is not empty
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
  // Arguments for __kmpc_omp_task_with_deps; only filled in (and only read by
  // ThenCodeGen) when the directive has dependences.
  llvm::Value *DepTaskArgs[7];
  if (!Data.Dependences.empty()) {
    DepTaskArgs[0] = UpLoc;
    DepTaskArgs[1] = ThreadID;
    DepTaskArgs[2] = NewTask;
    DepTaskArgs[3] = NumOfElements;
    DepTaskArgs[4] = DependenciesArray.getPointer();
    DepTaskArgs[5] = CGF.Builder.getInt32(0);
    DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, &TaskArgs,
                        &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
    // Untied tasks reset part_id to 0 before each dispatch.
    if (!Data.Tied) {
      auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
      LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
      CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
    }
    // Enqueue the task, with or without the dependence list.
    if (!Data.Dependences.empty()) {
      CGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(
              CGM.getModule(), OMPRTL___kmpc_omp_task_with_deps),
          DepTaskArgs);
    } else {
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_omp_task),
                          TaskArgs);
    }
    // Check if parent region is untied and build return for untied task;
    if (auto *Region =
            dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
      Region->emitUntiedSwitch(CGF);
  };

  // Arguments for __kmpc_omp_wait_deps; only filled in (and only read by
  // ElseCodeGen) when the directive has dependences.
  llvm::Value *DepWaitTaskArgs[6];
  if (!Data.Dependences.empty()) {
    DepWaitTaskArgs[0] = UpLoc;
    DepWaitTaskArgs[1] = ThreadID;
    DepWaitTaskArgs[2] = NumOfElements;
    DepWaitTaskArgs[3] = DependenciesArray.getPointer();
    DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
    DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  auto &M = CGM.getModule();
  auto &&ElseCodeGen = [this, &M, &TaskArgs, ThreadID, NewTaskNewTaskTTy,
                        TaskEntry, &Data, &DepWaitTaskArgs,
                        Loc](CodeGenFunction &CGF, PrePostActionTy &) {
    CodeGenFunction::RunCleanupsScope LocalScope(CGF);
    // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
    // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
    // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
    // is specified.
    if (!Data.Dependences.empty())
      CGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_wait_deps),
          DepWaitTaskArgs);
    // Call proxy_task_entry(gtid, new_task);
    auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
                      Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
      Action.Enter(CGF);
      llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
                                                          OutlinedFnArgs);
    };

    // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    RegionCodeGenTy RCG(CodeGen);
    CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
                              M, OMPRTL___kmpc_omp_task_begin_if0),
                          TaskArgs,
                          OMPBuilder.getOrCreateRuntimeFunction(
                              M, OMPRTL___kmpc_omp_task_complete_if0),
                          TaskArgs);
    RCG.setAction(Action);
    RCG(CGF);
  };

  // With an 'if' clause, select between the enqueue and the serialized paths
  // at run time; otherwise always enqueue.
  if (IfCond) {
    emitIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
  } else {
    RegionCodeGenTy ThenRCG(ThenCodeGen);
    ThenRCG(CGF);
  }
}
5171
// Emits a taskloop construct: initializes the task object, stores the loop
// bounds/stride (and reduction data) into the kmp_task_t record, and calls
// __kmpc_taskloop.
void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       const OMPLoopDirective &D,
                                       llvm::Function *TaskFunction,
                                       QualType SharedsTy, Address Shareds,
                                       const Expr *IfCond,
                                       const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;
  TaskResultTy Result =
      emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  // libcall.
  // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
  // sched, kmp_uint64 grainsize, void *task_dup);
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  // if_val: evaluate the 'if' clause condition, defaulting to 1 (true) when
  // no clause was given.
  llvm::Value *IfVal;
  if (IfCond) {
    IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
                                      /*isSigned=*/true);
  } else {
    IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
  }

  // Initialize the lower bound field of the task record from the loop
  // directive's lower bound variable initializer.
  LValue LBLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
  const auto *LBVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
  CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(CGF),
                       LBLVal.getQuals(),
                       /*IsInitializer=*/true);
  // Same for the upper bound field.
  LValue UBLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
  const auto *UBVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
  CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(CGF),
                       UBLVal.getQuals(),
                       /*IsInitializer=*/true);
  // Same for the stride field.
  LValue StLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
  const auto *StVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
  CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(CGF),
                       StLVal.getQuals(),
                       /*IsInitializer=*/true);
  // Store reductions address.
  LValue RedLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
  if (Data.Reductions) {
    CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
  } else {
    // No reductions: zero-initialize the field.
    CGF.EmitNullInitialization(RedLVal.getAddress(CGF),
                               CGF.getContext().VoidPtrTy);
  }
  // 'sched' argument encoding for __kmpc_taskloop.
  enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
  llvm::Value *TaskArgs[] = {
      UpLoc,
      ThreadID,
      Result.NewTask,
      IfVal,
      LBLVal.getPointer(CGF),
      UBLVal.getPointer(CGF),
      CGF.EmitLoadOfScalar(StLVal, Loc),
      llvm::ConstantInt::getSigned(
          CGF.IntTy, 1), // Always 1 because taskgroup emitted by the compiler
      // sched: Data.Schedule's int flag distinguishes num_tasks from
      // grainsize; no pointer means no schedule clause.
      llvm::ConstantInt::getSigned(
          CGF.IntTy, Data.Schedule.getPointer()
                         ? Data.Schedule.getInt() ? NumTasks : Grainsize
                         : NoSchedule),
      Data.Schedule.getPointer()
          ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
                                      /*isSigned=*/false)
          : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
      // task_dup: optional helper that duplicates the task's private data.
      Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                             Result.TaskDupFn, CGF.VoidPtrTy)
                       : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
  CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                          CGM.getModule(), OMPRTL___kmpc_taskloop),
                      TaskArgs);
}
5257
/// Emit reduction operation for each element of array (required for
/// array sections) LHS op = RHS.
/// \param Type Type of array.
/// \param LHSVar Variable on the left side of the reduction operation
/// (references element of array in original variable).
/// \param RHSVar Variable on the right side of the reduction operation
/// (references element of array in original variable).
/// \param RedOpGen Generator of reduction operation with use of LHSVar and
/// RHSVar.
/// \param XExpr, EExpr, UpExpr Optional expressions forwarded verbatim to
/// \p RedOpGen (used by the atomic reduction path).
static void EmitOMPAggregateReduction(
    CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
    const VarDecl *RHSVar,
    const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
                                  const Expr *, const Expr *)> &RedOpGen,
    const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
    const Expr *UpExpr = nullptr) {
  // Perform element-by-element initialization.
  QualType ElementTy;
  Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
  Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);

  llvm::Value *RHSBegin = RHSAddr.getPointer();
  llvm::Value *LHSBegin = LHSAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
  // Skip the loop entirely for zero-length arrays.
  llvm::Value *IsEmpty =
      CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);

  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);

  // PHIs over the current source/destination element pointers; back-edge
  // incoming values are added after the body is emitted.
  llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
      RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  RHSElementPHI->addIncoming(RHSBegin, EntryBB);
  Address RHSElementCurrent =
      Address(RHSElementPHI,
              RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
      LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
  LHSElementPHI->addIncoming(LHSBegin, EntryBB);
  Address LHSElementCurrent =
      Address(LHSElementPHI,
              LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  // Temporarily remap LHSVar/RHSVar to the current element addresses so the
  // generated reduction op works on one element at a time.
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; });
  Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; });
  Scope.Privatize();
  RedOpGen(CGF, XExpr, EExpr, UpExpr);
  Scope.ForceCleanup();

  // Shift the address forward by one element.
  llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
      LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
      RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
  RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());

  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
5337
5338/// Emit reduction combiner. If the combiner is a simple expression emit it as
5339/// is, otherwise consider it as combiner of UDR decl and emit it as a call of
5340/// UDR combiner function.
5341static void emitReductionCombiner(CodeGenFunction &CGF,
5342 const Expr *ReductionOp) {
5343 if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
5344 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
5345 if (const auto *DRE =
5346 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
5347 if (const auto *DRD =
5348 dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
5349 std::pair<llvm::Function *, llvm::Function *> Reduction =
5350 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
5351 RValue Func = RValue::get(Reduction.first);
5352 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
5353 CGF.EmitIgnoredExpr(ReductionOp);
5354 return;
5355 }
5356 CGF.EmitIgnoredExpr(ReductionOp);
5357}
5358
/// Emits the interprocedural reduction function
///   void reduction_func(void *lhs[<n>], void *rhs[<n>])
/// that applies each reduction operation element-wise:
///   *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// \param ArgsType Pointer type of the void*[n] argument arrays.
/// \returns The newly created internal-linkage function.
llvm::Function *CGOpenMPRuntime::emitReductionFunction(
    SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates,
    ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
    ArrayRef<const Expr *> ReductionOps) {
  ASTContext &C = CGM.getContext();

  // void reduction_func(void *LHSArg, void *RHSArg);
  FunctionArgList Args;
  ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                           ImplicitParamDecl::Other);
  Args.push_back(&LHSArg);
  Args.push_back(&RHSArg);
  const auto &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  std::string Name = getName({"omp", "reduction", "reduction_func"});
  auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
                                    llvm::GlobalValue::InternalLinkage, Name,
                                    &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);

  // Dst = (void*[n])(LHSArg);
  // Src = (void*[n])(RHSArg);
  Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
      ArgsType), CGF.getPointerAlign());
  Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
      ArgsType), CGF.getPointerAlign());

  // ...
  // *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
  // ...
  // Remap the LHS/RHS variables of each reduction to the corresponding slots
  // of the argument arrays. Idx can run ahead of I because variably-modified
  // types occupy an extra slot holding their size.
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
    const auto *RHSVar =
        cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
    Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() {
      return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
    });
    const auto *LHSVar =
        cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
    Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() {
      return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
    });
    QualType PrivTy = (*IPriv)->getType();
    if (PrivTy->isVariablyModifiedType()) {
      // Get array size and emit VLA type.
      ++Idx;
      Address Elem = CGF.Builder.CreateConstArrayGEP(LHS, Idx);
      llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
      const VariableArrayType *VLA =
          CGF.getContext().getAsVariableArrayType(PrivTy);
      const auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
      // Bind the VLA size expression to the value loaded from the array.
      CodeGenFunction::OpaqueValueMapping OpaqueMap(
          CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
      CGF.EmitVariablyModifiedType(PrivTy);
    }
  }
  Scope.Privatize();
  // Emit each reduction operation against the remapped variables.
  IPriv = Privates.begin();
  auto ILHS = LHSExprs.begin();
  auto IRHS = RHSExprs.begin();
  for (const Expr *E : ReductionOps) {
    if ((*IPriv)->getType()->isArrayType()) {
      // Emit reduction for array section.
      const auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      const auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      EmitOMPAggregateReduction(
          CGF, (*IPriv)->getType(), LHSVar, RHSVar,
          [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
            emitReductionCombiner(CGF, E);
          });
    } else {
      // Emit reduction for array subscript or single variable.
      emitReductionCombiner(CGF, E);
    }
    ++IPriv;
    ++ILHS;
    ++IRHS;
  }
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}
5450
5451void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
5452 const Expr *ReductionOp,
5453 const Expr *PrivateRef,
5454 const DeclRefExpr *LHS,
5455 const DeclRefExpr *RHS) {
5456 if (PrivateRef->getType()->isArrayType()) {
5457 // Emit reduction for array section.
5458 const auto *LHSVar = cast<VarDecl>(LHS->getDecl());
5459 const auto *RHSVar = cast<VarDecl>(RHS->getDecl());
5460 EmitOMPAggregateReduction(
5461 CGF, PrivateRef->getType(), LHSVar, RHSVar,
5462 [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5463 emitReductionCombiner(CGF, ReductionOp);
5464 });
5465 } else {
5466 // Emit reduction for array subscript or single variable.
5467 emitReductionCombiner(CGF, ReductionOp);
5468 }
5469}
5470
// Emits the full reduction epilogue for a directive: builds the RedList array,
// calls __kmpc_reduce{_nowait}, and emits the non-atomic (case 1) and atomic
// (case 2) combination paths.
void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
                                    ArrayRef<const Expr *> Privates,
                                    ArrayRef<const Expr *> LHSExprs,
                                    ArrayRef<const Expr *> RHSExprs,
                                    ArrayRef<const Expr *> ReductionOps,
                                    ReductionOptionsTy Options) {
  if (!CGF.HaveInsertPoint())
    return;

  bool WithNowait = Options.WithNowait;
  bool SimpleReduction = Options.SimpleReduction;

  // Next code should be emitted for reduction:
  //
  // static kmp_critical_name lock = { 0 };
  //
  // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
  //  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
  //  ...
  //  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
  //  *(Type<n>-1*)rhs[<n>-1]);
  // }
  //
  // ...
  // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
  // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  // RedList, reduce_func, &<lock>)) {
  // case 1:
  //  ...
  //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  //  ...
  // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  // break;
  // case 2:
  //  ...
  //  Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  //  ...
  // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
  // break;
  // default:;
  // }
  //
  // if SimpleReduction is true, only the next code is generated:
  //  ...
  //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  //  ...

  ASTContext &C = CGM.getContext();

  if (SimpleReduction) {
    // No runtime coordination needed: just apply each combiner directly.
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (const Expr *E : ReductionOps) {
      emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                  cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
    return;
  }

  // 1. Build a list of reduction variables.
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
  auto Size = RHSExprs.size();
  for (const Expr *E : Privates) {
    if (E->getType()->isVariablyModifiedType())
      // Reserve place for array size.
      ++Size;
  }
  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
  QualType ReductionArrayTy =
      C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
                             /*IndexTypeQuals=*/0);
  Address ReductionList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
    CGF.Builder.CreateStore(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
        Elem);
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
      // Store array size.
      ++Idx;
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
      llvm::Value *Size = CGF.Builder.CreateIntCast(
          CGF.getVLASize(
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
              .NumElts,
          CGF.SizeTy, /*isSigned=*/false);
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
                              Elem);
    }
  }

  // 2. Emit reduce_func().
  llvm::Function *ReductionFn = emitReductionFunction(
      Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
      LHSExprs, RHSExprs, ReductionOps);

  // 3. Create static kmp_critical_name lock = { 0 };
  std::string Name = getName({"reduction"});
  llvm::Value *Lock = getCriticalRegionLock(Name);

  // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  // RedList, reduce_func, &<lock>);
  llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);
  llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
  llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      ReductionList.getPointer(), CGF.VoidPtrTy);
  llvm::Value *Args[] = {
      IdentTLoc,                             // ident_t *<loc>
      ThreadId,                              // i32 <gtid>
      CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
      ReductionArrayTySize,                  // size_type sizeof(RedList)
      RL,                                    // void *RedList
      ReductionFn, // void (*) (void *, void *) <reduce_func>
      Lock         // kmp_critical_name *&<lock>
  };
  llvm::Value *Res = CGF.EmitRuntimeCall(
      OMPBuilder.getOrCreateRuntimeFunction(
          CGM.getModule(),
          WithNowait ? OMPRTL___kmpc_reduce_nowait : OMPRTL___kmpc_reduce),
      Args);

  // 5. Build switch(res)
  llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
  llvm::SwitchInst *SwInst =
      CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);

  // 6. Build case 1:
  //  ...
  //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  //  ...
  // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  // break;
  llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
  SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
  CGF.EmitBlock(Case1BB);

  // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  llvm::Value *EndArgs[] = {
      IdentTLoc, // ident_t *<loc>
      ThreadId,  // i32 <gtid>
      Lock       // kmp_critical_name *&<lock>
  };
  auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (const Expr *E : ReductionOps) {
      RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                     cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
  };
  RegionCodeGenTy RCG(CodeGen);
  // CommonActionTy appends the __kmpc_end_reduce{_nowait} call after the
  // combiner code.
  CommonActionTy Action(
      nullptr, llvm::None,
      OMPBuilder.getOrCreateRuntimeFunction(
          CGM.getModule(), WithNowait ? OMPRTL___kmpc_end_reduce_nowait
                                      : OMPRTL___kmpc_end_reduce),
      EndArgs);
  RCG.setAction(Action);
  RCG(CGF);

  CGF.EmitBranch(DefaultBB);

  // 7. Build case 2:
  //  ...
  //  Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  //  ...
  // break;
  llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
  SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
  CGF.EmitBlock(Case2BB);

  auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
                             CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    auto IPriv = Privates.begin();
    for (const Expr *E : ReductionOps) {
      const Expr *XExpr = nullptr;
      const Expr *EExpr = nullptr;
      const Expr *UpExpr = nullptr;
      BinaryOperatorKind BO = BO_Comma;
      // Note: this pointer 'BO' intentionally shadows the operator-kind
      // variable above; only its LHS/RHS are used here.
      if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
        if (BO->getOpcode() == BO_Assign) {
          XExpr = BO->getLHS();
          UpExpr = BO->getRHS();
        }
      }
      // Try to emit update expression as a simple atomic.
      const Expr *RHSExpr = UpExpr;
      if (RHSExpr) {
        // Analyze RHS part of the whole expression.
        if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(
                RHSExpr->IgnoreParenImpCasts())) {
          // If this is a conditional operator, analyze its condition for
          // min/max reduction operator.
          RHSExpr = ACO->getCond();
        }
        if (const auto *BORHS =
                dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
          EExpr = BORHS->getRHS();
          BO = BORHS->getOpcode();
        }
      }
      if (XExpr) {
        // Simple 'x = x op e' form: emit as an atomic update.
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
        auto &&AtomicRedGen = [BO, VD,
                               Loc](CodeGenFunction &CGF, const Expr *XExpr,
                                    const Expr *EExpr, const Expr *UpExpr) {
          LValue X = CGF.EmitLValue(XExpr);
          RValue E;
          if (EExpr)
            E = CGF.EmitAnyExpr(EExpr);
          CGF.EmitOMPAtomicSimpleUpdateExpr(
              X, E, BO, /*IsXLHSInRHSPart=*/true,
              llvm::AtomicOrdering::Monotonic, Loc,
              [&CGF, UpExpr, VD, Loc](RValue XRValue) {
                // The fallback generator re-evaluates the update expression
                // with VD temporarily bound to the loaded value of x.
                CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
                PrivateScope.addPrivate(
                    VD, [&CGF, VD, XRValue, Loc]() {
                      Address LHSTemp = CGF.CreateMemTemp(VD->getType());
                      CGF.emitOMPSimpleStore(
                          CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
                          VD->getType().getNonReferenceType(), Loc);
                      return LHSTemp;
                    });
                (void)PrivateScope.Privatize();
                return CGF.EmitAnyExpr(UpExpr);
              });
        };
        if ((*IPriv)->getType()->isArrayType()) {
          // Emit atomic reduction for array section.
          const auto *RHSVar =
              cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
          EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
                                    AtomicRedGen, XExpr, EExpr, UpExpr);
        } else {
          // Emit atomic reduction for array subscript or single variable.
          AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
        }
      } else {
        // Emit as a critical region.
        auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
                                     const Expr *, const Expr *) {
          CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
          std::string Name = RT.getName({"atomic_reduction"});
          RT.emitCriticalRegion(
              CGF, Name,
              [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
                Action.Enter(CGF);
                emitReductionCombiner(CGF, E);
              },
              Loc);
        };
        if ((*IPriv)->getType()->isArrayType()) {
          const auto *LHSVar =
              cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
          const auto *RHSVar =
              cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
          EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
                                    CritRedGen);
        } else {
          CritRedGen(CGF, nullptr, nullptr, nullptr);
        }
      }
      ++ILHS;
      ++IRHS;
      ++IPriv;
    }
  };
  RegionCodeGenTy AtomicRCG(AtomicCodeGen);
  if (!WithNowait) {
    // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
    llvm::Value *EndArgs[] = {
        IdentTLoc, // ident_t *<loc>
        ThreadId,  // i32 <gtid>
        Lock       // kmp_critical_name *&<lock>
    };
    CommonActionTy Action(nullptr, llvm::None,
                          OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_end_reduce),
                          EndArgs);
    AtomicRCG.setAction(Action);
    AtomicRCG(CGF);
  } else {
    AtomicRCG(CGF);
  }

  CGF.EmitBranch(DefaultBB);
  CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
}
5777
5778/// Generates unique name for artificial threadprivate variables.
5779/// Format is: <Prefix> "." <Decl_mangled_name> "_" "<Decl_start_loc_raw_enc>"
5780static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
5781 const Expr *Ref) {
5782 SmallString<256> Buffer;
5783 llvm::raw_svector_ostream Out(Buffer);
5784 const clang::DeclRefExpr *DE;
5785 const VarDecl *D = ::getBaseDecl(Ref, DE);
5786 if (!D)
5787 D = cast<VarDecl>(cast<DeclRefExpr>(Ref)->getDecl());
5788 D = D->getCanonicalDecl();
5789 std::string Name = CGM.getOpenMPRuntime().getName(
5790 {D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
5791 Out << Prefix << Name << "_"
5792 << D->getCanonicalDecl()->getBeginLoc().getRawEncoding();
5793 return std::string(Out.str());
5794}
5795
/// Emits reduction initializer function:
/// \code
/// void @.red_init(void* %arg, void* %orig) {
/// %0 = bitcast void* %arg to <type>*
/// store <type> <init>, <type>* %0
/// ret void
/// }
/// \endcode
static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N) {
  ASTContext &C = CGM.getContext();
  QualType VoidPtrTy = C.VoidPtrTy;
  VoidPtrTy.addRestrict();
  FunctionArgList Args;
  // %arg: pointer to the private copy; %orig: pointer to the original item.
  ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
                          ImplicitParamDecl::Other);
  ImplicitParamDecl ParamOrig(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
                              ImplicitParamDecl::Other);
  Args.emplace_back(&Param);
  Args.emplace_back(&ParamOrig);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_init", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  Address PrivateAddr = CGF.EmitLoadOfPointer(
      CGF.GetAddrOfLocalVar(&Param),
      C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  LValue OrigLVal;
  // If initializer uses initializer from declare reduction construct, emit a
  // pointer to the address of the original reduction item (required by the
  // reduction initializer); otherwise pass a null pointer.
  if (RCG.usesReductionInitializer(N)) {
    Address SharedAddr = CGF.GetAddrOfLocalVar(&ParamOrig);
    SharedAddr = CGF.EmitLoadOfPointer(
        SharedAddr,
        CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
    OrigLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
  } else {
    OrigLVal = CGF.MakeNaturalAlignAddrLValue(
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        CGM.getContext().VoidPtrTy);
  }
  // Emit the initializer:
  // %0 = bitcast void* %arg to <type>*
  // store <type> <init>, <type>* %0
  RCG.emitInitialization(CGF, N, PrivateAddr, OrigLVal,
                         [](CodeGenFunction &) { return false; });
  CGF.FinishFunction();
  return Fn;
}
5864
/// Emits reduction combiner function:
/// \code
/// void @.red_comb(void* %arg0, void* %arg1) {
/// %lhs = bitcast void* %arg0 to <type>*
/// %rhs = bitcast void* %arg1 to <type>*
/// %2 = <ReductionOp>(<type>* %lhs, <type>* %rhs)
/// store <type> %2, <type>* %lhs
/// ret void
/// }
/// \endcode
///
/// \param RCG Reduction codegen helper describing all reduction items.
/// \param N Index of the reduction item this combiner is emitted for.
/// \param ReductionOp Combiner expression to emit.
/// \param LHS DeclRefExpr naming the in/out operand variable.
/// \param RHS DeclRefExpr naming the in operand variable.
/// \param PrivateRef Reference expression for the private copy, if any.
static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N,
                                           const Expr *ReductionOp,
                                           const Expr *LHS, const Expr *RHS,
                                           const Expr *PrivateRef) {
  ASTContext &C = CGM.getContext();
  // Variables behind the lhs/rhs expressions; they are remapped onto the
  // generated function's arguments below via OMPPrivateScope.
  const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
  const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
  // Build the helper's signature: void(void *, void *).
  FunctionArgList Args;
  ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                               C.VoidPtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                            ImplicitParamDecl::Other);
  Args.emplace_back(&ParamInOut);
  Args.emplace_back(&ParamIn);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_comb", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  // Remap lhs and rhs variables to the addresses of the function arguments.
  // %lhs = bitcast void* %arg0 to <type>*
  // %rhs = bitcast void* %arg1 to <type>*
  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() {
    // Pull out the pointer to the variable.
    Address PtrAddr = CGF.EmitLoadOfPointer(
        CGF.GetAddrOfLocalVar(&ParamInOut),
        C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
    return CGF.Builder.CreateElementBitCast(
        PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
  });
  PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() {
    // Pull out the pointer to the variable.
    Address PtrAddr = CGF.EmitLoadOfPointer(
        CGF.GetAddrOfLocalVar(&ParamIn),
        C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
    return CGF.Builder.CreateElementBitCast(
        PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
  });
  PrivateScope.Privatize();
  // Emit the combiner body:
  // %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
  // store <type> %2, <type>* %lhs
  CGM.getOpenMPRuntime().emitSingleReductionCombiner(
      CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
      cast<DeclRefExpr>(RHS));
  CGF.FinishFunction();
  return Fn;
}
5942
/// Emits reduction finalizer function:
/// \code
/// void @.red_fini(void* %arg) {
/// %0 = bitcast void* %arg to <type>*
/// <destroy>(<type>* %0)
/// ret void
/// }
/// \endcode
///
/// Returns nullptr when the reduction item needs no cleanups at all; the
/// caller then stores a null function pointer in the runtime descriptor.
static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N) {
  // No destructor-like cleanups required -> no finalizer is emitted.
  if (!RCG.needCleanups(N))
    return nullptr;
  ASTContext &C = CGM.getContext();
  // Build the helper's signature: void(void *).
  FunctionArgList Args;
  ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                          ImplicitParamDecl::Other);
  Args.emplace_back(&Param);
  const auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  std::string Name = CGM.getOpenMPRuntime().getName({"red_fini", ""});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  // %0 = load the item pointer out of the void* argument.
  Address PrivateAddr = CGF.EmitLoadOfPointer(
      CGF.GetAddrOfLocalVar(&Param),
      C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
    Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                                CGM.getContext().getSizeType(), Loc);
  }
  RCG.emitAggregateType(CGF, N, Size);
  // Emit the finalizer body:
  // <destroy>(<type>* %0)
  RCG.emitCleanups(CGF, N, PrivateAddr);
  CGF.FinishFunction(Loc);
  return Fn;
}
5991
/// Emits the runtime descriptor array for task reductions and the call that
/// registers it (__kmpc_taskred_init, or __kmpc_taskred_modifier_init when a
/// 'reduction' clause with a task modifier is involved). Returns the opaque
/// taskgroup/reduction handle produced by the runtime, or nullptr if there is
/// nothing to emit.
llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  // Nothing to do without an insert point or without reduction items.
  if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
    return nullptr;

  // Build typedef struct:
  // kmp_taskred_input {
  //   void *reduce_shar; // shared reduction item
  //   void *reduce_orig; // original reduction item used for initialization
  //   size_t reduce_size; // size of data item
  //   void *reduce_init; // data initialization routine
  //   void *reduce_fini; // data finalization routine
  //   void *reduce_comb; // data combiner routine
  //   kmp_task_red_flags_t flags; // flags for additional info from compiler
  // } kmp_taskred_input_t;
  ASTContext &C = CGM.getContext();
  RecordDecl *RD = C.buildImplicitRecord("kmp_taskred_input_t");
  RD->startDefinition();
  const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *OrigFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
  const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *FlagsFD = addFieldToRecordDecl(
      C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
  RD->completeDefinition();
  QualType RDType = C.getRecordType(RD);
  unsigned Size = Data.ReductionVars.size();
  llvm::APInt ArraySize(/*numBits=*/64, Size);
  QualType ArrayRDType = C.getConstantArrayType(
      RDType, ArraySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
  // kmp_task_red_input_t .rd_input.[Size];
  Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
  ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionOrigs,
                       Data.ReductionCopies, Data.ReductionOps);
  // Fill one descriptor element per reduction item.
  for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
    // kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
    llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
                           llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
    llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
        TaskRedInput.getPointer(), Idxs,
        /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
        ".rd_input.gep.");
    LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
    // ElemLVal.reduce_shar = &Shareds[Cnt];
    LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
    RCG.emitSharedOrigLValue(CGF, Cnt);
    llvm::Value *CastedShared =
        CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer(CGF));
    CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
    // ElemLVal.reduce_orig = &Origs[Cnt];
    LValue OrigLVal = CGF.EmitLValueForField(ElemLVal, OrigFD);
    llvm::Value *CastedOrig =
        CGF.EmitCastToVoidPtr(RCG.getOrigLValue(Cnt).getPointer(CGF));
    CGF.EmitStoreOfScalar(CastedOrig, OrigLVal);
    RCG.emitAggregateType(CGF, Cnt);
    llvm::Value *SizeValInChars;
    llvm::Value *SizeVal;
    std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
    // We use delayed creation/initialization for VLAs and array sections. It is
    // required because runtime does not provide the way to pass the sizes of
    // VLAs/array sections to initializer/combiner/finalizer functions. Instead
    // threadprivate global variables are used to store these values and use
    // them in the functions.
    bool DelayedCreation = !!SizeVal;
    SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
                                               /*isSigned=*/false);
    LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
    CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
    // ElemLVal.reduce_init = init;
    LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
    llvm::Value *InitAddr =
        CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
    CGF.EmitStoreOfScalar(InitAddr, InitLVal);
    // ElemLVal.reduce_fini = fini;
    // The finalizer is optional (nullptr when no cleanups are needed).
    LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
    llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
    llvm::Value *FiniAddr = Fini
                                ? CGF.EmitCastToVoidPtr(Fini)
                                : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
    CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
    // ElemLVal.reduce_comb = comb;
    LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
    llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
        CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
        RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
    CGF.EmitStoreOfScalar(CombAddr, CombLVal);
    // ElemLVal.flags = 0;
    // (flag value 1 marks delayed-creation items for the runtime)
    LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
    if (DelayedCreation) {
      CGF.EmitStoreOfScalar(
          llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*isSigned=*/true),
          FlagsLVal);
    } else
      CGF.EmitNullInitialization(FlagsLVal.getAddress(CGF),
                                 FlagsLVal.getType());
  }
  if (Data.IsReductionWithTaskMod) {
    // Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int
    // is_ws, int num, void *data);
    llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
    llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
                                                  CGM.IntTy, /*isSigned=*/true);
    llvm::Value *Args[] = {
        IdentTLoc, GTid,
        llvm::ConstantInt::get(CGM.IntTy, Data.IsWorksharingReduction ? 1 : 0,
                               /*isSigned=*/true),
        llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            TaskRedInput.getPointer(), CGM.VoidPtrTy)};
    return CGF.EmitRuntimeCall(
        OMPBuilder.getOrCreateRuntimeFunction(
            CGM.getModule(), OMPRTL___kmpc_taskred_modifier_init),
        Args);
  }
  // Build call void *__kmpc_taskred_init(int gtid, int num_data, void *data);
  llvm::Value *Args[] = {
      CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
                                /*isSigned=*/true),
      llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
                                                      CGM.VoidPtrTy)};
  return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                                 CGM.getModule(), OMPRTL___kmpc_taskred_init),
                             Args);
}
6120
6121void CGOpenMPRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
6122 SourceLocation Loc,
6123 bool IsWorksharingReduction) {
6124 // Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int
6125 // is_ws, int num, void *data);
6126 llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc);
6127 llvm::Value *GTid = CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
6128 CGM.IntTy, /*isSigned=*/true);
6129 llvm::Value *Args[] = {IdentTLoc, GTid,
6130 llvm::ConstantInt::get(CGM.IntTy,
6131 IsWorksharingReduction ? 1 : 0,
6132 /*isSigned=*/true)};
6133 (void)CGF.EmitRuntimeCall(
6134 OMPBuilder.getOrCreateRuntimeFunction(
6135 CGM.getModule(), OMPRTL___kmpc_task_reduction_modifier_fini),
6136 Args);
6137}
6138
6139void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
6140 SourceLocation Loc,
6141 ReductionCodeGen &RCG,
6142 unsigned N) {
6143 auto Sizes = RCG.getSizes(N);
6144 // Emit threadprivate global variable if the type is non-constant
6145 // (Sizes.second = nullptr).
6146 if (Sizes.second) {
6147 llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy,
6148 /*isSigned=*/false);
6149 Address SizeAddr = getAddrOfArtificialThreadPrivate(
6150 CGF, CGM.getContext().getSizeType(),
6151 generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
6152 CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
6153 }
6154}
6155
6156Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
6157 SourceLocation Loc,
6158 llvm::Value *ReductionsPtr,
6159 LValue SharedLVal) {
6160 // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
6161 // *d);
6162 llvm::Value *Args[] = {CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
6163 CGM.IntTy,
6164 /*isSigned=*/true),
6165 ReductionsPtr,
6166 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
6167 SharedLVal.getPointer(CGF), CGM.VoidPtrTy)};
6168 return Address(
6169 CGF.EmitRuntimeCall(
6170 OMPBuilder.getOrCreateRuntimeFunction(
6171 CGM.getModule(), OMPRTL___kmpc_task_reduction_get_th_data),
6172 Args),
6173 SharedLVal.getAlignment());
6174}
6175
6176void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
6177 SourceLocation Loc) {
6178 if (!CGF.HaveInsertPoint())
6179 return;
6180
6181 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
6182 OMPBuilder.CreateTaskwait(CGF.Builder);
6183 } else {
6184 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
6185 // global_tid);
6186 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
6187 // Ignore return result until untied tasks are supported.
6188 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
6189 CGM.getModule(), OMPRTL___kmpc_omp_taskwait),
6190 Args);
6191 }
6192
6193 if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
6194 Region->emitUntiedSwitch(CGF);
6195}
6196
6197void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
6198 OpenMPDirectiveKind InnerKind,
6199 const RegionCodeGenTy &CodeGen,
6200 bool HasCancel) {
6201 if (!CGF.HaveInsertPoint())
6202 return;
6203 InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel);
6204 CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
6205}
6206
namespace {
/// Cancellation kinds passed as the 'cncl_kind' argument to the
/// __kmpc_cancel / __kmpc_cancellationpoint runtime entry points (see
/// emitCancelCall / emitCancellationPointCall below).
enum RTCancelKind {
  CancelNoreq = 0,     // No cancellation requested.
  CancelParallel = 1,  // Cancel a 'parallel' region.
  CancelLoop = 2,      // Cancel a worksharing loop ('for').
  CancelSections = 3,  // Cancel a 'sections' region.
  CancelTaskgroup = 4  // Cancel a 'taskgroup' region.
};
} // anonymous namespace
6216
6217static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
6218 RTCancelKind CancelKind = CancelNoreq;
6219 if (CancelRegion == OMPD_parallel)
6220 CancelKind = CancelParallel;
6221 else if (CancelRegion == OMPD_for)
6222 CancelKind = CancelLoop;
6223 else if (CancelRegion == OMPD_sections)
6224 CancelKind = CancelSections;
6225 else {
6226 assert(CancelRegion == OMPD_taskgroup)((CancelRegion == OMPD_taskgroup) ? static_cast<void> (
0) : __assert_fail ("CancelRegion == OMPD_taskgroup", "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6226, __PRETTY_FUNCTION__))
;
6227 CancelKind = CancelTaskgroup;
6228 }
6229 return CancelKind;
6230}
6231
void CGOpenMPRuntime::emitCancellationPointCall(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDirectiveKind CancelRegion) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
  // global_tid, kmp_int32 cncl_kind);
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    // For 'cancellation point taskgroup', the task region info may not have a
    // cancel. This may instead happen in another adjacent task.
    if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
      llvm::Value *Args[] = {
          emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
          CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // Ignore return result until untied tasks are supported.
      llvm::Value *Result = CGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(
              CGM.getModule(), OMPRTL___kmpc_cancellationpoint),
          Args);
      // if (__kmpc_cancellationpoint()) {
      //   exit from construct;
      // }
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
      llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
      llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
      CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
      CGF.EmitBlock(ExitBB);
      // exit from construct;
      // Branch through the cleanup scopes so pending cleanups run before
      // leaving the region.
      CodeGenFunction::JumpDest CancelDest =
          CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
      CGF.EmitBranchThroughCleanup(CancelDest);
      CGF.EmitBlock(ContBB, /*IsFinished=*/true);
    }
  }
}
6268
void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                                     const Expr *IfCond,
                                     OpenMPDirectiveKind CancelRegion) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 cncl_kind);
  auto &M = CGM.getModule();
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    // The actual cancel is emitted via this lambda so that it can be guarded
    // by the 'if' clause condition below.
    auto &&ThenGen = [this, &M, Loc, CancelRegion,
                      OMPRegionInfo](CodeGenFunction &CGF, PrePostActionTy &) {
      CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
      llvm::Value *Args[] = {
          RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
          CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // Ignore return result until untied tasks are supported.
      llvm::Value *Result = CGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_cancel), Args);
      // if (__kmpc_cancel()) {
      //   exit from construct;
      // }
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
      llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
      llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
      CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
      CGF.EmitBlock(ExitBB);
      // exit from construct;
      // Branch through the cleanup scopes so pending cleanups run before
      // leaving the region.
      CodeGenFunction::JumpDest CancelDest =
          CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
      CGF.EmitBranchThroughCleanup(CancelDest);
      CGF.EmitBlock(ContBB, /*IsFinished=*/true);
    };
    if (IfCond) {
      // Guard the cancel with the 'if' clause; the else branch is a no-op.
      emitIfClause(CGF, IfCond, ThenGen,
                   [](CodeGenFunction &, PrePostActionTy &) {});
    } else {
      RegionCodeGenTy ThenRCG(ThenGen);
      ThenRCG(CGF);
    }
  }
}
6311
6312namespace {
6313/// Cleanup action for uses_allocators support.
6314class OMPUsesAllocatorsActionTy final : public PrePostActionTy {
6315 ArrayRef<std::pair<const Expr *, const Expr *>> Allocators;
6316
6317public:
6318 OMPUsesAllocatorsActionTy(
6319 ArrayRef<std::pair<const Expr *, const Expr *>> Allocators)
6320 : Allocators(Allocators) {}
6321 void Enter(CodeGenFunction &CGF) override {
6322 if (!CGF.HaveInsertPoint())
6323 return;
6324 for (const auto &AllocatorData : Allocators) {
6325 CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsInit(
6326 CGF, AllocatorData.first, AllocatorData.second);
6327 }
6328 }
6329 void Exit(CodeGenFunction &CGF) override {
6330 if (!CGF.HaveInsertPoint())
6331 return;
6332 for (const auto &AllocatorData : Allocators) {
6333 CGF.CGM.getOpenMPRuntime().emitUsesAllocatorsFini(CGF,
6334 AllocatorData.first);
6335 }
6336 }
6337};
6338} // namespace
6339
6340void CGOpenMPRuntime::emitTargetOutlinedFunction(
6341 const OMPExecutableDirective &D, StringRef ParentName,
6342 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
6343 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
6344 assert(!ParentName.empty() && "Invalid target region parent name!")((!ParentName.empty() && "Invalid target region parent name!"
) ? static_cast<void> (0) : __assert_fail ("!ParentName.empty() && \"Invalid target region parent name!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6344, __PRETTY_FUNCTION__))
;
6345 HasEmittedTargetRegion = true;
6346 SmallVector<std::pair<const Expr *, const Expr *>, 4> Allocators;
6347 for (const auto *C : D.getClausesOfKind<OMPUsesAllocatorsClause>()) {
6348 for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
6349 const OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
6350 if (!D.AllocatorTraits)
6351 continue;
6352 Allocators.emplace_back(D.Allocator, D.AllocatorTraits);
6353 }
6354 }
6355 OMPUsesAllocatorsActionTy UsesAllocatorAction(Allocators);
6356 CodeGen.setAction(UsesAllocatorAction);
6357 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
6358 IsOffloadEntry, CodeGen);
6359}
6360
void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
                                             const Expr *Allocator,
                                             const Expr *AllocatorTraits) {
  llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
  ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true);
  // Use default memspace handle.
  llvm::Value *MemSpaceHandle = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  // Number of traits = constant array bound of the traits expression's type.
  llvm::Value *NumTraits = llvm::ConstantInt::get(
      CGF.IntTy, cast<ConstantArrayType>(
                     AllocatorTraits->getType()->getAsArrayTypeUnsafe())
                     ->getSize()
                     .getLimitedValue());
  LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits);
  // Reinterpret the traits array address as void** for the runtime call.
  Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy);
  AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
                                           AllocatorTraitsLVal.getBaseInfo(),
                                           AllocatorTraitsLVal.getTBAAInfo());
  llvm::Value *Traits =
      CGF.EmitLoadOfScalar(AllocatorTraitsLVal, AllocatorTraits->getExprLoc());

  // allocator = __kmpc_init_allocator(gtid, memspace, ntraits, traits);
  llvm::Value *AllocatorVal =
      CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
                              CGM.getModule(), OMPRTL___kmpc_init_allocator),
                          {ThreadId, MemSpaceHandle, NumTraits, Traits});
  // Store to allocator.
  // Emit the allocator variable itself, then store the runtime handle into it
  // (converted from void* to the variable's declared type).
  CGF.EmitVarDecl(*cast<VarDecl>(
      cast<DeclRefExpr>(Allocator->IgnoreParenImpCasts())->getDecl()));
  LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
  AllocatorVal =
      CGF.EmitScalarConversion(AllocatorVal, CGF.getContext().VoidPtrTy,
                               Allocator->getType(), Allocator->getExprLoc());
  CGF.EmitStoreOfScalar(AllocatorVal, AllocatorLVal);
}
6395
6396void CGOpenMPRuntime::emitUsesAllocatorsFini(CodeGenFunction &CGF,
6397 const Expr *Allocator) {
6398 llvm::Value *ThreadId = getThreadID(CGF, Allocator->getExprLoc());
6399 ThreadId = CGF.Builder.CreateIntCast(ThreadId, CGF.IntTy, /*isSigned=*/true);
6400 LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
6401 llvm::Value *AllocatorVal =
6402 CGF.EmitLoadOfScalar(AllocatorLVal, Allocator->getExprLoc());
6403 AllocatorVal = CGF.EmitScalarConversion(AllocatorVal, Allocator->getType(),
6404 CGF.getContext().VoidPtrTy,
6405 Allocator->getExprLoc());
6406 (void)CGF.EmitRuntimeCall(
6407 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
6408 OMPRTL___kmpc_destroy_allocator),
6409 {ThreadId, AllocatorVal});
6410}
6411
void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  // Create a unique name for the entry function using the source location
  // information of the current target region. The name will be something like:
  //
  // __omp_offloading_DD_FFFF_PP_lBB
  //
  // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
  // mangled name of the function that encloses the target region and BB is the
  // line number of the target region.

  unsigned DeviceID;
  unsigned FileID;
  unsigned Line;
  getTargetEntryUniqueInfo(CGM.getContext(), D.getBeginLoc(), DeviceID, FileID,
                           Line);
  SmallString<64> EntryFnName;
  {
    llvm::raw_svector_ostream OS(EntryFnName);
    OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
       << llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
  }

  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);

  // Outline the target region body as a captured-statement function.
  CodeGenFunction CGF(CGM, true);
  CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);

  OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS, D.getBeginLoc());

  // If this target outline function is not an offload entry, we don't need to
  // register it.
  if (!IsOffloadEntry)
    return;

  // The target region ID is used by the runtime library to identify the current
  // target region, so it only has to be unique and not necessarily point to
  // anything. It could be the pointer to the outlined function that implements
  // the target region, but we aren't using that so that the compiler doesn't
  // need to keep that, and could therefore inline the host function if proven
  // worthwhile during optimization. In the other hand, if emitting code for the
  // device, the ID has to be the function address so that it can retrieved from
  // the offloading entry and launched by the runtime library. We also mark the
  // outlined function to have external linkage in case we are emitting code for
  // the device, because these functions will be entry points to the device.

  if (CGM.getLangOpts().OpenMPIsDevice) {
    OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
    OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
    OutlinedFn->setDSOLocal(false);
  } else {
    // Host side: the ID is a zero-initialized byte with weak linkage, named
    // after the entry function.
    std::string Name = getName({EntryFnName, "region_id"});
    OutlinedFnID = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::WeakAnyLinkage,
        llvm::Constant::getNullValue(CGM.Int8Ty), Name);
  }

  // Register the information for the entry associated with this target region.
  OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
      DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
      OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
}
6478
6479/// Checks if the expression is constant or does not have non-trivial function
6480/// calls.
6481static bool isTrivial(ASTContext &Ctx, const Expr * E) {
6482 // We can skip constant expressions.
6483 // We can skip expressions with trivial calls or simple expressions.
6484 return (E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) ||
6485 !E->hasNonTrivialCall(Ctx)) &&
6486 !E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/true);
6487}
6488
const Stmt *CGOpenMPRuntime::getSingleCompoundChild(ASTContext &Ctx,
                                                    const Stmt *Body) {
  const Stmt *Child = Body->IgnoreContainers();
  // Peel nested compound statements as long as each level has exactly one
  // non-ignorable child; return nullptr as soon as a level has more than one.
  while (const auto *C = dyn_cast_or_null<CompoundStmt>(Child)) {
    Child = nullptr;
    for (const Stmt *S : C->body()) {
      if (const auto *E = dyn_cast<Expr>(S)) {
        if (isTrivial(Ctx, E))
          continue;
      }
      // Some of the statements can be ignored.
      if (isa<AsmStmt>(S) || isa<NullStmt>(S) || isa<OMPFlushDirective>(S) ||
          isa<OMPBarrierDirective>(S) || isa<OMPTaskyieldDirective>(S))
        continue;
      // Analyze declarations.
      if (const auto *DS = dyn_cast<DeclStmt>(S)) {
        if (llvm::all_of(DS->decls(), [&Ctx](const Decl *D) {
              // These declaration kinds never affect the single-child search.
              if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
                  isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
                  isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
                  isa<UsingDirectiveDecl>(D) ||
                  isa<OMPDeclareReductionDecl>(D) ||
                  isa<OMPThreadPrivateDecl>(D) || isa<OMPAllocateDecl>(D))
                return true;
              const auto *VD = dyn_cast<VarDecl>(D);
              if (!VD)
                return false;
              // Variables are ignorable if constexpr, or trivially typed (or
              // references) with no initializer / a trivial initializer.
              return VD->isConstexpr() ||
                     ((VD->getType().isTrivialType(Ctx) ||
                       VD->getType()->isReferenceType()) &&
                      (!VD->hasInit() || isTrivial(Ctx, VD->getInit())));
            }))
          continue;
      }
      // Found multiple children - cannot get the one child only.
      if (Child)
        return nullptr;
      Child = S;
    }
    if (Child)
      Child = Child->IgnoreContainers();
  }
  return Child;
}
6533
6534/// Emit the number of teams for a target directive. Inspect the num_teams
6535/// clause associated with a teams construct combined or closely nested
6536/// with the target directive.
6537///
6538/// Emit a team of size one for directives such as 'target parallel' that
6539/// have no associated teams construct.
6540///
6541/// Otherwise, return nullptr.
6542static llvm::Value *
6543emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
6544 const OMPExecutableDirective &D) {
6545 assert(!CGF.getLangOpts().OpenMPIsDevice &&((!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the teams directive expected to be emitted "
"only for the host!") ? static_cast<void> (0) : __assert_fail
("!CGF.getLangOpts().OpenMPIsDevice && \"Clauses associated with the teams directive expected to be emitted \" \"only for the host!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6547, __PRETTY_FUNCTION__))
6546 "Clauses associated with the teams directive expected to be emitted "((!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the teams directive expected to be emitted "
"only for the host!") ? static_cast<void> (0) : __assert_fail
("!CGF.getLangOpts().OpenMPIsDevice && \"Clauses associated with the teams directive expected to be emitted \" \"only for the host!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6547, __PRETTY_FUNCTION__))
6547 "only for the host!")((!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the teams directive expected to be emitted "
"only for the host!") ? static_cast<void> (0) : __assert_fail
("!CGF.getLangOpts().OpenMPIsDevice && \"Clauses associated with the teams directive expected to be emitted \" \"only for the host!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6547, __PRETTY_FUNCTION__))
;
6548 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
6549 assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&((isOpenMPTargetExecutionDirective(DirectiveKind) && "Expected target-based executable directive."
) ? static_cast<void> (0) : __assert_fail ("isOpenMPTargetExecutionDirective(DirectiveKind) && \"Expected target-based executable directive.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6550, __PRETTY_FUNCTION__))
6550 "Expected target-based executable directive.")((isOpenMPTargetExecutionDirective(DirectiveKind) && "Expected target-based executable directive."
) ? static_cast<void> (0) : __assert_fail ("isOpenMPTargetExecutionDirective(DirectiveKind) && \"Expected target-based executable directive.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6550, __PRETTY_FUNCTION__))
;
6551 CGBuilderTy &Bld = CGF.Builder;
6552 switch (DirectiveKind) {
6553 case OMPD_target: {
6554 const auto *CS = D.getInnermostCapturedStmt();
6555 const auto *Body =
6556 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
6557 const Stmt *ChildStmt =
6558 CGOpenMPRuntime::getSingleCompoundChild(CGF.getContext(), Body);
6559 if (const auto *NestedDir =
6560 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
6561 if (isOpenMPTeamsDirective(NestedDir->getDirectiveKind())) {
6562 if (NestedDir->hasClausesOfKind<OMPNumTeamsClause>()) {
6563 CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
6564 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6565 const Expr *NumTeams =
6566 NestedDir->getSingleClause<OMPNumTeamsClause>()->getNumTeams();
6567 llvm::Value *NumTeamsVal =
6568 CGF.EmitScalarExpr(NumTeams,
6569 /*IgnoreResultAssign*/ true);
6570 return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
6571 /*isSigned=*/true);
6572 }
6573 return Bld.getInt32(0);
6574 }
6575 if (isOpenMPParallelDirective(NestedDir->getDirectiveKind()) ||
6576 isOpenMPSimdDirective(NestedDir->getDirectiveKind()))
6577 return Bld.getInt32(1);
6578 return Bld.getInt32(0);
6579 }
6580 return nullptr;
6581 }
6582 case OMPD_target_teams:
6583 case OMPD_target_teams_distribute:
6584 case OMPD_target_teams_distribute_simd:
6585 case OMPD_target_teams_distribute_parallel_for:
6586 case OMPD_target_teams_distribute_parallel_for_simd: {
6587 if (D.hasClausesOfKind<OMPNumTeamsClause>()) {
6588 CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
6589 const Expr *NumTeams =
6590 D.getSingleClause<OMPNumTeamsClause>()->getNumTeams();
6591 llvm::Value *NumTeamsVal =
6592 CGF.EmitScalarExpr(NumTeams,
6593 /*IgnoreResultAssign*/ true);
6594 return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
6595 /*isSigned=*/true);
6596 }
6597 return Bld.getInt32(0);
6598 }
6599 case OMPD_target_parallel:
6600 case OMPD_target_parallel_for:
6601 case OMPD_target_parallel_for_simd:
6602 case OMPD_target_simd:
6603 return Bld.getInt32(1);
6604 case OMPD_parallel:
6605 case OMPD_for:
6606 case OMPD_parallel_for:
6607 case OMPD_parallel_master:
6608 case OMPD_parallel_sections:
6609 case OMPD_for_simd:
6610 case OMPD_parallel_for_simd:
6611 case OMPD_cancel:
6612 case OMPD_cancellation_point:
6613 case OMPD_ordered:
6614 case OMPD_threadprivate:
6615 case OMPD_allocate:
6616 case OMPD_task:
6617 case OMPD_simd:
6618 case OMPD_sections:
6619 case OMPD_section:
6620 case OMPD_single:
6621 case OMPD_master:
6622 case OMPD_critical:
6623 case OMPD_taskyield:
6624 case OMPD_barrier:
6625 case OMPD_taskwait:
6626 case OMPD_taskgroup:
6627 case OMPD_atomic:
6628 case OMPD_flush:
6629 case OMPD_depobj:
6630 case OMPD_scan:
6631 case OMPD_teams:
6632 case OMPD_target_data:
6633 case OMPD_target_exit_data:
6634 case OMPD_target_enter_data:
6635 case OMPD_distribute:
6636 case OMPD_distribute_simd:
6637 case OMPD_distribute_parallel_for:
6638 case OMPD_distribute_parallel_for_simd:
6639 case OMPD_teams_distribute:
6640 case OMPD_teams_distribute_simd:
6641 case OMPD_teams_distribute_parallel_for:
6642 case OMPD_teams_distribute_parallel_for_simd:
6643 case OMPD_target_update:
6644 case OMPD_declare_simd:
6645 case OMPD_declare_variant:
6646 case OMPD_begin_declare_variant:
6647 case OMPD_end_declare_variant:
6648 case OMPD_declare_target:
6649 case OMPD_end_declare_target:
6650 case OMPD_declare_reduction:
6651 case OMPD_declare_mapper:
6652 case OMPD_taskloop:
6653 case OMPD_taskloop_simd:
6654 case OMPD_master_taskloop:
6655 case OMPD_master_taskloop_simd:
6656 case OMPD_parallel_master_taskloop:
6657 case OMPD_parallel_master_taskloop_simd:
6658 case OMPD_requires:
6659 case OMPD_unknown:
6660 break;
6661 default:
6662 break;
6663 }
6664 llvm_unreachable("Unexpected directive kind.")::llvm::llvm_unreachable_internal("Unexpected directive kind."
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6664)
;
6665}
6666
6667static llvm::Value *getNumThreads(CodeGenFunction &CGF, const CapturedStmt *CS,
6668 llvm::Value *DefaultThreadLimitVal) {
6669 const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
6670 CGF.getContext(), CS->getCapturedStmt());
6671 if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
6672 if (isOpenMPParallelDirective(Dir->getDirectiveKind())) {
6673 llvm::Value *NumThreads = nullptr;
6674 llvm::Value *CondVal = nullptr;
6675 // Handle if clause. If if clause present, the number of threads is
6676 // calculated as <cond> ? (<numthreads> ? <numthreads> : 0 ) : 1.
6677 if (Dir->hasClausesOfKind<OMPIfClause>()) {
6678 CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
6679 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6680 const OMPIfClause *IfClause = nullptr;
6681 for (const auto *C : Dir->getClausesOfKind<OMPIfClause>()) {
6682 if (C->getNameModifier() == OMPD_unknown ||
6683 C->getNameModifier() == OMPD_parallel) {
6684 IfClause = C;
6685 break;
6686 }
6687 }
6688 if (IfClause) {
6689 const Expr *Cond = IfClause->getCondition();
6690 bool Result;
6691 if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
6692 if (!Result)
6693 return CGF.Builder.getInt32(1);
6694 } else {
6695 CodeGenFunction::LexicalScope Scope(CGF, Cond->getSourceRange());
6696 if (const auto *PreInit =
6697 cast_or_null<DeclStmt>(IfClause->getPreInitStmt())) {
6698 for (const auto *I : PreInit->decls()) {
6699 if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
6700 CGF.EmitVarDecl(cast<VarDecl>(*I));
6701 } else {
6702 CodeGenFunction::AutoVarEmission Emission =
6703 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
6704 CGF.EmitAutoVarCleanups(Emission);
6705 }
6706 }
6707 }
6708 CondVal = CGF.EvaluateExprAsBool(Cond);
6709 }
6710 }
6711 }
6712 // Check the value of num_threads clause iff if clause was not specified
6713 // or is not evaluated to false.
6714 if (Dir->hasClausesOfKind<OMPNumThreadsClause>()) {
6715 CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
6716 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6717 const auto *NumThreadsClause =
6718 Dir->getSingleClause<OMPNumThreadsClause>();
6719 CodeGenFunction::LexicalScope Scope(
6720 CGF, NumThreadsClause->getNumThreads()->getSourceRange());
6721 if (const auto *PreInit =
6722 cast_or_null<DeclStmt>(NumThreadsClause->getPreInitStmt())) {
6723 for (const auto *I : PreInit->decls()) {
6724 if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
6725 CGF.EmitVarDecl(cast<VarDecl>(*I));
6726 } else {
6727 CodeGenFunction::AutoVarEmission Emission =
6728 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
6729 CGF.EmitAutoVarCleanups(Emission);
6730 }
6731 }
6732 }
6733 NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads());
6734 NumThreads = CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty,
6735 /*isSigned=*/false);
6736 if (DefaultThreadLimitVal)
6737 NumThreads = CGF.Builder.CreateSelect(
6738 CGF.Builder.CreateICmpULT(DefaultThreadLimitVal, NumThreads),
6739 DefaultThreadLimitVal, NumThreads);
6740 } else {
6741 NumThreads = DefaultThreadLimitVal ? DefaultThreadLimitVal
6742 : CGF.Builder.getInt32(0);
6743 }
6744 // Process condition of the if clause.
6745 if (CondVal) {
6746 NumThreads = CGF.Builder.CreateSelect(CondVal, NumThreads,
6747 CGF.Builder.getInt32(1));
6748 }
6749 return NumThreads;
6750 }
6751 if (isOpenMPSimdDirective(Dir->getDirectiveKind()))
6752 return CGF.Builder.getInt32(1);
6753 return DefaultThreadLimitVal;
6754 }
6755 return DefaultThreadLimitVal ? DefaultThreadLimitVal
6756 : CGF.Builder.getInt32(0);
6757}
6758
6759/// Emit the number of threads for a target directive. Inspect the
6760/// thread_limit clause associated with a teams construct combined or closely
6761/// nested with the target directive.
6762///
6763/// Emit the num_threads clause for directives such as 'target parallel' that
6764/// have no associated teams construct.
6765///
6766/// Otherwise, return nullptr.
6767static llvm::Value *
6768emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
6769 const OMPExecutableDirective &D) {
6770 assert(!CGF.getLangOpts().OpenMPIsDevice &&((!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the teams directive expected to be emitted "
"only for the host!") ? static_cast<void> (0) : __assert_fail
("!CGF.getLangOpts().OpenMPIsDevice && \"Clauses associated with the teams directive expected to be emitted \" \"only for the host!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6772, __PRETTY_FUNCTION__))
6771 "Clauses associated with the teams directive expected to be emitted "((!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the teams directive expected to be emitted "
"only for the host!") ? static_cast<void> (0) : __assert_fail
("!CGF.getLangOpts().OpenMPIsDevice && \"Clauses associated with the teams directive expected to be emitted \" \"only for the host!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6772, __PRETTY_FUNCTION__))
6772 "only for the host!")((!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the teams directive expected to be emitted "
"only for the host!") ? static_cast<void> (0) : __assert_fail
("!CGF.getLangOpts().OpenMPIsDevice && \"Clauses associated with the teams directive expected to be emitted \" \"only for the host!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6772, __PRETTY_FUNCTION__))
;
6773 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
6774 assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&((isOpenMPTargetExecutionDirective(DirectiveKind) && "Expected target-based executable directive."
) ? static_cast<void> (0) : __assert_fail ("isOpenMPTargetExecutionDirective(DirectiveKind) && \"Expected target-based executable directive.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6775, __PRETTY_FUNCTION__))
6775 "Expected target-based executable directive.")((isOpenMPTargetExecutionDirective(DirectiveKind) && "Expected target-based executable directive."
) ? static_cast<void> (0) : __assert_fail ("isOpenMPTargetExecutionDirective(DirectiveKind) && \"Expected target-based executable directive.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6775, __PRETTY_FUNCTION__))
;
6776 CGBuilderTy &Bld = CGF.Builder;
6777 llvm::Value *ThreadLimitVal = nullptr;
6778 llvm::Value *NumThreadsVal = nullptr;
6779 switch (DirectiveKind) {
6780 case OMPD_target: {
6781 const CapturedStmt *CS = D.getInnermostCapturedStmt();
6782 if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
6783 return NumThreads;
6784 const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
6785 CGF.getContext(), CS->getCapturedStmt());
6786 if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
6787 if (Dir->hasClausesOfKind<OMPThreadLimitClause>()) {
6788 CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
6789 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6790 const auto *ThreadLimitClause =
6791 Dir->getSingleClause<OMPThreadLimitClause>();
6792 CodeGenFunction::LexicalScope Scope(
6793 CGF, ThreadLimitClause->getThreadLimit()->getSourceRange());
6794 if (const auto *PreInit =
6795 cast_or_null<DeclStmt>(ThreadLimitClause->getPreInitStmt())) {
6796 for (const auto *I : PreInit->decls()) {
6797 if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
6798 CGF.EmitVarDecl(cast<VarDecl>(*I));
6799 } else {
6800 CodeGenFunction::AutoVarEmission Emission =
6801 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
6802 CGF.EmitAutoVarCleanups(Emission);
6803 }
6804 }
6805 }
6806 llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
6807 ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
6808 ThreadLimitVal =
6809 Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
6810 }
6811 if (isOpenMPTeamsDirective(Dir->getDirectiveKind()) &&
6812 !isOpenMPDistributeDirective(Dir->getDirectiveKind())) {
6813 CS = Dir->getInnermostCapturedStmt();
6814 const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
6815 CGF.getContext(), CS->getCapturedStmt());
6816 Dir = dyn_cast_or_null<OMPExecutableDirective>(Child);
6817 }
6818 if (Dir && isOpenMPDistributeDirective(Dir->getDirectiveKind()) &&
6819 !isOpenMPSimdDirective(Dir->getDirectiveKind())) {
6820 CS = Dir->getInnermostCapturedStmt();
6821 if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
6822 return NumThreads;
6823 }
6824 if (Dir && isOpenMPSimdDirective(Dir->getDirectiveKind()))
6825 return Bld.getInt32(1);
6826 }
6827 return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
6828 }
6829 case OMPD_target_teams: {
6830 if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
6831 CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
6832 const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
6833 llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
6834 ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
6835 ThreadLimitVal =
6836 Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
6837 }
6838 const CapturedStmt *CS = D.getInnermostCapturedStmt();
6839 if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
6840 return NumThreads;
6841 const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
6842 CGF.getContext(), CS->getCapturedStmt());
6843 if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
6844 if (Dir->getDirectiveKind() == OMPD_distribute) {
6845 CS = Dir->getInnermostCapturedStmt();
6846 if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
6847 return NumThreads;
6848 }
6849 }
6850 return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
6851 }
6852 case OMPD_target_teams_distribute:
6853 if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
6854 CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
6855 const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
6856 llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
6857 ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
6858 ThreadLimitVal =
6859 Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
6860 }
6861 return getNumThreads(CGF, D.getInnermostCapturedStmt(), ThreadLimitVal);
6862 case OMPD_target_parallel:
6863 case OMPD_target_parallel_for:
6864 case OMPD_target_parallel_for_simd:
6865 case OMPD_target_teams_distribute_parallel_for:
6866 case OMPD_target_teams_distribute_parallel_for_simd: {
6867 llvm::Value *CondVal = nullptr;
6868 // Handle if clause. If if clause present, the number of threads is
6869 // calculated as <cond> ? (<numthreads> ? <numthreads> : 0 ) : 1.
6870 if (D.hasClausesOfKind<OMPIfClause>()) {
6871 const OMPIfClause *IfClause = nullptr;
6872 for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
6873 if (C->getNameModifier() == OMPD_unknown ||
6874 C->getNameModifier() == OMPD_parallel) {
6875 IfClause = C;
6876 break;
6877 }
6878 }
6879 if (IfClause) {
6880 const Expr *Cond = IfClause->getCondition();
6881 bool Result;
6882 if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
6883 if (!Result)
6884 return Bld.getInt32(1);
6885 } else {
6886 CodeGenFunction::RunCleanupsScope Scope(CGF);
6887 CondVal = CGF.EvaluateExprAsBool(Cond);
6888 }
6889 }
6890 }
6891 if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
6892 CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
6893 const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
6894 llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
6895 ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
6896 ThreadLimitVal =
6897 Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
6898 }
6899 if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
6900 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
6901 const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
6902 llvm::Value *NumThreads = CGF.EmitScalarExpr(
6903 NumThreadsClause->getNumThreads(), /*IgnoreResultAssign=*/true);
6904 NumThreadsVal =
6905 Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned=*/false);
6906 ThreadLimitVal = ThreadLimitVal
6907 ? Bld.CreateSelect(Bld.CreateICmpULT(NumThreadsVal,
6908 ThreadLimitVal),
6909 NumThreadsVal, ThreadLimitVal)
6910 : NumThreadsVal;
6911 }
6912 if (!ThreadLimitVal)
6913 ThreadLimitVal = Bld.getInt32(0);
6914 if (CondVal)
6915 return Bld.CreateSelect(CondVal, ThreadLimitVal, Bld.getInt32(1));
6916 return ThreadLimitVal;
6917 }
6918 case OMPD_target_teams_distribute_simd:
6919 case OMPD_target_simd:
6920 return Bld.getInt32(1);
6921 case OMPD_parallel:
6922 case OMPD_for:
6923 case OMPD_parallel_for:
6924 case OMPD_parallel_master:
6925 case OMPD_parallel_sections:
6926 case OMPD_for_simd:
6927 case OMPD_parallel_for_simd:
6928 case OMPD_cancel:
6929 case OMPD_cancellation_point:
6930 case OMPD_ordered:
6931 case OMPD_threadprivate:
6932 case OMPD_allocate:
6933 case OMPD_task:
6934 case OMPD_simd:
6935 case OMPD_sections:
6936 case OMPD_section:
6937 case OMPD_single:
6938 case OMPD_master:
6939 case OMPD_critical:
6940 case OMPD_taskyield:
6941 case OMPD_barrier:
6942 case OMPD_taskwait:
6943 case OMPD_taskgroup:
6944 case OMPD_atomic:
6945 case OMPD_flush:
6946 case OMPD_depobj:
6947 case OMPD_scan:
6948 case OMPD_teams:
6949 case OMPD_target_data:
6950 case OMPD_target_exit_data:
6951 case OMPD_target_enter_data:
6952 case OMPD_distribute:
6953 case OMPD_distribute_simd:
6954 case OMPD_distribute_parallel_for:
6955 case OMPD_distribute_parallel_for_simd:
6956 case OMPD_teams_distribute:
6957 case OMPD_teams_distribute_simd:
6958 case OMPD_teams_distribute_parallel_for:
6959 case OMPD_teams_distribute_parallel_for_simd:
6960 case OMPD_target_update:
6961 case OMPD_declare_simd:
6962 case OMPD_declare_variant:
6963 case OMPD_begin_declare_variant:
6964 case OMPD_end_declare_variant:
6965 case OMPD_declare_target:
6966 case OMPD_end_declare_target:
6967 case OMPD_declare_reduction:
6968 case OMPD_declare_mapper:
6969 case OMPD_taskloop:
6970 case OMPD_taskloop_simd:
6971 case OMPD_master_taskloop:
6972 case OMPD_master_taskloop_simd:
6973 case OMPD_parallel_master_taskloop:
6974 case OMPD_parallel_master_taskloop_simd:
6975 case OMPD_requires:
6976 case OMPD_unknown:
6977 break;
6978 default:
6979 break;
6980 }
6981 llvm_unreachable("Unsupported directive kind.")::llvm::llvm_unreachable_internal("Unsupported directive kind."
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 6981)
;
6982}
6983
6984namespace {
6985LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE()using ::llvm::BitmaskEnumDetail::operator~; using ::llvm::BitmaskEnumDetail
::operator|; using ::llvm::BitmaskEnumDetail::operator&; using
::llvm::BitmaskEnumDetail::operator^; using ::llvm::BitmaskEnumDetail
::operator|=; using ::llvm::BitmaskEnumDetail::operator&=
; using ::llvm::BitmaskEnumDetail::operator^=
;
6986
6987// Utility to handle information from clauses associated with a given
6988// construct that use mappable expressions (e.g. 'map' clause, 'to' clause).
6989// It provides a convenient interface to obtain the information and generate
6990// code for that information.
6991class MappableExprsHandler {
6992public:
6993 /// Values for bit flags used to specify the mapping type for
6994 /// offloading.
6995 enum OpenMPOffloadMappingFlags : uint64_t {
6996 /// No flags
6997 OMP_MAP_NONE = 0x0,
6998 /// Allocate memory on the device and move data from host to device.
6999 OMP_MAP_TO = 0x01,
7000 /// Allocate memory on the device and move data from device to host.
7001 OMP_MAP_FROM = 0x02,
7002 /// Always perform the requested mapping action on the element, even
7003 /// if it was already mapped before.
7004 OMP_MAP_ALWAYS = 0x04,
7005 /// Delete the element from the device environment, ignoring the
7006 /// current reference count associated with the element.
7007 OMP_MAP_DELETE = 0x08,
7008 /// The element being mapped is a pointer-pointee pair; both the
7009 /// pointer and the pointee should be mapped.
7010 OMP_MAP_PTR_AND_OBJ = 0x10,
7011 /// This flags signals that the base address of an entry should be
7012 /// passed to the target kernel as an argument.
7013 OMP_MAP_TARGET_PARAM = 0x20,
7014 /// Signal that the runtime library has to return the device pointer
7015 /// in the current position for the data being mapped. Used when we have the
7016 /// use_device_ptr or use_device_addr clause.
7017 OMP_MAP_RETURN_PARAM = 0x40,
7018 /// This flag signals that the reference being passed is a pointer to
7019 /// private data.
7020 OMP_MAP_PRIVATE = 0x80,
7021 /// Pass the element to the device by value.
7022 OMP_MAP_LITERAL = 0x100,
7023 /// Implicit map
7024 OMP_MAP_IMPLICIT = 0x200,
7025 /// Close is a hint to the runtime to allocate memory close to
7026 /// the target device.
7027 OMP_MAP_CLOSE = 0x400,
7028 /// 0x800 is reserved for compatibility with XLC.
7029 /// Produce a runtime error if the data is not already allocated.
7030 OMP_MAP_PRESENT = 0x1000,
7031 /// The 16 MSBs of the flags indicate whether the entry is member of some
7032 /// struct/class.
7033 OMP_MAP_MEMBER_OF = 0xffff000000000000,
7034 LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ OMP_MAP_MEMBER_OF)LLVM_BITMASK_LARGEST_ENUMERATOR = OMP_MAP_MEMBER_OF,
7035 };
7036
7037 /// Get the offset of the OMP_MAP_MEMBER_OF field.
7038 static unsigned getFlagMemberOffset() {
7039 unsigned Offset = 0;
7040 for (uint64_t Remain = OMP_MAP_MEMBER_OF; !(Remain & 1);
7041 Remain = Remain >> 1)
7042 Offset++;
7043 return Offset;
7044 }
7045
7046 /// Class that associates information with a base pointer to be passed to the
7047 /// runtime library.
7048 class BasePointerInfo {
7049 /// The base pointer.
7050 llvm::Value *Ptr = nullptr;
7051 /// The base declaration that refers to this device pointer, or null if
7052 /// there is none.
7053 const ValueDecl *DevPtrDecl = nullptr;
7054
7055 public:
7056 BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
7057 : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
7058 llvm::Value *operator*() const { return Ptr; }
7059 const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
7060 void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
7061 };
7062
7063 using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
7064 using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
7065 using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;
7066 using MapMappersArrayTy = SmallVector<const ValueDecl *, 4>;
7067
7068 /// This structure contains combined information generated for mappable
7069 /// clauses, including base pointers, pointers, sizes, map types, and
7070 /// user-defined mappers.
7071 struct MapCombinedInfoTy {
7072 MapBaseValuesArrayTy BasePointers;
7073 MapValuesArrayTy Pointers;
7074 MapValuesArrayTy Sizes;
7075 MapFlagsArrayTy Types;
7076 MapMappersArrayTy Mappers;
7077
7078 /// Append arrays in \a CurInfo.
7079 void append(MapCombinedInfoTy &CurInfo) {
7080 BasePointers.append(CurInfo.BasePointers.begin(),
7081 CurInfo.BasePointers.end());
7082 Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
7083 Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
7084 Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
7085 Mappers.append(CurInfo.Mappers.begin(), CurInfo.Mappers.end());
7086 }
7087 };
7088
7089 /// Map between a struct and the its lowest & highest elements which have been
7090 /// mapped.
7091 /// [ValueDecl *] --> {LE(FieldIndex, Pointer),
7092 /// HE(FieldIndex, Pointer)}
7093 struct StructRangeInfoTy {
7094 std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> LowestElem = {
7095 0, Address::invalid()};
7096 std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> HighestElem = {
7097 0, Address::invalid()};
7098 Address Base = Address::invalid();
7099 };
7100
7101private:
7102 /// Kind that defines how a device pointer has to be returned.
7103 struct MapInfo {
7104 OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
7105 OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
7106 ArrayRef<OpenMPMapModifierKind> MapModifiers;
7107 ArrayRef<OpenMPMotionModifierKind> MotionModifiers;
7108 bool ReturnDevicePointer = false;
7109 bool IsImplicit = false;
7110 const ValueDecl *Mapper = nullptr;
7111 bool ForDeviceAddr = false;
7112
7113 MapInfo() = default;
7114 MapInfo(
7115 OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
7116 OpenMPMapClauseKind MapType,
7117 ArrayRef<OpenMPMapModifierKind> MapModifiers,
7118 ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
7119 bool ReturnDevicePointer, bool IsImplicit,
7120 const ValueDecl *Mapper = nullptr, bool ForDeviceAddr = false)
7121 : Components(Components), MapType(MapType), MapModifiers(MapModifiers),
7122 MotionModifiers(MotionModifiers),
7123 ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit),
7124 Mapper(Mapper), ForDeviceAddr(ForDeviceAddr) {}
7125 };
7126
7127 /// If use_device_ptr or use_device_addr is used on a decl which is a struct
7128 /// member and there is no map information about it, then emission of that
7129 /// entry is deferred until the whole struct has been processed.
7130 struct DeferredDevicePtrEntryTy {
7131 const Expr *IE = nullptr;
7132 const ValueDecl *VD = nullptr;
7133 bool ForDeviceAddr = false;
7134
7135 DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD,
7136 bool ForDeviceAddr)
7137 : IE(IE), VD(VD), ForDeviceAddr(ForDeviceAddr) {}
7138 };
7139
7140 /// The target directive from where the mappable clauses were extracted. It
7141 /// is either a executable directive or a user-defined mapper directive.
7142 llvm::PointerUnion<const OMPExecutableDirective *,
7143 const OMPDeclareMapperDecl *>
7144 CurDir;
7145
7146 /// Function the directive is being generated for.
7147 CodeGenFunction &CGF;
7148
7149 /// Set of all first private variables in the current directive.
7150 /// bool data is set to true if the variable is implicitly marked as
7151 /// firstprivate, false otherwise.
7152 llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, bool> FirstPrivateDecls;
7153
7154 /// Map between device pointer declarations and their expression components.
7155 /// The key value for declarations in 'this' is null.
7156 llvm::DenseMap<
7157 const ValueDecl *,
7158 SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
7159 DevPointersMap;
7160
7161 llvm::Value *getExprTypeSize(const Expr *E) const {
7162 QualType ExprTy = E->getType().getCanonicalType();
7163
7164 // Calculate the size for array shaping expression.
7165 if (const auto *OAE = dyn_cast<OMPArrayShapingExpr>(E)) {
7166 llvm::Value *Size =
7167 CGF.getTypeSize(OAE->getBase()->getType()->getPointeeType());
7168 for (const Expr *SE : OAE->getDimensions()) {
7169 llvm::Value *Sz = CGF.EmitScalarExpr(SE);
7170 Sz = CGF.EmitScalarConversion(Sz, SE->getType(),
7171 CGF.getContext().getSizeType(),
7172 SE->getExprLoc());
7173 Size = CGF.Builder.CreateNUWMul(Size, Sz);
7174 }
7175 return Size;
7176 }
7177
7178 // Reference types are ignored for mapping purposes.
7179 if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
7180 ExprTy = RefTy->getPointeeType().getCanonicalType();
7181
7182 // Given that an array section is considered a built-in type, we need to
7183 // do the calculation based on the length of the section instead of relying
7184 // on CGF.getTypeSize(E->getType()).
7185 if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
7186 QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
7187 OAE->getBase()->IgnoreParenImpCasts())
7188 .getCanonicalType();
7189
7190 // If there is no length associated with the expression and lower bound is
7191 // not specified too, that means we are using the whole length of the
7192 // base.
7193 if (!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
7194 !OAE->getLowerBound())
7195 return CGF.getTypeSize(BaseTy);
7196
7197 llvm::Value *ElemSize;
7198 if (const auto *PTy = BaseTy->getAs<PointerType>()) {
7199 ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
7200 } else {
7201 const auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
7202 assert(ATy && "Expecting array type if not a pointer type.")((ATy && "Expecting array type if not a pointer type."
) ? static_cast<void> (0) : __assert_fail ("ATy && \"Expecting array type if not a pointer type.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7202, __PRETTY_FUNCTION__))
;
7203 ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
7204 }
7205
7206 // If we don't have a length at this point, that is because we have an
7207 // array section with a single element.
7208 if (!OAE->getLength() && OAE->getColonLocFirst().isInvalid())
7209 return ElemSize;
7210
7211 if (const Expr *LenExpr = OAE->getLength()) {
7212 llvm::Value *LengthVal = CGF.EmitScalarExpr(LenExpr);
7213 LengthVal = CGF.EmitScalarConversion(LengthVal, LenExpr->getType(),
7214 CGF.getContext().getSizeType(),
7215 LenExpr->getExprLoc());
7216 return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
7217 }
7218 assert(!OAE->getLength() && OAE->getColonLocFirst().isValid() &&((!OAE->getLength() && OAE->getColonLocFirst().
isValid() && OAE->getLowerBound() && "expected array_section[lb:]."
) ? static_cast<void> (0) : __assert_fail ("!OAE->getLength() && OAE->getColonLocFirst().isValid() && OAE->getLowerBound() && \"expected array_section[lb:].\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7219, __PRETTY_FUNCTION__))
7219 OAE->getLowerBound() && "expected array_section[lb:].")((!OAE->getLength() && OAE->getColonLocFirst().
isValid() && OAE->getLowerBound() && "expected array_section[lb:]."
) ? static_cast<void> (0) : __assert_fail ("!OAE->getLength() && OAE->getColonLocFirst().isValid() && OAE->getLowerBound() && \"expected array_section[lb:].\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7219, __PRETTY_FUNCTION__))
;
7220 // Size = sizetype - lb * elemtype;
7221 llvm::Value *LengthVal = CGF.getTypeSize(BaseTy);
7222 llvm::Value *LBVal = CGF.EmitScalarExpr(OAE->getLowerBound());
7223 LBVal = CGF.EmitScalarConversion(LBVal, OAE->getLowerBound()->getType(),
7224 CGF.getContext().getSizeType(),
7225 OAE->getLowerBound()->getExprLoc());
7226 LBVal = CGF.Builder.CreateNUWMul(LBVal, ElemSize);
7227 llvm::Value *Cmp = CGF.Builder.CreateICmpUGT(LengthVal, LBVal);
7228 llvm::Value *TrueVal = CGF.Builder.CreateNUWSub(LengthVal, LBVal);
7229 LengthVal = CGF.Builder.CreateSelect(
7230 Cmp, TrueVal, llvm::ConstantInt::get(CGF.SizeTy, 0));
7231 return LengthVal;
7232 }
7233 return CGF.getTypeSize(ExprTy);
7234 }
7235
7236 /// Return the corresponding bits for a given map clause modifier. Add
7237 /// a flag marking the map as a pointer if requested. Add a flag marking the
7238 /// map as the first one of a series of maps that relate to the same map
7239 /// expression.
7240 OpenMPOffloadMappingFlags getMapTypeBits(
7241 OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
7242 ArrayRef<OpenMPMotionModifierKind> MotionModifiers, bool IsImplicit,
7243 bool AddPtrFlag, bool AddIsTargetParamFlag) const {
7244 OpenMPOffloadMappingFlags Bits =
7245 IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE;
7246 switch (MapType) {
7247 case OMPC_MAP_alloc:
7248 case OMPC_MAP_release:
7249 // alloc and release is the default behavior in the runtime library, i.e.
7250 // if we don't pass any bits alloc/release that is what the runtime is
7251 // going to do. Therefore, we don't need to signal anything for these two
7252 // type modifiers.
7253 break;
7254 case OMPC_MAP_to:
7255 Bits |= OMP_MAP_TO;
7256 break;
7257 case OMPC_MAP_from:
7258 Bits |= OMP_MAP_FROM;
7259 break;
7260 case OMPC_MAP_tofrom:
7261 Bits |= OMP_MAP_TO | OMP_MAP_FROM;
7262 break;
7263 case OMPC_MAP_delete:
7264 Bits |= OMP_MAP_DELETE;
7265 break;
7266 case OMPC_MAP_unknown:
7267 llvm_unreachable("Unexpected map type!")::llvm::llvm_unreachable_internal("Unexpected map type!", "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7267)
;
7268 }
7269 if (AddPtrFlag)
7270 Bits |= OMP_MAP_PTR_AND_OBJ;
7271 if (AddIsTargetParamFlag)
7272 Bits |= OMP_MAP_TARGET_PARAM;
7273 if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_always)
7274 != MapModifiers.end())
7275 Bits |= OMP_MAP_ALWAYS;
7276 if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_close)
7277 != MapModifiers.end())
7278 Bits |= OMP_MAP_CLOSE;
7279 if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_present)
7280 != MapModifiers.end())
7281 Bits |= OMP_MAP_PRESENT;
7282 if (llvm::find(MotionModifiers, OMPC_MOTION_MODIFIER_present)
7283 != MotionModifiers.end())
7284 Bits |= OMP_MAP_PRESENT;
7285 return Bits;
7286 }
7287
7288 /// Return true if the provided expression is a final array section. A
7289 /// final array section, is one whose length can't be proved to be one.
7290 bool isFinalArraySectionExpression(const Expr *E) const {
7291 const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
7292
7293 // It is not an array section and therefore not a unity-size one.
7294 if (!OASE)
7295 return false;
7296
7297 // An array section with no colon always refer to a single element.
7298 if (OASE->getColonLocFirst().isInvalid())
7299 return false;
7300
7301 const Expr *Length = OASE->getLength();
7302
7303 // If we don't have a length we have to check if the array has size 1
7304 // for this dimension. Also, we should always expect a length if the
7305 // base type is pointer.
7306 if (!Length) {
7307 QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
7308 OASE->getBase()->IgnoreParenImpCasts())
7309 .getCanonicalType();
7310 if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
7311 return ATy->getSize().getSExtValue() != 1;
7312 // If we don't have a constant dimension length, we have to consider
7313 // the current section as having any size, so it is not necessarily
7314 // unitary. If it happen to be unity size, that's user fault.
7315 return true;
7316 }
7317
7318 // Check if the length evaluates to 1.
7319 Expr::EvalResult Result;
7320 if (!Length->EvaluateAsInt(Result, CGF.getContext()))
7321 return true; // Can have more that size 1.
7322
7323 llvm::APSInt ConstLength = Result.Val.getInt();
7324 return ConstLength.getSExtValue() != 1;
7325 }
7326
7327 /// Generate the base pointers, section pointers, sizes, map type bits, and
7328 /// user-defined mappers (all included in \a CombinedInfo) for the provided
7329 /// map type, map or motion modifiers, and expression components.
7330 /// \a IsFirstComponent should be set to true if the provided set of
7331 /// components is the first associated with a capture.
7332 void generateInfoForComponentList(
7333 OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
7334 ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
7335 OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
7336 MapCombinedInfoTy &CombinedInfo, StructRangeInfoTy &PartialStruct,
7337 bool IsFirstComponentList, bool IsImplicit,
7338 const ValueDecl *Mapper = nullptr, bool ForDeviceAddr = false,
7339 ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
7340 OverlappedElements = llvm::None) const {
7341 // The following summarizes what has to be generated for each map and the
7342 // types below. The generated information is expressed in this order:
7343 // base pointer, section pointer, size, flags
7344 // (to add to the ones that come from the map type and modifier).
7345 //
7346 // double d;
7347 // int i[100];
7348 // float *p;
7349 //
7350 // struct S1 {
7351 // int i;
7352 // float f[50];
7353 // }
7354 // struct S2 {
7355 // int i;
7356 // float f[50];
7357 // S1 s;
7358 // double *p;
7359 // struct S2 *ps;
7360 // }
7361 // S2 s;
7362 // S2 *ps;
7363 //
7364 // map(d)
7365 // &d, &d, sizeof(double), TARGET_PARAM | TO | FROM
7366 //
7367 // map(i)
7368 // &i, &i, 100*sizeof(int), TARGET_PARAM | TO | FROM
7369 //
7370 // map(i[1:23])
7371 // &i(=&i[0]), &i[1], 23*sizeof(int), TARGET_PARAM | TO | FROM
7372 //
7373 // map(p)
7374 // &p, &p, sizeof(float*), TARGET_PARAM | TO | FROM
7375 //
7376 // map(p[1:24])
7377 // &p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM | PTR_AND_OBJ
7378 // in unified shared memory mode or for local pointers
7379 // p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM
7380 //
7381 // map(s)
7382 // &s, &s, sizeof(S2), TARGET_PARAM | TO | FROM
7383 //
7384 // map(s.i)
7385 // &s, &(s.i), sizeof(int), TARGET_PARAM | TO | FROM
7386 //
7387 // map(s.s.f)
7388 // &s, &(s.s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
7389 //
7390 // map(s.p)
7391 // &s, &(s.p), sizeof(double*), TARGET_PARAM | TO | FROM
7392 //
7393 // map(to: s.p[:22])
7394 // &s, &(s.p), sizeof(double*), TARGET_PARAM (*)
7395 // &s, &(s.p), sizeof(double*), MEMBER_OF(1) (**)
7396 // &(s.p), &(s.p[0]), 22*sizeof(double),
7397 // MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
7398 // (*) alloc space for struct members, only this is a target parameter
7399 // (**) map the pointer (nothing to be mapped in this example) (the compiler
7400 // optimizes this entry out, same in the examples below)
7401 // (***) map the pointee (map: to)
7402 //
7403 // map(s.ps)
7404 // &s, &(s.ps), sizeof(S2*), TARGET_PARAM | TO | FROM
7405 //
7406 // map(from: s.ps->s.i)
7407 // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
7408 // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
7409 // &(s.ps), &(s.ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
7410 //
7411 // map(to: s.ps->ps)
7412 // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
7413 // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
7414 // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | TO
7415 //
7416 // map(s.ps->ps->ps)
7417 // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
7418 // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
7419 // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
7420 // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
7421 //
7422 // map(to: s.ps->ps->s.f[:22])
7423 // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
7424 // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
7425 // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
7426 // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
7427 //
7428 // map(ps)
7429 // &ps, &ps, sizeof(S2*), TARGET_PARAM | TO | FROM
7430 //
7431 // map(ps->i)
7432 // ps, &(ps->i), sizeof(int), TARGET_PARAM | TO | FROM
7433 //
7434 // map(ps->s.f)
7435 // ps, &(ps->s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
7436 //
7437 // map(from: ps->p)
7438 // ps, &(ps->p), sizeof(double*), TARGET_PARAM | FROM
7439 //
7440 // map(to: ps->p[:22])
7441 // ps, &(ps->p), sizeof(double*), TARGET_PARAM
7442 // ps, &(ps->p), sizeof(double*), MEMBER_OF(1)
7443 // &(ps->p), &(ps->p[0]), 22*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | TO
7444 //
7445 // map(ps->ps)
7446 // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM | TO | FROM
7447 //
7448 // map(from: ps->ps->s.i)
7449 // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
7450 // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
7451 // &(ps->ps), &(ps->ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
7452 //
7453 // map(from: ps->ps->ps)
7454 // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
7455 // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
7456 // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | FROM
7457 //
7458 // map(ps->ps->ps->ps)
7459 // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
7460 // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
7461 // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
7462 // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
7463 //
7464 // map(to: ps->ps->ps->s.f[:22])
7465 // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
7466 // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
7467 // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
7468 // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
7469 //
7470 // map(to: s.f[:22]) map(from: s.p[:33])
7471 // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1) +
7472 // sizeof(double*) (**), TARGET_PARAM
7473 // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | TO
7474 // &s, &(s.p), sizeof(double*), MEMBER_OF(1)
7475 // &(s.p), &(s.p[0]), 33*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | FROM
7476 // (*) allocate contiguous space needed to fit all mapped members even if
7477 // we allocate space for members not mapped (in this example,
7478 // s.f[22..49] and s.s are not mapped, yet we must allocate space for
7479 // them as well because they fall between &s.f[0] and &s.p)
7480 //
7481 // map(from: s.f[:22]) map(to: ps->p[:33])
7482 // &s, &(s.f[0]), 22*sizeof(float), TARGET_PARAM | FROM
7483 // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
7484 // ps, &(ps->p), sizeof(double*), MEMBER_OF(2) (*)
7485 // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(2) | PTR_AND_OBJ | TO
7486 // (*) the struct this entry pertains to is the 2nd element in the list of
7487 // arguments, hence MEMBER_OF(2)
7488 //
7489 // map(from: s.f[:22], s.s) map(to: ps->p[:33])
7490 // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1), TARGET_PARAM
7491 // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | FROM
7492 // &s, &(s.s), sizeof(struct S1), MEMBER_OF(1) | FROM
7493 // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
7494 // ps, &(ps->p), sizeof(double*), MEMBER_OF(4) (*)
7495 // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(4) | PTR_AND_OBJ | TO
7496 // (*) the struct this entry pertains to is the 4th element in the list
7497 // of arguments, hence MEMBER_OF(4)
7498
7499 // Track if the map information being generated is the first for a capture.
7500 bool IsCaptureFirstInfo = IsFirstComponentList;
7501 // When the variable is on a declare target link or in a to clause with
7502 // unified memory, a reference is needed to hold the host/device address
7503 // of the variable.
7504 bool RequiresReference = false;
7505
7506 // Scan the components from the base to the complete expression.
7507 auto CI = Components.rbegin();
7508 auto CE = Components.rend();
7509 auto I = CI;
7510
7511 // Track if the map information being generated is the first for a list of
7512 // components.
7513 bool IsExpressionFirstInfo = true;
7514 bool FirstPointerInComplexData = false;
7515 Address BP = Address::invalid();
7516 const Expr *AssocExpr = I->getAssociatedExpression();
7517 const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr);
7518 const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
7519 const auto *OAShE = dyn_cast<OMPArrayShapingExpr>(AssocExpr);
7520
7521 if (isa<MemberExpr>(AssocExpr)) {
7522 // The base is the 'this' pointer. The content of the pointer is going
7523 // to be the base of the field being mapped.
7524 BP = CGF.LoadCXXThisAddress();
7525 } else if ((AE && isa<CXXThisExpr>(AE->getBase()->IgnoreParenImpCasts())) ||
7526 (OASE &&
7527 isa<CXXThisExpr>(OASE->getBase()->IgnoreParenImpCasts()))) {
7528 BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
7529 } else if (OAShE &&
7530 isa<CXXThisExpr>(OAShE->getBase()->IgnoreParenCasts())) {
7531 BP = Address(
7532 CGF.EmitScalarExpr(OAShE->getBase()),
7533 CGF.getContext().getTypeAlignInChars(OAShE->getBase()->getType()));
7534 } else {
7535 // The base is the reference to the variable.
7536 // BP = &Var.
7537 BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
7538 if (const auto *VD =
7539 dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
7540 if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
7541 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
7542 if ((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
7543 (*Res == OMPDeclareTargetDeclAttr::MT_To &&
7544 CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) {
7545 RequiresReference = true;
7546 BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
7547 }
7548 }
7549 }
7550
7551 // If the variable is a pointer and is being dereferenced (i.e. is not
7552 // the last component), the base has to be the pointer itself, not its
7553 // reference. References are ignored for mapping purposes.
7554 QualType Ty =
7555 I->getAssociatedDeclaration()->getType().getNonReferenceType();
7556 if (Ty->isAnyPointerType() && std::next(I) != CE) {
7557 // No need to generate individual map information for the pointer, it
7558 // can be associated with the combined storage if shared memory mode is
7559 // active or the base declaration is not global variable.
7560 const auto *VD = dyn_cast<VarDecl>(I->getAssociatedDeclaration());
7561 if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
7562 !VD || VD->hasLocalStorage())
7563 BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
7564 else
7565 FirstPointerInComplexData = true;
7566 ++I;
7567 }
7568 }
7569
7570 // Track whether a component of the list should be marked as MEMBER_OF some
7571 // combined entry (for partial structs). Only the first PTR_AND_OBJ entry
7572 // in a component list should be marked as MEMBER_OF, all subsequent entries
7573 // do not belong to the base struct. E.g.
7574 // struct S2 s;
7575 // s.ps->ps->ps->f[:]
7576 // (1) (2) (3) (4)
7577 // ps(1) is a member pointer, ps(2) is a pointee of ps(1), so it is a
7578 // PTR_AND_OBJ entry; the PTR is ps(1), so MEMBER_OF the base struct. ps(3)
7579 // is the pointee of ps(2) which is not member of struct s, so it should not
7580 // be marked as such (it is still PTR_AND_OBJ).
7581 // The variable is initialized to false so that PTR_AND_OBJ entries which
7582 // are not struct members are not considered (e.g. array of pointers to
7583 // data).
7584 bool ShouldBeMemberOf = false;
7585
7586 // Variable keeping track of whether or not we have encountered a component
7587 // in the component list which is a member expression. Useful when we have a
7588 // pointer or a final array section, in which case it is the previous
7589 // component in the list which tells us whether we have a member expression.
7590 // E.g. X.f[:]
7591 // While processing the final array section "[:]" it is "f" which tells us
7592 // whether we are dealing with a member of a declared struct.
7593 const MemberExpr *EncounteredME = nullptr;
7594
7595 for (; I != CE; ++I) {
7596 // If the current component is member of a struct (parent struct) mark it.
7597 if (!EncounteredME) {
7598 EncounteredME = dyn_cast<MemberExpr>(I->getAssociatedExpression());
7599 // If we encounter a PTR_AND_OBJ entry from now on it should be marked
7600 // as MEMBER_OF the parent struct.
7601 if (EncounteredME) {
7602 ShouldBeMemberOf = true;
7603 // Do not emit as complex pointer if this is actually not array-like
7604 // expression.
7605 if (FirstPointerInComplexData) {
7606 QualType Ty = std::prev(I)
7607 ->getAssociatedDeclaration()
7608 ->getType()
7609 .getNonReferenceType();
7610 BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
7611 FirstPointerInComplexData = false;
7612 }
7613 }
7614 }
7615
7616 auto Next = std::next(I);
7617
7618 // We need to generate the addresses and sizes if this is the last
7619 // component, if the component is a pointer or if it is an array section
7620 // whose length can't be proved to be one. If this is a pointer, it
7621 // becomes the base address for the following components.
7622
7623 // A final array section, is one whose length can't be proved to be one.
7624 bool IsFinalArraySection =
7625 isFinalArraySectionExpression(I->getAssociatedExpression());
7626
7627 // Get information on whether the element is a pointer. Have to do a
7628 // special treatment for array sections given that they are built-in
7629 // types.
7630 const auto *OASE =
7631 dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
7632 const auto *OAShE =
7633 dyn_cast<OMPArrayShapingExpr>(I->getAssociatedExpression());
7634 const auto *UO = dyn_cast<UnaryOperator>(I->getAssociatedExpression());
7635 const auto *BO = dyn_cast<BinaryOperator>(I->getAssociatedExpression());
7636 bool IsPointer =
7637 OAShE ||
7638 (OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
7639 .getCanonicalType()
7640 ->isAnyPointerType()) ||
7641 I->getAssociatedExpression()->getType()->isAnyPointerType();
7642 bool IsNonDerefPointer = IsPointer && !UO && !BO;
7643
7644 if (Next == CE || IsNonDerefPointer || IsFinalArraySection) {
7645 // If this is not the last component, we expect the pointer to be
7646 // associated with an array expression or member expression.
7647 assert((Next == CE ||(((Next == CE || isa<MemberExpr>(Next->getAssociatedExpression
()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression
()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression
()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression
()) || isa<UnaryOperator>(Next->getAssociatedExpression
()) || isa<BinaryOperator>(Next->getAssociatedExpression
())) && "Unexpected expression") ? static_cast<void
> (0) : __assert_fail ("(Next == CE || isa<MemberExpr>(Next->getAssociatedExpression()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) || isa<UnaryOperator>(Next->getAssociatedExpression()) || isa<BinaryOperator>(Next->getAssociatedExpression())) && \"Unexpected expression\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7654, __PRETTY_FUNCTION__))
7648 isa<MemberExpr>(Next->getAssociatedExpression()) ||(((Next == CE || isa<MemberExpr>(Next->getAssociatedExpression
()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression
()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression
()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression
()) || isa<UnaryOperator>(Next->getAssociatedExpression
()) || isa<BinaryOperator>(Next->getAssociatedExpression
())) && "Unexpected expression") ? static_cast<void
> (0) : __assert_fail ("(Next == CE || isa<MemberExpr>(Next->getAssociatedExpression()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) || isa<UnaryOperator>(Next->getAssociatedExpression()) || isa<BinaryOperator>(Next->getAssociatedExpression())) && \"Unexpected expression\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7654, __PRETTY_FUNCTION__))
7649 isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||(((Next == CE || isa<MemberExpr>(Next->getAssociatedExpression
()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression
()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression
()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression
()) || isa<UnaryOperator>(Next->getAssociatedExpression
()) || isa<BinaryOperator>(Next->getAssociatedExpression
())) && "Unexpected expression") ? static_cast<void
> (0) : __assert_fail ("(Next == CE || isa<MemberExpr>(Next->getAssociatedExpression()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) || isa<UnaryOperator>(Next->getAssociatedExpression()) || isa<BinaryOperator>(Next->getAssociatedExpression())) && \"Unexpected expression\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7654, __PRETTY_FUNCTION__))
7650 isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) ||(((Next == CE || isa<MemberExpr>(Next->getAssociatedExpression
()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression
()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression
()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression
()) || isa<UnaryOperator>(Next->getAssociatedExpression
()) || isa<BinaryOperator>(Next->getAssociatedExpression
())) && "Unexpected expression") ? static_cast<void
> (0) : __assert_fail ("(Next == CE || isa<MemberExpr>(Next->getAssociatedExpression()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) || isa<UnaryOperator>(Next->getAssociatedExpression()) || isa<BinaryOperator>(Next->getAssociatedExpression())) && \"Unexpected expression\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7654, __PRETTY_FUNCTION__))
7651 isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) ||(((Next == CE || isa<MemberExpr>(Next->getAssociatedExpression
()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression
()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression
()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression
()) || isa<UnaryOperator>(Next->getAssociatedExpression
()) || isa<BinaryOperator>(Next->getAssociatedExpression
())) && "Unexpected expression") ? static_cast<void
> (0) : __assert_fail ("(Next == CE || isa<MemberExpr>(Next->getAssociatedExpression()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) || isa<UnaryOperator>(Next->getAssociatedExpression()) || isa<BinaryOperator>(Next->getAssociatedExpression())) && \"Unexpected expression\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7654, __PRETTY_FUNCTION__))
7652 isa<UnaryOperator>(Next->getAssociatedExpression()) ||(((Next == CE || isa<MemberExpr>(Next->getAssociatedExpression
()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression
()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression
()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression
()) || isa<UnaryOperator>(Next->getAssociatedExpression
()) || isa<BinaryOperator>(Next->getAssociatedExpression
())) && "Unexpected expression") ? static_cast<void
> (0) : __assert_fail ("(Next == CE || isa<MemberExpr>(Next->getAssociatedExpression()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) || isa<UnaryOperator>(Next->getAssociatedExpression()) || isa<BinaryOperator>(Next->getAssociatedExpression())) && \"Unexpected expression\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7654, __PRETTY_FUNCTION__))
7653 isa<BinaryOperator>(Next->getAssociatedExpression())) &&(((Next == CE || isa<MemberExpr>(Next->getAssociatedExpression
()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression
()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression
()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression
()) || isa<UnaryOperator>(Next->getAssociatedExpression
()) || isa<BinaryOperator>(Next->getAssociatedExpression
())) && "Unexpected expression") ? static_cast<void
> (0) : __assert_fail ("(Next == CE || isa<MemberExpr>(Next->getAssociatedExpression()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) || isa<UnaryOperator>(Next->getAssociatedExpression()) || isa<BinaryOperator>(Next->getAssociatedExpression())) && \"Unexpected expression\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7654, __PRETTY_FUNCTION__))
7654 "Unexpected expression")(((Next == CE || isa<MemberExpr>(Next->getAssociatedExpression
()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression
()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression
()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression
()) || isa<UnaryOperator>(Next->getAssociatedExpression
()) || isa<BinaryOperator>(Next->getAssociatedExpression
())) && "Unexpected expression") ? static_cast<void
> (0) : __assert_fail ("(Next == CE || isa<MemberExpr>(Next->getAssociatedExpression()) || isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) || isa<OMPArraySectionExpr>(Next->getAssociatedExpression()) || isa<OMPArrayShapingExpr>(Next->getAssociatedExpression()) || isa<UnaryOperator>(Next->getAssociatedExpression()) || isa<BinaryOperator>(Next->getAssociatedExpression())) && \"Unexpected expression\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7654, __PRETTY_FUNCTION__))
;
7655
7656 Address LB = Address::invalid();
7657 if (OAShE) {
7658 LB = Address(CGF.EmitScalarExpr(OAShE->getBase()),
7659 CGF.getContext().getTypeAlignInChars(
7660 OAShE->getBase()->getType()));
7661 } else {
7662 LB = CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
7663 .getAddress(CGF);
7664 }
7665
7666 // If this component is a pointer inside the base struct then we don't
7667 // need to create any entry for it - it will be combined with the object
7668 // it is pointing to into a single PTR_AND_OBJ entry.
7669 bool IsMemberPointerOrAddr =
7670 (IsPointer || ForDeviceAddr) && EncounteredME &&
7671 (dyn_cast<MemberExpr>(I->getAssociatedExpression()) ==
7672 EncounteredME);
7673 if (!OverlappedElements.empty()) {
7674 // Handle base element with the info for overlapped elements.
7675 assert(!PartialStruct.Base.isValid() && "The base element is set.")((!PartialStruct.Base.isValid() && "The base element is set."
) ? static_cast<void> (0) : __assert_fail ("!PartialStruct.Base.isValid() && \"The base element is set.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7675, __PRETTY_FUNCTION__))
;
7676 assert(Next == CE &&((Next == CE && "Expected last element for the overlapped elements."
) ? static_cast<void> (0) : __assert_fail ("Next == CE && \"Expected last element for the overlapped elements.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7677, __PRETTY_FUNCTION__))
7677 "Expected last element for the overlapped elements.")((Next == CE && "Expected last element for the overlapped elements."
) ? static_cast<void> (0) : __assert_fail ("Next == CE && \"Expected last element for the overlapped elements.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7677, __PRETTY_FUNCTION__))
;
7678 assert(!IsPointer &&((!IsPointer && "Unexpected base element with the pointer type."
) ? static_cast<void> (0) : __assert_fail ("!IsPointer && \"Unexpected base element with the pointer type.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7679, __PRETTY_FUNCTION__))
7679 "Unexpected base element with the pointer type.")((!IsPointer && "Unexpected base element with the pointer type."
) ? static_cast<void> (0) : __assert_fail ("!IsPointer && \"Unexpected base element with the pointer type.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7679, __PRETTY_FUNCTION__))
;
7680 // Mark the whole struct as the struct that requires allocation on the
7681 // device.
7682 PartialStruct.LowestElem = {0, LB};
7683 CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(
7684 I->getAssociatedExpression()->getType());
7685 Address HB = CGF.Builder.CreateConstGEP(
7686 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(LB,
7687 CGF.VoidPtrTy),
7688 TypeSize.getQuantity() - 1);
7689 PartialStruct.HighestElem = {
7690 std::numeric_limits<decltype(
7691 PartialStruct.HighestElem.first)>::max(),
7692 HB};
7693 PartialStruct.Base = BP;
7694 // Emit data for non-overlapped data.
7695 OpenMPOffloadMappingFlags Flags =
7696 OMP_MAP_MEMBER_OF |
7697 getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
7698 /*AddPtrFlag=*/false,
7699 /*AddIsTargetParamFlag=*/false);
7700 LB = BP;
7701 llvm::Value *Size = nullptr;
7702 // Do bitcopy of all non-overlapped structure elements.
7703 for (OMPClauseMappableExprCommon::MappableExprComponentListRef
7704 Component : OverlappedElements) {
7705 Address ComponentLB = Address::invalid();
7706 for (const OMPClauseMappableExprCommon::MappableComponent &MC :
7707 Component) {
7708 if (MC.getAssociatedDeclaration()) {
7709 ComponentLB =
7710 CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
7711 .getAddress(CGF);
7712 Size = CGF.Builder.CreatePtrDiff(
7713 CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
7714 CGF.EmitCastToVoidPtr(LB.getPointer()));
7715 break;
7716 }
7717 }
7718 assert(Size && "Failed to determine structure size")((Size && "Failed to determine structure size") ? static_cast
<void> (0) : __assert_fail ("Size && \"Failed to determine structure size\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7718, __PRETTY_FUNCTION__))
;
7719 CombinedInfo.BasePointers.push_back(BP.getPointer());
7720 CombinedInfo.Pointers.push_back(LB.getPointer());
7721 CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
7722 Size, CGF.Int64Ty, /*isSigned=*/true));
7723 CombinedInfo.Types.push_back(Flags);
7724 CombinedInfo.Mappers.push_back(nullptr);
7725 LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
7726 }
7727 CombinedInfo.BasePointers.push_back(BP.getPointer());
7728 CombinedInfo.Pointers.push_back(LB.getPointer());
7729 Size = CGF.Builder.CreatePtrDiff(
7730 CGF.EmitCastToVoidPtr(
7731 CGF.Builder.CreateConstGEP(HB, 1).getPointer()),
7732 CGF.EmitCastToVoidPtr(LB.getPointer()));
7733 CombinedInfo.Sizes.push_back(
7734 CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
7735 CombinedInfo.Types.push_back(Flags);
7736 CombinedInfo.Mappers.push_back(nullptr);
7737 break;
7738 }
7739 llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
7740 if (!IsMemberPointerOrAddr) {
7741 CombinedInfo.BasePointers.push_back(BP.getPointer());
7742 CombinedInfo.Pointers.push_back(LB.getPointer());
7743 CombinedInfo.Sizes.push_back(
7744 CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
7745
7746 // If Mapper is valid, the last component inherits the mapper.
7747 bool HasMapper = Mapper && Next == CE;
7748 CombinedInfo.Mappers.push_back(HasMapper ? Mapper : nullptr);
7749
7750 // We need to add a pointer flag for each map that comes from the
7751 // same expression except for the first one. We also need to signal
7752 // this map is the first one that relates with the current capture
7753 // (there is a set of entries for each capture).
7754 OpenMPOffloadMappingFlags Flags =
7755 getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
7756 !IsExpressionFirstInfo || RequiresReference ||
7757 FirstPointerInComplexData,
7758 IsCaptureFirstInfo && !RequiresReference);
7759
7760 if (!IsExpressionFirstInfo) {
7761 // If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
7762 // then we reset the TO/FROM/ALWAYS/DELETE/CLOSE flags.
7763 if (IsPointer)
7764 Flags &= ~(OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS |
7765 OMP_MAP_DELETE | OMP_MAP_CLOSE);
7766
7767 if (ShouldBeMemberOf) {
7768 // Set placeholder value MEMBER_OF=FFFF to indicate that the flag
7769 // should be later updated with the correct value of MEMBER_OF.
7770 Flags |= OMP_MAP_MEMBER_OF;
7771 // From now on, all subsequent PTR_AND_OBJ entries should not be
7772 // marked as MEMBER_OF.
7773 ShouldBeMemberOf = false;
7774 }
7775 }
7776
7777 CombinedInfo.Types.push_back(Flags);
7778 }
7779
7780 // If we have encountered a member expression so far, keep track of the
7781 // mapped member. If the parent is "*this", then the value declaration
7782 // is nullptr.
7783 if (EncounteredME) {
7784 const auto *FD = cast<FieldDecl>(EncounteredME->getMemberDecl());
7785 unsigned FieldIndex = FD->getFieldIndex();
7786
7787 // Update info about the lowest and highest elements for this struct
7788 if (!PartialStruct.Base.isValid()) {
7789 PartialStruct.LowestElem = {FieldIndex, LB};
7790 if (IsFinalArraySection) {
7791 Address HB =
7792 CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false)
7793 .getAddress(CGF);
7794 PartialStruct.HighestElem = {FieldIndex, HB};
7795 } else {
7796 PartialStruct.HighestElem = {FieldIndex, LB};
7797 }
7798 PartialStruct.Base = BP;
7799 } else if (FieldIndex < PartialStruct.LowestElem.first) {
7800 PartialStruct.LowestElem = {FieldIndex, LB};
7801 } else if (FieldIndex > PartialStruct.HighestElem.first) {
7802 PartialStruct.HighestElem = {FieldIndex, LB};
7803 }
7804 }
7805
7806 // If we have a final array section, we are done with this expression.
7807 if (IsFinalArraySection)
7808 break;
7809
7810 // The pointer becomes the base for the next element.
7811 if (Next != CE)
7812 BP = LB;
7813
7814 IsExpressionFirstInfo = false;
7815 IsCaptureFirstInfo = false;
7816 FirstPointerInComplexData = false;
7817 }
7818 }
7819 }
7820
7821 /// Return the adjusted map modifiers if the declaration a capture refers to
7822 /// appears in a first-private clause. This is expected to be used only with
7823 /// directives that start with 'target'.
7824 MappableExprsHandler::OpenMPOffloadMappingFlags
7825 getMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap) const {
7826 assert(Cap.capturesVariable() && "Expected capture by reference only!")((Cap.capturesVariable() && "Expected capture by reference only!"
) ? static_cast<void> (0) : __assert_fail ("Cap.capturesVariable() && \"Expected capture by reference only!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7826, __PRETTY_FUNCTION__))
;
7827
7828 // A first private variable captured by reference will use only the
7829 // 'private ptr' and 'map to' flag. Return the right flags if the captured
7830 // declaration is known as first-private in this handler.
7831 if (FirstPrivateDecls.count(Cap.getCapturedVar())) {
7832 if (Cap.getCapturedVar()->getType().isConstant(CGF.getContext()) &&
7833 Cap.getCaptureKind() == CapturedStmt::VCK_ByRef)
7834 return MappableExprsHandler::OMP_MAP_ALWAYS |
7835 MappableExprsHandler::OMP_MAP_TO;
7836 if (Cap.getCapturedVar()->getType()->isAnyPointerType())
7837 return MappableExprsHandler::OMP_MAP_TO |
7838 MappableExprsHandler::OMP_MAP_PTR_AND_OBJ;
7839 return MappableExprsHandler::OMP_MAP_PRIVATE |
7840 MappableExprsHandler::OMP_MAP_TO;
7841 }
7842 return MappableExprsHandler::OMP_MAP_TO |
7843 MappableExprsHandler::OMP_MAP_FROM;
7844 }
7845
7846 static OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position) {
7847 // Rotate by getFlagMemberOffset() bits.
7848 return static_cast<OpenMPOffloadMappingFlags>(((uint64_t)Position + 1)
7849 << getFlagMemberOffset());
7850 }
7851
7852 static void setCorrectMemberOfFlag(OpenMPOffloadMappingFlags &Flags,
7853 OpenMPOffloadMappingFlags MemberOfFlag) {
7854 // If the entry is PTR_AND_OBJ but has not been marked with the special
7855 // placeholder value 0xFFFF in the MEMBER_OF field, then it should not be
7856 // marked as MEMBER_OF.
7857 if ((Flags & OMP_MAP_PTR_AND_OBJ) &&
7858 ((Flags & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF))
7859 return;
7860
7861 // Reset the placeholder value to prepare the flag for the assignment of the
7862 // proper MEMBER_OF value.
7863 Flags &= ~OMP_MAP_MEMBER_OF;
7864 Flags |= MemberOfFlag;
7865 }
7866
7867 void getPlainLayout(const CXXRecordDecl *RD,
7868 llvm::SmallVectorImpl<const FieldDecl *> &Layout,
7869 bool AsBase) const {
7870 const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(RD);
7871
7872 llvm::StructType *St =
7873 AsBase ? RL.getBaseSubobjectLLVMType() : RL.getLLVMType();
7874
7875 unsigned NumElements = St->getNumElements();
7876 llvm::SmallVector<
7877 llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>, 4>
7878 RecordLayout(NumElements);
7879
7880 // Fill bases.
7881 for (const auto &I : RD->bases()) {
7882 if (I.isVirtual())
7883 continue;
7884 const auto *Base = I.getType()->getAsCXXRecordDecl();
7885 // Ignore empty bases.
7886 if (Base->isEmpty() || CGF.getContext()
7887 .getASTRecordLayout(Base)
7888 .getNonVirtualSize()
7889 .isZero())
7890 continue;
7891
7892 unsigned FieldIndex = RL.getNonVirtualBaseLLVMFieldNo(Base);
7893 RecordLayout[FieldIndex] = Base;
7894 }
7895 // Fill in virtual bases.
7896 for (const auto &I : RD->vbases()) {
7897 const auto *Base = I.getType()->getAsCXXRecordDecl();
7898 // Ignore empty bases.
7899 if (Base->isEmpty())
7900 continue;
7901 unsigned FieldIndex = RL.getVirtualBaseIndex(Base);
7902 if (RecordLayout[FieldIndex])
7903 continue;
7904 RecordLayout[FieldIndex] = Base;
7905 }
7906 // Fill in all the fields.
7907 assert(!RD->isUnion() && "Unexpected union.")((!RD->isUnion() && "Unexpected union.") ? static_cast
<void> (0) : __assert_fail ("!RD->isUnion() && \"Unexpected union.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 7907, __PRETTY_FUNCTION__))
;
7908 for (const auto *Field : RD->fields()) {
7909 // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
7910 // will fill in later.)
7911 if (!Field->isBitField() && !Field->isZeroSize(CGF.getContext())) {
7912 unsigned FieldIndex = RL.getLLVMFieldNo(Field);
7913 RecordLayout[FieldIndex] = Field;
7914 }
7915 }
7916 for (const llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>
7917 &Data : RecordLayout) {
7918 if (Data.isNull())
7919 continue;
7920 if (const auto *Base = Data.dyn_cast<const CXXRecordDecl *>())
7921 getPlainLayout(Base, Layout, /*AsBase=*/true);
7922 else
7923 Layout.push_back(Data.get<const FieldDecl *>());
7924 }
7925 }
7926
7927public:
7928 MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
7929 : CurDir(&Dir), CGF(CGF) {
7930 // Extract firstprivate clause information.
7931 for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
7932 for (const auto *D : C->varlists())
7933 FirstPrivateDecls.try_emplace(
7934 cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl()), C->isImplicit());
7935 // Extract implicit firstprivates from uses_allocators clauses.
7936 for (const auto *C : Dir.getClausesOfKind<OMPUsesAllocatorsClause>()) {
7937 for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
7938 OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
7939 if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(D.AllocatorTraits))
7940 FirstPrivateDecls.try_emplace(cast<VarDecl>(DRE->getDecl()),
7941 /*Implicit=*/true);
7942 else if (const auto *VD = dyn_cast<VarDecl>(
7943 cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts())
7944 ->getDecl()))
7945 FirstPrivateDecls.try_emplace(VD, /*Implicit=*/true);
7946 }
7947 }
7948 // Extract device pointer clause information.
7949 for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
7950 for (auto L : C->component_lists())
7951 DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
7952 }
7953
7954 /// Constructor for the declare mapper directive.
7955 MappableExprsHandler(const OMPDeclareMapperDecl &Dir, CodeGenFunction &CGF)
7956 : CurDir(&Dir), CGF(CGF) {}
7957
7958 /// Generate code for the combined entry if we have a partially mapped struct
7959 /// and take care of the mapping flags of the arguments corresponding to
7960 /// individual struct members.
7961 void emitCombinedEntry(MapCombinedInfoTy &CombinedInfo,
7962 MapFlagsArrayTy &CurTypes,
7963 const StructRangeInfoTy &PartialStruct,
7964 bool NotTargetParams = false) const {
7965 // Base is the base of the struct
7966 CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer());
7967 // Pointer is the address of the lowest element
7968 llvm::Value *LB = PartialStruct.LowestElem.second.getPointer();
7969 CombinedInfo.Pointers.push_back(LB);
7970 // There should not be a mapper for a combined entry.
7971 CombinedInfo.Mappers.push_back(nullptr);
7972 // Size is (addr of {highest+1} element) - (addr of lowest element)
7973 llvm::Value *HB = PartialStruct.HighestElem.second.getPointer();
7974 llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(HB, /*Idx0=*/1);
7975 llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
7976 llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
7977 llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
7978 llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
7979 /*isSigned=*/false);
7980 CombinedInfo.Sizes.push_back(Size);
7981 // Map type is always TARGET_PARAM, if generate info for captures.
7982 CombinedInfo.Types.push_back(NotTargetParams ? OMP_MAP_NONE
7983 : OMP_MAP_TARGET_PARAM);
7984 // If any element has the present modifier, then make sure the runtime
7985 // doesn't attempt to allocate the struct.
7986 if (CurTypes.end() !=
7987 llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
7988 return Type & OMP_MAP_PRESENT;
7989 }))
7990 CombinedInfo.Types.back() |= OMP_MAP_PRESENT;
7991 // Remove TARGET_PARAM flag from the first element
7992 (*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM;
7993
7994 // All other current entries will be MEMBER_OF the combined entry
7995 // (except for PTR_AND_OBJ entries which do not have a placeholder value
7996 // 0xFFFF in the MEMBER_OF field).
7997 OpenMPOffloadMappingFlags MemberOfFlag =
7998 getMemberOfFlag(CombinedInfo.BasePointers.size() - 1);
7999 for (auto &M : CurTypes)
8000 setCorrectMemberOfFlag(M, MemberOfFlag);
8001 }
8002
8003 /// Generate all the base pointers, section pointers, sizes, map types, and
8004 /// mappers for the extracted mappable expressions (all included in \a
8005 /// CombinedInfo). Also, for each item that relates with a device pointer, a
8006 /// pair of the relevant declaration and index where it occurs is appended to
8007 /// the device pointers info array.
8008 void generateAllInfo(
8009 MapCombinedInfoTy &CombinedInfo, bool NotTargetParams = false,
8010 const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
8011 llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
8012 // We have to process the component lists that relate with the same
8013 // declaration in a single chunk so that we can generate the map flags
8014 // correctly. Therefore, we organize all lists in a map.
8015 llvm::MapVector<const ValueDecl *, SmallVector<MapInfo, 8>> Info;
8016
8017 // Helper function to fill the information map for the different supported
8018 // clauses.
8019 auto &&InfoGen =
8020 [&Info, &SkipVarSet](
8021 const ValueDecl *D,
8022 OMPClauseMappableExprCommon::MappableExprComponentListRef L,
8023 OpenMPMapClauseKind MapType,
8024 ArrayRef<OpenMPMapModifierKind> MapModifiers,
8025 ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
8026 bool ReturnDevicePointer, bool IsImplicit, const ValueDecl *Mapper,
8027 bool ForDeviceAddr = false) {
8028 const ValueDecl *VD =
8029 D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
8030 if (SkipVarSet.count(VD))
8031 return;
8032 Info[VD].emplace_back(L, MapType, MapModifiers, MotionModifiers,
8033 ReturnDevicePointer, IsImplicit, Mapper,
8034 ForDeviceAddr);
8035 };
8036
8037 assert(CurDir.is<const OMPExecutableDirective *>() &&((CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive") ? static_cast<void> (
0) : __assert_fail ("CurDir.is<const OMPExecutableDirective *>() && \"Expect a executable directive\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8038, __PRETTY_FUNCTION__))
8038 "Expect a executable directive")((CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive") ? static_cast<void> (
0) : __assert_fail ("CurDir.is<const OMPExecutableDirective *>() && \"Expect a executable directive\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8038, __PRETTY_FUNCTION__))
;
8039 const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
8040 for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>())
8041 for (const auto L : C->component_lists()) {
8042 InfoGen(std::get<0>(L), std::get<1>(L), C->getMapType(),
8043 C->getMapTypeModifiers(), llvm::None,
8044 /*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L));
8045 }
8046 for (const auto *C : CurExecDir->getClausesOfKind<OMPToClause>())
8047 for (const auto L : C->component_lists()) {
8048 InfoGen(std::get<0>(L), std::get<1>(L), OMPC_MAP_to, llvm::None,
8049 C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
8050 C->isImplicit(), std::get<2>(L));
8051 }
8052 for (const auto *C : CurExecDir->getClausesOfKind<OMPFromClause>())
8053 for (const auto L : C->component_lists()) {
8054 InfoGen(std::get<0>(L), std::get<1>(L), OMPC_MAP_from, llvm::None,
8055 C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
8056 C->isImplicit(), std::get<2>(L));
8057 }
8058
8059 // Look at the use_device_ptr clause information and mark the existing map
8060 // entries as such. If there is no map information for an entry in the
8061 // use_device_ptr list, we create one with map type 'alloc' and zero size
8062 // section. It is the user fault if that was not mapped before. If there is
8063 // no map information and the pointer is a struct member, then we defer the
8064 // emission of that entry until the whole struct has been processed.
8065 llvm::MapVector<const ValueDecl *, SmallVector<DeferredDevicePtrEntryTy, 4>>
8066 DeferredInfo;
8067 MapCombinedInfoTy UseDevicePtrCombinedInfo;
8068
8069 for (const auto *C :
8070 CurExecDir->getClausesOfKind<OMPUseDevicePtrClause>()) {
8071 for (const auto L : C->component_lists()) {
8072 OMPClauseMappableExprCommon::MappableExprComponentListRef Components =
8073 std::get<1>(L);
8074 assert(!Components.empty() &&((!Components.empty() && "Not expecting empty list of components!"
) ? static_cast<void> (0) : __assert_fail ("!Components.empty() && \"Not expecting empty list of components!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8075, __PRETTY_FUNCTION__))
8075 "Not expecting empty list of components!")((!Components.empty() && "Not expecting empty list of components!"
) ? static_cast<void> (0) : __assert_fail ("!Components.empty() && \"Not expecting empty list of components!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8075, __PRETTY_FUNCTION__))
;
8076 const ValueDecl *VD = Components.back().getAssociatedDeclaration();
8077 VD = cast<ValueDecl>(VD->getCanonicalDecl());
8078 const Expr *IE = Components.back().getAssociatedExpression();
8079 // If the first component is a member expression, we have to look into
8080 // 'this', which maps to null in the map of map information. Otherwise
8081 // look directly for the information.
8082 auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
8083
8084 // We potentially have map information for this declaration already.
8085 // Look for the first set of components that refer to it.
8086 if (It != Info.end()) {
8087 auto *CI = llvm::find_if(It->second, [VD](const MapInfo &MI) {
8088 return MI.Components.back().getAssociatedDeclaration() == VD;
8089 });
8090 // If we found a map entry, signal that the pointer has to be returned
8091 // and move on to the next declaration.
8092 // Exclude cases where the base pointer is mapped as array subscript,
8093 // array section or array shaping. The base address is passed as a
8094 // pointer to base in this case and cannot be used as a base for
8095 // use_device_ptr list item.
8096 if (CI != It->second.end()) {
8097 auto PrevCI = std::next(CI->Components.rbegin());
8098 const auto *VarD = dyn_cast<VarDecl>(VD);
8099 if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
8100 isa<MemberExpr>(IE) ||
8101 !VD->getType().getNonReferenceType()->isPointerType() ||
8102 PrevCI == CI->Components.rend() ||
8103 isa<MemberExpr>(PrevCI->getAssociatedExpression()) || !VarD ||
8104 VarD->hasLocalStorage()) {
8105 CI->ReturnDevicePointer = true;
8106 continue;
8107 }
8108 }
8109 }
8110
8111 // We didn't find any match in our map information - generate a zero
8112 // size array section - if the pointer is a struct member we defer this
8113 // action until the whole struct has been processed.
8114 if (isa<MemberExpr>(IE)) {
8115 // Insert the pointer into Info to be processed by
8116 // generateInfoForComponentList. Because it is a member pointer
8117 // without a pointee, no entry will be generated for it, therefore
8118 // we need to generate one after the whole struct has been processed.
8119 // Nonetheless, generateInfoForComponentList must be called to take
8120 // the pointer into account for the calculation of the range of the
8121 // partial struct.
8122 InfoGen(nullptr, Components, OMPC_MAP_unknown, llvm::None, llvm::None,
8123 /*ReturnDevicePointer=*/false, C->isImplicit(), nullptr);
8124 DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/false);
8125 } else {
8126 llvm::Value *Ptr =
8127 CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc());
8128 UseDevicePtrCombinedInfo.BasePointers.emplace_back(Ptr, VD);
8129 UseDevicePtrCombinedInfo.Pointers.push_back(Ptr);
8130 UseDevicePtrCombinedInfo.Sizes.push_back(
8131 llvm::Constant::getNullValue(CGF.Int64Ty));
8132 UseDevicePtrCombinedInfo.Types.push_back(
8133 OMP_MAP_RETURN_PARAM |
8134 (NotTargetParams ? OMP_MAP_NONE : OMP_MAP_TARGET_PARAM));
8135 UseDevicePtrCombinedInfo.Mappers.push_back(nullptr);
8136 }
8137 }
8138 }
8139
8140 // Look at the use_device_addr clause information and mark the existing map
8141 // entries as such. If there is no map information for an entry in the
8142 // use_device_addr list, we create one with map type 'alloc' and zero size
8143 // section. It is the user fault if that was not mapped before. If there is
8144 // no map information and the pointer is a struct member, then we defer the
8145 // emission of that entry until the whole struct has been processed.
8146 llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
8147 for (const auto *C :
8148 CurExecDir->getClausesOfKind<OMPUseDeviceAddrClause>()) {
8149 for (const auto L : C->component_lists()) {
8150 assert(!std::get<1>(L).empty() &&((!std::get<1>(L).empty() && "Not expecting empty list of components!"
) ? static_cast<void> (0) : __assert_fail ("!std::get<1>(L).empty() && \"Not expecting empty list of components!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8151, __PRETTY_FUNCTION__))
8151 "Not expecting empty list of components!")((!std::get<1>(L).empty() && "Not expecting empty list of components!"
) ? static_cast<void> (0) : __assert_fail ("!std::get<1>(L).empty() && \"Not expecting empty list of components!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8151, __PRETTY_FUNCTION__))
;
8152 const ValueDecl *VD = std::get<1>(L).back().getAssociatedDeclaration();
8153 if (!Processed.insert(VD).second)
8154 continue;
8155 VD = cast<ValueDecl>(VD->getCanonicalDecl());
8156 const Expr *IE = std::get<1>(L).back().getAssociatedExpression();
8157 // If the first component is a member expression, we have to look into
8158 // 'this', which maps to null in the map of map information. Otherwise
8159 // look directly for the information.
8160 auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
8161
8162 // We potentially have map information for this declaration already.
8163 // Look for the first set of components that refer to it.
8164 if (It != Info.end()) {
8165 auto *CI = llvm::find_if(It->second, [VD](const MapInfo &MI) {
8166 return MI.Components.back().getAssociatedDeclaration() == VD;
8167 });
8168 // If we found a map entry, signal that the pointer has to be returned
8169 // and move on to the next declaration.
8170 if (CI != It->second.end()) {
8171 CI->ReturnDevicePointer = true;
8172 continue;
8173 }
8174 }
8175
8176 // We didn't find any match in our map information - generate a zero
8177 // size array section - if the pointer is a struct member we defer this
8178 // action until the whole struct has been processed.
8179 if (isa<MemberExpr>(IE)) {
8180 // Insert the pointer into Info to be processed by
8181 // generateInfoForComponentList. Because it is a member pointer
8182 // without a pointee, no entry will be generated for it, therefore
8183 // we need to generate one after the whole struct has been processed.
8184 // Nonetheless, generateInfoForComponentList must be called to take
8185 // the pointer into account for the calculation of the range of the
8186 // partial struct.
8187 InfoGen(nullptr, std::get<1>(L), OMPC_MAP_unknown, llvm::None,
8188 llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(),
8189 nullptr, /*ForDeviceAddr=*/true);
8190 DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/true);
8191 } else {
8192 llvm::Value *Ptr;
8193 if (IE->isGLValue())
8194 Ptr = CGF.EmitLValue(IE).getPointer(CGF);
8195 else
8196 Ptr = CGF.EmitScalarExpr(IE);
8197 CombinedInfo.BasePointers.emplace_back(Ptr, VD);
8198 CombinedInfo.Pointers.push_back(Ptr);
8199 CombinedInfo.Sizes.push_back(
8200 llvm::Constant::getNullValue(CGF.Int64Ty));
8201 CombinedInfo.Types.push_back(
8202 OMP_MAP_RETURN_PARAM |
8203 (NotTargetParams ? OMP_MAP_NONE : OMP_MAP_TARGET_PARAM));
8204 CombinedInfo.Mappers.push_back(nullptr);
8205 }
8206 }
8207 }
8208
8209 for (const auto &M : Info) {
8210 // We need to know when we generate information for the first component
8211 // associated with a capture, because the mapping flags depend on it.
8212 bool IsFirstComponentList = !NotTargetParams;
8213
8214 // Temporary generated information.
8215 MapCombinedInfoTy CurInfo;
8216 StructRangeInfoTy PartialStruct;
8217
8218 for (const MapInfo &L : M.second) {
8219 assert(!L.Components.empty() &&((!L.Components.empty() && "Not expecting declaration with no component lists."
) ? static_cast<void> (0) : __assert_fail ("!L.Components.empty() && \"Not expecting declaration with no component lists.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8220, __PRETTY_FUNCTION__))
8220 "Not expecting declaration with no component lists.")((!L.Components.empty() && "Not expecting declaration with no component lists."
) ? static_cast<void> (0) : __assert_fail ("!L.Components.empty() && \"Not expecting declaration with no component lists.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8220, __PRETTY_FUNCTION__))
;
8221
8222 // Remember the current base pointer index.
8223 unsigned CurrentBasePointersIdx = CurInfo.BasePointers.size();
8224 generateInfoForComponentList(L.MapType, L.MapModifiers,
8225 L.MotionModifiers, L.Components, CurInfo,
8226 PartialStruct, IsFirstComponentList,
8227 L.IsImplicit, L.Mapper, L.ForDeviceAddr);
8228
8229 // If this entry relates with a device pointer, set the relevant
8230 // declaration and add the 'return pointer' flag.
8231 if (L.ReturnDevicePointer) {
8232 assert(CurInfo.BasePointers.size() > CurrentBasePointersIdx &&((CurInfo.BasePointers.size() > CurrentBasePointersIdx &&
"Unexpected number of mapped base pointers.") ? static_cast<
void> (0) : __assert_fail ("CurInfo.BasePointers.size() > CurrentBasePointersIdx && \"Unexpected number of mapped base pointers.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8233, __PRETTY_FUNCTION__))
8233 "Unexpected number of mapped base pointers.")((CurInfo.BasePointers.size() > CurrentBasePointersIdx &&
"Unexpected number of mapped base pointers.") ? static_cast<
void> (0) : __assert_fail ("CurInfo.BasePointers.size() > CurrentBasePointersIdx && \"Unexpected number of mapped base pointers.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8233, __PRETTY_FUNCTION__))
;
8234
8235 const ValueDecl *RelevantVD =
8236 L.Components.back().getAssociatedDeclaration();
8237 assert(RelevantVD &&((RelevantVD && "No relevant declaration related with device pointer??"
) ? static_cast<void> (0) : __assert_fail ("RelevantVD && \"No relevant declaration related with device pointer??\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8238, __PRETTY_FUNCTION__))
8238 "No relevant declaration related with device pointer??")((RelevantVD && "No relevant declaration related with device pointer??"
) ? static_cast<void> (0) : __assert_fail ("RelevantVD && \"No relevant declaration related with device pointer??\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8238, __PRETTY_FUNCTION__))
;
8239
8240 CurInfo.BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(
8241 RelevantVD);
8242 CurInfo.Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
8243 }
8244 IsFirstComponentList = false;
8245 }
8246
8247 // Append any pending zero-length pointers which are struct members and
8248 // used with use_device_ptr or use_device_addr.
8249 auto CI = DeferredInfo.find(M.first);
8250 if (CI != DeferredInfo.end()) {
8251 for (const DeferredDevicePtrEntryTy &L : CI->second) {
8252 llvm::Value *BasePtr;
8253 llvm::Value *Ptr;
8254 if (L.ForDeviceAddr) {
8255 if (L.IE->isGLValue())
8256 Ptr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
8257 else
8258 Ptr = this->CGF.EmitScalarExpr(L.IE);
8259 BasePtr = Ptr;
8260 // Entry is RETURN_PARAM. Also, set the placeholder value
8261 // MEMBER_OF=FFFF so that the entry is later updated with the
8262 // correct value of MEMBER_OF.
8263 CurInfo.Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_MEMBER_OF);
8264 } else {
8265 BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
8266 Ptr = this->CGF.EmitLoadOfScalar(this->CGF.EmitLValue(L.IE),
8267 L.IE->getExprLoc());
8268 // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
8269 // value MEMBER_OF=FFFF so that the entry is later updated with the
8270 // correct value of MEMBER_OF.
8271 CurInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
8272 OMP_MAP_MEMBER_OF);
8273 }
8274 CurInfo.BasePointers.emplace_back(BasePtr, L.VD);
8275 CurInfo.Pointers.push_back(Ptr);
8276 CurInfo.Sizes.push_back(
8277 llvm::Constant::getNullValue(this->CGF.Int64Ty));
8278 CurInfo.Mappers.push_back(nullptr);
8279 }
8280 }
8281
8282 // If there is an entry in PartialStruct it means we have a struct with
8283 // individual members mapped. Emit an extra combined entry.
8284 if (PartialStruct.Base.isValid())
8285 emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct,
8286 NotTargetParams);
8287
8288 // We need to append the results of this capture to what we already have.
8289 CombinedInfo.append(CurInfo);
8290 }
8291 // Append data for use_device_ptr clauses.
8292 CombinedInfo.append(UseDevicePtrCombinedInfo);
8293 }
8294
8295 /// Generate all the base pointers, section pointers, sizes, map types, and
8296 /// mappers for the extracted map clauses of user-defined mapper (all included
8297 /// in \a CombinedInfo).
8298 void generateAllInfoForMapper(MapCombinedInfoTy &CombinedInfo) const {
8299 assert(CurDir.is<const OMPDeclareMapperDecl *>() &&((CurDir.is<const OMPDeclareMapperDecl *>() && "Expect a declare mapper directive"
) ? static_cast<void> (0) : __assert_fail ("CurDir.is<const OMPDeclareMapperDecl *>() && \"Expect a declare mapper directive\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8300, __PRETTY_FUNCTION__))
8300 "Expect a declare mapper directive")((CurDir.is<const OMPDeclareMapperDecl *>() && "Expect a declare mapper directive"
) ? static_cast<void> (0) : __assert_fail ("CurDir.is<const OMPDeclareMapperDecl *>() && \"Expect a declare mapper directive\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8300, __PRETTY_FUNCTION__))
;
8301 const auto *CurMapperDir = CurDir.get<const OMPDeclareMapperDecl *>();
8302 // We have to process the component lists that relate with the same
8303 // declaration in a single chunk so that we can generate the map flags
8304 // correctly. Therefore, we organize all lists in a map.
8305 llvm::MapVector<const ValueDecl *, SmallVector<MapInfo, 8>> Info;
8306
8307 // Fill the information map for map clauses.
8308 for (const auto *C : CurMapperDir->clauselists()) {
8309 const auto *MC = cast<OMPMapClause>(C);
8310 for (const auto L : MC->component_lists()) {
8311 const ValueDecl *VD =
8312 std::get<0>(L) ? cast<ValueDecl>(std::get<0>(L)->getCanonicalDecl())
8313 : nullptr;
8314 // Get the corresponding user-defined mapper.
8315 Info[VD].emplace_back(std::get<1>(L), MC->getMapType(),
8316 MC->getMapTypeModifiers(), llvm::None,
8317 /*ReturnDevicePointer=*/false, MC->isImplicit(),
8318 std::get<2>(L));
8319 }
8320 }
8321
8322 for (const auto &M : Info) {
8323 // We need to know when we generate information for the first component
8324 // associated with a capture, because the mapping flags depend on it.
8325 bool IsFirstComponentList = true;
8326
8327 // Temporary generated information.
8328 MapCombinedInfoTy CurInfo;
8329 StructRangeInfoTy PartialStruct;
8330
8331 for (const MapInfo &L : M.second) {
8332 assert(!L.Components.empty() &&((!L.Components.empty() && "Not expecting declaration with no component lists."
) ? static_cast<void> (0) : __assert_fail ("!L.Components.empty() && \"Not expecting declaration with no component lists.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8333, __PRETTY_FUNCTION__))
8333 "Not expecting declaration with no component lists.")((!L.Components.empty() && "Not expecting declaration with no component lists."
) ? static_cast<void> (0) : __assert_fail ("!L.Components.empty() && \"Not expecting declaration with no component lists.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8333, __PRETTY_FUNCTION__))
;
8334 generateInfoForComponentList(L.MapType, L.MapModifiers,
8335 L.MotionModifiers, L.Components, CurInfo,
8336 PartialStruct, IsFirstComponentList,
8337 L.IsImplicit, L.Mapper, L.ForDeviceAddr);
8338 IsFirstComponentList = false;
8339 }
8340
8341 // If there is an entry in PartialStruct it means we have a struct with
8342 // individual members mapped. Emit an extra combined entry.
8343 if (PartialStruct.Base.isValid())
8344 emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct);
8345
8346 // We need to append the results of this capture to what we already have.
8347 CombinedInfo.append(CurInfo);
8348 }
8349 }
8350
8351 /// Emit capture info for lambdas for variables captured by reference.
8352 void generateInfoForLambdaCaptures(
8353 const ValueDecl *VD, llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
8354 llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers) const {
8355 const auto *RD = VD->getType()
8356 .getCanonicalType()
8357 .getNonReferenceType()
8358 ->getAsCXXRecordDecl();
8359 if (!RD || !RD->isLambda())
8360 return;
8361 Address VDAddr = Address(Arg, CGF.getContext().getDeclAlign(VD));
8362 LValue VDLVal = CGF.MakeAddrLValue(
8363 VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
8364 llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
8365 FieldDecl *ThisCapture = nullptr;
8366 RD->getCaptureFields(Captures, ThisCapture);
8367 if (ThisCapture) {
8368 LValue ThisLVal =
8369 CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
8370 LValue ThisLValVal = CGF.EmitLValueForField(VDLVal, ThisCapture);
8371 LambdaPointers.try_emplace(ThisLVal.getPointer(CGF),
8372 VDLVal.getPointer(CGF));
8373 CombinedInfo.BasePointers.push_back(ThisLVal.getPointer(CGF));
8374 CombinedInfo.Pointers.push_back(ThisLValVal.getPointer(CGF));
8375 CombinedInfo.Sizes.push_back(
8376 CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
8377 CGF.Int64Ty, /*isSigned=*/true));
8378 CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
8379 OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
8380 CombinedInfo.Mappers.push_back(nullptr);
8381 }
8382 for (const LambdaCapture &LC : RD->captures()) {
8383 if (!LC.capturesVariable())
8384 continue;
8385 const VarDecl *VD = LC.getCapturedVar();
8386 if (LC.getCaptureKind() != LCK_ByRef && !VD->getType()->isPointerType())
8387 continue;
8388 auto It = Captures.find(VD);
8389 assert(It != Captures.end() && "Found lambda capture without field.")((It != Captures.end() && "Found lambda capture without field."
) ? static_cast<void> (0) : __assert_fail ("It != Captures.end() && \"Found lambda capture without field.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8389, __PRETTY_FUNCTION__))
;
8390 LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
8391 if (LC.getCaptureKind() == LCK_ByRef) {
8392 LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second);
8393 LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
8394 VDLVal.getPointer(CGF));
8395 CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
8396 CombinedInfo.Pointers.push_back(VarLValVal.getPointer(CGF));
8397 CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
8398 CGF.getTypeSize(
8399 VD->getType().getCanonicalType().getNonReferenceType()),
8400 CGF.Int64Ty, /*isSigned=*/true));
8401 } else {
8402 RValue VarRVal = CGF.EmitLoadOfLValue(VarLVal, RD->getLocation());
8403 LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
8404 VDLVal.getPointer(CGF));
8405 CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
8406 CombinedInfo.Pointers.push_back(VarRVal.getScalarVal());
8407 CombinedInfo.Sizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0));
8408 }
8409 CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
8410 OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
8411 CombinedInfo.Mappers.push_back(nullptr);
8412 }
8413 }
8414
8415 /// Set correct indices for lambdas captures.
8416 void adjustMemberOfForLambdaCaptures(
8417 const llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers,
8418 MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
8419 MapFlagsArrayTy &Types) const {
8420 for (unsigned I = 0, E = Types.size(); I < E; ++I) {
8421 // Set correct member_of idx for all implicit lambda captures.
8422 if (Types[I] != (OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
8423 OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT))
8424 continue;
8425 llvm::Value *BasePtr = LambdaPointers.lookup(*BasePointers[I]);
8426 assert(BasePtr && "Unable to find base lambda address.")((BasePtr && "Unable to find base lambda address.") ?
static_cast<void> (0) : __assert_fail ("BasePtr && \"Unable to find base lambda address.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8426, __PRETTY_FUNCTION__))
;
8427 int TgtIdx = -1;
8428 for (unsigned J = I; J > 0; --J) {
8429 unsigned Idx = J - 1;
8430 if (Pointers[Idx] != BasePtr)
8431 continue;
8432 TgtIdx = Idx;
8433 break;
8434 }
8435 assert(TgtIdx != -1 && "Unable to find parent lambda.")((TgtIdx != -1 && "Unable to find parent lambda.") ? static_cast
<void> (0) : __assert_fail ("TgtIdx != -1 && \"Unable to find parent lambda.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8435, __PRETTY_FUNCTION__))
;
8436 // All other current entries will be MEMBER_OF the combined entry
8437 // (except for PTR_AND_OBJ entries which do not have a placeholder value
8438 // 0xFFFF in the MEMBER_OF field).
8439 OpenMPOffloadMappingFlags MemberOfFlag = getMemberOfFlag(TgtIdx);
8440 setCorrectMemberOfFlag(Types[I], MemberOfFlag);
8441 }
8442 }
8443
8444 /// Generate the base pointers, section pointers, sizes, map types, and
8445 /// mappers associated to a given capture (all included in \a CombinedInfo).
8446 void generateInfoForCapture(const CapturedStmt::Capture *Cap,
8447 llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
8448 StructRangeInfoTy &PartialStruct) const {
8449 assert(!Cap->capturesVariableArrayType() &&((!Cap->capturesVariableArrayType() && "Not expecting to generate map info for a variable array type!"
) ? static_cast<void> (0) : __assert_fail ("!Cap->capturesVariableArrayType() && \"Not expecting to generate map info for a variable array type!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8450, __PRETTY_FUNCTION__))
20
'?' condition is true
8450 "Not expecting to generate map info for a variable array type!")((!Cap->capturesVariableArrayType() && "Not expecting to generate map info for a variable array type!"
) ? static_cast<void> (0) : __assert_fail ("!Cap->capturesVariableArrayType() && \"Not expecting to generate map info for a variable array type!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8450, __PRETTY_FUNCTION__))
;
8451
8452 // We need to know when we generating information for the first component
8453 const ValueDecl *VD = Cap->capturesThis()
21
'?' condition is true
22
'VD' initialized to a null pointer value
8454 ? nullptr
8455 : Cap->getCapturedVar()->getCanonicalDecl();
8456
8457 // If this declaration appears in a is_device_ptr clause we just have to
8458 // pass the pointer by value. If it is a reference to a declaration, we just
8459 // pass its value.
8460 if (DevPointersMap.count(VD)) {
23
Assuming the condition is false
24
Taking false branch
8461 CombinedInfo.BasePointers.emplace_back(Arg, VD);
8462 CombinedInfo.Pointers.push_back(Arg);
8463 CombinedInfo.Sizes.push_back(
8464 CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
8465 CGF.Int64Ty, /*isSigned=*/true));
8466 CombinedInfo.Types.push_back(OMP_MAP_LITERAL | OMP_MAP_TARGET_PARAM);
8467 CombinedInfo.Mappers.push_back(nullptr);
8468 return;
8469 }
8470
8471 using MapData =
8472 std::tuple<OMPClauseMappableExprCommon::MappableExprComponentListRef,
8473 OpenMPMapClauseKind, ArrayRef<OpenMPMapModifierKind>, bool,
8474 const ValueDecl *>;
8475 SmallVector<MapData, 4> DeclComponentLists;
8476 assert(CurDir.is<const OMPExecutableDirective *>() &&((CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive") ? static_cast<void> (
0) : __assert_fail ("CurDir.is<const OMPExecutableDirective *>() && \"Expect a executable directive\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8477, __PRETTY_FUNCTION__))
25
'?' condition is true
8477 "Expect a executable directive")((CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive") ? static_cast<void> (
0) : __assert_fail ("CurDir.is<const OMPExecutableDirective *>() && \"Expect a executable directive\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8477, __PRETTY_FUNCTION__))
;
8478 const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
8479 for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>()) {
8480 for (const auto L : C->decl_component_lists(VD)) {
8481 const ValueDecl *VDecl, *Mapper;
8482 OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
8483 std::tie(VDecl, Components, Mapper) = L;
8484 assert(VDecl == VD && "We got information for the wrong declaration??")((VDecl == VD && "We got information for the wrong declaration??"
) ? static_cast<void> (0) : __assert_fail ("VDecl == VD && \"We got information for the wrong declaration??\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8484, __PRETTY_FUNCTION__))
;
8485 assert(!Components.empty() &&((!Components.empty() && "Not expecting declaration with no component lists."
) ? static_cast<void> (0) : __assert_fail ("!Components.empty() && \"Not expecting declaration with no component lists.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8486, __PRETTY_FUNCTION__))
8486 "Not expecting declaration with no component lists.")((!Components.empty() && "Not expecting declaration with no component lists."
) ? static_cast<void> (0) : __assert_fail ("!Components.empty() && \"Not expecting declaration with no component lists.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8486, __PRETTY_FUNCTION__))
;
8487 DeclComponentLists.emplace_back(Components, C->getMapType(),
8488 C->getMapTypeModifiers(),
8489 C->isImplicit(), Mapper);
8490 }
8491 }
8492
8493 // Find overlapping elements (including the offset from the base element).
8494 llvm::SmallDenseMap<
8495 const MapData *,
8496 llvm::SmallVector<
8497 OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>,
8498 4>
8499 OverlappedData;
8500 size_t Count = 0;
8501 for (const MapData &L : DeclComponentLists) {
26
Assuming '__begin2' is equal to '__end2'
8502 OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
8503 OpenMPMapClauseKind MapType;
8504 ArrayRef<OpenMPMapModifierKind> MapModifiers;
8505 bool IsImplicit;
8506 const ValueDecl *Mapper;
8507 std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper) = L;
8508 ++Count;
8509 for (const MapData &L1 : makeArrayRef(DeclComponentLists).slice(Count)) {
8510 OMPClauseMappableExprCommon::MappableExprComponentListRef Components1;
8511 std::tie(Components1, MapType, MapModifiers, IsImplicit, Mapper) = L1;
8512 auto CI = Components.rbegin();
8513 auto CE = Components.rend();
8514 auto SI = Components1.rbegin();
8515 auto SE = Components1.rend();
8516 for (; CI != CE && SI != SE; ++CI, ++SI) {
8517 if (CI->getAssociatedExpression()->getStmtClass() !=
8518 SI->getAssociatedExpression()->getStmtClass())
8519 break;
8520 // Are we dealing with different variables/fields?
8521 if (CI->getAssociatedDeclaration() != SI->getAssociatedDeclaration())
8522 break;
8523 }
8524 // Found overlapping if, at least for one component, reached the head of
8525 // the components list.
8526 if (CI == CE || SI == SE) {
8527 assert((CI != CE || SI != SE) &&(((CI != CE || SI != SE) && "Unexpected full match of the mapping components."
) ? static_cast<void> (0) : __assert_fail ("(CI != CE || SI != SE) && \"Unexpected full match of the mapping components.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8528, __PRETTY_FUNCTION__))
8528 "Unexpected full match of the mapping components.")(((CI != CE || SI != SE) && "Unexpected full match of the mapping components."
) ? static_cast<void> (0) : __assert_fail ("(CI != CE || SI != SE) && \"Unexpected full match of the mapping components.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8528, __PRETTY_FUNCTION__))
;
8529 const MapData &BaseData = CI == CE ? L : L1;
8530 OMPClauseMappableExprCommon::MappableExprComponentListRef SubData =
8531 SI == SE ? Components : Components1;
8532 auto &OverlappedElements = OverlappedData.FindAndConstruct(&BaseData);
8533 OverlappedElements.getSecond().push_back(SubData);
8534 }
8535 }
8536 }
8537 // Sort the overlapped elements for each item.
8538 llvm::SmallVector<const FieldDecl *, 4> Layout;
8539 if (!OverlappedData.empty()) {
27
Assuming the condition is true
28
Taking true branch
8540 if (const auto *CRD =
8541 VD->getType().getCanonicalType()->getAsCXXRecordDecl())
29
Called C++ object pointer is null
8542 getPlainLayout(CRD, Layout, /*AsBase=*/false);
8543 else {
8544 const auto *RD = VD->getType().getCanonicalType()->getAsRecordDecl();
8545 Layout.append(RD->field_begin(), RD->field_end());
8546 }
8547 }
8548 for (auto &Pair : OverlappedData) {
8549 llvm::sort(
8550 Pair.getSecond(),
8551 [&Layout](
8552 OMPClauseMappableExprCommon::MappableExprComponentListRef First,
8553 OMPClauseMappableExprCommon::MappableExprComponentListRef
8554 Second) {
8555 auto CI = First.rbegin();
8556 auto CE = First.rend();
8557 auto SI = Second.rbegin();
8558 auto SE = Second.rend();
8559 for (; CI != CE && SI != SE; ++CI, ++SI) {
8560 if (CI->getAssociatedExpression()->getStmtClass() !=
8561 SI->getAssociatedExpression()->getStmtClass())
8562 break;
8563 // Are we dealing with different variables/fields?
8564 if (CI->getAssociatedDeclaration() !=
8565 SI->getAssociatedDeclaration())
8566 break;
8567 }
8568
8569 // Lists contain the same elements.
8570 if (CI == CE && SI == SE)
8571 return false;
8572
8573 // List with less elements is less than list with more elements.
8574 if (CI == CE || SI == SE)
8575 return CI == CE;
8576
8577 const auto *FD1 = cast<FieldDecl>(CI->getAssociatedDeclaration());
8578 const auto *FD2 = cast<FieldDecl>(SI->getAssociatedDeclaration());
8579 if (FD1->getParent() == FD2->getParent())
8580 return FD1->getFieldIndex() < FD2->getFieldIndex();
8581 const auto It =
8582 llvm::find_if(Layout, [FD1, FD2](const FieldDecl *FD) {
8583 return FD == FD1 || FD == FD2;
8584 });
8585 return *It == FD1;
8586 });
8587 }
8588
8589 // Associated with a capture, because the mapping flags depend on it.
8590 // Go through all of the elements with the overlapped elements.
8591 for (const auto &Pair : OverlappedData) {
8592 const MapData &L = *Pair.getFirst();
8593 OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
8594 OpenMPMapClauseKind MapType;
8595 ArrayRef<OpenMPMapModifierKind> MapModifiers;
8596 bool IsImplicit;
8597 const ValueDecl *Mapper;
8598 std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper) = L;
8599 ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
8600 OverlappedComponents = Pair.getSecond();
8601 bool IsFirstComponentList = true;
8602 generateInfoForComponentList(
8603 MapType, MapModifiers, llvm::None, Components, CombinedInfo,
8604 PartialStruct, IsFirstComponentList, IsImplicit, Mapper,
8605 /*ForDeviceAddr=*/false, OverlappedComponents);
8606 }
8607 // Go through other elements without overlapped elements.
8608 bool IsFirstComponentList = OverlappedData.empty();
8609 for (const MapData &L : DeclComponentLists) {
8610 OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
8611 OpenMPMapClauseKind MapType;
8612 ArrayRef<OpenMPMapModifierKind> MapModifiers;
8613 bool IsImplicit;
8614 const ValueDecl *Mapper;
8615 std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper) = L;
8616 auto It = OverlappedData.find(&L);
8617 if (It == OverlappedData.end())
8618 generateInfoForComponentList(MapType, MapModifiers, llvm::None,
8619 Components, CombinedInfo, PartialStruct,
8620 IsFirstComponentList, IsImplicit, Mapper);
8621 IsFirstComponentList = false;
8622 }
8623 }
8624
8625 /// Generate the default map information for a given capture \a CI,
8626 /// record field declaration \a RI and captured value \a CV.
8627 void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
8628 const FieldDecl &RI, llvm::Value *CV,
8629 MapCombinedInfoTy &CombinedInfo) const {
8630 bool IsImplicit = true;
8631 // Do the default mapping.
8632 if (CI.capturesThis()) {
8633 CombinedInfo.BasePointers.push_back(CV);
8634 CombinedInfo.Pointers.push_back(CV);
8635 const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
8636 CombinedInfo.Sizes.push_back(
8637 CGF.Builder.CreateIntCast(CGF.getTypeSize(PtrTy->getPointeeType()),
8638 CGF.Int64Ty, /*isSigned=*/true));
8639 // Default map type.
8640 CombinedInfo.Types.push_back(OMP_MAP_TO | OMP_MAP_FROM);
8641 } else if (CI.capturesVariableByCopy()) {
8642 CombinedInfo.BasePointers.push_back(CV);
8643 CombinedInfo.Pointers.push_back(CV);
8644 if (!RI.getType()->isAnyPointerType()) {
8645 // We have to signal to the runtime captures passed by value that are
8646 // not pointers.
8647 CombinedInfo.Types.push_back(OMP_MAP_LITERAL);
8648 CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
8649 CGF.getTypeSize(RI.getType()), CGF.Int64Ty, /*isSigned=*/true));
8650 } else {
8651 // Pointers are implicitly mapped with a zero size and no flags
8652 // (other than first map that is added for all implicit maps).
8653 CombinedInfo.Types.push_back(OMP_MAP_NONE);
8654 CombinedInfo.Sizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
8655 }
8656 const VarDecl *VD = CI.getCapturedVar();
8657 auto I = FirstPrivateDecls.find(VD);
8658 if (I != FirstPrivateDecls.end())
8659 IsImplicit = I->getSecond();
8660 } else {
8661 assert(CI.capturesVariable() && "Expected captured reference.")((CI.capturesVariable() && "Expected captured reference."
) ? static_cast<void> (0) : __assert_fail ("CI.capturesVariable() && \"Expected captured reference.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8661, __PRETTY_FUNCTION__))
;
8662 const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
8663 QualType ElementType = PtrTy->getPointeeType();
8664 CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
8665 CGF.getTypeSize(ElementType), CGF.Int64Ty, /*isSigned=*/true));
8666 // The default map type for a scalar/complex type is 'to' because by
8667 // default the value doesn't have to be retrieved. For an aggregate
8668 // type, the default is 'tofrom'.
8669 CombinedInfo.Types.push_back(getMapModifiersForPrivateClauses(CI));
8670 const VarDecl *VD = CI.getCapturedVar();
8671 auto I = FirstPrivateDecls.find(VD);
8672 if (I != FirstPrivateDecls.end() &&
8673 VD->getType().isConstant(CGF.getContext())) {
8674 llvm::Constant *Addr =
8675 CGF.CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(CGF, VD);
8676 // Copy the value of the original variable to the new global copy.
8677 CGF.Builder.CreateMemCpy(
8678 CGF.MakeNaturalAlignAddrLValue(Addr, ElementType).getAddress(CGF),
8679 Address(CV, CGF.getContext().getTypeAlignInChars(ElementType)),
8680 CombinedInfo.Sizes.back(), /*IsVolatile=*/false);
8681 // Use new global variable as the base pointers.
8682 CombinedInfo.BasePointers.push_back(Addr);
8683 CombinedInfo.Pointers.push_back(Addr);
8684 } else {
8685 CombinedInfo.BasePointers.push_back(CV);
8686 if (I != FirstPrivateDecls.end() && ElementType->isAnyPointerType()) {
8687 Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
8688 CV, ElementType, CGF.getContext().getDeclAlign(VD),
8689 AlignmentSource::Decl));
8690 CombinedInfo.Pointers.push_back(PtrAddr.getPointer());
8691 } else {
8692 CombinedInfo.Pointers.push_back(CV);
8693 }
8694 }
8695 if (I != FirstPrivateDecls.end())
8696 IsImplicit = I->getSecond();
8697 }
8698 // Every default map produces a single argument which is a target parameter.
8699 CombinedInfo.Types.back() |= OMP_MAP_TARGET_PARAM;
8700
8701 // Add flag stating this is an implicit map.
8702 if (IsImplicit)
8703 CombinedInfo.Types.back() |= OMP_MAP_IMPLICIT;
8704
8705 // No user-defined mapper for default mapping.
8706 CombinedInfo.Mappers.push_back(nullptr);
8707 }
8708};
8709} // anonymous namespace
8710
8711/// Emit the arrays used to pass the captures and map information to the
8712/// offloading runtime library. If there is no map or capture information,
8713/// return nullptr by reference.
8714static void
8715emitOffloadingArrays(CodeGenFunction &CGF,
8716 MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
8717 CGOpenMPRuntime::TargetDataInfo &Info) {
8718 CodeGenModule &CGM = CGF.CGM;
8719 ASTContext &Ctx = CGF.getContext();
8720
8721 // Reset the array information.
8722 Info.clearArrayInfo();
8723 Info.NumberOfPtrs = CombinedInfo.BasePointers.size();
8724
8725 if (Info.NumberOfPtrs) {
8726 // Detect if we have any capture size requiring runtime evaluation of the
8727 // size so that a constant array could be eventually used.
8728 bool hasRuntimeEvaluationCaptureSize = false;
8729 for (llvm::Value *S : CombinedInfo.Sizes)
8730 if (!isa<llvm::Constant>(S)) {
8731 hasRuntimeEvaluationCaptureSize = true;
8732 break;
8733 }
8734
8735 llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
8736 QualType PointerArrayType = Ctx.getConstantArrayType(
8737 Ctx.VoidPtrTy, PointerNumAP, nullptr, ArrayType::Normal,
8738 /*IndexTypeQuals=*/0);
8739
8740 Info.BasePointersArray =
8741 CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
8742 Info.PointersArray =
8743 CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
8744 Address MappersArray =
8745 CGF.CreateMemTemp(PointerArrayType, ".offload_mappers");
8746 Info.MappersArray = MappersArray.getPointer();
8747
8748 // If we don't have any VLA types or other types that require runtime
8749 // evaluation, we can use a constant array for the map sizes, otherwise we
8750 // need to fill up the arrays as we do for the pointers.
8751 QualType Int64Ty =
8752 Ctx.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
8753 if (hasRuntimeEvaluationCaptureSize) {
8754 QualType SizeArrayType = Ctx.getConstantArrayType(
8755 Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
8756 /*IndexTypeQuals=*/0);
8757 Info.SizesArray =
8758 CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
8759 } else {
8760 // We expect all the sizes to be constant, so we collect them to create
8761 // a constant array.
8762 SmallVector<llvm::Constant *, 16> ConstSizes;
8763 for (llvm::Value *S : CombinedInfo.Sizes)
8764 ConstSizes.push_back(cast<llvm::Constant>(S));
8765
8766 auto *SizesArrayInit = llvm::ConstantArray::get(
8767 llvm::ArrayType::get(CGM.Int64Ty, ConstSizes.size()), ConstSizes);
8768 std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
8769 auto *SizesArrayGbl = new llvm::GlobalVariable(
8770 CGM.getModule(), SizesArrayInit->getType(),
8771 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
8772 SizesArrayInit, Name);
8773 SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
8774 Info.SizesArray = SizesArrayGbl;
8775 }
8776
8777 // The map types are always constant so we don't need to generate code to
8778 // fill arrays. Instead, we create an array constant.
8779 SmallVector<uint64_t, 4> Mapping(CombinedInfo.Types.size(), 0);
8780 llvm::copy(CombinedInfo.Types, Mapping.begin());
8781 llvm::Constant *MapTypesArrayInit =
8782 llvm::ConstantDataArray::get(CGF.Builder.getContext(), Mapping);
8783 std::string MaptypesName =
8784 CGM.getOpenMPRuntime().getName({"offload_maptypes"});
8785 auto *MapTypesArrayGbl = new llvm::GlobalVariable(
8786 CGM.getModule(), MapTypesArrayInit->getType(),
8787 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
8788 MapTypesArrayInit, MaptypesName);
8789 MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
8790 Info.MapTypesArray = MapTypesArrayGbl;
8791
8792 // If there's a present map type modifier, it must not be applied to the end
8793 // of a region, so generate a separate map type array in that case.
8794 if (Info.separateBeginEndCalls()) {
8795 bool EndMapTypesDiffer = false;
8796 for (uint64_t &Type : Mapping) {
8797 if (Type & MappableExprsHandler::OMP_MAP_PRESENT) {
8798 Type &= ~MappableExprsHandler::OMP_MAP_PRESENT;
8799 EndMapTypesDiffer = true;
8800 }
8801 }
8802 if (EndMapTypesDiffer) {
8803 MapTypesArrayInit =
8804 llvm::ConstantDataArray::get(CGF.Builder.getContext(), Mapping);
8805 MaptypesName = CGM.getOpenMPRuntime().getName({"offload_maptypes"});
8806 MapTypesArrayGbl = new llvm::GlobalVariable(
8807 CGM.getModule(), MapTypesArrayInit->getType(),
8808 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
8809 MapTypesArrayInit, MaptypesName);
8810 MapTypesArrayGbl->setUnnamedAddr(
8811 llvm::GlobalValue::UnnamedAddr::Global);
8812 Info.MapTypesArrayEnd = MapTypesArrayGbl;
8813 }
8814 }
8815
8816 for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
8817 llvm::Value *BPVal = *CombinedInfo.BasePointers[I];
8818 llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
8819 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
8820 Info.BasePointersArray, 0, I);
8821 BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
8822 BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
8823 Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
8824 CGF.Builder.CreateStore(BPVal, BPAddr);
8825
8826 if (Info.requiresDevicePointerInfo())
8827 if (const ValueDecl *DevVD =
8828 CombinedInfo.BasePointers[I].getDevicePtrDecl())
8829 Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);
8830
8831 llvm::Value *PVal = CombinedInfo.Pointers[I];
8832 llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
8833 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
8834 Info.PointersArray, 0, I);
8835 P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
8836 P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
8837 Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
8838 CGF.Builder.CreateStore(PVal, PAddr);
8839
8840 if (hasRuntimeEvaluationCaptureSize) {
8841 llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
8842 llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
8843 Info.SizesArray,
8844 /*Idx0=*/0,
8845 /*Idx1=*/I);
8846 Address SAddr(S, Ctx.getTypeAlignInChars(Int64Ty));
8847 CGF.Builder.CreateStore(CGF.Builder.CreateIntCast(CombinedInfo.Sizes[I],
8848 CGM.Int64Ty,
8849 /*isSigned=*/true),
8850 SAddr);
8851 }
8852
8853 // Fill up the mapper array.
8854 llvm::Value *MFunc = llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
8855 if (CombinedInfo.Mappers[I]) {
8856 MFunc = CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
8857 cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I]));
8858 MFunc = CGF.Builder.CreatePointerCast(MFunc, CGM.VoidPtrTy);
8859 Info.HasMapper = true;
8860 }
8861 Address MAddr = CGF.Builder.CreateConstArrayGEP(MappersArray, I);
8862 CGF.Builder.CreateStore(MFunc, MAddr);
8863 }
8864 }
8865}
8866
8867/// Emit the arguments to be passed to the runtime library based on the
8868/// arrays of base pointers, pointers, sizes, map types, and mappers. If
8869/// ForEndCall, emit map types to be passed for the end of the region instead of
8870/// the beginning.
8871static void emitOffloadingArraysArgument(
8872 CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
8873 llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
8874 llvm::Value *&MapTypesArrayArg, llvm::Value *&MappersArrayArg,
8875 CGOpenMPRuntime::TargetDataInfo &Info, bool ForEndCall = false) {
8876 assert((!ForEndCall || Info.separateBeginEndCalls()) &&(((!ForEndCall || Info.separateBeginEndCalls()) && "expected region end call to runtime only when end call is separate"
) ? static_cast<void> (0) : __assert_fail ("(!ForEndCall || Info.separateBeginEndCalls()) && \"expected region end call to runtime only when end call is separate\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8877, __PRETTY_FUNCTION__))
8877 "expected region end call to runtime only when end call is separate")(((!ForEndCall || Info.separateBeginEndCalls()) && "expected region end call to runtime only when end call is separate"
) ? static_cast<void> (0) : __assert_fail ("(!ForEndCall || Info.separateBeginEndCalls()) && \"expected region end call to runtime only when end call is separate\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 8877, __PRETTY_FUNCTION__))
;
8878 CodeGenModule &CGM = CGF.CGM;
8879 if (Info.NumberOfPtrs) {
8880 BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
8881 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
8882 Info.BasePointersArray,
8883 /*Idx0=*/0, /*Idx1=*/0);
8884 PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
8885 llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
8886 Info.PointersArray,
8887 /*Idx0=*/0,
8888 /*Idx1=*/0);
8889 SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
8890 llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs), Info.SizesArray,
8891 /*Idx0=*/0, /*Idx1=*/0);
8892 MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
8893 llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
8894 ForEndCall && Info.MapTypesArrayEnd ? Info.MapTypesArrayEnd
8895 : Info.MapTypesArray,
8896 /*Idx0=*/0,
8897 /*Idx1=*/0);
8898 MappersArrayArg =
8899 Info.HasMapper
8900 ? CGF.Builder.CreatePointerCast(Info.MappersArray, CGM.VoidPtrPtrTy)
8901 : llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
8902 } else {
8903 BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
8904 PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
8905 SizesArrayArg = llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
8906 MapTypesArrayArg =
8907 llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
8908 MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
8909 }
8910}
8911
8912/// Check for inner distribute directive.
8913static const OMPExecutableDirective *
8914getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
8915 const auto *CS = D.getInnermostCapturedStmt();
8916 const auto *Body =
8917 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
8918 const Stmt *ChildStmt =
8919 CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
8920
8921 if (const auto *NestedDir =
8922 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
8923 OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
8924 switch (D.getDirectiveKind()) {
8925 case OMPD_target:
8926 if (isOpenMPDistributeDirective(DKind))
8927 return NestedDir;
8928 if (DKind == OMPD_teams) {
8929 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
8930 /*IgnoreCaptured=*/true);
8931 if (!Body)
8932 return nullptr;
8933 ChildStmt = CGOpenMPSIMDRuntime::getSingleCompoundChild(Ctx, Body);
8934 if (const auto *NND =
8935 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
8936 DKind = NND->getDirectiveKind();
8937 if (isOpenMPDistributeDirective(DKind))
8938 return NND;
8939 }
8940 }
8941 return nullptr;
8942 case OMPD_target_teams:
8943 if (isOpenMPDistributeDirective(DKind))
8944 return NestedDir;
8945 return nullptr;
8946 case OMPD_target_parallel:
8947 case OMPD_target_simd:
8948 case OMPD_target_parallel_for:
8949 case OMPD_target_parallel_for_simd:
8950 return nullptr;
8951 case OMPD_target_teams_distribute:
8952 case OMPD_target_teams_distribute_simd:
8953 case OMPD_target_teams_distribute_parallel_for:
8954 case OMPD_target_teams_distribute_parallel_for_simd:
8955 case OMPD_parallel:
8956 case OMPD_for:
8957 case OMPD_parallel_for:
8958 case OMPD_parallel_master:
8959 case OMPD_parallel_sections:
8960 case OMPD_for_simd:
8961 case OMPD_parallel_for_simd:
8962 case OMPD_cancel:
8963 case OMPD_cancellation_point:
8964 case OMPD_ordered:
8965 case OMPD_threadprivate:
8966 case OMPD_allocate:
8967 case OMPD_task:
8968 case OMPD_simd:
8969 case OMPD_sections:
8970 case OMPD_section:
8971 case OMPD_single:
8972 case OMPD_master:
8973 case OMPD_critical:
8974 case OMPD_taskyield:
8975 case OMPD_barrier:
8976 case OMPD_taskwait:
8977 case OMPD_taskgroup:
8978 case OMPD_atomic:
8979 case OMPD_flush:
8980 case OMPD_depobj:
8981 case OMPD_scan:
8982 case OMPD_teams:
8983 case OMPD_target_data:
8984 case OMPD_target_exit_data:
8985 case OMPD_target_enter_data:
8986 case OMPD_distribute:
8987 case OMPD_distribute_simd:
8988 case OMPD_distribute_parallel_for:
8989 case OMPD_distribute_parallel_for_simd:
8990 case OMPD_teams_distribute:
8991 case OMPD_teams_distribute_simd:
8992 case OMPD_teams_distribute_parallel_for:
8993 case OMPD_teams_distribute_parallel_for_simd:
8994 case OMPD_target_update:
8995 case OMPD_declare_simd:
8996 case OMPD_declare_variant:
8997 case OMPD_begin_declare_variant:
8998 case OMPD_end_declare_variant:
8999 case OMPD_declare_target:
9000 case OMPD_end_declare_target:
9001 case OMPD_declare_reduction:
9002 case OMPD_declare_mapper:
9003 case OMPD_taskloop:
9004 case OMPD_taskloop_simd:
9005 case OMPD_master_taskloop:
9006 case OMPD_master_taskloop_simd:
9007 case OMPD_parallel_master_taskloop:
9008 case OMPD_parallel_master_taskloop_simd:
9009 case OMPD_requires:
9010 case OMPD_unknown:
9011 default:
9012 llvm_unreachable("Unexpected directive.")::llvm::llvm_unreachable_internal("Unexpected directive.", "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9012)
;
9013 }
9014 }
9015
9016 return nullptr;
9017}
9018
9019/// Emit the user-defined mapper function. The code generation follows the
9020/// pattern in the example below.
9021/// \code
9022/// void .omp_mapper.<type_name>.<mapper_id>.(void *rt_mapper_handle,
9023/// void *base, void *begin,
9024/// int64_t size, int64_t type) {
9025/// // Allocate space for an array section first.
9026/// if (size > 1 && !maptype.IsDelete)
9027/// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
9028/// size*sizeof(Ty), clearToFrom(type));
9029/// // Map members.
9030/// for (unsigned i = 0; i < size; i++) {
9031/// // For each component specified by this mapper:
9032/// for (auto c : all_components) {
9033/// if (c.hasMapper())
9034/// (*c.Mapper())(rt_mapper_handle, c.arg_base, c.arg_begin, c.arg_size,
9035/// c.arg_type);
9036/// else
9037/// __tgt_push_mapper_component(rt_mapper_handle, c.arg_base,
9038/// c.arg_begin, c.arg_size, c.arg_type);
9039/// }
9040/// }
9041/// // Delete the array section.
9042/// if (size > 1 && maptype.IsDelete)
9043/// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
9044/// size*sizeof(Ty), clearToFrom(type));
9045/// }
9046/// \endcode
void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
                                            CodeGenFunction *CGF) {
  // Emit each mapper function at most once; UDMMap caches the result.
  if (UDMMap.count(D) > 0)
    return;
  ASTContext &C = CGM.getContext();
  QualType Ty = D->getType();
  QualType PtrTy = C.getPointerType(Ty).withRestrict();
  QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
  // The variable declared in the 'declare mapper' directive; it is privatized
  // below to refer to the current array element on each loop iteration.
  auto *MapperVarDecl =
      cast<VarDecl>(cast<DeclRefExpr>(D->getMapperVarRef())->getDecl());
  SourceLocation Loc = D->getLocation();
  CharUnits ElementSize = C.getTypeSizeInChars(Ty);

  // Prepare mapper function arguments and attributes. The signature matches
  // the runtime's expected mapper callback:
  //   void mapper(void *handle, void *base, void *begin, int64 size, int64 type)
  ImplicitParamDecl HandleArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                              C.VoidPtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl BaseArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl BeginArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                             C.VoidPtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl SizeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
                            ImplicitParamDecl::Other);
  FunctionArgList Args;
  Args.push_back(&HandleArg);
  Args.push_back(&BaseArg);
  Args.push_back(&BeginArg);
  Args.push_back(&SizeArg);
  Args.push_back(&TypeArg);
  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  // Name the function ".omp_mapper.<mangled type>.<mapper id>." so distinct
  // mappers for the same type do not collide.
  SmallString<64> TyStr;
  llvm::raw_svector_ostream Out(TyStr);
  CGM.getCXXABI().getMangleContext().mangleTypeName(Ty, Out);
  std::string Name = getName({"omp_mapper", TyStr, D->getName()});
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    Name, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
  // Allow the mapper body to be optimized even at -O0 builds.
  Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
  // Start the mapper function code generation.
  CodeGenFunction MapperCGF(CGM);
  MapperCGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  // Compute the starting and end addresses of array elements.
  llvm::Value *Size = MapperCGF.EmitLoadOfScalar(
      MapperCGF.GetAddrOfLocalVar(&SizeArg), /*Volatile=*/false,
      C.getPointerType(Int64Ty), Loc);
  // Convert the size in bytes into the number of array elements.
  // NOTE(review): exact udiv assumes the runtime always passes a byte count
  // that is a multiple of the element size — confirm against the runtime ABI.
  Size = MapperCGF.Builder.CreateExactUDiv(
      Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
  llvm::Value *PtrBegin = MapperCGF.Builder.CreateBitCast(
      MapperCGF.GetAddrOfLocalVar(&BeginArg).getPointer(),
      CGM.getTypes().ConvertTypeForMem(C.getPointerType(PtrTy)));
  llvm::Value *PtrEnd = MapperCGF.Builder.CreateGEP(PtrBegin, Size);
  llvm::Value *MapType = MapperCGF.EmitLoadOfScalar(
      MapperCGF.GetAddrOfLocalVar(&TypeArg), /*Volatile=*/false,
      C.getPointerType(Int64Ty), Loc);
  // Prepare common arguments for array initiation and deletion.
  llvm::Value *Handle = MapperCGF.EmitLoadOfScalar(
      MapperCGF.GetAddrOfLocalVar(&HandleArg),
      /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
  llvm::Value *BaseIn = MapperCGF.EmitLoadOfScalar(
      MapperCGF.GetAddrOfLocalVar(&BaseArg),
      /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
  llvm::Value *BeginIn = MapperCGF.EmitLoadOfScalar(
      MapperCGF.GetAddrOfLocalVar(&BeginArg),
      /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);

  // Emit array initiation if this is an array section and \p MapType indicates
  // that memory allocation is required.
  llvm::BasicBlock *HeadBB = MapperCGF.createBasicBlock("omp.arraymap.head");
  emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
                             ElementSize, HeadBB, /*IsInit=*/true);

  // Emit a for loop to iterate through SizeArg of elements and map all of them.

  // Emit the loop header block.
  MapperCGF.EmitBlock(HeadBB);
  llvm::BasicBlock *BodyBB = MapperCGF.createBasicBlock("omp.arraymap.body");
  llvm::BasicBlock *DoneBB = MapperCGF.createBasicBlock("omp.done");
  // Evaluate whether the initial condition is satisfied (skip the loop when
  // the section is empty).
  llvm::Value *IsEmpty =
      MapperCGF.Builder.CreateICmpEQ(PtrBegin, PtrEnd, "omp.arraymap.isempty");
  MapperCGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
  llvm::BasicBlock *EntryBB = MapperCGF.Builder.GetInsertBlock();

  // Emit the loop body block. PtrPHI tracks the current element; its
  // back-edge incoming value is added after the body is emitted.
  MapperCGF.EmitBlock(BodyBB);
  llvm::BasicBlock *LastBB = BodyBB;
  llvm::PHINode *PtrPHI = MapperCGF.Builder.CreatePHI(
      PtrBegin->getType(), 2, "omp.arraymap.ptrcurrent");
  PtrPHI->addIncoming(PtrBegin, EntryBB);
  Address PtrCurrent =
      Address(PtrPHI, MapperCGF.GetAddrOfLocalVar(&BeginArg)
                          .getAlignment()
                          .alignmentOfArrayElement(ElementSize));
  // Privatize the declared variable of mapper to be the current array element.
  CodeGenFunction::OMPPrivateScope Scope(MapperCGF);
  Scope.addPrivate(MapperVarDecl, [&MapperCGF, PtrCurrent, PtrTy]() {
    return MapperCGF
        .EmitLoadOfPointerLValue(PtrCurrent, PtrTy->castAs<PointerType>())
        .getAddress(MapperCGF);
  });
  (void)Scope.Privatize();

  // Get map clause information. Fill up the arrays with all mapped variables.
  MappableExprsHandler::MapCombinedInfoTy Info;
  MappableExprsHandler MEHandler(*D, MapperCGF);
  MEHandler.generateAllInfoForMapper(Info);

  // Call the runtime API __tgt_mapper_num_components to get the number of
  // pre-existing components.
  llvm::Value *OffloadingArgs[] = {Handle};
  llvm::Value *PreviousSize = MapperCGF.EmitRuntimeCall(
      OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
                                            OMPRTL___tgt_mapper_num_components),
      OffloadingArgs);
  // Pre-shift the component count into the MEMBER_OF bit-field position so it
  // can be added directly to a map type below.
  llvm::Value *ShiftedPreviousSize = MapperCGF.Builder.CreateShl(
      PreviousSize,
      MapperCGF.Builder.getInt64(MappableExprsHandler::getFlagMemberOffset()));

  // Fill up the runtime mapper handle for all components.
  for (unsigned I = 0; I < Info.BasePointers.size(); ++I) {
    llvm::Value *CurBaseArg = MapperCGF.Builder.CreateBitCast(
        *Info.BasePointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
    llvm::Value *CurBeginArg = MapperCGF.Builder.CreateBitCast(
        Info.Pointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
    llvm::Value *CurSizeArg = Info.Sizes[I];

    // Extract the MEMBER_OF field from the map type.
    llvm::BasicBlock *MemberBB = MapperCGF.createBasicBlock("omp.member");
    MapperCGF.EmitBlock(MemberBB);
    llvm::Value *OriMapType = MapperCGF.Builder.getInt64(Info.Types[I]);
    llvm::Value *Member = MapperCGF.Builder.CreateAnd(
        OriMapType,
        MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_MEMBER_OF));
    llvm::BasicBlock *MemberCombineBB =
        MapperCGF.createBasicBlock("omp.member.combine");
    llvm::BasicBlock *TypeBB = MapperCGF.createBasicBlock("omp.type");
    llvm::Value *IsMember = MapperCGF.Builder.CreateIsNull(Member);
    MapperCGF.Builder.CreateCondBr(IsMember, TypeBB, MemberCombineBB);
    // Add the number of pre-existing components to the MEMBER_OF field if it
    // is valid.
    MapperCGF.EmitBlock(MemberCombineBB);
    llvm::Value *CombinedMember =
        MapperCGF.Builder.CreateNUWAdd(OriMapType, ShiftedPreviousSize);
    // Do nothing if it is not a member of previous components.
    MapperCGF.EmitBlock(TypeBB);
    llvm::PHINode *MemberMapType =
        MapperCGF.Builder.CreatePHI(CGM.Int64Ty, 4, "omp.membermaptype");
    MemberMapType->addIncoming(OriMapType, MemberBB);
    MemberMapType->addIncoming(CombinedMember, MemberCombineBB);

    // Combine the map type inherited from user-defined mapper with that
    // specified in the program. According to the OMP_MAP_TO and OMP_MAP_FROM
    // bits of the \a MapType, which is the input argument of the mapper
    // function, the following code will set the OMP_MAP_TO and OMP_MAP_FROM
    // bits of MemberMapType.
    // [OpenMP 5.0], 1.2.6. map-type decay.
    //        | alloc |  to   | from  | tofrom | release | delete
    // ----------------------------------------------------------
    // alloc  | alloc | alloc | alloc | alloc  | release | delete
    // to     | alloc |  to   | alloc |   to   | release | delete
    // from   | alloc | alloc | from  |  from  | release | delete
    // tofrom | alloc |  to   | from  | tofrom | release | delete
    llvm::Value *LeftToFrom = MapperCGF.Builder.CreateAnd(
        MapType,
        MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_TO |
                                   MappableExprsHandler::OMP_MAP_FROM));
    llvm::BasicBlock *AllocBB = MapperCGF.createBasicBlock("omp.type.alloc");
    llvm::BasicBlock *AllocElseBB =
        MapperCGF.createBasicBlock("omp.type.alloc.else");
    llvm::BasicBlock *ToBB = MapperCGF.createBasicBlock("omp.type.to");
    llvm::BasicBlock *ToElseBB = MapperCGF.createBasicBlock("omp.type.to.else");
    llvm::BasicBlock *FromBB = MapperCGF.createBasicBlock("omp.type.from");
    llvm::BasicBlock *EndBB = MapperCGF.createBasicBlock("omp.type.end");
    llvm::Value *IsAlloc = MapperCGF.Builder.CreateIsNull(LeftToFrom);
    MapperCGF.Builder.CreateCondBr(IsAlloc, AllocBB, AllocElseBB);
    // In case of alloc, clear OMP_MAP_TO and OMP_MAP_FROM.
    MapperCGF.EmitBlock(AllocBB);
    llvm::Value *AllocMapType = MapperCGF.Builder.CreateAnd(
        MemberMapType,
        MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
                                     MappableExprsHandler::OMP_MAP_FROM)));
    MapperCGF.Builder.CreateBr(EndBB);
    MapperCGF.EmitBlock(AllocElseBB);
    llvm::Value *IsTo = MapperCGF.Builder.CreateICmpEQ(
        LeftToFrom,
        MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_TO));
    MapperCGF.Builder.CreateCondBr(IsTo, ToBB, ToElseBB);
    // In case of to, clear OMP_MAP_FROM.
    MapperCGF.EmitBlock(ToBB);
    llvm::Value *ToMapType = MapperCGF.Builder.CreateAnd(
        MemberMapType,
        MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_FROM));
    MapperCGF.Builder.CreateBr(EndBB);
    MapperCGF.EmitBlock(ToElseBB);
    llvm::Value *IsFrom = MapperCGF.Builder.CreateICmpEQ(
        LeftToFrom,
        MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_FROM));
    MapperCGF.Builder.CreateCondBr(IsFrom, FromBB, EndBB);
    // In case of from, clear OMP_MAP_TO.
    MapperCGF.EmitBlock(FromBB);
    llvm::Value *FromMapType = MapperCGF.Builder.CreateAnd(
        MemberMapType,
        MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_TO));
    // In case of tofrom, do nothing.
    MapperCGF.EmitBlock(EndBB);
    LastBB = EndBB;
    // Merge the four possible decayed map types.
    llvm::PHINode *CurMapType =
        MapperCGF.Builder.CreatePHI(CGM.Int64Ty, 4, "omp.maptype");
    CurMapType->addIncoming(AllocMapType, AllocBB);
    CurMapType->addIncoming(ToMapType, ToBB);
    CurMapType->addIncoming(FromMapType, FromBB);
    // Falling through ToElseBB (tofrom) keeps MemberMapType unchanged.
    CurMapType->addIncoming(MemberMapType, ToElseBB);

    llvm::Value *OffloadingArgs[] = {Handle, CurBaseArg, CurBeginArg,
                                     CurSizeArg, CurMapType};
    if (Info.Mappers[I]) {
      // Call the corresponding mapper function.
      llvm::Function *MapperFunc = getOrCreateUserDefinedMapperFunc(
          cast<OMPDeclareMapperDecl>(Info.Mappers[I]));
      assert(MapperFunc && "Expect a valid mapper function is available.");
      MapperCGF.EmitNounwindRuntimeCall(MapperFunc, OffloadingArgs);
    } else {
      // Call the runtime API __tgt_push_mapper_component to fill up the runtime
      // data structure.
      MapperCGF.EmitRuntimeCall(
          OMPBuilder.getOrCreateRuntimeFunction(
              CGM.getModule(), OMPRTL___tgt_push_mapper_component),
          OffloadingArgs);
    }
  }

  // Update the pointer to point to the next element that needs to be mapped,
  // and check whether we have mapped all elements.
  llvm::Value *PtrNext = MapperCGF.Builder.CreateConstGEP1_32(
      PtrPHI, /*Idx0=*/1, "omp.arraymap.next");
  // Close the loop: the back edge comes from the last block of the body.
  PtrPHI->addIncoming(PtrNext, LastBB);
  llvm::Value *IsDone =
      MapperCGF.Builder.CreateICmpEQ(PtrNext, PtrEnd, "omp.arraymap.isdone");
  llvm::BasicBlock *ExitBB = MapperCGF.createBasicBlock("omp.arraymap.exit");
  MapperCGF.Builder.CreateCondBr(IsDone, ExitBB, BodyBB);

  MapperCGF.EmitBlock(ExitBB);
  // Emit array deletion if this is an array section and \p MapType indicates
  // that deletion is required.
  emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
                             ElementSize, DoneBB, /*IsInit=*/false);

  // Emit the function exit block.
  MapperCGF.EmitBlock(DoneBB, /*IsFinished=*/true);
  MapperCGF.FinishFunction();
  // Cache the finished function and, when emitted from within a function,
  // remember the association so the mapper can be re-registered per function.
  UDMMap.try_emplace(D, Fn);
  if (CGF) {
    auto &Decls = FunctionUDMMap.FindAndConstruct(CGF->CurFn);
    Decls.second.push_back(D);
  }
}
9307
9308/// Emit the array initialization or deletion portion for user-defined mapper
9309/// code generation. First, it evaluates whether an array section is mapped and
9310/// whether the \a MapType instructs to delete this section. If \a IsInit is
9311/// true, and \a MapType indicates to not delete this array, array
9312/// initialization code is generated. If \a IsInit is false, and \a MapType
/// indicates to delete this array, array deletion code is generated.
9314void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
9315 CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *Base,
9316 llvm::Value *Begin, llvm::Value *Size, llvm::Value *MapType,
9317 CharUnits ElementSize, llvm::BasicBlock *ExitBB, bool IsInit) {
9318 StringRef Prefix = IsInit ? ".init" : ".del";
9319
9320 // Evaluate if this is an array section.
9321 llvm::BasicBlock *IsDeleteBB =
9322 MapperCGF.createBasicBlock(getName({"omp.array", Prefix, ".evaldelete"}));
9323 llvm::BasicBlock *BodyBB =
9324 MapperCGF.createBasicBlock(getName({"omp.array", Prefix}));
9325 llvm::Value *IsArray = MapperCGF.Builder.CreateICmpSGE(
9326 Size, MapperCGF.Builder.getInt64(1), "omp.arrayinit.isarray");
9327 MapperCGF.Builder.CreateCondBr(IsArray, IsDeleteBB, ExitBB);
9328
9329 // Evaluate if we are going to delete this section.
9330 MapperCGF.EmitBlock(IsDeleteBB);
9331 llvm::Value *DeleteBit = MapperCGF.Builder.CreateAnd(
9332 MapType,
9333 MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_DELETE));
9334 llvm::Value *DeleteCond;
9335 if (IsInit) {
9336 DeleteCond = MapperCGF.Builder.CreateIsNull(
9337 DeleteBit, getName({"omp.array", Prefix, ".delete"}));
9338 } else {
9339 DeleteCond = MapperCGF.Builder.CreateIsNotNull(
9340 DeleteBit, getName({"omp.array", Prefix, ".delete"}));
9341 }
9342 MapperCGF.Builder.CreateCondBr(DeleteCond, BodyBB, ExitBB);
9343
9344 MapperCGF.EmitBlock(BodyBB);
9345 // Get the array size by multiplying element size and element number (i.e., \p
9346 // Size).
9347 llvm::Value *ArraySize = MapperCGF.Builder.CreateNUWMul(
9348 Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
9349 // Remove OMP_MAP_TO and OMP_MAP_FROM from the map type, so that it achieves
9350 // memory allocation/deletion purpose only.
9351 llvm::Value *MapTypeArg = MapperCGF.Builder.CreateAnd(
9352 MapType,
9353 MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
9354 MappableExprsHandler::OMP_MAP_FROM)));
9355 // Call the runtime API __tgt_push_mapper_component to fill up the runtime
9356 // data structure.
9357 llvm::Value *OffloadingArgs[] = {Handle, Base, Begin, ArraySize, MapTypeArg};
9358 MapperCGF.EmitRuntimeCall(
9359 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
9360 OMPRTL___tgt_push_mapper_component),
9361 OffloadingArgs);
9362}
9363
9364llvm::Function *CGOpenMPRuntime::getOrCreateUserDefinedMapperFunc(
9365 const OMPDeclareMapperDecl *D) {
9366 auto I = UDMMap.find(D);
9367 if (I != UDMMap.end())
9368 return I->second;
9369 emitUserDefinedMapper(D);
9370 return UDMMap.lookup(D);
9371}
9372
9373void CGOpenMPRuntime::emitTargetNumIterationsCall(
9374 CodeGenFunction &CGF, const OMPExecutableDirective &D,
9375 llvm::Value *DeviceID,
9376 llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
9377 const OMPLoopDirective &D)>
9378 SizeEmitter) {
9379 OpenMPDirectiveKind Kind = D.getDirectiveKind();
9380 const OMPExecutableDirective *TD = &D;
9381 // Get nested teams distribute kind directive, if any.
9382 if (!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind))
9383 TD = getNestedDistributeDirective(CGM.getContext(), D);
9384 if (!TD)
9385 return;
9386 const auto *LD = cast<OMPLoopDirective>(TD);
9387 auto &&CodeGen = [LD, DeviceID, SizeEmitter, this](CodeGenFunction &CGF,
9388 PrePostActionTy &) {
9389 if (llvm::Value *NumIterations = SizeEmitter(CGF, *LD)) {
9390 llvm::Value *Args[] = {DeviceID, NumIterations};
9391 CGF.EmitRuntimeCall(
9392 OMPBuilder.getOrCreateRuntimeFunction(
9393 CGM.getModule(), OMPRTL___kmpc_push_target_tripcount),
9394 Args);
9395 }
9396 };
9397 emitInlinedDirective(CGF, OMPD_unknown, CodeGen);
9398}
9399
9400void CGOpenMPRuntime::emitTargetCall(
9401 CodeGenFunction &CGF, const OMPExecutableDirective &D,
9402 llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
9403 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
9404 llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
9405 const OMPLoopDirective &D)>
9406 SizeEmitter) {
9407 if (!CGF.HaveInsertPoint())
1
Taking false branch
9408 return;
9409
9410 assert(OutlinedFn && "Invalid outlined function!")((OutlinedFn && "Invalid outlined function!") ? static_cast
<void> (0) : __assert_fail ("OutlinedFn && \"Invalid outlined function!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9410, __PRETTY_FUNCTION__))
;
2
Assuming 'OutlinedFn' is non-null
3
'?' condition is true
9411
9412 const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>();
9413 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
9414 const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
9415 auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
9416 PrePostActionTy &) {
9417 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
9418 };
9419 emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen);
9420
9421 CodeGenFunction::OMPTargetDataInfo InputInfo;
9422 llvm::Value *MapTypesArray = nullptr;
9423 // Fill up the pointer arrays and transfer execution to the device.
9424 auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
9425 &MapTypesArray, &CS, RequiresOuterTask, &CapturedVars,
9426 SizeEmitter](CodeGenFunction &CGF, PrePostActionTy &) {
9427 if (Device.getInt() == OMPC_DEVICE_ancestor) {
9428 // Reverse offloading is not supported, so just execute on the host.
9429 if (RequiresOuterTask) {
9430 CapturedVars.clear();
9431 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
9432 }
9433 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
9434 return;
9435 }
9436
9437 // On top of the arrays that were filled up, the target offloading call
9438 // takes as arguments the device id as well as the host pointer. The host
9439 // pointer is used by the runtime library to identify the current target
9440 // region, so it only has to be unique and not necessarily point to
9441 // anything. It could be the pointer to the outlined function that
9442 // implements the target region, but we aren't using that so that the
9443 // compiler doesn't need to keep that, and could therefore inline the host
9444 // function if proven worthwhile during optimization.
9445
9446 // From this point on, we need to have an ID of the target region defined.
9447 assert(OutlinedFnID && "Invalid outlined function ID!")((OutlinedFnID && "Invalid outlined function ID!") ? static_cast
<void> (0) : __assert_fail ("OutlinedFnID && \"Invalid outlined function ID!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9447, __PRETTY_FUNCTION__))
;
9448
9449 // Emit device ID if any.
9450 llvm::Value *DeviceID;
9451 if (Device.getPointer()) {
9452 assert((Device.getInt() == OMPC_DEVICE_unknown ||(((Device.getInt() == OMPC_DEVICE_unknown || Device.getInt() ==
OMPC_DEVICE_device_num) && "Expected device_num modifier."
) ? static_cast<void> (0) : __assert_fail ("(Device.getInt() == OMPC_DEVICE_unknown || Device.getInt() == OMPC_DEVICE_device_num) && \"Expected device_num modifier.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9454, __PRETTY_FUNCTION__))
9453 Device.getInt() == OMPC_DEVICE_device_num) &&(((Device.getInt() == OMPC_DEVICE_unknown || Device.getInt() ==
OMPC_DEVICE_device_num) && "Expected device_num modifier."
) ? static_cast<void> (0) : __assert_fail ("(Device.getInt() == OMPC_DEVICE_unknown || Device.getInt() == OMPC_DEVICE_device_num) && \"Expected device_num modifier.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9454, __PRETTY_FUNCTION__))
9454 "Expected device_num modifier.")(((Device.getInt() == OMPC_DEVICE_unknown || Device.getInt() ==
OMPC_DEVICE_device_num) && "Expected device_num modifier."
) ? static_cast<void> (0) : __assert_fail ("(Device.getInt() == OMPC_DEVICE_unknown || Device.getInt() == OMPC_DEVICE_device_num) && \"Expected device_num modifier.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9454, __PRETTY_FUNCTION__))
;
9455 llvm::Value *DevVal = CGF.EmitScalarExpr(Device.getPointer());
9456 DeviceID =
9457 CGF.Builder.CreateIntCast(DevVal, CGF.Int64Ty, /*isSigned=*/true);
9458 } else {
9459 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
9460 }
9461
9462 // Emit the number of elements in the offloading arrays.
9463 llvm::Value *PointerNum =
9464 CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
9465
9466 // Return value of the runtime offloading call.
9467 llvm::Value *Return;
9468
9469 llvm::Value *NumTeams = emitNumTeamsForTargetDirective(CGF, D);
9470 llvm::Value *NumThreads = emitNumThreadsForTargetDirective(CGF, D);
9471
9472 // Emit tripcount for the target loop-based directive.
9473 emitTargetNumIterationsCall(CGF, D, DeviceID, SizeEmitter);
9474
9475 bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
9476 // The target region is an outlined function launched by the runtime
9477 // via calls __tgt_target() or __tgt_target_teams().
9478 //
9479 // __tgt_target() launches a target region with one team and one thread,
9480 // executing a serial region. This master thread may in turn launch
9481 // more threads within its team upon encountering a parallel region,
9482 // however, no additional teams can be launched on the device.
9483 //
9484 // __tgt_target_teams() launches a target region with one or more teams,
9485 // each with one or more threads. This call is required for target
9486 // constructs such as:
9487 // 'target teams'
9488 // 'target' / 'teams'
9489 // 'target teams distribute parallel for'
9490 // 'target parallel'
9491 // and so on.
9492 //
9493 // Note that on the host and CPU targets, the runtime implementation of
9494 // these calls simply call the outlined function without forking threads.
9495 // The outlined functions themselves have runtime calls to
9496 // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
9497 // the compiler in emitTeamsCall() and emitParallelCall().
9498 //
9499 // In contrast, on the NVPTX target, the implementation of
9500 // __tgt_target_teams() launches a GPU kernel with the requested number
9501 // of teams and threads so no additional calls to the runtime are required.
9502 if (NumTeams) {
9503 // If we have NumTeams defined this means that we have an enclosed teams
9504 // region. Therefore we also expect to have NumThreads defined. These two
9505 // values should be defined in the presence of a teams directive,
9506 // regardless of having any clauses associated. If the user is using teams
9507 // but no clauses, these two values will be the default that should be
9508 // passed to the runtime library - a 32-bit integer with the value zero.
9509 assert(NumThreads && "Thread limit expression should be available along "((NumThreads && "Thread limit expression should be available along "
"with number of teams.") ? static_cast<void> (0) : __assert_fail
("NumThreads && \"Thread limit expression should be available along \" \"with number of teams.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9510, __PRETTY_FUNCTION__))
9510 "with number of teams.")((NumThreads && "Thread limit expression should be available along "
"with number of teams.") ? static_cast<void> (0) : __assert_fail
("NumThreads && \"Thread limit expression should be available along \" \"with number of teams.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9510, __PRETTY_FUNCTION__))
;
9511 llvm::Value *OffloadingArgs[] = {DeviceID,
9512 OutlinedFnID,
9513 PointerNum,
9514 InputInfo.BasePointersArray.getPointer(),
9515 InputInfo.PointersArray.getPointer(),
9516 InputInfo.SizesArray.getPointer(),
9517 MapTypesArray,
9518 InputInfo.MappersArray.getPointer(),
9519 NumTeams,
9520 NumThreads};
9521 Return = CGF.EmitRuntimeCall(
9522 OMPBuilder.getOrCreateRuntimeFunction(
9523 CGM.getModule(), HasNowait
9524 ? OMPRTL___tgt_target_teams_nowait_mapper
9525 : OMPRTL___tgt_target_teams_mapper),
9526 OffloadingArgs);
9527 } else {
9528 llvm::Value *OffloadingArgs[] = {DeviceID,
9529 OutlinedFnID,
9530 PointerNum,
9531 InputInfo.BasePointersArray.getPointer(),
9532 InputInfo.PointersArray.getPointer(),
9533 InputInfo.SizesArray.getPointer(),
9534 MapTypesArray,
9535 InputInfo.MappersArray.getPointer()};
9536 Return = CGF.EmitRuntimeCall(
9537 OMPBuilder.getOrCreateRuntimeFunction(
9538 CGM.getModule(), HasNowait ? OMPRTL___tgt_target_nowait_mapper
9539 : OMPRTL___tgt_target_mapper),
9540 OffloadingArgs);
9541 }
9542
9543 // Check the error code and execute the host version if required.
9544 llvm::BasicBlock *OffloadFailedBlock =
9545 CGF.createBasicBlock("omp_offload.failed");
9546 llvm::BasicBlock *OffloadContBlock =
9547 CGF.createBasicBlock("omp_offload.cont");
9548 llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
9549 CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
9550
9551 CGF.EmitBlock(OffloadFailedBlock);
9552 if (RequiresOuterTask) {
9553 CapturedVars.clear();
9554 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
9555 }
9556 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
9557 CGF.EmitBranch(OffloadContBlock);
9558
9559 CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
9560 };
9561
9562 // Notify that the host version must be executed.
9563 auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars,
9564 RequiresOuterTask](CodeGenFunction &CGF,
9565 PrePostActionTy &) {
9566 if (RequiresOuterTask) {
9567 CapturedVars.clear();
9568 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
9569 }
9570 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
9571 };
9572
9573 auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
9574 &CapturedVars, RequiresOuterTask,
9575 &CS](CodeGenFunction &CGF, PrePostActionTy &) {
9576 // Fill up the arrays with all the captured variables.
9577 MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
9578
9579 // Get mappable expression information.
9580 MappableExprsHandler MEHandler(D, CGF);
9581 llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
9582 llvm::DenseSet<CanonicalDeclPtr<const Decl>> MappedVarSet;
9583
9584 auto RI = CS.getCapturedRecordDecl()->field_begin();
9585 auto CV = CapturedVars.begin();
9586 for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
17
Loop condition is true. Entering loop body
9587 CE = CS.capture_end();
9588 CI != CE; ++CI, ++RI, ++CV) {
9589 MappableExprsHandler::MapCombinedInfoTy CurInfo;
9590 MappableExprsHandler::StructRangeInfoTy PartialStruct;
9591
9592 // VLA sizes are passed to the outlined region by copy and do not have map
9593 // information associated.
9594 if (CI->capturesVariableArrayType()) {
18
Taking false branch
9595 CurInfo.BasePointers.push_back(*CV);
9596 CurInfo.Pointers.push_back(*CV);
9597 CurInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
9598 CGF.getTypeSize(RI->getType()), CGF.Int64Ty, /*isSigned=*/true));
9599 // Copy to the device as an argument. No need to retrieve it.
9600 CurInfo.Types.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
9601 MappableExprsHandler::OMP_MAP_TARGET_PARAM |
9602 MappableExprsHandler::OMP_MAP_IMPLICIT);
9603 CurInfo.Mappers.push_back(nullptr);
9604 } else {
9605 // If we have any information in the map clause, we use it, otherwise we
9606 // just do a default mapping.
9607 MEHandler.generateInfoForCapture(CI, *CV, CurInfo, PartialStruct);
19
Calling 'MappableExprsHandler::generateInfoForCapture'
9608 if (!CI->capturesThis())
9609 MappedVarSet.insert(CI->getCapturedVar());
9610 else
9611 MappedVarSet.insert(nullptr);
9612 if (CurInfo.BasePointers.empty())
9613 MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurInfo);
9614 // Generate correct mapping for variables captured by reference in
9615 // lambdas.
9616 if (CI->capturesVariable())
9617 MEHandler.generateInfoForLambdaCaptures(CI->getCapturedVar(), *CV,
9618 CurInfo, LambdaPointers);
9619 }
9620 // We expect to have at least an element of information for this capture.
9621 assert(!CurInfo.BasePointers.empty() &&((!CurInfo.BasePointers.empty() && "Non-existing map pointer for capture!"
) ? static_cast<void> (0) : __assert_fail ("!CurInfo.BasePointers.empty() && \"Non-existing map pointer for capture!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9622, __PRETTY_FUNCTION__))
9622 "Non-existing map pointer for capture!")((!CurInfo.BasePointers.empty() && "Non-existing map pointer for capture!"
) ? static_cast<void> (0) : __assert_fail ("!CurInfo.BasePointers.empty() && \"Non-existing map pointer for capture!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9622, __PRETTY_FUNCTION__))
;
9623 assert(CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&((CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
CurInfo.BasePointers.size() == CurInfo.Types.size() &&
CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
"Inconsistent map information sizes!") ? static_cast<void
> (0) : __assert_fail ("CurInfo.BasePointers.size() == CurInfo.Pointers.size() && CurInfo.BasePointers.size() == CurInfo.Sizes.size() && CurInfo.BasePointers.size() == CurInfo.Types.size() && CurInfo.BasePointers.size() == CurInfo.Mappers.size() && \"Inconsistent map information sizes!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9627, __PRETTY_FUNCTION__))
9624 CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&((CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
CurInfo.BasePointers.size() == CurInfo.Types.size() &&
CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
"Inconsistent map information sizes!") ? static_cast<void
> (0) : __assert_fail ("CurInfo.BasePointers.size() == CurInfo.Pointers.size() && CurInfo.BasePointers.size() == CurInfo.Sizes.size() && CurInfo.BasePointers.size() == CurInfo.Types.size() && CurInfo.BasePointers.size() == CurInfo.Mappers.size() && \"Inconsistent map information sizes!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9627, __PRETTY_FUNCTION__))
9625 CurInfo.BasePointers.size() == CurInfo.Types.size() &&((CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
CurInfo.BasePointers.size() == CurInfo.Types.size() &&
CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
"Inconsistent map information sizes!") ? static_cast<void
> (0) : __assert_fail ("CurInfo.BasePointers.size() == CurInfo.Pointers.size() && CurInfo.BasePointers.size() == CurInfo.Sizes.size() && CurInfo.BasePointers.size() == CurInfo.Types.size() && CurInfo.BasePointers.size() == CurInfo.Mappers.size() && \"Inconsistent map information sizes!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9627, __PRETTY_FUNCTION__))
9626 CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&((CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
CurInfo.BasePointers.size() == CurInfo.Types.size() &&
CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
"Inconsistent map information sizes!") ? static_cast<void
> (0) : __assert_fail ("CurInfo.BasePointers.size() == CurInfo.Pointers.size() && CurInfo.BasePointers.size() == CurInfo.Sizes.size() && CurInfo.BasePointers.size() == CurInfo.Types.size() && CurInfo.BasePointers.size() == CurInfo.Mappers.size() && \"Inconsistent map information sizes!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9627, __PRETTY_FUNCTION__))
9627 "Inconsistent map information sizes!")((CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
CurInfo.BasePointers.size() == CurInfo.Types.size() &&
CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
"Inconsistent map information sizes!") ? static_cast<void
> (0) : __assert_fail ("CurInfo.BasePointers.size() == CurInfo.Pointers.size() && CurInfo.BasePointers.size() == CurInfo.Sizes.size() && CurInfo.BasePointers.size() == CurInfo.Types.size() && CurInfo.BasePointers.size() == CurInfo.Mappers.size() && \"Inconsistent map information sizes!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9627, __PRETTY_FUNCTION__))
;
9628
9629 // If there is an entry in PartialStruct it means we have a struct with
9630 // individual members mapped. Emit an extra combined entry.
9631 if (PartialStruct.Base.isValid())
9632 MEHandler.emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct);
9633
9634 // We need to append the results of this capture to what we already have.
9635 CombinedInfo.append(CurInfo);
9636 }
9637 // Adjust MEMBER_OF flags for the lambdas captures.
9638 MEHandler.adjustMemberOfForLambdaCaptures(
9639 LambdaPointers, CombinedInfo.BasePointers, CombinedInfo.Pointers,
9640 CombinedInfo.Types);
9641 // Map any list items in a map clause that were not captures because they
9642 // weren't referenced within the construct.
9643 MEHandler.generateAllInfo(CombinedInfo, /*NotTargetParams=*/true,
9644 MappedVarSet);
9645
9646 TargetDataInfo Info;
9647 // Fill up the arrays and create the arguments.
9648 emitOffloadingArrays(CGF, CombinedInfo, Info);
9649 emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
9650 Info.PointersArray, Info.SizesArray,
9651 Info.MapTypesArray, Info.MappersArray, Info);
9652 InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
9653 InputInfo.BasePointersArray =
9654 Address(Info.BasePointersArray, CGM.getPointerAlign());
9655 InputInfo.PointersArray =
9656 Address(Info.PointersArray, CGM.getPointerAlign());
9657 InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
9658 InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
9659 MapTypesArray = Info.MapTypesArray;
9660 if (RequiresOuterTask)
9661 CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
9662 else
9663 emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
9664 };
9665
9666 auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
9667 CodeGenFunction &CGF, PrePostActionTy &) {
9668 if (RequiresOuterTask) {
9669 CodeGenFunction::OMPTargetDataInfo InputInfo;
9670 CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
9671 } else {
9672 emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
9673 }
9674 };
9675
9676 // If we have a target function ID it means that we need to support
9677 // offloading, otherwise, just execute on the host. We need to execute on host
9678 // regardless of the conditional in the if clause if, e.g., the user do not
9679 // specify target triples.
9680 if (OutlinedFnID) {
4
Assuming 'OutlinedFnID' is non-null
5
Taking true branch
9681 if (IfCond) {
6
Assuming 'IfCond' is non-null
7
Taking true branch
9682 emitIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
8
Calling 'CGOpenMPRuntime::emitIfClause'
9683 } else {
9684 RegionCodeGenTy ThenRCG(TargetThenGen);
9685 ThenRCG(CGF);
9686 }
9687 } else {
9688 RegionCodeGenTy ElseRCG(TargetElseGen);
9689 ElseRCG(CGF);
9690 }
9691}
9692
9693void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
9694 StringRef ParentName) {
9695 if (!S)
9696 return;
9697
9698 // Codegen OMP target directives that offload compute to the device.
9699 bool RequiresDeviceCodegen =
9700 isa<OMPExecutableDirective>(S) &&
9701 isOpenMPTargetExecutionDirective(
9702 cast<OMPExecutableDirective>(S)->getDirectiveKind());
9703
9704 if (RequiresDeviceCodegen) {
9705 const auto &E = *cast<OMPExecutableDirective>(S);
9706 unsigned DeviceID;
9707 unsigned FileID;
9708 unsigned Line;
9709 getTargetEntryUniqueInfo(CGM.getContext(), E.getBeginLoc(), DeviceID,
9710 FileID, Line);
9711
9712 // Is this a target region that should not be emitted as an entry point? If
9713 // so just signal we are done with this target region.
9714 if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID,
9715 ParentName, Line))
9716 return;
9717
9718 switch (E.getDirectiveKind()) {
9719 case OMPD_target:
9720 CodeGenFunction::EmitOMPTargetDeviceFunction(CGM, ParentName,
9721 cast<OMPTargetDirective>(E));
9722 break;
9723 case OMPD_target_parallel:
9724 CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
9725 CGM, ParentName, cast<OMPTargetParallelDirective>(E));
9726 break;
9727 case OMPD_target_teams:
9728 CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
9729 CGM, ParentName, cast<OMPTargetTeamsDirective>(E));
9730 break;
9731 case OMPD_target_teams_distribute:
9732 CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
9733 CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(E));
9734 break;
9735 case OMPD_target_teams_distribute_simd:
9736 CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
9737 CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(E));
9738 break;
9739 case OMPD_target_parallel_for:
9740 CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
9741 CGM, ParentName, cast<OMPTargetParallelForDirective>(E));
9742 break;
9743 case OMPD_target_parallel_for_simd:
9744 CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
9745 CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(E));
9746 break;
9747 case OMPD_target_simd:
9748 CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
9749 CGM, ParentName, cast<OMPTargetSimdDirective>(E));
9750 break;
9751 case OMPD_target_teams_distribute_parallel_for:
9752 CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
9753 CGM, ParentName,
9754 cast<OMPTargetTeamsDistributeParallelForDirective>(E));
9755 break;
9756 case OMPD_target_teams_distribute_parallel_for_simd:
9757 CodeGenFunction::
9758 EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
9759 CGM, ParentName,
9760 cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
9761 break;
9762 case OMPD_parallel:
9763 case OMPD_for:
9764 case OMPD_parallel_for:
9765 case OMPD_parallel_master:
9766 case OMPD_parallel_sections:
9767 case OMPD_for_simd:
9768 case OMPD_parallel_for_simd:
9769 case OMPD_cancel:
9770 case OMPD_cancellation_point:
9771 case OMPD_ordered:
9772 case OMPD_threadprivate:
9773 case OMPD_allocate:
9774 case OMPD_task:
9775 case OMPD_simd:
9776 case OMPD_sections:
9777 case OMPD_section:
9778 case OMPD_single:
9779 case OMPD_master:
9780 case OMPD_critical:
9781 case OMPD_taskyield:
9782 case OMPD_barrier:
9783 case OMPD_taskwait:
9784 case OMPD_taskgroup:
9785 case OMPD_atomic:
9786 case OMPD_flush:
9787 case OMPD_depobj:
9788 case OMPD_scan:
9789 case OMPD_teams:
9790 case OMPD_target_data:
9791 case OMPD_target_exit_data:
9792 case OMPD_target_enter_data:
9793 case OMPD_distribute:
9794 case OMPD_distribute_simd:
9795 case OMPD_distribute_parallel_for:
9796 case OMPD_distribute_parallel_for_simd:
9797 case OMPD_teams_distribute:
9798 case OMPD_teams_distribute_simd:
9799 case OMPD_teams_distribute_parallel_for:
9800 case OMPD_teams_distribute_parallel_for_simd:
9801 case OMPD_target_update:
9802 case OMPD_declare_simd:
9803 case OMPD_declare_variant:
9804 case OMPD_begin_declare_variant:
9805 case OMPD_end_declare_variant:
9806 case OMPD_declare_target:
9807 case OMPD_end_declare_target:
9808 case OMPD_declare_reduction:
9809 case OMPD_declare_mapper:
9810 case OMPD_taskloop:
9811 case OMPD_taskloop_simd:
9812 case OMPD_master_taskloop:
9813 case OMPD_master_taskloop_simd:
9814 case OMPD_parallel_master_taskloop:
9815 case OMPD_parallel_master_taskloop_simd:
9816 case OMPD_requires:
9817 case OMPD_unknown:
9818 default:
9819 llvm_unreachable("Unknown target directive for OpenMP device codegen.")::llvm::llvm_unreachable_internal("Unknown target directive for OpenMP device codegen."
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9819)
;
9820 }
9821 return;
9822 }
9823
9824 if (const auto *E = dyn_cast<OMPExecutableDirective>(S)) {
9825 if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
9826 return;
9827
9828 scanForTargetRegionsFunctions(E->getRawStmt(), ParentName);
9829 return;
9830 }
9831
9832 // If this is a lambda function, look into its body.
9833 if (const auto *L = dyn_cast<LambdaExpr>(S))
9834 S = L->getBody();
9835
9836 // Keep looking for target regions recursively.
9837 for (const Stmt *II : S->children())
9838 scanForTargetRegionsFunctions(II, ParentName);
9839}
9840
9841bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
9842 // If emitting code for the host, we do not process FD here. Instead we do
9843 // the normal code generation.
9844 if (!CGM.getLangOpts().OpenMPIsDevice) {
9845 if (const auto *FD = dyn_cast<FunctionDecl>(GD.getDecl())) {
9846 Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
9847 OMPDeclareTargetDeclAttr::getDeviceType(FD);
9848 // Do not emit device_type(nohost) functions for the host.
9849 if (DevTy && *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
9850 return true;
9851 }
9852 return false;
9853 }
9854
9855 const ValueDecl *VD = cast<ValueDecl>(GD.getDecl());
9856 // Try to detect target regions in the function.
9857 if (const auto *FD = dyn_cast<FunctionDecl>(VD)) {
9858 StringRef Name = CGM.getMangledName(GD);
9859 scanForTargetRegionsFunctions(FD->getBody(), Name);
9860 Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
9861 OMPDeclareTargetDeclAttr::getDeviceType(FD);
9862 // Do not emit device_type(nohost) functions for the host.
9863 if (DevTy && *DevTy == OMPDeclareTargetDeclAttr::DT_Host)
9864 return true;
9865 }
9866
9867 // Do not to emit function if it is not marked as declare target.
9868 return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
9869 AlreadyEmittedTargetDecls.count(VD) == 0;
9870}
9871
9872bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
9873 if (!CGM.getLangOpts().OpenMPIsDevice)
9874 return false;
9875
9876 // Check if there are Ctors/Dtors in this declaration and look for target
9877 // regions in it. We use the complete variant to produce the kernel name
9878 // mangling.
9879 QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
9880 if (const auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
9881 for (const CXXConstructorDecl *Ctor : RD->ctors()) {
9882 StringRef ParentName =
9883 CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
9884 scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
9885 }
9886 if (const CXXDestructorDecl *Dtor = RD->getDestructor()) {
9887 StringRef ParentName =
9888 CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
9889 scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
9890 }
9891 }
9892
9893 // Do not to emit variable if it is not marked as declare target.
9894 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
9895 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
9896 cast<VarDecl>(GD.getDecl()));
9897 if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
9898 (*Res == OMPDeclareTargetDeclAttr::MT_To &&
9899 HasRequiresUnifiedSharedMemory)) {
9900 DeferredGlobalVariables.insert(cast<VarDecl>(GD.getDecl()));
9901 return true;
9902 }
9903 return false;
9904}
9905
9906llvm::Constant *
9907CGOpenMPRuntime::registerTargetFirstprivateCopy(CodeGenFunction &CGF,
9908 const VarDecl *VD) {
9909 assert(VD->getType().isConstant(CGM.getContext()) &&((VD->getType().isConstant(CGM.getContext()) && "Expected constant variable."
) ? static_cast<void> (0) : __assert_fail ("VD->getType().isConstant(CGM.getContext()) && \"Expected constant variable.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9910, __PRETTY_FUNCTION__))
9910 "Expected constant variable.")((VD->getType().isConstant(CGM.getContext()) && "Expected constant variable."
) ? static_cast<void> (0) : __assert_fail ("VD->getType().isConstant(CGM.getContext()) && \"Expected constant variable.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9910, __PRETTY_FUNCTION__))
;
9911 StringRef VarName;
9912 llvm::Constant *Addr;
9913 llvm::GlobalValue::LinkageTypes Linkage;
9914 QualType Ty = VD->getType();
9915 SmallString<128> Buffer;
9916 {
9917 unsigned DeviceID;
9918 unsigned FileID;
9919 unsigned Line;
9920 getTargetEntryUniqueInfo(CGM.getContext(), VD->getLocation(), DeviceID,
9921 FileID, Line);
9922 llvm::raw_svector_ostream OS(Buffer);
9923 OS << "__omp_offloading_firstprivate_" << llvm::format("_%x", DeviceID)
9924 << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
9925 VarName = OS.str();
9926 }
9927 Linkage = llvm::GlobalValue::InternalLinkage;
9928 Addr =
9929 getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(Ty), VarName,
9930 getDefaultFirstprivateAddressSpace());
9931 cast<llvm::GlobalValue>(Addr)->setLinkage(Linkage);
9932 CharUnits VarSize = CGM.getContext().getTypeSizeInChars(Ty);
9933 CGM.addCompilerUsedGlobal(cast<llvm::GlobalValue>(Addr));
9934 OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
9935 VarName, Addr, VarSize,
9936 OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo, Linkage);
9937 return Addr;
9938}
9939
9940void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
9941 llvm::Constant *Addr) {
9942 if (CGM.getLangOpts().OMPTargetTriples.empty() &&
9943 !CGM.getLangOpts().OpenMPIsDevice)
9944 return;
9945 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
9946 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
9947 if (!Res) {
9948 if (CGM.getLangOpts().OpenMPIsDevice) {
9949 // Register non-target variables being emitted in device code (debug info
9950 // may cause this).
9951 StringRef VarName = CGM.getMangledName(VD);
9952 EmittedNonTargetVariables.try_emplace(VarName, Addr);
9953 }
9954 return;
9955 }
9956 // Register declare target variables.
9957 OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
9958 StringRef VarName;
9959 CharUnits VarSize;
9960 llvm::GlobalValue::LinkageTypes Linkage;
9961
9962 if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
9963 !HasRequiresUnifiedSharedMemory) {
9964 Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
9965 VarName = CGM.getMangledName(VD);
9966 if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
9967 VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
9968 assert(!VarSize.isZero() && "Expected non-zero size of the variable")((!VarSize.isZero() && "Expected non-zero size of the variable"
) ? static_cast<void> (0) : __assert_fail ("!VarSize.isZero() && \"Expected non-zero size of the variable\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9968, __PRETTY_FUNCTION__))
;
9969 } else {
9970 VarSize = CharUnits::Zero();
9971 }
9972 Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
9973 // Temp solution to prevent optimizations of the internal variables.
9974 if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
9975 std::string RefName = getName({VarName, "ref"});
9976 if (!CGM.GetGlobalValue(RefName)) {
9977 llvm::Constant *AddrRef =
9978 getOrCreateInternalVariable(Addr->getType(), RefName);
9979 auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
9980 GVAddrRef->setConstant(/*Val=*/true);
9981 GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
9982 GVAddrRef->setInitializer(Addr);
9983 CGM.addCompilerUsedGlobal(GVAddrRef);
9984 }
9985 }
9986 } else {
9987 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||((((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr
::MT_To && HasRequiresUnifiedSharedMemory)) &&
"Declare target attribute must link or to with unified memory."
) ? static_cast<void> (0) : __assert_fail ("((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To && HasRequiresUnifiedSharedMemory)) && \"Declare target attribute must link or to with unified memory.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9990, __PRETTY_FUNCTION__))
9988 (*Res == OMPDeclareTargetDeclAttr::MT_To &&((((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr
::MT_To && HasRequiresUnifiedSharedMemory)) &&
"Declare target attribute must link or to with unified memory."
) ? static_cast<void> (0) : __assert_fail ("((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To && HasRequiresUnifiedSharedMemory)) && \"Declare target attribute must link or to with unified memory.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9990, __PRETTY_FUNCTION__))
9989 HasRequiresUnifiedSharedMemory)) &&((((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr
::MT_To && HasRequiresUnifiedSharedMemory)) &&
"Declare target attribute must link or to with unified memory."
) ? static_cast<void> (0) : __assert_fail ("((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To && HasRequiresUnifiedSharedMemory)) && \"Declare target attribute must link or to with unified memory.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9990, __PRETTY_FUNCTION__))
9990 "Declare target attribute must link or to with unified memory.")((((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr
::MT_To && HasRequiresUnifiedSharedMemory)) &&
"Declare target attribute must link or to with unified memory."
) ? static_cast<void> (0) : __assert_fail ("((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To && HasRequiresUnifiedSharedMemory)) && \"Declare target attribute must link or to with unified memory.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 9990, __PRETTY_FUNCTION__))
;
9991 if (*Res == OMPDeclareTargetDeclAttr::MT_Link)
9992 Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
9993 else
9994 Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
9995
9996 if (CGM.getLangOpts().OpenMPIsDevice) {
9997 VarName = Addr->getName();
9998 Addr = nullptr;
9999 } else {
10000 VarName = getAddrOfDeclareTargetVar(VD).getName();
10001 Addr = cast<llvm::Constant>(getAddrOfDeclareTargetVar(VD).getPointer());
10002 }
10003 VarSize = CGM.getPointerSize();
10004 Linkage = llvm::GlobalValue::WeakAnyLinkage;
10005 }
10006
10007 OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
10008 VarName, Addr, VarSize, Flags, Linkage);
10009}
10010
10011bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
10012 if (isa<FunctionDecl>(GD.getDecl()) ||
10013 isa<OMPDeclareReductionDecl>(GD.getDecl()))
10014 return emitTargetFunctions(GD);
10015
10016 return emitTargetGlobalVariable(GD);
10017}
10018
10019void CGOpenMPRuntime::emitDeferredTargetDecls() const {
10020 for (const VarDecl *VD : DeferredGlobalVariables) {
10021 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
10022 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
10023 if (!Res)
10024 continue;
10025 if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
10026 !HasRequiresUnifiedSharedMemory) {
10027 CGM.EmitGlobal(VD);
10028 } else {
10029 assert((*Res == OMPDeclareTargetDeclAttr::MT_Link ||(((*Res == OMPDeclareTargetDeclAttr::MT_Link || (*Res == OMPDeclareTargetDeclAttr
::MT_To && HasRequiresUnifiedSharedMemory)) &&
"Expected link clause or to clause with unified memory.") ? static_cast
<void> (0) : __assert_fail ("(*Res == OMPDeclareTargetDeclAttr::MT_Link || (*Res == OMPDeclareTargetDeclAttr::MT_To && HasRequiresUnifiedSharedMemory)) && \"Expected link clause or to clause with unified memory.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10032, __PRETTY_FUNCTION__))
10030 (*Res == OMPDeclareTargetDeclAttr::MT_To &&(((*Res == OMPDeclareTargetDeclAttr::MT_Link || (*Res == OMPDeclareTargetDeclAttr
::MT_To && HasRequiresUnifiedSharedMemory)) &&
"Expected link clause or to clause with unified memory.") ? static_cast
<void> (0) : __assert_fail ("(*Res == OMPDeclareTargetDeclAttr::MT_Link || (*Res == OMPDeclareTargetDeclAttr::MT_To && HasRequiresUnifiedSharedMemory)) && \"Expected link clause or to clause with unified memory.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10032, __PRETTY_FUNCTION__))
10031 HasRequiresUnifiedSharedMemory)) &&(((*Res == OMPDeclareTargetDeclAttr::MT_Link || (*Res == OMPDeclareTargetDeclAttr
::MT_To && HasRequiresUnifiedSharedMemory)) &&
"Expected link clause or to clause with unified memory.") ? static_cast
<void> (0) : __assert_fail ("(*Res == OMPDeclareTargetDeclAttr::MT_Link || (*Res == OMPDeclareTargetDeclAttr::MT_To && HasRequiresUnifiedSharedMemory)) && \"Expected link clause or to clause with unified memory.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10032, __PRETTY_FUNCTION__))
10032 "Expected link clause or to clause with unified memory.")(((*Res == OMPDeclareTargetDeclAttr::MT_Link || (*Res == OMPDeclareTargetDeclAttr
::MT_To && HasRequiresUnifiedSharedMemory)) &&
"Expected link clause or to clause with unified memory.") ? static_cast
<void> (0) : __assert_fail ("(*Res == OMPDeclareTargetDeclAttr::MT_Link || (*Res == OMPDeclareTargetDeclAttr::MT_To && HasRequiresUnifiedSharedMemory)) && \"Expected link clause or to clause with unified memory.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10032, __PRETTY_FUNCTION__))
;
10033 (void)CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
10034 }
10035 }
10036}
10037
10038void CGOpenMPRuntime::adjustTargetSpecificDataForLambdas(
10039 CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
10040 assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&((isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
" Expected target-based directive.") ? static_cast<void>
(0) : __assert_fail ("isOpenMPTargetExecutionDirective(D.getDirectiveKind()) && \" Expected target-based directive.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10041, __PRETTY_FUNCTION__))
10041 " Expected target-based directive.")((isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
" Expected target-based directive.") ? static_cast<void>
(0) : __assert_fail ("isOpenMPTargetExecutionDirective(D.getDirectiveKind()) && \" Expected target-based directive.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10041, __PRETTY_FUNCTION__))
;
10042}
10043
10044void CGOpenMPRuntime::processRequiresDirective(const OMPRequiresDecl *D) {
10045 for (const OMPClause *Clause : D->clauselists()) {
10046 if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
10047 HasRequiresUnifiedSharedMemory = true;
10048 } else if (const auto *AC =
10049 dyn_cast<OMPAtomicDefaultMemOrderClause>(Clause)) {
10050 switch (AC->getAtomicDefaultMemOrderKind()) {
10051 case OMPC_ATOMIC_DEFAULT_MEM_ORDER_acq_rel:
10052 RequiresAtomicOrdering = llvm::AtomicOrdering::AcquireRelease;
10053 break;
10054 case OMPC_ATOMIC_DEFAULT_MEM_ORDER_seq_cst:
10055 RequiresAtomicOrdering = llvm::AtomicOrdering::SequentiallyConsistent;
10056 break;
10057 case OMPC_ATOMIC_DEFAULT_MEM_ORDER_relaxed:
10058 RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
10059 break;
10060 case OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown:
10061 break;
10062 }
10063 }
10064 }
10065}
10066
10067llvm::AtomicOrdering CGOpenMPRuntime::getDefaultMemoryOrdering() const {
10068 return RequiresAtomicOrdering;
10069}
10070
10071bool CGOpenMPRuntime::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
10072 LangAS &AS) {
10073 if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
10074 return false;
10075 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
10076 switch(A->getAllocatorType()) {
10077 case OMPAllocateDeclAttr::OMPNullMemAlloc:
10078 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
10079 // Not supported, fallback to the default mem space.
10080 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
10081 case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
10082 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
10083 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
10084 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
10085 case OMPAllocateDeclAttr::OMPConstMemAlloc:
10086 case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
10087 AS = LangAS::Default;
10088 return true;
10089 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
10090 llvm_unreachable("Expected predefined allocator for the variables with the "::llvm::llvm_unreachable_internal("Expected predefined allocator for the variables with the "
"static storage.", "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10091)
10091 "static storage.")::llvm::llvm_unreachable_internal("Expected predefined allocator for the variables with the "
"static storage.", "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10091)
;
10092 }
10093 return false;
10094}
10095
10096bool CGOpenMPRuntime::hasRequiresUnifiedSharedMemory() const {
10097 return HasRequiresUnifiedSharedMemory;
10098}
10099
10100CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
10101 CodeGenModule &CGM)
10102 : CGM(CGM) {
10103 if (CGM.getLangOpts().OpenMPIsDevice) {
10104 SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
10105 CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
10106 }
10107}
10108
10109CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
10110 if (CGM.getLangOpts().OpenMPIsDevice)
10111 CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
10112}
10113
10114bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
10115 if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
10116 return true;
10117
10118 const auto *D = cast<FunctionDecl>(GD.getDecl());
10119 // Do not to emit function if it is marked as declare target as it was already
10120 // emitted.
10121 if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
10122 if (D->hasBody() && AlreadyEmittedTargetDecls.count(D) == 0) {
10123 if (auto *F = dyn_cast_or_null<llvm::Function>(
10124 CGM.GetGlobalValue(CGM.getMangledName(GD))))
10125 return !F->isDeclaration();
10126 return false;
10127 }
10128 return true;
10129 }
10130
10131 return !AlreadyEmittedTargetDecls.insert(D).second;
10132}
10133
10134llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
10135 // If we don't have entries or if we are emitting code for the device, we
10136 // don't need to do anything.
10137 if (CGM.getLangOpts().OMPTargetTriples.empty() ||
10138 CGM.getLangOpts().OpenMPSimd || CGM.getLangOpts().OpenMPIsDevice ||
10139 (OffloadEntriesInfoManager.empty() &&
10140 !HasEmittedDeclareTargetRegion &&
10141 !HasEmittedTargetRegion))
10142 return nullptr;
10143
10144 // Create and register the function that handles the requires directives.
10145 ASTContext &C = CGM.getContext();
10146
10147 llvm::Function *RequiresRegFn;
10148 {
10149 CodeGenFunction CGF(CGM);
10150 const auto &FI = CGM.getTypes().arrangeNullaryFunction();
10151 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
10152 std::string ReqName = getName({"omp_offloading", "requires_reg"});
10153 RequiresRegFn = CGM.CreateGlobalInitOrCleanUpFunction(FTy, ReqName, FI);
10154 CGF.StartFunction(GlobalDecl(), C.VoidTy, RequiresRegFn, FI, {});
10155 OpenMPOffloadingRequiresDirFlags Flags = OMP_REQ_NONE;
10156 // TODO: check for other requires clauses.
10157 // The requires directive takes effect only when a target region is
10158 // present in the compilation unit. Otherwise it is ignored and not
10159 // passed to the runtime. This avoids the runtime from throwing an error
10160 // for mismatching requires clauses across compilation units that don't
10161 // contain at least 1 target region.
10162 assert((HasEmittedTargetRegion ||(((HasEmittedTargetRegion || HasEmittedDeclareTargetRegion ||
!OffloadEntriesInfoManager.empty()) && "Target or declare target region expected."
) ? static_cast<void> (0) : __assert_fail ("(HasEmittedTargetRegion || HasEmittedDeclareTargetRegion || !OffloadEntriesInfoManager.empty()) && \"Target or declare target region expected.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10165, __PRETTY_FUNCTION__))
10163 HasEmittedDeclareTargetRegion ||(((HasEmittedTargetRegion || HasEmittedDeclareTargetRegion ||
!OffloadEntriesInfoManager.empty()) && "Target or declare target region expected."
) ? static_cast<void> (0) : __assert_fail ("(HasEmittedTargetRegion || HasEmittedDeclareTargetRegion || !OffloadEntriesInfoManager.empty()) && \"Target or declare target region expected.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10165, __PRETTY_FUNCTION__))
10164 !OffloadEntriesInfoManager.empty()) &&(((HasEmittedTargetRegion || HasEmittedDeclareTargetRegion ||
!OffloadEntriesInfoManager.empty()) && "Target or declare target region expected."
) ? static_cast<void> (0) : __assert_fail ("(HasEmittedTargetRegion || HasEmittedDeclareTargetRegion || !OffloadEntriesInfoManager.empty()) && \"Target or declare target region expected.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10165, __PRETTY_FUNCTION__))
10165 "Target or declare target region expected.")(((HasEmittedTargetRegion || HasEmittedDeclareTargetRegion ||
!OffloadEntriesInfoManager.empty()) && "Target or declare target region expected."
) ? static_cast<void> (0) : __assert_fail ("(HasEmittedTargetRegion || HasEmittedDeclareTargetRegion || !OffloadEntriesInfoManager.empty()) && \"Target or declare target region expected.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10165, __PRETTY_FUNCTION__))
;
10166 if (HasRequiresUnifiedSharedMemory)
10167 Flags = OMP_REQ_UNIFIED_SHARED_MEMORY;
10168 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
10169 CGM.getModule(), OMPRTL___tgt_register_requires),
10170 llvm::ConstantInt::get(CGM.Int64Ty, Flags));
10171 CGF.FinishFunction();
10172 }
10173 return RequiresRegFn;
10174}
10175
10176void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
10177 const OMPExecutableDirective &D,
10178 SourceLocation Loc,
10179 llvm::Function *OutlinedFn,
10180 ArrayRef<llvm::Value *> CapturedVars) {
10181 if (!CGF.HaveInsertPoint())
10182 return;
10183
10184 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
10185 CodeGenFunction::RunCleanupsScope Scope(CGF);
10186
10187 // Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn);
10188 llvm::Value *Args[] = {
10189 RTLoc,
10190 CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
10191 CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
10192 llvm::SmallVector<llvm::Value *, 16> RealArgs;
10193 RealArgs.append(std::begin(Args), std::end(Args));
10194 RealArgs.append(CapturedVars.begin(), CapturedVars.end());
10195
10196 llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
10197 CGM.getModule(), OMPRTL___kmpc_fork_teams);
10198 CGF.EmitRuntimeCall(RTLFn, RealArgs);
10199}
10200
10201void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
10202 const Expr *NumTeams,
10203 const Expr *ThreadLimit,
10204 SourceLocation Loc) {
10205 if (!CGF.HaveInsertPoint())
10206 return;
10207
10208 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
10209
10210 llvm::Value *NumTeamsVal =
10211 NumTeams
10212 ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
10213 CGF.CGM.Int32Ty, /* isSigned = */ true)
10214 : CGF.Builder.getInt32(0);
10215
10216 llvm::Value *ThreadLimitVal =
10217 ThreadLimit
10218 ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
10219 CGF.CGM.Int32Ty, /* isSigned = */ true)
10220 : CGF.Builder.getInt32(0);
10221
10222 // Build call __kmpc_push_num_teamss(&loc, global_tid, num_teams, thread_limit)
10223 llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
10224 ThreadLimitVal};
10225 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
10226 CGM.getModule(), OMPRTL___kmpc_push_num_teams),
10227 PushNumTeamsArgs);
10228}
10229
10230void CGOpenMPRuntime::emitTargetDataCalls(
10231 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
10232 const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
10233 if (!CGF.HaveInsertPoint())
10234 return;
10235
10236 // Action used to replace the default codegen action and turn privatization
10237 // off.
10238 PrePostActionTy NoPrivAction;
10239
10240 // Generate the code for the opening of the data environment. Capture all the
10241 // arguments of the runtime call by reference because they are used in the
10242 // closing of the region.
10243 auto &&BeginThenGen = [this, &D, Device, &Info,
10244 &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
10245 // Fill up the arrays with all the mapped variables.
10246 MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
10247
10248 // Get map clause information.
10249 MappableExprsHandler MEHandler(D, CGF);
10250 MEHandler.generateAllInfo(CombinedInfo);
10251
10252 // Fill up the arrays and create the arguments.
10253 emitOffloadingArrays(CGF, CombinedInfo, Info);
10254
10255 llvm::Value *BasePointersArrayArg = nullptr;
10256 llvm::Value *PointersArrayArg = nullptr;
10257 llvm::Value *SizesArrayArg = nullptr;
10258 llvm::Value *MapTypesArrayArg = nullptr;
10259 llvm::Value *MappersArrayArg = nullptr;
10260 emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
10261 SizesArrayArg, MapTypesArrayArg,
10262 MappersArrayArg, Info, /*ForEndCall=*/false);
10263
10264 // Emit device ID if any.
10265 llvm::Value *DeviceID = nullptr;
10266 if (Device) {
10267 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
10268 CGF.Int64Ty, /*isSigned=*/true);
10269 } else {
10270 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
10271 }
10272
10273 // Emit the number of elements in the offloading arrays.
10274 llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
10275
10276 llvm::Value *OffloadingArgs[] = {
10277 DeviceID, PointerNum, BasePointersArrayArg, PointersArrayArg,
10278 SizesArrayArg, MapTypesArrayArg, MappersArrayArg};
10279 CGF.EmitRuntimeCall(
10280 OMPBuilder.getOrCreateRuntimeFunction(
10281 CGM.getModule(), OMPRTL___tgt_target_data_begin_mapper),
10282 OffloadingArgs);
10283
10284 // If device pointer privatization is required, emit the body of the region
10285 // here. It will have to be duplicated: with and without privatization.
10286 if (!Info.CaptureDeviceAddrMap.empty())
10287 CodeGen(CGF);
10288 };
10289
10290 // Generate code for the closing of the data region.
10291 auto &&EndThenGen = [this, Device, &Info](CodeGenFunction &CGF,
10292 PrePostActionTy &) {
10293 assert(Info.isValid() && "Invalid data environment closing arguments.")((Info.isValid() && "Invalid data environment closing arguments."
) ? static_cast<void> (0) : __assert_fail ("Info.isValid() && \"Invalid data environment closing arguments.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10293, __PRETTY_FUNCTION__))
;
10294
10295 llvm::Value *BasePointersArrayArg = nullptr;
10296 llvm::Value *PointersArrayArg = nullptr;
10297 llvm::Value *SizesArrayArg = nullptr;
10298 llvm::Value *MapTypesArrayArg = nullptr;
10299 llvm::Value *MappersArrayArg = nullptr;
10300 emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
10301 SizesArrayArg, MapTypesArrayArg,
10302 MappersArrayArg, Info, /*ForEndCall=*/true);
10303
10304 // Emit device ID if any.
10305 llvm::Value *DeviceID = nullptr;
10306 if (Device) {
10307 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
10308 CGF.Int64Ty, /*isSigned=*/true);
10309 } else {
10310 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
10311 }
10312
10313 // Emit the number of elements in the offloading arrays.
10314 llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
10315
10316 llvm::Value *OffloadingArgs[] = {
10317 DeviceID, PointerNum, BasePointersArrayArg, PointersArrayArg,
10318 SizesArrayArg, MapTypesArrayArg, MappersArrayArg};
10319 CGF.EmitRuntimeCall(
10320 OMPBuilder.getOrCreateRuntimeFunction(
10321 CGM.getModule(), OMPRTL___tgt_target_data_end_mapper),
10322 OffloadingArgs);
10323 };
10324
10325 // If we need device pointer privatization, we need to emit the body of the
10326 // region with no privatization in the 'else' branch of the conditional.
10327 // Otherwise, we don't have to do anything.
10328 auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
10329 PrePostActionTy &) {
10330 if (!Info.CaptureDeviceAddrMap.empty()) {
10331 CodeGen.setAction(NoPrivAction);
10332 CodeGen(CGF);
10333 }
10334 };
10335
10336 // We don't have to do anything to close the region if the if clause evaluates
10337 // to false.
10338 auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
10339
10340 if (IfCond) {
10341 emitIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
10342 } else {
10343 RegionCodeGenTy RCG(BeginThenGen);
10344 RCG(CGF);
10345 }
10346
10347 // If we don't require privatization of device pointers, we emit the body in
10348 // between the runtime calls. This avoids duplicating the body code.
10349 if (Info.CaptureDeviceAddrMap.empty()) {
10350 CodeGen.setAction(NoPrivAction);
10351 CodeGen(CGF);
10352 }
10353
10354 if (IfCond) {
10355 emitIfClause(CGF, IfCond, EndThenGen, EndElseGen);
10356 } else {
10357 RegionCodeGenTy RCG(EndThenGen);
10358 RCG(CGF);
10359 }
10360}
10361
10362void CGOpenMPRuntime::emitTargetDataStandAloneCall(
10363 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
10364 const Expr *Device) {
10365 if (!CGF.HaveInsertPoint())
10366 return;
10367
10368 assert((isa<OMPTargetEnterDataDirective>(D) ||(((isa<OMPTargetEnterDataDirective>(D) || isa<OMPTargetExitDataDirective
>(D) || isa<OMPTargetUpdateDirective>(D)) &&
"Expecting either target enter, exit data, or update directives."
) ? static_cast<void> (0) : __assert_fail ("(isa<OMPTargetEnterDataDirective>(D) || isa<OMPTargetExitDataDirective>(D) || isa<OMPTargetUpdateDirective>(D)) && \"Expecting either target enter, exit data, or update directives.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10371, __PRETTY_FUNCTION__))
10369 isa<OMPTargetExitDataDirective>(D) ||(((isa<OMPTargetEnterDataDirective>(D) || isa<OMPTargetExitDataDirective
>(D) || isa<OMPTargetUpdateDirective>(D)) &&
"Expecting either target enter, exit data, or update directives."
) ? static_cast<void> (0) : __assert_fail ("(isa<OMPTargetEnterDataDirective>(D) || isa<OMPTargetExitDataDirective>(D) || isa<OMPTargetUpdateDirective>(D)) && \"Expecting either target enter, exit data, or update directives.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10371, __PRETTY_FUNCTION__))
10370 isa<OMPTargetUpdateDirective>(D)) &&(((isa<OMPTargetEnterDataDirective>(D) || isa<OMPTargetExitDataDirective
>(D) || isa<OMPTargetUpdateDirective>(D)) &&
"Expecting either target enter, exit data, or update directives."
) ? static_cast<void> (0) : __assert_fail ("(isa<OMPTargetEnterDataDirective>(D) || isa<OMPTargetExitDataDirective>(D) || isa<OMPTargetUpdateDirective>(D)) && \"Expecting either target enter, exit data, or update directives.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10371, __PRETTY_FUNCTION__))
10371 "Expecting either target enter, exit data, or update directives.")(((isa<OMPTargetEnterDataDirective>(D) || isa<OMPTargetExitDataDirective
>(D) || isa<OMPTargetUpdateDirective>(D)) &&
"Expecting either target enter, exit data, or update directives."
) ? static_cast<void> (0) : __assert_fail ("(isa<OMPTargetEnterDataDirective>(D) || isa<OMPTargetExitDataDirective>(D) || isa<OMPTargetUpdateDirective>(D)) && \"Expecting either target enter, exit data, or update directives.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10371, __PRETTY_FUNCTION__))
;
10372
10373 CodeGenFunction::OMPTargetDataInfo InputInfo;
10374 llvm::Value *MapTypesArray = nullptr;
10375 // Generate the code for the opening of the data environment.
10376 auto &&ThenGen = [this, &D, Device, &InputInfo,
10377 &MapTypesArray](CodeGenFunction &CGF, PrePostActionTy &) {
10378 // Emit device ID if any.
10379 llvm::Value *DeviceID = nullptr;
10380 if (Device) {
10381 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
10382 CGF.Int64Ty, /*isSigned=*/true);
10383 } else {
10384 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
10385 }
10386
10387 // Emit the number of elements in the offloading arrays.
10388 llvm::Constant *PointerNum =
10389 CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
10390
10391 llvm::Value *OffloadingArgs[] = {DeviceID,
10392 PointerNum,
10393 InputInfo.BasePointersArray.getPointer(),
10394 InputInfo.PointersArray.getPointer(),
10395 InputInfo.SizesArray.getPointer(),
10396 MapTypesArray,
10397 InputInfo.MappersArray.getPointer()};
10398
10399 // Select the right runtime function call for each standalone
10400 // directive.
10401 const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
10402 RuntimeFunction RTLFn;
10403 switch (D.getDirectiveKind()) {
10404 case OMPD_target_enter_data:
10405 RTLFn = HasNowait ? OMPRTL___tgt_target_data_begin_nowait_mapper
10406 : OMPRTL___tgt_target_data_begin_mapper;
10407 break;
10408 case OMPD_target_exit_data:
10409 RTLFn = HasNowait ? OMPRTL___tgt_target_data_end_nowait_mapper
10410 : OMPRTL___tgt_target_data_end_mapper;
10411 break;
10412 case OMPD_target_update:
10413 RTLFn = HasNowait ? OMPRTL___tgt_target_data_update_nowait_mapper
10414 : OMPRTL___tgt_target_data_update_mapper;
10415 break;
10416 case OMPD_parallel:
10417 case OMPD_for:
10418 case OMPD_parallel_for:
10419 case OMPD_parallel_master:
10420 case OMPD_parallel_sections:
10421 case OMPD_for_simd:
10422 case OMPD_parallel_for_simd:
10423 case OMPD_cancel:
10424 case OMPD_cancellation_point:
10425 case OMPD_ordered:
10426 case OMPD_threadprivate:
10427 case OMPD_allocate:
10428 case OMPD_task:
10429 case OMPD_simd:
10430 case OMPD_sections:
10431 case OMPD_section:
10432 case OMPD_single:
10433 case OMPD_master:
10434 case OMPD_critical:
10435 case OMPD_taskyield:
10436 case OMPD_barrier:
10437 case OMPD_taskwait:
10438 case OMPD_taskgroup:
10439 case OMPD_atomic:
10440 case OMPD_flush:
10441 case OMPD_depobj:
10442 case OMPD_scan:
10443 case OMPD_teams:
10444 case OMPD_target_data:
10445 case OMPD_distribute:
10446 case OMPD_distribute_simd:
10447 case OMPD_distribute_parallel_for:
10448 case OMPD_distribute_parallel_for_simd:
10449 case OMPD_teams_distribute:
10450 case OMPD_teams_distribute_simd:
10451 case OMPD_teams_distribute_parallel_for:
10452 case OMPD_teams_distribute_parallel_for_simd:
10453 case OMPD_declare_simd:
10454 case OMPD_declare_variant:
10455 case OMPD_begin_declare_variant:
10456 case OMPD_end_declare_variant:
10457 case OMPD_declare_target:
10458 case OMPD_end_declare_target:
10459 case OMPD_declare_reduction:
10460 case OMPD_declare_mapper:
10461 case OMPD_taskloop:
10462 case OMPD_taskloop_simd:
10463 case OMPD_master_taskloop:
10464 case OMPD_master_taskloop_simd:
10465 case OMPD_parallel_master_taskloop:
10466 case OMPD_parallel_master_taskloop_simd:
10467 case OMPD_target:
10468 case OMPD_target_simd:
10469 case OMPD_target_teams_distribute:
10470 case OMPD_target_teams_distribute_simd:
10471 case OMPD_target_teams_distribute_parallel_for:
10472 case OMPD_target_teams_distribute_parallel_for_simd:
10473 case OMPD_target_teams:
10474 case OMPD_target_parallel:
10475 case OMPD_target_parallel_for:
10476 case OMPD_target_parallel_for_simd:
10477 case OMPD_requires:
10478 case OMPD_unknown:
10479 default:
10480 llvm_unreachable("Unexpected standalone target data directive.")::llvm::llvm_unreachable_internal("Unexpected standalone target data directive."
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10480)
;
10481 break;
10482 }
10483 CGF.EmitRuntimeCall(
10484 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), RTLFn),
10485 OffloadingArgs);
10486 };
10487
10488 auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray](
10489 CodeGenFunction &CGF, PrePostActionTy &) {
10490 // Fill up the arrays with all the mapped variables.
10491 MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
10492
10493 // Get map clause information.
10494 MappableExprsHandler MEHandler(D, CGF);
10495 MEHandler.generateAllInfo(CombinedInfo);
10496
10497 TargetDataInfo Info;
10498 // Fill up the arrays and create the arguments.
10499 emitOffloadingArrays(CGF, CombinedInfo, Info);
10500 emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
10501 Info.PointersArray, Info.SizesArray,
10502 Info.MapTypesArray, Info.MappersArray, Info);
10503 InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
10504 InputInfo.BasePointersArray =
10505 Address(Info.BasePointersArray, CGM.getPointerAlign());
10506 InputInfo.PointersArray =
10507 Address(Info.PointersArray, CGM.getPointerAlign());
10508 InputInfo.SizesArray =
10509 Address(Info.SizesArray, CGM.getPointerAlign());
10510 InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
10511 MapTypesArray = Info.MapTypesArray;
10512 if (D.hasClausesOfKind<OMPDependClause>())
10513 CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
10514 else
10515 emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
10516 };
10517
10518 if (IfCond) {
10519 emitIfClause(CGF, IfCond, TargetThenGen,
10520 [](CodeGenFunction &CGF, PrePostActionTy &) {});
10521 } else {
10522 RegionCodeGenTy ThenRCG(TargetThenGen);
10523 ThenRCG(CGF);
10524 }
10525}
10526
10527namespace {
10528 /// Kind of parameter in a function with 'declare simd' directive.
10529 enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector };
10530 /// Attribute set of the parameter.
10531 struct ParamAttrTy {
10532 ParamKindTy Kind = Vector;
10533 llvm::APSInt StrideOrArg;
10534 llvm::APSInt Alignment;
10535 };
10536} // namespace
10537
10538static unsigned evaluateCDTSize(const FunctionDecl *FD,
10539 ArrayRef<ParamAttrTy> ParamAttrs) {
10540 // Every vector variant of a SIMD-enabled function has a vector length (VLEN).
10541 // If OpenMP clause "simdlen" is used, the VLEN is the value of the argument
10542 // of that clause. The VLEN value must be power of 2.
10543 // In other case the notion of the function`s "characteristic data type" (CDT)
10544 // is used to compute the vector length.
10545 // CDT is defined in the following order:
10546 // a) For non-void function, the CDT is the return type.
10547 // b) If the function has any non-uniform, non-linear parameters, then the
10548 // CDT is the type of the first such parameter.
10549 // c) If the CDT determined by a) or b) above is struct, union, or class
10550 // type which is pass-by-value (except for the type that maps to the
10551 // built-in complex data type), the characteristic data type is int.
10552 // d) If none of the above three cases is applicable, the CDT is int.
10553 // The VLEN is then determined based on the CDT and the size of vector
10554 // register of that ISA for which current vector version is generated. The
10555 // VLEN is computed using the formula below:
10556 // VLEN = sizeof(vector_register) / sizeof(CDT),
10557 // where vector register size specified in section 3.2.1 Registers and the
10558 // Stack Frame of original AMD64 ABI document.
10559 QualType RetType = FD->getReturnType();
10560 if (RetType.isNull())
10561 return 0;
10562 ASTContext &C = FD->getASTContext();
10563 QualType CDT;
10564 if (!RetType.isNull() && !RetType->isVoidType()) {
10565 CDT = RetType;
10566 } else {
10567 unsigned Offset = 0;
10568 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
10569 if (ParamAttrs[Offset].Kind == Vector)
10570 CDT = C.getPointerType(C.getRecordType(MD->getParent()));
10571 ++Offset;
10572 }
10573 if (CDT.isNull()) {
10574 for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
10575 if (ParamAttrs[I + Offset].Kind == Vector) {
10576 CDT = FD->getParamDecl(I)->getType();
10577 break;
10578 }
10579 }
10580 }
10581 }
10582 if (CDT.isNull())
10583 CDT = C.IntTy;
10584 CDT = CDT->getCanonicalTypeUnqualified();
10585 if (CDT->isRecordType() || CDT->isUnionType())
10586 CDT = C.IntTy;
10587 return C.getTypeSize(CDT);
10588}
10589
10590static void
10591emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
10592 const llvm::APSInt &VLENVal,
10593 ArrayRef<ParamAttrTy> ParamAttrs,
10594 OMPDeclareSimdDeclAttr::BranchStateTy State) {
10595 struct ISADataTy {
10596 char ISA;
10597 unsigned VecRegSize;
10598 };
10599 ISADataTy ISAData[] = {
10600 {
10601 'b', 128
10602 }, // SSE
10603 {
10604 'c', 256
10605 }, // AVX
10606 {
10607 'd', 256
10608 }, // AVX2
10609 {
10610 'e', 512
10611 }, // AVX512
10612 };
10613 llvm::SmallVector<char, 2> Masked;
10614 switch (State) {
10615 case OMPDeclareSimdDeclAttr::BS_Undefined:
10616 Masked.push_back('N');
10617 Masked.push_back('M');
10618 break;
10619 case OMPDeclareSimdDeclAttr::BS_Notinbranch:
10620 Masked.push_back('N');
10621 break;
10622 case OMPDeclareSimdDeclAttr::BS_Inbranch:
10623 Masked.push_back('M');
10624 break;
10625 }
10626 for (char Mask : Masked) {
10627 for (const ISADataTy &Data : ISAData) {
10628 SmallString<256> Buffer;
10629 llvm::raw_svector_ostream Out(Buffer);
10630 Out << "_ZGV" << Data.ISA << Mask;
10631 if (!VLENVal) {
10632 unsigned NumElts = evaluateCDTSize(FD, ParamAttrs);
10633 assert(NumElts && "Non-zero simdlen/cdtsize expected")((NumElts && "Non-zero simdlen/cdtsize expected") ? static_cast
<void> (0) : __assert_fail ("NumElts && \"Non-zero simdlen/cdtsize expected\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10633, __PRETTY_FUNCTION__))
;
10634 Out << llvm::APSInt::getUnsigned(Data.VecRegSize / NumElts);
10635 } else {
10636 Out << VLENVal;
10637 }
10638 for (const ParamAttrTy &ParamAttr : ParamAttrs) {
10639 switch (ParamAttr.Kind){
10640 case LinearWithVarStride:
10641 Out << 's' << ParamAttr.StrideOrArg;
10642 break;
10643 case Linear:
10644 Out << 'l';
10645 if (ParamAttr.StrideOrArg != 1)
10646 Out << ParamAttr.StrideOrArg;
10647 break;
10648 case Uniform:
10649 Out << 'u';
10650 break;
10651 case Vector:
10652 Out << 'v';
10653 break;
10654 }
10655 if (!!ParamAttr.Alignment)
10656 Out << 'a' << ParamAttr.Alignment;
10657 }
10658 Out << '_' << Fn->getName();
10659 Fn->addFnAttr(Out.str());
10660 }
10661 }
10662}
10663
10664// This are the Functions that are needed to mangle the name of the
10665// vector functions generated by the compiler, according to the rules
10666// defined in the "Vector Function ABI specifications for AArch64",
10667// available at
10668// https://developer.arm.com/products/software-development-tools/hpc/arm-compiler-for-hpc/vector-function-abi.
10669
10670/// Maps To Vector (MTV), as defined in 3.1.1 of the AAVFABI.
10671///
10672/// TODO: Need to implement the behavior for reference marked with a
10673/// var or no linear modifiers (1.b in the section). For this, we
10674/// need to extend ParamKindTy to support the linear modifiers.
10675static bool getAArch64MTV(QualType QT, ParamKindTy Kind) {
10676 QT = QT.getCanonicalType();
10677
10678 if (QT->isVoidType())
10679 return false;
10680
10681 if (Kind == ParamKindTy::Uniform)
10682 return false;
10683
10684 if (Kind == ParamKindTy::Linear)
10685 return false;
10686
10687 // TODO: Handle linear references with modifiers
10688
10689 if (Kind == ParamKindTy::LinearWithVarStride)
10690 return false;
10691
10692 return true;
10693}
10694
10695/// Pass By Value (PBV), as defined in 3.1.2 of the AAVFABI.
10696static bool getAArch64PBV(QualType QT, ASTContext &C) {
10697 QT = QT.getCanonicalType();
10698 unsigned Size = C.getTypeSize(QT);
10699
10700 // Only scalars and complex within 16 bytes wide set PVB to true.
10701 if (Size != 8 && Size != 16 && Size != 32 && Size != 64 && Size != 128)
10702 return false;
10703
10704 if (QT->isFloatingType())
10705 return true;
10706
10707 if (QT->isIntegerType())
10708 return true;
10709
10710 if (QT->isPointerType())
10711 return true;
10712
10713 // TODO: Add support for complex types (section 3.1.2, item 2).
10714
10715 return false;
10716}
10717
10718/// Computes the lane size (LS) of a return type or of an input parameter,
10719/// as defined by `LS(P)` in 3.2.1 of the AAVFABI.
10720/// TODO: Add support for references, section 3.2.1, item 1.
10721static unsigned getAArch64LS(QualType QT, ParamKindTy Kind, ASTContext &C) {
10722 if (!getAArch64MTV(QT, Kind) && QT.getCanonicalType()->isPointerType()) {
10723 QualType PTy = QT.getCanonicalType()->getPointeeType();
10724 if (getAArch64PBV(PTy, C))
10725 return C.getTypeSize(PTy);
10726 }
10727 if (getAArch64PBV(QT, C))
10728 return C.getTypeSize(QT);
10729
10730 return C.getTypeSize(C.getUIntPtrType());
10731}
10732
10733// Get Narrowest Data Size (NDS) and Widest Data Size (WDS) from the
10734// signature of the scalar function, as defined in 3.2.2 of the
10735// AAVFABI.
10736static std::tuple<unsigned, unsigned, bool>
10737getNDSWDS(const FunctionDecl *FD, ArrayRef<ParamAttrTy> ParamAttrs) {
10738 QualType RetType = FD->getReturnType().getCanonicalType();
10739
10740 ASTContext &C = FD->getASTContext();
10741
10742 bool OutputBecomesInput = false;
10743
10744 llvm::SmallVector<unsigned, 8> Sizes;
10745 if (!RetType->isVoidType()) {
10746 Sizes.push_back(getAArch64LS(RetType, ParamKindTy::Vector, C));
10747 if (!getAArch64PBV(RetType, C) && getAArch64MTV(RetType, {}))
10748 OutputBecomesInput = true;
10749 }
10750 for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
10751 QualType QT = FD->getParamDecl(I)->getType().getCanonicalType();
10752 Sizes.push_back(getAArch64LS(QT, ParamAttrs[I].Kind, C));
10753 }
10754
10755 assert(!Sizes.empty() && "Unable to determine NDS and WDS.")((!Sizes.empty() && "Unable to determine NDS and WDS."
) ? static_cast<void> (0) : __assert_fail ("!Sizes.empty() && \"Unable to determine NDS and WDS.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10755, __PRETTY_FUNCTION__))
;
10756 // The LS of a function parameter / return value can only be a power
10757 // of 2, starting from 8 bits, up to 128.
10758 assert(std::all_of(Sizes.begin(), Sizes.end(),((std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) {
return Size == 8 || Size == 16 || Size == 32 || Size == 64 ||
Size == 128; }) && "Invalid size") ? static_cast<
void> (0) : __assert_fail ("std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) { return Size == 8 || Size == 16 || Size == 32 || Size == 64 || Size == 128; }) && \"Invalid size\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10763, __PRETTY_FUNCTION__))
10759 [](unsigned Size) {((std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) {
return Size == 8 || Size == 16 || Size == 32 || Size == 64 ||
Size == 128; }) && "Invalid size") ? static_cast<
void> (0) : __assert_fail ("std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) { return Size == 8 || Size == 16 || Size == 32 || Size == 64 || Size == 128; }) && \"Invalid size\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10763, __PRETTY_FUNCTION__))
10760 return Size == 8 || Size == 16 || Size == 32 ||((std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) {
return Size == 8 || Size == 16 || Size == 32 || Size == 64 ||
Size == 128; }) && "Invalid size") ? static_cast<
void> (0) : __assert_fail ("std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) { return Size == 8 || Size == 16 || Size == 32 || Size == 64 || Size == 128; }) && \"Invalid size\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10763, __PRETTY_FUNCTION__))
10761 Size == 64 || Size == 128;((std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) {
return Size == 8 || Size == 16 || Size == 32 || Size == 64 ||
Size == 128; }) && "Invalid size") ? static_cast<
void> (0) : __assert_fail ("std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) { return Size == 8 || Size == 16 || Size == 32 || Size == 64 || Size == 128; }) && \"Invalid size\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10763, __PRETTY_FUNCTION__))
10762 }) &&((std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) {
return Size == 8 || Size == 16 || Size == 32 || Size == 64 ||
Size == 128; }) && "Invalid size") ? static_cast<
void> (0) : __assert_fail ("std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) { return Size == 8 || Size == 16 || Size == 32 || Size == 64 || Size == 128; }) && \"Invalid size\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10763, __PRETTY_FUNCTION__))
10763 "Invalid size")((std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) {
return Size == 8 || Size == 16 || Size == 32 || Size == 64 ||
Size == 128; }) && "Invalid size") ? static_cast<
void> (0) : __assert_fail ("std::all_of(Sizes.begin(), Sizes.end(), [](unsigned Size) { return Size == 8 || Size == 16 || Size == 32 || Size == 64 || Size == 128; }) && \"Invalid size\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10763, __PRETTY_FUNCTION__))
;
10764
10765 return std::make_tuple(*std::min_element(std::begin(Sizes), std::end(Sizes)),
10766 *std::max_element(std::begin(Sizes), std::end(Sizes)),
10767 OutputBecomesInput);
10768}
10769
10770/// Mangle the parameter part of the vector function name according to
10771/// their OpenMP classification. The mangling function is defined in
10772/// section 3.5 of the AAVFABI.
10773static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
10774 SmallString<256> Buffer;
10775 llvm::raw_svector_ostream Out(Buffer);
10776 for (const auto &ParamAttr : ParamAttrs) {
10777 switch (ParamAttr.Kind) {
10778 case LinearWithVarStride:
10779 Out << "ls" << ParamAttr.StrideOrArg;
10780 break;
10781 case Linear:
10782 Out << 'l';
10783 // Don't print the step value if it is not present or if it is
10784 // equal to 1.
10785 if (ParamAttr.StrideOrArg != 1)
10786 Out << ParamAttr.StrideOrArg;
10787 break;
10788 case Uniform:
10789 Out << 'u';
10790 break;
10791 case Vector:
10792 Out << 'v';
10793 break;
10794 }
10795
10796 if (!!ParamAttr.Alignment)
10797 Out << 'a' << ParamAttr.Alignment;
10798 }
10799
10800 return std::string(Out.str());
10801}
10802
10803// Function used to add the attribute. The parameter `VLEN` is
10804// templated to allow the use of "x" when targeting scalable functions
10805// for SVE.
10806template <typename T>
10807static void addAArch64VectorName(T VLEN, StringRef LMask, StringRef Prefix,
10808 char ISA, StringRef ParSeq,
10809 StringRef MangledName, bool OutputBecomesInput,
10810 llvm::Function *Fn) {
10811 SmallString<256> Buffer;
10812 llvm::raw_svector_ostream Out(Buffer);
10813 Out << Prefix << ISA << LMask << VLEN;
10814 if (OutputBecomesInput)
10815 Out << "v";
10816 Out << ParSeq << "_" << MangledName;
10817 Fn->addFnAttr(Out.str());
10818}
10819
10820// Helper function to generate the Advanced SIMD names depending on
10821// the value of the NDS when simdlen is not present.
10822static void addAArch64AdvSIMDNDSNames(unsigned NDS, StringRef Mask,
10823 StringRef Prefix, char ISA,
10824 StringRef ParSeq, StringRef MangledName,
10825 bool OutputBecomesInput,
10826 llvm::Function *Fn) {
10827 switch (NDS) {
10828 case 8:
10829 addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName,
10830 OutputBecomesInput, Fn);
10831 addAArch64VectorName(16, Mask, Prefix, ISA, ParSeq, MangledName,
10832 OutputBecomesInput, Fn);
10833 break;
10834 case 16:
10835 addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName,
10836 OutputBecomesInput, Fn);
10837 addAArch64VectorName(8, Mask, Prefix, ISA, ParSeq, MangledName,
10838 OutputBecomesInput, Fn);
10839 break;
10840 case 32:
10841 addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName,
10842 OutputBecomesInput, Fn);
10843 addAArch64VectorName(4, Mask, Prefix, ISA, ParSeq, MangledName,
10844 OutputBecomesInput, Fn);
10845 break;
10846 case 64:
10847 case 128:
10848 addAArch64VectorName(2, Mask, Prefix, ISA, ParSeq, MangledName,
10849 OutputBecomesInput, Fn);
10850 break;
10851 default:
10852 llvm_unreachable("Scalar type is too wide.")::llvm::llvm_unreachable_internal("Scalar type is too wide.",
"/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10852)
;
10853 }
10854}
10855
10856/// Emit vector function attributes for AArch64, as defined in the AAVFABI.
10857static void emitAArch64DeclareSimdFunction(
10858 CodeGenModule &CGM, const FunctionDecl *FD, unsigned UserVLEN,
10859 ArrayRef<ParamAttrTy> ParamAttrs,
10860 OMPDeclareSimdDeclAttr::BranchStateTy State, StringRef MangledName,
10861 char ISA, unsigned VecRegSize, llvm::Function *Fn, SourceLocation SLoc) {
10862
10863 // Get basic data for building the vector signature.
10864 const auto Data = getNDSWDS(FD, ParamAttrs);
10865 const unsigned NDS = std::get<0>(Data);
10866 const unsigned WDS = std::get<1>(Data);
10867 const bool OutputBecomesInput = std::get<2>(Data);
10868
10869 // Check the values provided via `simdlen` by the user.
10870 // 1. A `simdlen(1)` doesn't produce vector signatures,
10871 if (UserVLEN == 1) {
10872 unsigned DiagID = CGM.getDiags().getCustomDiagID(
10873 DiagnosticsEngine::Warning,
10874 "The clause simdlen(1) has no effect when targeting aarch64.");
10875 CGM.getDiags().Report(SLoc, DiagID);
10876 return;
10877 }
10878
10879 // 2. Section 3.3.1, item 1: user input must be a power of 2 for
10880 // Advanced SIMD output.
10881 if (ISA == 'n' && UserVLEN && !llvm::isPowerOf2_32(UserVLEN)) {
10882 unsigned DiagID = CGM.getDiags().getCustomDiagID(
10883 DiagnosticsEngine::Warning, "The value specified in simdlen must be a "
10884 "power of 2 when targeting Advanced SIMD.");
10885 CGM.getDiags().Report(SLoc, DiagID);
10886 return;
10887 }
10888
10889 // 3. Section 3.4.1. SVE fixed lengh must obey the architectural
10890 // limits.
10891 if (ISA == 's' && UserVLEN != 0) {
10892 if ((UserVLEN * WDS > 2048) || (UserVLEN * WDS % 128 != 0)) {
10893 unsigned DiagID = CGM.getDiags().getCustomDiagID(
10894 DiagnosticsEngine::Warning, "The clause simdlen must fit the %0-bit "
10895 "lanes in the architectural constraints "
10896 "for SVE (min is 128-bit, max is "
10897 "2048-bit, by steps of 128-bit)");
10898 CGM.getDiags().Report(SLoc, DiagID) << WDS;
10899 return;
10900 }
10901 }
10902
10903 // Sort out parameter sequence.
10904 const std::string ParSeq = mangleVectorParameters(ParamAttrs);
10905 StringRef Prefix = "_ZGV";
10906 // Generate simdlen from user input (if any).
10907 if (UserVLEN) {
10908 if (ISA == 's') {
10909 // SVE generates only a masked function.
10910 addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
10911 OutputBecomesInput, Fn);
10912 } else {
10913 assert(ISA == 'n' && "Expected ISA either 's' or 'n'.")((ISA == 'n' && "Expected ISA either 's' or 'n'.") ? static_cast
<void> (0) : __assert_fail ("ISA == 'n' && \"Expected ISA either 's' or 'n'.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10913, __PRETTY_FUNCTION__))
;
10914 // Advanced SIMD generates one or two functions, depending on
10915 // the `[not]inbranch` clause.
10916 switch (State) {
10917 case OMPDeclareSimdDeclAttr::BS_Undefined:
10918 addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName,
10919 OutputBecomesInput, Fn);
10920 addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
10921 OutputBecomesInput, Fn);
10922 break;
10923 case OMPDeclareSimdDeclAttr::BS_Notinbranch:
10924 addAArch64VectorName(UserVLEN, "N", Prefix, ISA, ParSeq, MangledName,
10925 OutputBecomesInput, Fn);
10926 break;
10927 case OMPDeclareSimdDeclAttr::BS_Inbranch:
10928 addAArch64VectorName(UserVLEN, "M", Prefix, ISA, ParSeq, MangledName,
10929 OutputBecomesInput, Fn);
10930 break;
10931 }
10932 }
10933 } else {
10934 // If no user simdlen is provided, follow the AAVFABI rules for
10935 // generating the vector length.
10936 if (ISA == 's') {
10937 // SVE, section 3.4.1, item 1.
10938 addAArch64VectorName("x", "M", Prefix, ISA, ParSeq, MangledName,
10939 OutputBecomesInput, Fn);
10940 } else {
10941 assert(ISA == 'n' && "Expected ISA either 's' or 'n'.")((ISA == 'n' && "Expected ISA either 's' or 'n'.") ? static_cast
<void> (0) : __assert_fail ("ISA == 'n' && \"Expected ISA either 's' or 'n'.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 10941, __PRETTY_FUNCTION__))
;
10942 // Advanced SIMD, Section 3.3.1 of the AAVFABI, generates one or
10943 // two vector names depending on the use of the clause
10944 // `[not]inbranch`.
10945 switch (State) {
10946 case OMPDeclareSimdDeclAttr::BS_Undefined:
10947 addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName,
10948 OutputBecomesInput, Fn);
10949 addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName,
10950 OutputBecomesInput, Fn);
10951 break;
10952 case OMPDeclareSimdDeclAttr::BS_Notinbranch:
10953 addAArch64AdvSIMDNDSNames(NDS, "N", Prefix, ISA, ParSeq, MangledName,
10954 OutputBecomesInput, Fn);
10955 break;
10956 case OMPDeclareSimdDeclAttr::BS_Inbranch:
10957 addAArch64AdvSIMDNDSNames(NDS, "M", Prefix, ISA, ParSeq, MangledName,
10958 OutputBecomesInput, Fn);
10959 break;
10960 }
10961 }
10962 }
10963}
10964
10965void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
10966 llvm::Function *Fn) {
10967 ASTContext &C = CGM.getContext();
10968 FD = FD->getMostRecentDecl();
10969 // Map params to their positions in function decl.
10970 llvm::DenseMap<const Decl *, unsigned> ParamPositions;
10971 if (isa<CXXMethodDecl>(FD))
10972 ParamPositions.try_emplace(FD, 0);
10973 unsigned ParamPos = ParamPositions.size();
10974 for (const ParmVarDecl *P : FD->parameters()) {
10975 ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
10976 ++ParamPos;
10977 }
10978 while (FD) {
10979 for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
10980 llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
10981 // Mark uniform parameters.
10982 for (const Expr *E : Attr->uniforms()) {
10983 E = E->IgnoreParenImpCasts();
10984 unsigned Pos;
10985 if (isa<CXXThisExpr>(E)) {
10986 Pos = ParamPositions[FD];
10987 } else {
10988 const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
10989 ->getCanonicalDecl();
10990 Pos = ParamPositions[PVD];
10991 }
10992 ParamAttrs[Pos].Kind = Uniform;
10993 }
10994 // Get alignment info.
10995 auto NI = Attr->alignments_begin();
10996 for (const Expr *E : Attr->aligneds()) {
10997 E = E->IgnoreParenImpCasts();
10998 unsigned Pos;
10999 QualType ParmTy;
11000 if (isa<CXXThisExpr>(E)) {
11001 Pos = ParamPositions[FD];
11002 ParmTy = E->getType();
11003 } else {
11004 const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
11005 ->getCanonicalDecl();
11006 Pos = ParamPositions[PVD];
11007 ParmTy = PVD->getType();
11008 }
11009 ParamAttrs[Pos].Alignment =
11010 (*NI)
11011 ? (*NI)->EvaluateKnownConstInt(C)
11012 : llvm::APSInt::getUnsigned(
11013 C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy))
11014 .getQuantity());
11015 ++NI;
11016 }
11017 // Mark linear parameters.
11018 auto SI = Attr->steps_begin();
11019 auto MI = Attr->modifiers_begin();
11020 for (const Expr *E : Attr->linears()) {
11021 E = E->IgnoreParenImpCasts();
11022 unsigned Pos;
11023 // Rescaling factor needed to compute the linear parameter
11024 // value in the mangled name.
11025 unsigned PtrRescalingFactor = 1;
11026 if (isa<CXXThisExpr>(E)) {
11027 Pos = ParamPositions[FD];
11028 } else {
11029 const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
11030 ->getCanonicalDecl();
11031 Pos = ParamPositions[PVD];
11032 if (auto *P = dyn_cast<PointerType>(PVD->getType()))
11033 PtrRescalingFactor = CGM.getContext()
11034 .getTypeSizeInChars(P->getPointeeType())
11035 .getQuantity();
11036 }
11037 ParamAttrTy &ParamAttr = ParamAttrs[Pos];
11038 ParamAttr.Kind = Linear;
11039 // Assuming a stride of 1, for `linear` without modifiers.
11040 ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(1);
11041 if (*SI) {
11042 Expr::EvalResult Result;
11043 if (!(*SI)->EvaluateAsInt(Result, C, Expr::SE_AllowSideEffects)) {
11044 if (const auto *DRE =
11045 cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
11046 if (const auto *StridePVD = cast<ParmVarDecl>(DRE->getDecl())) {
11047 ParamAttr.Kind = LinearWithVarStride;
11048 ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
11049 ParamPositions[StridePVD->getCanonicalDecl()]);
11050 }
11051 }
11052 } else {
11053 ParamAttr.StrideOrArg = Result.Val.getInt();
11054 }
11055 }
11056 // If we are using a linear clause on a pointer, we need to
11057 // rescale the value of linear_step with the byte size of the
11058 // pointee type.
11059 if (Linear == ParamAttr.Kind)
11060 ParamAttr.StrideOrArg = ParamAttr.StrideOrArg * PtrRescalingFactor;
11061 ++SI;
11062 ++MI;
11063 }
11064 llvm::APSInt VLENVal;
11065 SourceLocation ExprLoc;
11066 const Expr *VLENExpr = Attr->getSimdlen();
11067 if (VLENExpr) {
11068 VLENVal = VLENExpr->EvaluateKnownConstInt(C);
11069 ExprLoc = VLENExpr->getExprLoc();
11070 }
11071 OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
11072 if (CGM.getTriple().isX86()) {
11073 emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
11074 } else if (CGM.getTriple().getArch() == llvm::Triple::aarch64) {
11075 unsigned VLEN = VLENVal.getExtValue();
11076 StringRef MangledName = Fn->getName();
11077 if (CGM.getTarget().hasFeature("sve"))
11078 emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
11079 MangledName, 's', 128, Fn, ExprLoc);
11080 if (CGM.getTarget().hasFeature("neon"))
11081 emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
11082 MangledName, 'n', 128, Fn, ExprLoc);
11083 }
11084 }
11085 FD = FD->getPreviousDecl();
11086 }
11087}
11088
11089namespace {
11090/// Cleanup action for doacross support.
11091class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
11092public:
11093 static const int DoacrossFinArgs = 2;
11094
11095private:
11096 llvm::FunctionCallee RTLFn;
11097 llvm::Value *Args[DoacrossFinArgs];
11098
11099public:
11100 DoacrossCleanupTy(llvm::FunctionCallee RTLFn,
11101 ArrayRef<llvm::Value *> CallArgs)
11102 : RTLFn(RTLFn) {
11103 assert(CallArgs.size() == DoacrossFinArgs)((CallArgs.size() == DoacrossFinArgs) ? static_cast<void>
(0) : __assert_fail ("CallArgs.size() == DoacrossFinArgs", "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11103, __PRETTY_FUNCTION__))
;
11104 std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
11105 }
11106 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
11107 if (!CGF.HaveInsertPoint())
11108 return;
11109 CGF.EmitRuntimeCall(RTLFn, Args);
11110 }
11111};
11112} // namespace
11113
11114void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
11115 const OMPLoopDirective &D,
11116 ArrayRef<Expr *> NumIterations) {
11117 if (!CGF.HaveInsertPoint())
11118 return;
11119
11120 ASTContext &C = CGM.getContext();
11121 QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
11122 RecordDecl *RD;
11123 if (KmpDimTy.isNull()) {
11124 // Build struct kmp_dim { // loop bounds info casted to kmp_int64
11125 // kmp_int64 lo; // lower
11126 // kmp_int64 up; // upper
11127 // kmp_int64 st; // stride
11128 // };
11129 RD = C.buildImplicitRecord("kmp_dim");
11130 RD->startDefinition();
11131 addFieldToRecordDecl(C, RD, Int64Ty);
11132 addFieldToRecordDecl(C, RD, Int64Ty);
11133 addFieldToRecordDecl(C, RD, Int64Ty);
11134 RD->completeDefinition();
11135 KmpDimTy = C.getRecordType(RD);
11136 } else {
11137 RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
11138 }
11139 llvm::APInt Size(/*numBits=*/32, NumIterations.size());
11140 QualType ArrayTy =
11141 C.getConstantArrayType(KmpDimTy, Size, nullptr, ArrayType::Normal, 0);
11142
11143 Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
11144 CGF.EmitNullInitialization(DimsAddr, ArrayTy);
11145 enum { LowerFD = 0, UpperFD, StrideFD };
11146 // Fill dims with data.
11147 for (unsigned I = 0, E = NumIterations.size(); I < E; ++I) {
11148 LValue DimsLVal = CGF.MakeAddrLValue(
11149 CGF.Builder.CreateConstArrayGEP(DimsAddr, I), KmpDimTy);
11150 // dims.upper = num_iterations;
11151 LValue UpperLVal = CGF.EmitLValueForField(
11152 DimsLVal, *std::next(RD->field_begin(), UpperFD));
11153 llvm::Value *NumIterVal = CGF.EmitScalarConversion(
11154 CGF.EmitScalarExpr(NumIterations[I]), NumIterations[I]->getType(),
11155 Int64Ty, NumIterations[I]->getExprLoc());
11156 CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
11157 // dims.stride = 1;
11158 LValue StrideLVal = CGF.EmitLValueForField(
11159 DimsLVal, *std::next(RD->field_begin(), StrideFD));
11160 CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
11161 StrideLVal);
11162 }
11163
11164 // Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
11165 // kmp_int32 num_dims, struct kmp_dim * dims);
11166 llvm::Value *Args[] = {
11167 emitUpdateLocation(CGF, D.getBeginLoc()),
11168 getThreadID(CGF, D.getBeginLoc()),
11169 llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
11170 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
11171 CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).getPointer(),
11172 CGM.VoidPtrTy)};
11173
11174 llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
11175 CGM.getModule(), OMPRTL___kmpc_doacross_init);
11176 CGF.EmitRuntimeCall(RTLFn, Args);
11177 llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
11178 emitUpdateLocation(CGF, D.getEndLoc()), getThreadID(CGF, D.getEndLoc())};
11179 llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
11180 CGM.getModule(), OMPRTL___kmpc_doacross_fini);
11181 CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
11182 llvm::makeArrayRef(FiniArgs));
11183}
11184
11185void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
11186 const OMPDependClause *C) {
11187 QualType Int64Ty =
11188 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
11189 llvm::APInt Size(/*numBits=*/32, C->getNumLoops());
11190 QualType ArrayTy = CGM.getContext().getConstantArrayType(
11191 Int64Ty, Size, nullptr, ArrayType::Normal, 0);
11192 Address CntAddr = CGF.CreateMemTemp(ArrayTy, ".cnt.addr");
11193 for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I) {
11194 const Expr *CounterVal = C->getLoopData(I);
11195 assert(CounterVal)((CounterVal) ? static_cast<void> (0) : __assert_fail (
"CounterVal", "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11195, __PRETTY_FUNCTION__))
;
11196 llvm::Value *CntVal = CGF.EmitScalarConversion(
11197 CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
11198 CounterVal->getExprLoc());
11199 CGF.EmitStoreOfScalar(CntVal, CGF.Builder.CreateConstArrayGEP(CntAddr, I),
11200 /*Volatile=*/false, Int64Ty);
11201 }
11202 llvm::Value *Args[] = {
11203 emitUpdateLocation(CGF, C->getBeginLoc()),
11204 getThreadID(CGF, C->getBeginLoc()),
11205 CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
11206 llvm::FunctionCallee RTLFn;
11207 if (C->getDependencyKind() == OMPC_DEPEND_source) {
11208 RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
11209 OMPRTL___kmpc_doacross_post);
11210 } else {
11211 assert(C->getDependencyKind() == OMPC_DEPEND_sink)((C->getDependencyKind() == OMPC_DEPEND_sink) ? static_cast
<void> (0) : __assert_fail ("C->getDependencyKind() == OMPC_DEPEND_sink"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11211, __PRETTY_FUNCTION__))
;
11212 RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
11213 OMPRTL___kmpc_doacross_wait);
11214 }
11215 CGF.EmitRuntimeCall(RTLFn, Args);
11216}
11217
11218void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
11219 llvm::FunctionCallee Callee,
11220 ArrayRef<llvm::Value *> Args) const {
11221 assert(Loc.isValid() && "Outlined function call location must be valid.")((Loc.isValid() && "Outlined function call location must be valid."
) ? static_cast<void> (0) : __assert_fail ("Loc.isValid() && \"Outlined function call location must be valid.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11221, __PRETTY_FUNCTION__))
;
11222 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
11223
11224 if (auto *Fn = dyn_cast<llvm::Function>(Callee.getCallee())) {
11225 if (Fn->doesNotThrow()) {
11226 CGF.EmitNounwindRuntimeCall(Fn, Args);
11227 return;
11228 }
11229 }
11230 CGF.EmitRuntimeCall(Callee, Args);
11231}
11232
11233void CGOpenMPRuntime::emitOutlinedFunctionCall(
11234 CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
11235 ArrayRef<llvm::Value *> Args) const {
11236 emitCall(CGF, Loc, OutlinedFn, Args);
11237}
11238
11239void CGOpenMPRuntime::emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) {
11240 if (const auto *FD = dyn_cast<FunctionDecl>(D))
11241 if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD))
11242 HasEmittedDeclareTargetRegion = true;
11243}
11244
11245Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
11246 const VarDecl *NativeParam,
11247 const VarDecl *TargetParam) const {
11248 return CGF.GetAddrOfLocalVar(NativeParam);
11249}
11250
11251Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
11252 const VarDecl *VD) {
11253 if (!VD)
11254 return Address::invalid();
11255 Address UntiedAddr = Address::invalid();
11256 Address UntiedRealAddr = Address::invalid();
11257 auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn);
11258 if (It != FunctionToUntiedTaskStackMap.end()) {
11259 const UntiedLocalVarsAddressesMap &UntiedData =
11260 UntiedLocalVarsStack[It->second];
11261 auto I = UntiedData.find(VD);
11262 if (I != UntiedData.end()) {
11263 UntiedAddr = I->second.first;
11264 UntiedRealAddr = I->second.second;
11265 }
11266 }
11267 const VarDecl *CVD = VD->getCanonicalDecl();
11268 if (CVD->hasAttr<OMPAllocateDeclAttr>()) {
11269 // Use the default allocation.
11270 if (!isAllocatableDecl(VD))
11271 return UntiedAddr;
11272 llvm::Value *Size;
11273 CharUnits Align = CGM.getContext().getDeclAlign(CVD);
11274 if (CVD->getType()->isVariablyModifiedType()) {
11275 Size = CGF.getTypeSize(CVD->getType());
11276 // Align the size: ((size + align - 1) / align) * align
11277 Size = CGF.Builder.CreateNUWAdd(
11278 Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
11279 Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
11280 Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
11281 } else {
11282 CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
11283 Size = CGM.getSize(Sz.alignTo(Align));
11284 }
11285 llvm::Value *ThreadID = getThreadID(CGF, CVD->getBeginLoc());
11286 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
11287 assert(AA->getAllocator() &&((AA->getAllocator() && "Expected allocator expression for non-default allocator."
) ? static_cast<void> (0) : __assert_fail ("AA->getAllocator() && \"Expected allocator expression for non-default allocator.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11288, __PRETTY_FUNCTION__))
11288 "Expected allocator expression for non-default allocator.")((AA->getAllocator() && "Expected allocator expression for non-default allocator."
) ? static_cast<void> (0) : __assert_fail ("AA->getAllocator() && \"Expected allocator expression for non-default allocator.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11288, __PRETTY_FUNCTION__))
;
11289 llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
11290 // According to the standard, the original allocator type is a enum
11291 // (integer). Convert to pointer type, if required.
11292 Allocator = CGF.EmitScalarConversion(
11293 Allocator, AA->getAllocator()->getType(), CGF.getContext().VoidPtrTy,
11294 AA->getAllocator()->getExprLoc());
11295 llvm::Value *Args[] = {ThreadID, Size, Allocator};
11296
11297 llvm::Value *Addr =
11298 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
11299 CGM.getModule(), OMPRTL___kmpc_alloc),
11300 Args, getName({CVD->getName(), ".void.addr"}));
11301 llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
11302 CGM.getModule(), OMPRTL___kmpc_free);
11303 QualType Ty = CGM.getContext().getPointerType(CVD->getType());
11304 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
11305 Addr, CGF.ConvertTypeForMem(Ty), getName({CVD->getName(), ".addr"}));
11306 if (UntiedAddr.isValid())
11307 CGF.EmitStoreOfScalar(Addr, UntiedAddr, /*Volatile=*/false, Ty);
11308
11309 // Cleanup action for allocate support.
11310 class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
11311 llvm::FunctionCallee RTLFn;
11312 unsigned LocEncoding;
11313 Address Addr;
11314 const Expr *Allocator;
11315
11316 public:
11317 OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn, unsigned LocEncoding,
11318 Address Addr, const Expr *Allocator)
11319 : RTLFn(RTLFn), LocEncoding(LocEncoding), Addr(Addr),
11320 Allocator(Allocator) {}
11321 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
11322 if (!CGF.HaveInsertPoint())
11323 return;
11324 llvm::Value *Args[3];
11325 Args[0] = CGF.CGM.getOpenMPRuntime().getThreadID(
11326 CGF, SourceLocation::getFromRawEncoding(LocEncoding));
11327 Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
11328 Addr.getPointer(), CGF.VoidPtrTy);
11329 llvm::Value *AllocVal = CGF.EmitScalarExpr(Allocator);
11330 // According to the standard, the original allocator type is a enum
11331 // (integer). Convert to pointer type, if required.
11332 AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(),
11333 CGF.getContext().VoidPtrTy,
11334 Allocator->getExprLoc());
11335 Args[2] = AllocVal;
11336
11337 CGF.EmitRuntimeCall(RTLFn, Args);
11338 }
11339 };
11340 Address VDAddr =
11341 UntiedRealAddr.isValid() ? UntiedRealAddr : Address(Addr, Align);
11342 CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(
11343 NormalAndEHCleanup, FiniRTLFn, CVD->getLocation().getRawEncoding(),
11344 VDAddr, AA->getAllocator());
11345 if (UntiedRealAddr.isValid())
11346 if (auto *Region =
11347 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
11348 Region->emitUntiedSwitch(CGF);
11349 return VDAddr;
11350 }
11351 return UntiedAddr;
11352}
11353
11354bool CGOpenMPRuntime::isLocalVarInUntiedTask(CodeGenFunction &CGF,
11355 const VarDecl *VD) const {
11356 auto It = FunctionToUntiedTaskStackMap.find(CGF.CurFn);
11357 if (It == FunctionToUntiedTaskStackMap.end())
11358 return false;
11359 return UntiedLocalVarsStack[It->second].count(VD) > 0;
11360}
11361
11362CGOpenMPRuntime::NontemporalDeclsRAII::NontemporalDeclsRAII(
11363 CodeGenModule &CGM, const OMPLoopDirective &S)
11364 : CGM(CGM), NeedToPush(S.hasClausesOfKind<OMPNontemporalClause>()) {
11365 assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.")((CGM.getLangOpts().OpenMP && "Not in OpenMP mode.") ?
static_cast<void> (0) : __assert_fail ("CGM.getLangOpts().OpenMP && \"Not in OpenMP mode.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11365, __PRETTY_FUNCTION__))
;
11366 if (!NeedToPush)
11367 return;
11368 NontemporalDeclsSet &DS =
11369 CGM.getOpenMPRuntime().NontemporalDeclsStack.emplace_back();
11370 for (const auto *C : S.getClausesOfKind<OMPNontemporalClause>()) {
11371 for (const Stmt *Ref : C->private_refs()) {
11372 const auto *SimpleRefExpr = cast<Expr>(Ref)->IgnoreParenImpCasts();
11373 const ValueDecl *VD;
11374 if (const auto *DRE = dyn_cast<DeclRefExpr>(SimpleRefExpr)) {
11375 VD = DRE->getDecl();
11376 } else {
11377 const auto *ME = cast<MemberExpr>(SimpleRefExpr);
11378 assert((ME->isImplicitCXXThis() ||(((ME->isImplicitCXXThis() || isa<CXXThisExpr>(ME->
getBase()->IgnoreParenImpCasts())) && "Expected member of current class."
) ? static_cast<void> (0) : __assert_fail ("(ME->isImplicitCXXThis() || isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts())) && \"Expected member of current class.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11380, __PRETTY_FUNCTION__))
11379 isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts())) &&(((ME->isImplicitCXXThis() || isa<CXXThisExpr>(ME->
getBase()->IgnoreParenImpCasts())) && "Expected member of current class."
) ? static_cast<void> (0) : __assert_fail ("(ME->isImplicitCXXThis() || isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts())) && \"Expected member of current class.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11380, __PRETTY_FUNCTION__))
11380 "Expected member of current class.")(((ME->isImplicitCXXThis() || isa<CXXThisExpr>(ME->
getBase()->IgnoreParenImpCasts())) && "Expected member of current class."
) ? static_cast<void> (0) : __assert_fail ("(ME->isImplicitCXXThis() || isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts())) && \"Expected member of current class.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11380, __PRETTY_FUNCTION__))
;
11381 VD = ME->getMemberDecl();
11382 }
11383 DS.insert(VD);
11384 }
11385 }
11386}
11387
11388CGOpenMPRuntime::NontemporalDeclsRAII::~NontemporalDeclsRAII() {
11389 if (!NeedToPush)
11390 return;
11391 CGM.getOpenMPRuntime().NontemporalDeclsStack.pop_back();
11392}
11393
11394CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::UntiedTaskLocalDeclsRAII(
11395 CodeGenFunction &CGF,
11396 const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
11397 std::pair<Address, Address>> &LocalVars)
11398 : CGM(CGF.CGM), NeedToPush(!LocalVars.empty()) {
11399 if (!NeedToPush)
11400 return;
11401 CGM.getOpenMPRuntime().FunctionToUntiedTaskStackMap.try_emplace(
11402 CGF.CurFn, CGM.getOpenMPRuntime().UntiedLocalVarsStack.size());
11403 CGM.getOpenMPRuntime().UntiedLocalVarsStack.push_back(LocalVars);
11404}
11405
11406CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::~UntiedTaskLocalDeclsRAII() {
11407 if (!NeedToPush)
11408 return;
11409 CGM.getOpenMPRuntime().UntiedLocalVarsStack.pop_back();
11410}
11411
11412bool CGOpenMPRuntime::isNontemporalDecl(const ValueDecl *VD) const {
11413 assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.")((CGM.getLangOpts().OpenMP && "Not in OpenMP mode.") ?
static_cast<void> (0) : __assert_fail ("CGM.getLangOpts().OpenMP && \"Not in OpenMP mode.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11413, __PRETTY_FUNCTION__))
;
11414
11415 return llvm::any_of(
11416 CGM.getOpenMPRuntime().NontemporalDeclsStack,
11417 [VD](const NontemporalDeclsSet &Set) { return Set.count(VD) > 0; });
11418}
11419
11420void CGOpenMPRuntime::LastprivateConditionalRAII::tryToDisableInnerAnalysis(
11421 const OMPExecutableDirective &S,
11422 llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled)
11423 const {
11424 llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToCheckForLPCs;
11425 // Vars in target/task regions must be excluded completely.
11426 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()) ||
11427 isOpenMPTaskingDirective(S.getDirectiveKind())) {
11428 SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
11429 getOpenMPCaptureRegions(CaptureRegions, S.getDirectiveKind());
11430 const CapturedStmt *CS = S.getCapturedStmt(CaptureRegions.front());
11431 for (const CapturedStmt::Capture &Cap : CS->captures()) {
11432 if (Cap.capturesVariable() || Cap.capturesVariableByCopy())
11433 NeedToCheckForLPCs.insert(Cap.getCapturedVar());
11434 }
11435 }
11436 // Exclude vars in private clauses.
11437 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
11438 for (const Expr *Ref : C->varlists()) {
11439 if (!Ref->getType()->isScalarType())
11440 continue;
11441 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
11442 if (!DRE)
11443 continue;
11444 NeedToCheckForLPCs.insert(DRE->getDecl());
11445 }
11446 }
11447 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
11448 for (const Expr *Ref : C->varlists()) {
11449 if (!Ref->getType()->isScalarType())
11450 continue;
11451 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
11452 if (!DRE)
11453 continue;
11454 NeedToCheckForLPCs.insert(DRE->getDecl());
11455 }
11456 }
11457 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
11458 for (const Expr *Ref : C->varlists()) {
11459 if (!Ref->getType()->isScalarType())
11460 continue;
11461 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
11462 if (!DRE)
11463 continue;
11464 NeedToCheckForLPCs.insert(DRE->getDecl());
11465 }
11466 }
11467 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
11468 for (const Expr *Ref : C->varlists()) {
11469 if (!Ref->getType()->isScalarType())
11470 continue;
11471 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
11472 if (!DRE)
11473 continue;
11474 NeedToCheckForLPCs.insert(DRE->getDecl());
11475 }
11476 }
11477 for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
11478 for (const Expr *Ref : C->varlists()) {
11479 if (!Ref->getType()->isScalarType())
11480 continue;
11481 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
11482 if (!DRE)
11483 continue;
11484 NeedToCheckForLPCs.insert(DRE->getDecl());
11485 }
11486 }
11487 for (const Decl *VD : NeedToCheckForLPCs) {
11488 for (const LastprivateConditionalData &Data :
11489 llvm::reverse(CGM.getOpenMPRuntime().LastprivateConditionalStack)) {
11490 if (Data.DeclToUniqueName.count(VD) > 0) {
11491 if (!Data.Disabled)
11492 NeedToAddForLPCsAsDisabled.insert(VD);
11493 break;
11494 }
11495 }
11496 }
11497}
11498
11499CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
11500 CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal)
11501 : CGM(CGF.CGM),
11502 Action((CGM.getLangOpts().OpenMP >= 50 &&
11503 llvm::any_of(S.getClausesOfKind<OMPLastprivateClause>(),
11504 [](const OMPLastprivateClause *C) {
11505 return C->getKind() ==
11506 OMPC_LASTPRIVATE_conditional;
11507 }))
11508 ? ActionToDo::PushAsLastprivateConditional
11509 : ActionToDo::DoNotPush) {
11510 assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.")((CGM.getLangOpts().OpenMP && "Not in OpenMP mode.") ?
static_cast<void> (0) : __assert_fail ("CGM.getLangOpts().OpenMP && \"Not in OpenMP mode.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11510, __PRETTY_FUNCTION__))
;
11511 if (CGM.getLangOpts().OpenMP < 50 || Action == ActionToDo::DoNotPush)
11512 return;
11513 assert(Action == ActionToDo::PushAsLastprivateConditional &&((Action == ActionToDo::PushAsLastprivateConditional &&
"Expected a push action.") ? static_cast<void> (0) : __assert_fail
("Action == ActionToDo::PushAsLastprivateConditional && \"Expected a push action.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11514, __PRETTY_FUNCTION__))
11514 "Expected a push action.")((Action == ActionToDo::PushAsLastprivateConditional &&
"Expected a push action.") ? static_cast<void> (0) : __assert_fail
("Action == ActionToDo::PushAsLastprivateConditional && \"Expected a push action.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11514, __PRETTY_FUNCTION__))
;
11515 LastprivateConditionalData &Data =
11516 CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
11517 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
11518 if (C->getKind() != OMPC_LASTPRIVATE_conditional)
11519 continue;
11520
11521 for (const Expr *Ref : C->varlists()) {
11522 Data.DeclToUniqueName.insert(std::make_pair(
11523 cast<DeclRefExpr>(Ref->IgnoreParenImpCasts())->getDecl(),
11524 SmallString<16>(generateUniqueName(CGM, "pl_cond", Ref))));
11525 }
11526 }
11527 Data.IVLVal = IVLVal;
11528 Data.Fn = CGF.CurFn;
11529}
11530
11531CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
11532 CodeGenFunction &CGF, const OMPExecutableDirective &S)
11533 : CGM(CGF.CGM), Action(ActionToDo::DoNotPush) {
11534 assert(CGM.getLangOpts().OpenMP && "Not in OpenMP mode.")((CGM.getLangOpts().OpenMP && "Not in OpenMP mode.") ?
static_cast<void> (0) : __assert_fail ("CGM.getLangOpts().OpenMP && \"Not in OpenMP mode.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11534, __PRETTY_FUNCTION__))
;
11535 if (CGM.getLangOpts().OpenMP < 50)
11536 return;
11537 llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToAddForLPCsAsDisabled;
11538 tryToDisableInnerAnalysis(S, NeedToAddForLPCsAsDisabled);
11539 if (!NeedToAddForLPCsAsDisabled.empty()) {
11540 Action = ActionToDo::DisableLastprivateConditional;
11541 LastprivateConditionalData &Data =
11542 CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
11543 for (const Decl *VD : NeedToAddForLPCsAsDisabled)
11544 Data.DeclToUniqueName.insert(std::make_pair(VD, SmallString<16>()));
11545 Data.Fn = CGF.CurFn;
11546 Data.Disabled = true;
11547 }
11548}
11549
11550CGOpenMPRuntime::LastprivateConditionalRAII
11551CGOpenMPRuntime::LastprivateConditionalRAII::disable(
11552 CodeGenFunction &CGF, const OMPExecutableDirective &S) {
11553 return LastprivateConditionalRAII(CGF, S);
11554}
11555
11556CGOpenMPRuntime::LastprivateConditionalRAII::~LastprivateConditionalRAII() {
11557 if (CGM.getLangOpts().OpenMP < 50)
11558 return;
11559 if (Action == ActionToDo::DisableLastprivateConditional) {
11560 assert(CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&((CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled
&& "Expected list of disabled private vars.") ? static_cast
<void> (0) : __assert_fail ("CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled && \"Expected list of disabled private vars.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11561, __PRETTY_FUNCTION__))
11561 "Expected list of disabled private vars.")((CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled
&& "Expected list of disabled private vars.") ? static_cast
<void> (0) : __assert_fail ("CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled && \"Expected list of disabled private vars.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11561, __PRETTY_FUNCTION__))
;
11562 CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
11563 }
11564 if (Action == ActionToDo::PushAsLastprivateConditional) {
11565 assert(((!CGM.getOpenMPRuntime().LastprivateConditionalStack.back().
Disabled && "Expected list of lastprivate conditional vars."
) ? static_cast<void> (0) : __assert_fail ("!CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled && \"Expected list of lastprivate conditional vars.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11567, __PRETTY_FUNCTION__))
11566 !CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&((!CGM.getOpenMPRuntime().LastprivateConditionalStack.back().
Disabled && "Expected list of lastprivate conditional vars."
) ? static_cast<void> (0) : __assert_fail ("!CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled && \"Expected list of lastprivate conditional vars.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11567, __PRETTY_FUNCTION__))
11567 "Expected list of lastprivate conditional vars.")((!CGM.getOpenMPRuntime().LastprivateConditionalStack.back().
Disabled && "Expected list of lastprivate conditional vars."
) ? static_cast<void> (0) : __assert_fail ("!CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled && \"Expected list of lastprivate conditional vars.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11567, __PRETTY_FUNCTION__))
;
11568 CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
11569 }
11570}
11571
11572Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
11573 const VarDecl *VD) {
11574 ASTContext &C = CGM.getContext();
11575 auto I = LastprivateConditionalToTypes.find(CGF.CurFn);
11576 if (I == LastprivateConditionalToTypes.end())
11577 I = LastprivateConditionalToTypes.try_emplace(CGF.CurFn).first;
11578 QualType NewType;
11579 const FieldDecl *VDField;
11580 const FieldDecl *FiredField;
11581 LValue BaseLVal;
11582 auto VI = I->getSecond().find(VD);
11583 if (VI == I->getSecond().end()) {
11584 RecordDecl *RD = C.buildImplicitRecord("lasprivate.conditional");
11585 RD->startDefinition();
11586 VDField = addFieldToRecordDecl(C, RD, VD->getType().getNonReferenceType());
11587 FiredField = addFieldToRecordDecl(C, RD, C.CharTy);
11588 RD->completeDefinition();
11589 NewType = C.getRecordType(RD);
11590 Address Addr = CGF.CreateMemTemp(NewType, C.getDeclAlign(VD), VD->getName());
11591 BaseLVal = CGF.MakeAddrLValue(Addr, NewType, AlignmentSource::Decl);
11592 I->getSecond().try_emplace(VD, NewType, VDField, FiredField, BaseLVal);
11593 } else {
11594 NewType = std::get<0>(VI->getSecond());
11595 VDField = std::get<1>(VI->getSecond());
11596 FiredField = std::get<2>(VI->getSecond());
11597 BaseLVal = std::get<3>(VI->getSecond());
11598 }
11599 LValue FiredLVal =
11600 CGF.EmitLValueForField(BaseLVal, FiredField);
11601 CGF.EmitStoreOfScalar(
11602 llvm::ConstantInt::getNullValue(CGF.ConvertTypeForMem(C.CharTy)),
11603 FiredLVal);
11604 return CGF.EmitLValueForField(BaseLVal, VDField).getAddress(CGF);
11605}
11606
11607namespace {
11608/// Checks if the lastprivate conditional variable is referenced in LHS.
11609class LastprivateConditionalRefChecker final
11610 : public ConstStmtVisitor<LastprivateConditionalRefChecker, bool> {
11611 ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM;
11612 const Expr *FoundE = nullptr;
11613 const Decl *FoundD = nullptr;
11614 StringRef UniqueDeclName;
11615 LValue IVLVal;
11616 llvm::Function *FoundFn = nullptr;
11617 SourceLocation Loc;
11618
11619public:
11620 bool VisitDeclRefExpr(const DeclRefExpr *E) {
11621 for (const CGOpenMPRuntime::LastprivateConditionalData &D :
11622 llvm::reverse(LPM)) {
11623 auto It = D.DeclToUniqueName.find(E->getDecl());
11624 if (It == D.DeclToUniqueName.end())
11625 continue;
11626 if (D.Disabled)
11627 return false;
11628 FoundE = E;
11629 FoundD = E->getDecl()->getCanonicalDecl();
11630 UniqueDeclName = It->second;
11631 IVLVal = D.IVLVal;
11632 FoundFn = D.Fn;
11633 break;
11634 }
11635 return FoundE == E;
11636 }
11637 bool VisitMemberExpr(const MemberExpr *E) {
11638 if (!CodeGenFunction::IsWrappedCXXThis(E->getBase()))
11639 return false;
11640 for (const CGOpenMPRuntime::LastprivateConditionalData &D :
11641 llvm::reverse(LPM)) {
11642 auto It = D.DeclToUniqueName.find(E->getMemberDecl());
11643 if (It == D.DeclToUniqueName.end())
11644 continue;
11645 if (D.Disabled)
11646 return false;
11647 FoundE = E;
11648 FoundD = E->getMemberDecl()->getCanonicalDecl();
11649 UniqueDeclName = It->second;
11650 IVLVal = D.IVLVal;
11651 FoundFn = D.Fn;
11652 break;
11653 }
11654 return FoundE == E;
11655 }
11656 bool VisitStmt(const Stmt *S) {
11657 for (const Stmt *Child : S->children()) {
11658 if (!Child)
11659 continue;
11660 if (const auto *E = dyn_cast<Expr>(Child))
11661 if (!E->isGLValue())
11662 continue;
11663 if (Visit(Child))
11664 return true;
11665 }
11666 return false;
11667 }
11668 explicit LastprivateConditionalRefChecker(
11669 ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM)
11670 : LPM(LPM) {}
11671 std::tuple<const Expr *, const Decl *, StringRef, LValue, llvm::Function *>
11672 getFoundData() const {
11673 return std::make_tuple(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn);
11674 }
11675};
11676} // namespace
11677
11678void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF,
11679 LValue IVLVal,
11680 StringRef UniqueDeclName,
11681 LValue LVal,
11682 SourceLocation Loc) {
11683 // Last updated loop counter for the lastprivate conditional var.
11684 // int<xx> last_iv = 0;
11685 llvm::Type *LLIVTy = CGF.ConvertTypeForMem(IVLVal.getType());
11686 llvm::Constant *LastIV =
11687 getOrCreateInternalVariable(LLIVTy, getName({UniqueDeclName, "iv"}));
11688 cast<llvm::GlobalVariable>(LastIV)->setAlignment(
11689 IVLVal.getAlignment().getAsAlign());
11690 LValue LastIVLVal = CGF.MakeNaturalAlignAddrLValue(LastIV, IVLVal.getType());
11691
11692 // Last value of the lastprivate conditional.
11693 // decltype(priv_a) last_a;
11694 llvm::Constant *Last = getOrCreateInternalVariable(
11695 CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName);
11696 cast<llvm::GlobalVariable>(Last)->setAlignment(
11697 LVal.getAlignment().getAsAlign());
11698 LValue LastLVal =
11699 CGF.MakeAddrLValue(Last, LVal.getType(), LVal.getAlignment());
11700
11701 // Global loop counter. Required to handle inner parallel-for regions.
11702 // iv
11703 llvm::Value *IVVal = CGF.EmitLoadOfScalar(IVLVal, Loc);
11704
11705 // #pragma omp critical(a)
11706 // if (last_iv <= iv) {
11707 // last_iv = iv;
11708 // last_a = priv_a;
11709 // }
11710 auto &&CodeGen = [&LastIVLVal, &IVLVal, IVVal, &LVal, &LastLVal,
11711 Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
11712 Action.Enter(CGF);
11713 llvm::Value *LastIVVal = CGF.EmitLoadOfScalar(LastIVLVal, Loc);
11714 // (last_iv <= iv) ? Check if the variable is updated and store new
11715 // value in global var.
11716 llvm::Value *CmpRes;
11717 if (IVLVal.getType()->isSignedIntegerType()) {
11718 CmpRes = CGF.Builder.CreateICmpSLE(LastIVVal, IVVal);
11719 } else {
11720 assert(IVLVal.getType()->isUnsignedIntegerType() &&((IVLVal.getType()->isUnsignedIntegerType() && "Loop iteration variable must be integer."
) ? static_cast<void> (0) : __assert_fail ("IVLVal.getType()->isUnsignedIntegerType() && \"Loop iteration variable must be integer.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11721, __PRETTY_FUNCTION__))
11721 "Loop iteration variable must be integer.")((IVLVal.getType()->isUnsignedIntegerType() && "Loop iteration variable must be integer."
) ? static_cast<void> (0) : __assert_fail ("IVLVal.getType()->isUnsignedIntegerType() && \"Loop iteration variable must be integer.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11721, __PRETTY_FUNCTION__))
;
11722 CmpRes = CGF.Builder.CreateICmpULE(LastIVVal, IVVal);
11723 }
11724 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lp_cond_then");
11725 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("lp_cond_exit");
11726 CGF.Builder.CreateCondBr(CmpRes, ThenBB, ExitBB);
11727 // {
11728 CGF.EmitBlock(ThenBB);
11729
11730 // last_iv = iv;
11731 CGF.EmitStoreOfScalar(IVVal, LastIVLVal);
11732
11733 // last_a = priv_a;
11734 switch (CGF.getEvaluationKind(LVal.getType())) {
11735 case TEK_Scalar: {
11736 llvm::Value *PrivVal = CGF.EmitLoadOfScalar(LVal, Loc);
11737 CGF.EmitStoreOfScalar(PrivVal, LastLVal);
11738 break;
11739 }
11740 case TEK_Complex: {
11741 CodeGenFunction::ComplexPairTy PrivVal = CGF.EmitLoadOfComplex(LVal, Loc);
11742 CGF.EmitStoreOfComplex(PrivVal, LastLVal, /*isInit=*/false);
11743 break;
11744 }
11745 case TEK_Aggregate:
11746 llvm_unreachable(::llvm::llvm_unreachable_internal("Aggregates are not supported in lastprivate conditional."
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11747)
11747 "Aggregates are not supported in lastprivate conditional.")::llvm::llvm_unreachable_internal("Aggregates are not supported in lastprivate conditional."
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11747)
;
11748 }
11749 // }
11750 CGF.EmitBranch(ExitBB);
11751 // There is no need to emit line number for unconditional branch.
11752 (void)ApplyDebugLocation::CreateEmpty(CGF);
11753 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
11754 };
11755
11756 if (CGM.getLangOpts().OpenMPSimd) {
11757 // Do not emit as a critical region as no parallel region could be emitted.
11758 RegionCodeGenTy ThenRCG(CodeGen);
11759 ThenRCG(CGF);
11760 } else {
11761 emitCriticalRegion(CGF, UniqueDeclName, CodeGen, Loc);
11762 }
11763}
11764
11765void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
11766 const Expr *LHS) {
11767 if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
11768 return;
11769 LastprivateConditionalRefChecker Checker(LastprivateConditionalStack);
11770 if (!Checker.Visit(LHS))
11771 return;
11772 const Expr *FoundE;
11773 const Decl *FoundD;
11774 StringRef UniqueDeclName;
11775 LValue IVLVal;
11776 llvm::Function *FoundFn;
11777 std::tie(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn) =
11778 Checker.getFoundData();
11779 if (FoundFn != CGF.CurFn) {
11780 // Special codegen for inner parallel regions.
11781 // ((struct.lastprivate.conditional*)&priv_a)->Fired = 1;
11782 auto It = LastprivateConditionalToTypes[FoundFn].find(FoundD);
11783 assert(It != LastprivateConditionalToTypes[FoundFn].end() &&((It != LastprivateConditionalToTypes[FoundFn].end() &&
"Lastprivate conditional is not found in outer region.") ? static_cast
<void> (0) : __assert_fail ("It != LastprivateConditionalToTypes[FoundFn].end() && \"Lastprivate conditional is not found in outer region.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11784, __PRETTY_FUNCTION__))
11784 "Lastprivate conditional is not found in outer region.")((It != LastprivateConditionalToTypes[FoundFn].end() &&
"Lastprivate conditional is not found in outer region.") ? static_cast
<void> (0) : __assert_fail ("It != LastprivateConditionalToTypes[FoundFn].end() && \"Lastprivate conditional is not found in outer region.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11784, __PRETTY_FUNCTION__))
;
11785 QualType StructTy = std::get<0>(It->getSecond());
11786 const FieldDecl* FiredDecl = std::get<2>(It->getSecond());
11787 LValue PrivLVal = CGF.EmitLValue(FoundE);
11788 Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
11789 PrivLVal.getAddress(CGF),
11790 CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)));
11791 LValue BaseLVal =
11792 CGF.MakeAddrLValue(StructAddr, StructTy, AlignmentSource::Decl);
11793 LValue FiredLVal = CGF.EmitLValueForField(BaseLVal, FiredDecl);
11794 CGF.EmitAtomicStore(RValue::get(llvm::ConstantInt::get(
11795 CGF.ConvertTypeForMem(FiredDecl->getType()), 1)),
11796 FiredLVal, llvm::AtomicOrdering::Unordered,
11797 /*IsVolatile=*/true, /*isInit=*/false);
11798 return;
11799 }
11800
11801 // Private address of the lastprivate conditional in the current context.
11802 // priv_a
11803 LValue LVal = CGF.EmitLValue(FoundE);
11804 emitLastprivateConditionalUpdate(CGF, IVLVal, UniqueDeclName, LVal,
11805 FoundE->getExprLoc());
11806}
11807
11808void CGOpenMPRuntime::checkAndEmitSharedLastprivateConditional(
11809 CodeGenFunction &CGF, const OMPExecutableDirective &D,
11810 const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls) {
11811 if (CGF.getLangOpts().OpenMP < 50 || LastprivateConditionalStack.empty())
11812 return;
11813 auto Range = llvm::reverse(LastprivateConditionalStack);
11814 auto It = llvm::find_if(
11815 Range, [](const LastprivateConditionalData &D) { return !D.Disabled; });
11816 if (It == Range.end() || It->Fn != CGF.CurFn)
11817 return;
11818 auto LPCI = LastprivateConditionalToTypes.find(It->Fn);
11819 assert(LPCI != LastprivateConditionalToTypes.end() &&((LPCI != LastprivateConditionalToTypes.end() && "Lastprivates must be registered already."
) ? static_cast<void> (0) : __assert_fail ("LPCI != LastprivateConditionalToTypes.end() && \"Lastprivates must be registered already.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11820, __PRETTY_FUNCTION__))
11820 "Lastprivates must be registered already.")((LPCI != LastprivateConditionalToTypes.end() && "Lastprivates must be registered already."
) ? static_cast<void> (0) : __assert_fail ("LPCI != LastprivateConditionalToTypes.end() && \"Lastprivates must be registered already.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11820, __PRETTY_FUNCTION__))
;
11821 SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
11822 getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
11823 const CapturedStmt *CS = D.getCapturedStmt(CaptureRegions.back());
11824 for (const auto &Pair : It->DeclToUniqueName) {
11825 const auto *VD = cast<VarDecl>(Pair.first->getCanonicalDecl());
11826 if (!CS->capturesVariable(VD) || IgnoredDecls.count(VD) > 0)
11827 continue;
11828 auto I = LPCI->getSecond().find(Pair.first);
11829 assert(I != LPCI->getSecond().end() &&((I != LPCI->getSecond().end() && "Lastprivate must be rehistered already."
) ? static_cast<void> (0) : __assert_fail ("I != LPCI->getSecond().end() && \"Lastprivate must be rehistered already.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11830, __PRETTY_FUNCTION__))
11830 "Lastprivate must be rehistered already.")((I != LPCI->getSecond().end() && "Lastprivate must be rehistered already."
) ? static_cast<void> (0) : __assert_fail ("I != LPCI->getSecond().end() && \"Lastprivate must be rehistered already.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11830, __PRETTY_FUNCTION__))
;
11831 // bool Cmp = priv_a.Fired != 0;
11832 LValue BaseLVal = std::get<3>(I->getSecond());
11833 LValue FiredLVal =
11834 CGF.EmitLValueForField(BaseLVal, std::get<2>(I->getSecond()));
11835 llvm::Value *Res = CGF.EmitLoadOfScalar(FiredLVal, D.getBeginLoc());
11836 llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Res);
11837 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("lpc.then");
11838 llvm::BasicBlock *DoneBB = CGF.createBasicBlock("lpc.done");
11839 // if (Cmp) {
11840 CGF.Builder.CreateCondBr(Cmp, ThenBB, DoneBB);
11841 CGF.EmitBlock(ThenBB);
11842 Address Addr = CGF.GetAddrOfLocalVar(VD);
11843 LValue LVal;
11844 if (VD->getType()->isReferenceType())
11845 LVal = CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
11846 AlignmentSource::Decl);
11847 else
11848 LVal = CGF.MakeAddrLValue(Addr, VD->getType().getNonReferenceType(),
11849 AlignmentSource::Decl);
11850 emitLastprivateConditionalUpdate(CGF, It->IVLVal, Pair.second, LVal,
11851 D.getBeginLoc());
11852 auto AL = ApplyDebugLocation::CreateArtificial(CGF);
11853 CGF.EmitBlock(DoneBB, /*IsFinal=*/true);
11854 // }
11855 }
11856}
11857
11858void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
11859 CodeGenFunction &CGF, LValue PrivLVal, const VarDecl *VD,
11860 SourceLocation Loc) {
11861 if (CGF.getLangOpts().OpenMP < 50)
11862 return;
11863 auto It = LastprivateConditionalStack.back().DeclToUniqueName.find(VD);
11864 assert(It != LastprivateConditionalStack.back().DeclToUniqueName.end() &&((It != LastprivateConditionalStack.back().DeclToUniqueName.end
() && "Unknown lastprivate conditional variable.") ? static_cast
<void> (0) : __assert_fail ("It != LastprivateConditionalStack.back().DeclToUniqueName.end() && \"Unknown lastprivate conditional variable.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11865, __PRETTY_FUNCTION__))
11865 "Unknown lastprivate conditional variable.")((It != LastprivateConditionalStack.back().DeclToUniqueName.end
() && "Unknown lastprivate conditional variable.") ? static_cast
<void> (0) : __assert_fail ("It != LastprivateConditionalStack.back().DeclToUniqueName.end() && \"Unknown lastprivate conditional variable.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11865, __PRETTY_FUNCTION__))
;
11866 StringRef UniqueName = It->second;
11867 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(UniqueName);
11868 // The variable was not updated in the region - exit.
11869 if (!GV)
11870 return;
11871 LValue LPLVal = CGF.MakeAddrLValue(
11872 GV, PrivLVal.getType().getNonReferenceType(), PrivLVal.getAlignment());
11873 llvm::Value *Res = CGF.EmitLoadOfScalar(LPLVal, Loc);
11874 CGF.EmitStoreOfScalar(Res, PrivLVal);
11875}
11876
11877llvm::Function *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
11878 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
11879 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
11880 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11880)
;
11881}
11882
11883llvm::Function *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
11884 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
11885 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
11886 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11886)
;
11887}
11888
11889llvm::Function *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
11890 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
11891 const VarDecl *PartIDVar, const VarDecl *TaskTVar,
11892 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
11893 bool Tied, unsigned &NumberOfParts) {
11894 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11894)
;
11895}
11896
11897void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
11898 SourceLocation Loc,
11899 llvm::Function *OutlinedFn,
11900 ArrayRef<llvm::Value *> CapturedVars,
11901 const Expr *IfCond) {
11902 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11902)
;
11903}
11904
11905void CGOpenMPSIMDRuntime::emitCriticalRegion(
11906 CodeGenFunction &CGF, StringRef CriticalName,
11907 const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
11908 const Expr *Hint) {
11909 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11909)
;
11910}
11911
11912void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
11913 const RegionCodeGenTy &MasterOpGen,
11914 SourceLocation Loc) {
11915 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11915)
;
11916}
11917
11918void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
11919 SourceLocation Loc) {
11920 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11920)
;
11921}
11922
11923void CGOpenMPSIMDRuntime::emitTaskgroupRegion(
11924 CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen,
11925 SourceLocation Loc) {
11926 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11926)
;
11927}
11928
11929void CGOpenMPSIMDRuntime::emitSingleRegion(
11930 CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen,
11931 SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars,
11932 ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs,
11933 ArrayRef<const Expr *> AssignmentOps) {
11934 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11934)
;
11935}
11936
11937void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF,
11938 const RegionCodeGenTy &OrderedOpGen,
11939 SourceLocation Loc,
11940 bool IsThreads) {
11941 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11941)
;
11942}
11943
11944void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF,
11945 SourceLocation Loc,
11946 OpenMPDirectiveKind Kind,
11947 bool EmitChecks,
11948 bool ForceSimpleCall) {
11949 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11949)
;
11950}
11951
11952void CGOpenMPSIMDRuntime::emitForDispatchInit(
11953 CodeGenFunction &CGF, SourceLocation Loc,
11954 const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
11955 bool Ordered, const DispatchRTInput &DispatchValues) {
11956 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11956)
;
11957}
11958
11959void CGOpenMPSIMDRuntime::emitForStaticInit(
11960 CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
11961 const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
11962 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11962)
;
11963}
11964
11965void CGOpenMPSIMDRuntime::emitDistributeStaticInit(
11966 CodeGenFunction &CGF, SourceLocation Loc,
11967 OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) {
11968 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11968)
;
11969}
11970
11971void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
11972 SourceLocation Loc,
11973 unsigned IVSize,
11974 bool IVSigned) {
11975 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11975)
;
11976}
11977
11978void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF,
11979 SourceLocation Loc,
11980 OpenMPDirectiveKind DKind) {
11981 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11981)
;
11982}
11983
11984llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
11985 SourceLocation Loc,
11986 unsigned IVSize, bool IVSigned,
11987 Address IL, Address LB,
11988 Address UB, Address ST) {
11989 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11989)
;
11990}
11991
11992void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
11993 llvm::Value *NumThreads,
11994 SourceLocation Loc) {
11995 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 11995)
;
11996}
11997
11998void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF,
11999 ProcBindKind ProcBind,
12000 SourceLocation Loc) {
12001 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12001)
;
12002}
12003
12004Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
12005 const VarDecl *VD,
12006 Address VDAddr,
12007 SourceLocation Loc) {
12008 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12008)
;
12009}
12010
12011llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition(
12012 const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit,
12013 CodeGenFunction *CGF) {
12014 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12014)
;
12015}
12016
12017Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
12018 CodeGenFunction &CGF, QualType VarType, StringRef Name) {
12019 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12019)
;
12020}
12021
12022void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
12023 ArrayRef<const Expr *> Vars,
12024 SourceLocation Loc,
12025 llvm::AtomicOrdering AO) {
12026 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12026)
;
12027}
12028
12029void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
12030 const OMPExecutableDirective &D,
12031 llvm::Function *TaskFunction,
12032 QualType SharedsTy, Address Shareds,
12033 const Expr *IfCond,
12034 const OMPTaskDataTy &Data) {
12035 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12035)
;
12036}
12037
12038void CGOpenMPSIMDRuntime::emitTaskLoopCall(
12039 CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
12040 llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds,
12041 const Expr *IfCond, const OMPTaskDataTy &Data) {
12042 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12042)
;
12043}
12044
12045void CGOpenMPSIMDRuntime::emitReduction(
12046 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
12047 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
12048 ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
12049 assert(Options.SimpleReduction && "Only simple reduction is expected.")((Options.SimpleReduction && "Only simple reduction is expected."
) ? static_cast<void> (0) : __assert_fail ("Options.SimpleReduction && \"Only simple reduction is expected.\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12049, __PRETTY_FUNCTION__))
;
12050 CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
12051 ReductionOps, Options);
12052}
12053
12054llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
12055 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
12056 ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
12057 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12057)
;
12058}
12059
12060void CGOpenMPSIMDRuntime::emitTaskReductionFini(CodeGenFunction &CGF,
12061 SourceLocation Loc,
12062 bool IsWorksharingReduction) {
12063 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12063)
;
12064}
12065
12066void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
12067 SourceLocation Loc,
12068 ReductionCodeGen &RCG,
12069 unsigned N) {
12070 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12070)
;
12071}
12072
12073Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
12074 SourceLocation Loc,
12075 llvm::Value *ReductionsPtr,
12076 LValue SharedLVal) {
12077 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12077)
;
12078}
12079
12080void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
12081 SourceLocation Loc) {
12082 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12082)
;
12083}
12084
12085void CGOpenMPSIMDRuntime::emitCancellationPointCall(
12086 CodeGenFunction &CGF, SourceLocation Loc,
12087 OpenMPDirectiveKind CancelRegion) {
12088 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12088)
;
12089}
12090
12091void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF,
12092 SourceLocation Loc, const Expr *IfCond,
12093 OpenMPDirectiveKind CancelRegion) {
12094 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12094)
;
12095}
12096
12097void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
12098 const OMPExecutableDirective &D, StringRef ParentName,
12099 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
12100 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
12101 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12101)
;
12102}
12103
12104void CGOpenMPSIMDRuntime::emitTargetCall(
12105 CodeGenFunction &CGF, const OMPExecutableDirective &D,
12106 llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
12107 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
12108 llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
12109 const OMPLoopDirective &D)>
12110 SizeEmitter) {
12111 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12111)
;
12112}
12113
12114bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) {
12115 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12115)
;
12116}
12117
12118bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
12119 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12119)
;
12120}
12121
bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) {
  // SIMD-only mode never claims a global for device emission; returning
  // false lets the caller emit GD through the normal host path.
  return false;
}
12125
12126void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
12127 const OMPExecutableDirective &D,
12128 SourceLocation Loc,
12129 llvm::Function *OutlinedFn,
12130 ArrayRef<llvm::Value *> CapturedVars) {
12131 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12131)
;
12132}
12133
12134void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
12135 const Expr *NumTeams,
12136 const Expr *ThreadLimit,
12137 SourceLocation Loc) {
12138 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12138)
;
12139}
12140
12141void CGOpenMPSIMDRuntime::emitTargetDataCalls(
12142 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
12143 const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
12144 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12144)
;
12145}
12146
12147void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
12148 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
12149 const Expr *Device) {
12150 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12150)
;
12151}
12152
12153void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
12154 const OMPLoopDirective &D,
12155 ArrayRef<Expr *> NumIterations) {
12156 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12156)
;
12157}
12158
12159void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
12160 const OMPDependClause *C) {
12161 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12161)
;
12162}
12163
12164const VarDecl *
12165CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
12166 const VarDecl *NativeParam) const {
12167 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12167)
;
12168}
12169
12170Address
12171CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
12172 const VarDecl *NativeParam,
12173 const VarDecl *TargetParam) const {
12174 llvm_unreachable("Not supported in SIMD-only mode")::llvm::llvm_unreachable_internal("Not supported in SIMD-only mode"
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.cpp"
, 12174)
;
12175}

/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.h

1//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides a class for OpenMP runtime code generation.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
14#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
15
16#include "CGValue.h"
17#include "clang/AST/DeclOpenMP.h"
18#include "clang/AST/GlobalDecl.h"
19#include "clang/AST/Type.h"
20#include "clang/Basic/OpenMPKinds.h"
21#include "clang/Basic/SourceLocation.h"
22#include "llvm/ADT/DenseMap.h"
23#include "llvm/ADT/PointerIntPair.h"
24#include "llvm/ADT/SmallPtrSet.h"
25#include "llvm/ADT/StringMap.h"
26#include "llvm/ADT/StringSet.h"
27#include "llvm/Frontend/OpenMP/OMPConstants.h"
28#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/ValueHandle.h"
31#include "llvm/Support/AtomicOrdering.h"
32
33namespace llvm {
34class ArrayType;
35class Constant;
36class FunctionType;
37class GlobalVariable;
38class StructType;
39class Type;
40class Value;
41class OpenMPIRBuilder;
42} // namespace llvm
43
44namespace clang {
45class Expr;
46class OMPDependClause;
47class OMPExecutableDirective;
48class OMPLoopDirective;
49class VarDecl;
50class OMPDeclareReductionDecl;
51class IdentifierInfo;
52
53namespace CodeGen {
54class Address;
55class CodeGenFunction;
56class CodeGenModule;
57
58/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
59/// region.
60class PrePostActionTy {
61public:
62 explicit PrePostActionTy() {}
63 virtual void Enter(CodeGenFunction &CGF) {}
64 virtual void Exit(CodeGenFunction &CGF) {}
65 virtual ~PrePostActionTy() {}
66};
67
/// Class provides a way to call simple version of codegen for OpenMP region,
/// or an advanced one with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
  /// Type-erased address of the user-supplied callable, stored as an integer
  /// so any callable type can be carried without making this class a
  /// template.
  intptr_t CodeGen;
  typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  /// Typed trampoline that restores the callable's real type and invokes it.
  CodeGenTy Callback;
  /// Optional action whose hooks bracket the callback; mutable so it can be
  /// installed through a const reference (see setAction()).
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    // Undo the intptr_t erasure performed by the constructor.
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  /// Captures any callable except RegionCodeGenTy itself (excluded via
  /// enable_if so this does not hijack the copy constructor).
  /// NOTE(review): the callable is captured by address, so it must outlive
  /// this object — confirm at call sites that temporaries survive the
  /// codegen invocation.
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
                                     RegionCodeGenTy>::value> * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<std::remove_reference_t<Callable>>),
        PrePostAction(nullptr) {}
  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  void operator()(CodeGenFunction &CGF) const;
};
95
/// Clause data collected for an OpenMP task-based directive before the
/// runtime call is emitted.
struct OMPTaskDataTy final {
  // Data-sharing clause items and the helper expressions used to create and
  // initialize the corresponding private copies.
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  // Reduction clause items: shared expressions, originals, private copies
  // and the combiner operations, kept as parallel lists.
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionOrigs;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals;
  /// One depend clause: its kind, the optional iterator expression, and the
  /// dependence expressions it lists.
  struct DependData {
    OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
    const Expr *IteratorExpr = nullptr;
    SmallVector<const Expr *, 4> DepExprs;
    explicit DependData() = default;
    DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
        : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
  };
  SmallVector<DependData, 4> Dependences;
  // Emitted clause values packed with a boolean flag in the spare pointer
  // bit. NOTE(review): the flag's exact meaning (e.g. "clause present") is
  // set by the collectors — confirm at the use sites.
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  llvm::Value *Reductions = nullptr;
  unsigned NumberOfParts = 0;
  bool Tied = true;
  bool Nogroup = false;
  bool IsReductionWithTaskMod = false;
  bool IsWorksharingReduction = false;
};
128
/// Class intended to support codegen of all kinds of the reduction clauses.
class ReductionCodeGen {
private:
  /// Data required for codegen of one reduction clause item.
  struct ReductionData {
    /// Reference to the item shared between tasks to reduce into.
    const Expr *Shared = nullptr;
    /// Reference to the original item.
    const Expr *Ref = nullptr;
    /// Helper expression for generation of private copy.
    const Expr *Private = nullptr;
    /// Helper expression for generation reduction operation.
    const Expr *ReductionOp = nullptr;
    ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
                  const Expr *ReductionOp)
        : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
    }
  };
  /// List of reduction-based clauses. The SmallVectors below are parallel
  /// arrays indexed by the same reduction-item number N used throughout the
  /// public interface.
  SmallVector<ReductionData, 4> ClausesData;

  /// List of addresses of shared variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
  /// List of addresses of original variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
  /// Sizes of the reduction items in chars.
  SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
  /// Base declarations for the reduction items.
  SmallVector<const VarDecl *, 4> BaseDecls;

  /// Emits lvalue for shared expression.
  LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
  /// Emits upper bound for shared expression (if array section).
  LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
  /// Performs aggregate initialization.
  /// \param N Number of reduction item in the common list.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param SharedLVal Address of the original shared variable.
  /// \param DRD Declare reduction construct used for reduction item.
  void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
                                   Address PrivateAddr, LValue SharedLVal,
                                   const OMPDeclareReductionDecl *DRD);

public:
  ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
                   ArrayRef<const Expr *> Privates,
                   ArrayRef<const Expr *> ReductionOps);
  /// Emits lvalue for the shared and original reduction item.
  /// \param N Number of the reduction item.
  void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  /// \param Size Size of the type in chars.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
  /// Performs initialization of the private copy for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param DefaultInit Default initialization sequence that should be
  /// performed if no reduction specific initialization is found.
  /// \param SharedLVal Address of the original shared variable.
  void
  emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
                     LValue SharedLVal,
                     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
  /// Returns true if the private copy requires cleanups.
  bool needCleanups(unsigned N);
  /// Emits cleanup code for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
  /// Adjusts \p PrivatedAddr for using instead of the original variable
  /// address in normal operations.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                               Address PrivateAddr);
  /// Returns LValue for the reduction item.
  LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
  /// Returns LValue for the original reduction item.
  LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; }
  /// Returns the size of the reduction item (in chars and total number of
  /// elements in the item), or nullptr, if the size is a constant.
  std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
    return Sizes[N];
  }
  /// Returns the base declaration of the reduction item.
  const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
  /// Returns the reference expression of the reduction item.
  const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
  /// Returns true if the initialization of the reduction item uses initializer
  /// from declare reduction construct.
  bool usesReductionInitializer(unsigned N) const;
};
225
226class CGOpenMPRuntime {
227public:
228 /// Allows to disable automatic handling of functions used in target regions
229 /// as those marked as `omp declare target`.
230 class DisableAutoDeclareTargetRAII {
231 CodeGenModule &CGM;
232 bool SavedShouldMarkAsGlobal;
233
234 public:
235 DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
236 ~DisableAutoDeclareTargetRAII();
237 };
238
239 /// Manages list of nontemporal decls for the specified directive.
240 class NontemporalDeclsRAII {
241 CodeGenModule &CGM;
242 const bool NeedToPush;
243
244 public:
245 NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
246 ~NontemporalDeclsRAII();
247 };
248
249 /// Manages list of nontemporal decls for the specified directive.
250 class UntiedTaskLocalDeclsRAII {
251 CodeGenModule &CGM;
252 const bool NeedToPush;
253
254 public:
255 UntiedTaskLocalDeclsRAII(
256 CodeGenFunction &CGF,
257 const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
258 std::pair<Address, Address>> &LocalVars);
259 ~UntiedTaskLocalDeclsRAII();
260 };
261
262 /// Maps the expression for the lastprivate variable to the global copy used
263 /// to store new value because original variables are not mapped in inner
264 /// parallel regions. Only private copies are captured but we need also to
265 /// store private copy in shared address.
266 /// Also, stores the expression for the private loop counter and it
267 /// threaprivate name.
268 struct LastprivateConditionalData {
269 llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
270 DeclToUniqueName;
271 LValue IVLVal;
272 llvm::Function *Fn = nullptr;
273 bool Disabled = false;
274 };
275 /// Manages list of lastprivate conditional decls for the specified directive.
276 class LastprivateConditionalRAII {
277 enum class ActionToDo {
278 DoNotPush,
279 PushAsLastprivateConditional,
280 DisableLastprivateConditional,
281 };
282 CodeGenModule &CGM;
283 ActionToDo Action = ActionToDo::DoNotPush;
284
285 /// Check and try to disable analysis of inner regions for changes in
286 /// lastprivate conditional.
287 void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
288 llvm::DenseSet<CanonicalDeclPtr<const Decl>>
289 &NeedToAddForLPCsAsDisabled) const;
290
291 LastprivateConditionalRAII(CodeGenFunction &CGF,
292 const OMPExecutableDirective &S);
293
294 public:
295 explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
296 const OMPExecutableDirective &S,
297 LValue IVLVal);
298 static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
299 const OMPExecutableDirective &S);
300 ~LastprivateConditionalRAII();
301 };
302
303 llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }
304
305protected:
306 CodeGenModule &CGM;
307 StringRef FirstSeparator, Separator;
308
309 /// Constructor allowing to redefine the name separator for the variables.
310 explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
311 StringRef Separator);
312
313 /// Creates offloading entry for the provided entry ID \a ID,
314 /// address \a Addr, size \a Size, and flags \a Flags.
315 virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
316 uint64_t Size, int32_t Flags,
317 llvm::GlobalValue::LinkageTypes Linkage);
318
319 /// Helper to emit outlined function for 'target' directive.
320 /// \param D Directive to emit.
321 /// \param ParentName Name of the function that encloses the target region.
322 /// \param OutlinedFn Outlined function value to be defined by this call.
323 /// \param OutlinedFnID Outlined function ID value to be defined by this call.
324 /// \param IsOffloadEntry True if the outlined function is an offload entry.
325 /// \param CodeGen Lambda codegen specific to an accelerator device.
326 /// An outlined function may not be an entry if, e.g. the if clause always
327 /// evaluates to false.
328 virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
329 StringRef ParentName,
330 llvm::Function *&OutlinedFn,
331 llvm::Constant *&OutlinedFnID,
332 bool IsOffloadEntry,
333 const RegionCodeGenTy &CodeGen);
334
335 /// Emits object of ident_t type with info for source location.
336 /// \param Flags Flags for OpenMP location.
337 ///
338 llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
339 unsigned Flags = 0);
340
341 /// Returns pointer to ident_t type.
342 llvm::Type *getIdentTyPointerTy();
343
344 /// Gets thread id value for the current thread.
345 ///
346 llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
347
348 /// Get the function name of an outlined region.
349 // The name can be customized depending on the target.
350 //
351 virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
352
353 /// Emits \p Callee function call with arguments \p Args with location \p Loc.
354 void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
355 llvm::FunctionCallee Callee,
356 ArrayRef<llvm::Value *> Args = llvm::None) const;
357
358 /// Emits address of the word in a memory where current thread id is
359 /// stored.
360 virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
361
362 void setLocThreadIdInsertPt(CodeGenFunction &CGF,
363 bool AtCurrentPoint = false);
364 void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
365
366 /// Check if the default location must be constant.
367 /// Default is false to support OMPT/OMPD.
368 virtual bool isDefaultLocationConstant() const { return false; }
369
370 /// Returns additional flags that can be stored in reserved_2 field of the
371 /// default location.
372 virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
373
374 /// Returns default flags for the barriers depending on the directive, for
375 /// which this barier is going to be emitted.
376 static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
377
378 /// Get the LLVM type for the critical name.
379 llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;}
380
381 /// Returns corresponding lock object for the specified critical region
382 /// name. If the lock object does not exist it is created, otherwise the
383 /// reference to the existing copy is returned.
384 /// \param CriticalName Name of the critical region.
385 ///
386 llvm::Value *getCriticalRegionLock(StringRef CriticalName);
387
388private:
389 /// An OpenMP-IR-Builder instance.
390 llvm::OpenMPIRBuilder OMPBuilder;
391
392 /// Map for SourceLocation and OpenMP runtime library debug locations.
393 typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
394 OpenMPDebugLocMapTy OpenMPDebugLocMap;
395 /// The type for a microtask which gets passed to __kmpc_fork_call().
396 /// Original representation is:
397 /// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
398 llvm::FunctionType *Kmpc_MicroTy = nullptr;
399 /// Stores debug location and ThreadID for the function.
400 struct DebugLocThreadIdTy {
401 llvm::Value *DebugLoc;
402 llvm::Value *ThreadID;
403 /// Insert point for the service instructions.
404 llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
405 };
406 /// Map of local debug location, ThreadId and functions.
407 typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
408 OpenMPLocThreadIDMapTy;
409 OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
410 /// Map of UDRs and corresponding combiner/initializer.
411 typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
412 std::pair<llvm::Function *, llvm::Function *>>
413 UDRMapTy;
414 UDRMapTy UDRMap;
415 /// Map of functions and locally defined UDRs.
416 typedef llvm::DenseMap<llvm::Function *,
417 SmallVector<const OMPDeclareReductionDecl *, 4>>
418 FunctionUDRMapTy;
419 FunctionUDRMapTy FunctionUDRMap;
420 /// Map from the user-defined mapper declaration to its corresponding
421 /// functions.
422 llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
423 /// Map of functions and their local user-defined mappers.
424 using FunctionUDMMapTy =
425 llvm::DenseMap<llvm::Function *,
426 SmallVector<const OMPDeclareMapperDecl *, 4>>;
427 FunctionUDMMapTy FunctionUDMMap;
428 /// Maps local variables marked as lastprivate conditional to their internal
429 /// types.
430 llvm::DenseMap<llvm::Function *,
431 llvm::DenseMap<CanonicalDeclPtr<const Decl>,
432 std::tuple<QualType, const FieldDecl *,
433 const FieldDecl *, LValue>>>
434 LastprivateConditionalToTypes;
435 /// Maps function to the position of the untied task locals stack.
436 llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap;
437 /// Type kmp_critical_name, originally defined as typedef kmp_int32
438 /// kmp_critical_name[8];
439 llvm::ArrayType *KmpCriticalNameTy;
440 /// An ordered map of auto-generated variables to their unique names.
441 /// It stores variables with the following names: 1) ".gomp_critical_user_" +
442 /// <critical_section_name> + ".var" for "omp critical" directives; 2)
443 /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
444 /// variables.
445 llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
446 InternalVars;
447 /// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
448 llvm::Type *KmpRoutineEntryPtrTy = nullptr;
449 QualType KmpRoutineEntryPtrQTy;
450 /// Type typedef struct kmp_task {
451 /// void * shareds; /**< pointer to block of pointers to
452 /// shared vars */
453 /// kmp_routine_entry_t routine; /**< pointer to routine to call for
454 /// executing task */
455 /// kmp_int32 part_id; /**< part id for the task */
456 /// kmp_routine_entry_t destructors; /* pointer to function to invoke
457 /// deconstructors of firstprivate C++ objects */
458 /// } kmp_task_t;
459 QualType KmpTaskTQTy;
460 /// Saved kmp_task_t for task directive.
461 QualType SavedKmpTaskTQTy;
462 /// Saved kmp_task_t for taskloop-based directive.
463 QualType SavedKmpTaskloopTQTy;
464 /// Type typedef struct kmp_depend_info {
465 /// kmp_intptr_t base_addr;
466 /// size_t len;
467 /// struct {
468 /// bool in:1;
469 /// bool out:1;
470 /// } flags;
471 /// } kmp_depend_info_t;
472 QualType KmpDependInfoTy;
473 /// Type typedef struct kmp_task_affinity_info {
474 /// kmp_intptr_t base_addr;
475 /// size_t len;
476 /// struct {
477 /// bool flag1 : 1;
478 /// bool flag2 : 1;
479 /// kmp_int32 reserved : 30;
480 /// } flags;
481 /// } kmp_task_affinity_info_t;
482 QualType KmpTaskAffinityInfoTy;
483 /// struct kmp_dim { // loop bounds info casted to kmp_int64
484 /// kmp_int64 lo; // lower
485 /// kmp_int64 up; // upper
486 /// kmp_int64 st; // stride
487 /// };
488 QualType KmpDimTy;
489 /// Type struct __tgt_offload_entry{
490 /// void *addr; // Pointer to the offload entry info.
491 /// // (function or global)
492 /// char *name; // Name of the function or global.
493 /// size_t size; // Size of the entry info (0 if it a function).
494 /// int32_t flags;
495 /// int32_t reserved;
496 /// };
497 QualType TgtOffloadEntryQTy;
498 /// Entity that registers the offloading constants that were emitted so
499 /// far.
500 class OffloadEntriesInfoManagerTy {
501 CodeGenModule &CGM;
502
503 /// Number of entries registered so far.
504 unsigned OffloadingEntriesNum = 0;
505
506 public:
507 /// Base class of the entries info.
508 class OffloadEntryInfo {
509 public:
510 /// Kind of a given entry.
511 enum OffloadingEntryInfoKinds : unsigned {
512 /// Entry is a target region.
513 OffloadingEntryInfoTargetRegion = 0,
514 /// Entry is a declare target variable.
515 OffloadingEntryInfoDeviceGlobalVar = 1,
516 /// Invalid entry info.
517 OffloadingEntryInfoInvalid = ~0u
518 };
519
520 protected:
521 OffloadEntryInfo() = delete;
522 explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
523 explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
524 uint32_t Flags)
525 : Flags(Flags), Order(Order), Kind(Kind) {}
526 ~OffloadEntryInfo() = default;
527
528 public:
529 bool isValid() const { return Order != ~0u; }
530 unsigned getOrder() const { return Order; }
531 OffloadingEntryInfoKinds getKind() const { return Kind; }
532 uint32_t getFlags() const { return Flags; }
533 void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
534 llvm::Constant *getAddress() const {
535 return cast_or_null<llvm::Constant>(Addr);
536 }
537 void setAddress(llvm::Constant *V) {
538 assert(!Addr.pointsToAliveValue() && "Address has been set before!")((!Addr.pointsToAliveValue() && "Address has been set before!"
) ? static_cast<void> (0) : __assert_fail ("!Addr.pointsToAliveValue() && \"Address has been set before!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.h"
, 538, __PRETTY_FUNCTION__))
;
539 Addr = V;
540 }
541 static bool classof(const OffloadEntryInfo *Info) { return true; }
542
543 private:
544 /// Address of the entity that has to be mapped for offloading.
545 llvm::WeakTrackingVH Addr;
546
547 /// Flags associated with the device global.
548 uint32_t Flags = 0u;
549
550 /// Order this entry was emitted.
551 unsigned Order = ~0u;
552
553 OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
554 };
555
556 /// Return true if a there are no entries defined.
557 bool empty() const;
558 /// Return number of entries defined so far.
559 unsigned size() const { return OffloadingEntriesNum; }
560 OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}
561
562 //
563 // Target region entries related.
564 //
565
566 /// Kind of the target registry entry.
567 enum OMPTargetRegionEntryKind : uint32_t {
568 /// Mark the entry as target region.
569 OMPTargetRegionEntryTargetRegion = 0x0,
570 /// Mark the entry as a global constructor.
571 OMPTargetRegionEntryCtor = 0x02,
572 /// Mark the entry as a global destructor.
573 OMPTargetRegionEntryDtor = 0x04,
574 };
575
576 /// Target region entries info.
577 class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
578 /// Address that can be used as the ID of the entry.
579 llvm::Constant *ID = nullptr;
580
581 public:
582 OffloadEntryInfoTargetRegion()
583 : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
584 explicit OffloadEntryInfoTargetRegion(unsigned Order,
585 llvm::Constant *Addr,
586 llvm::Constant *ID,
587 OMPTargetRegionEntryKind Flags)
588 : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
589 ID(ID) {
590 setAddress(Addr);
591 }
592
593 llvm::Constant *getID() const { return ID; }
594 void setID(llvm::Constant *V) {
595 assert(!ID && "ID has been set before!")((!ID && "ID has been set before!") ? static_cast<
void> (0) : __assert_fail ("!ID && \"ID has been set before!\""
, "/build/llvm-toolchain-snapshot-12~++20200915100651+00ba1a3de7f/clang/lib/CodeGen/CGOpenMPRuntime.h"
, 595, __PRETTY_FUNCTION__))
;
596 ID = V;
597 }
598 static bool classof(const OffloadEntryInfo *Info) {
599 return Info->getKind() == OffloadingEntryInfoTargetRegion;
600 }
601 };
602
603 /// Initialize target region entry.
604 void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
605 StringRef ParentName, unsigned LineNum,
606 unsigned Order);
607 /// Register target region entry.
608 void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
609 StringRef ParentName, unsigned LineNum,
610 llvm::Constant *Addr, llvm::Constant *ID,
611 OMPTargetRegionEntryKind Flags);
612 /// Return true if a target region entry with the provided information
613 /// exists.
614 bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
615 StringRef ParentName, unsigned LineNum) const;
616 /// brief Applies action \a Action on all registered entries.
617 typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
618 const OffloadEntryInfoTargetRegion &)>
619 OffloadTargetRegionEntryInfoActTy;
620 void actOnTargetRegionEntriesInfo(
621 const OffloadTargetRegionEntryInfoActTy &Action);
622
623 //
624 // Device global variable entries related.
625 //
626
627 /// Kind of the global variable entry..
628 enum OMPTargetGlobalVarEntryKind : uint32_t {
629 /// Mark the entry as a to declare target.
630 OMPTargetGlobalVarEntryTo = 0x0,
631 /// Mark the entry as a to declare target link.
632 OMPTargetGlobalVarEntryLink = 0x1,
633 };
634
635 /// Device global variable entries info.
636 class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
637 /// Type of the global variable.
638 CharUnits VarSize;
639 llvm::GlobalValue::LinkageTypes Linkage;
640
641 public:
642 OffloadEntryInfoDeviceGlobalVar()
643 : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
644 explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
645 OMPTargetGlobalVarEntryKind Flags)
646 : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
647 explicit OffloadEntryInfoDeviceGlobalVar(
648 unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
649 OMPTargetGlobalVarEntryKind Flags,
650 llvm::GlobalValue::LinkageTypes Linkage)
651 : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
652 VarSize(VarSize), Linkage(Linkage) {
653 setAddress(Addr);
654 }
655
656 CharUnits getVarSize() const { return VarSize; }
657 void setVarSize(CharUnits Size) { VarSize = Size; }
658 llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
659 void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
660 static bool classof(const OffloadEntryInfo *Info) {
661 return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
662 }
663 };
664
665 /// Initialize device global variable entry.
666 void initializeDeviceGlobalVarEntryInfo(StringRef Name,
667 OMPTargetGlobalVarEntryKind Flags,
668 unsigned Order);
669
670 /// Register device global variable entry.
671 void
672 registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
673 CharUnits VarSize,
674 OMPTargetGlobalVarEntryKind Flags,
675 llvm::GlobalValue::LinkageTypes Linkage);
676 /// Checks if the variable with the given name has been registered already.
677 bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
678 return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
679 }
680 /// Applies action \a Action on all registered entries.
681 typedef llvm::function_ref<void(StringRef,
682 const OffloadEntryInfoDeviceGlobalVar &)>
683 OffloadDeviceGlobalVarEntryInfoActTy;
684 void actOnDeviceGlobalVarEntriesInfo(
685 const OffloadDeviceGlobalVarEntryInfoActTy &Action);
686
687 private:
688 // Storage for target region entries kind. The storage is to be indexed by
689 // file ID, device ID, parent function name and line number.
690 typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
691 OffloadEntriesTargetRegionPerLine;
692 typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
693 OffloadEntriesTargetRegionPerParentName;
694 typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
695 OffloadEntriesTargetRegionPerFile;
696 typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
697 OffloadEntriesTargetRegionPerDevice;
698 typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
699 OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
700 /// Storage for device global variable entries kind. The storage is to be
701 /// indexed by mangled name.
702 typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
703 OffloadEntriesDeviceGlobalVarTy;
704 OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
705 };
706 OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
707
708 bool ShouldMarkAsGlobal = true;
709 /// List of the emitted declarations.
710 llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
711 /// List of the global variables with their addresses that should not be
712 /// emitted for the target.
713 llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
714
715 /// List of variables that can become declare target implicitly and, thus,
716 /// must be emitted.
717 llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
718
719 using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
720 /// Stack for list of declarations in current context marked as nontemporal.
721 /// The set is the union of all current stack elements.
722 llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
723
724 using UntiedLocalVarsAddressesMap =
725 llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
726 std::pair<Address, Address>>;
727 llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack;
728
729 /// Stack for list of addresses of declarations in current context marked as
730 /// lastprivate conditional. The set is the union of all current stack
731 /// elements.
732 llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
733
734 /// Flag for keeping track of whether a requires unified_shared_memory
735 /// directive is present.
736 bool HasRequiresUnifiedSharedMemory = false;
737
738 /// Atomic ordering from the omp requires directive.
739 llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
740
741 /// Flag for keeping track of whether a target region has been emitted.
742 bool HasEmittedTargetRegion = false;
743
744 /// Flag for keeping track of whether a device routine has been emitted.
745 /// Device routines are specific to the device.
746 bool HasEmittedDeclareTargetRegion = false;
747
748 /// Loads all the offload entries information from the host IR
749 /// metadata.
750 void loadOffloadInfoMetadata();
751
752 /// Returns __tgt_offload_entry type.
753 QualType getTgtOffloadEntryQTy();
754
755 /// Start scanning from statement \a S and emit all target regions
756 /// found along the way.
757 /// \param S Starting statement.
758 /// \param ParentName Name of the function declaration that is being scanned.
759 void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
760
761 /// Build type kmp_routine_entry_t (if not built yet).
762 void emitKmpRoutineEntryT(QualType KmpInt32Ty);
763
764 /// Returns pointer to kmpc_micro type.
765 llvm::Type *getKmpc_MicroPointerTy();
766
767 /// Returns __kmpc_for_static_init_* runtime function for the specified
768 /// size \a IVSize and sign \a IVSigned.
769 llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
770 bool IVSigned);
771
772 /// Returns __kmpc_dispatch_init_* runtime function for the specified
773 /// size \a IVSize and sign \a IVSigned.
774 llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
775 bool IVSigned);
776
777 /// Returns __kmpc_dispatch_next_* runtime function for the specified
778 /// size \a IVSize and sign \a IVSigned.
779 llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
780 bool IVSigned);
781
782 /// Returns __kmpc_dispatch_fini_* runtime function for the specified
783 /// size \a IVSize and sign \a IVSigned.
784 llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
785 bool IVSigned);
786
787 /// If the specified mangled name is not in the module, create and
788 /// return threadprivate cache object. This object is a pointer's worth of
789 /// storage that's reserved for use by the OpenMP runtime.
790 /// \param VD Threadprivate variable.
791 /// \return Cache variable for the specified threadprivate.
792 llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
793
794 /// Gets (if variable with the given name already exist) or creates
795 /// internal global variable with the specified Name. The created variable has
796 /// linkage CommonLinkage by default and is initialized by null value.
797 /// \param Ty Type of the global variable. If it is exist already the type
798 /// must be the same.
799 /// \param Name Name of the variable.
800 llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
801 const llvm::Twine &Name,
802 unsigned AddressSpace = 0);
803
804 /// Set of threadprivate variables with the generated initializer.
805 llvm::StringSet<> ThreadPrivateWithDefinition;
806
807 /// Set of declare target variables with the generated initializer.
808 llvm::StringSet<> DeclareTargetWithDefinition;
809
810 /// Emits initialization code for the threadprivate variables.
811 /// \param VDAddr Address of the global variable \a VD.
812 /// \param Ctor Pointer to a global init function for \a VD.
813 /// \param CopyCtor Pointer to a global copy function for \a VD.
814 /// \param Dtor Pointer to a global destructor function for \a VD.
815 /// \param Loc Location of threadprivate declaration.
816 void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
817 llvm::Value *Ctor, llvm::Value *CopyCtor,
818 llvm::Value *Dtor, SourceLocation Loc);
819
820 /// Emit the array initialization or deletion portion for user-defined mapper
821 /// code generation.
822 void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
823 llvm::Value *Handle, llvm::Value *BasePtr,
824 llvm::Value *Ptr, llvm::Value *Size,
825 llvm::Value *MapType, CharUnits ElementSize,
826 llvm::BasicBlock *ExitBB, bool IsInit);
827
/// Bundle of values produced by emitTaskInit (see its documentation below)
/// while materializing a task object for task/taskloop codegen.
828 struct TaskResultTy {
/// Result of the __kmpc_omp_task_alloc call — presumably the new
/// kmp_task_t * returned by the runtime; TODO confirm at emitTaskInit.
829 llvm::Value *NewTask = nullptr;
/// The generated .omp_task_entry. thunk that calls TaskFunction
/// (per the emitTaskInit description below).
830 llvm::Function *TaskEntry = nullptr;
/// NOTE(review): presumably NewTask cast to the kmp_task_t record type —
/// confirm at the definition site.
831 llvm::Value *NewTaskNewTaskTTy = nullptr;
/// LValue for the base of the task descriptor — assumption; verify against
/// emitTaskInit's implementation.
832 LValue TDBase;
/// Record declaration backing KmpTaskTQTy (the kmp_task_t struct declared
/// at the top of this class).
833 const RecordDecl *KmpTaskTQTyRD = nullptr;
/// Task duplication function — presumably used for taskloop firstprivates;
/// confirm at the caller.
834 llvm::Value *TaskDupFn = nullptr;
835 };
836 /// Emit task region for the task directive. The task region is emitted in
837 /// several steps:
838 /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
839 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
840 /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
841 /// function:
842 /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
843 /// TaskFunction(gtid, tt->part_id, tt->shareds);
844 /// return 0;
845 /// }
846 /// 2. Copy a list of shared variables to field shareds of the resulting
847 /// structure kmp_task_t returned by the previous call (if any).
848 /// 3. Copy a pointer to destructions function to field destructions of the
849 /// resulting structure kmp_task_t.
850 /// \param D Current task directive.
851 /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
852 /// /*part_id*/, captured_struct */*__context*/);
853 /// \param SharedsTy A type which contains references the shared variables.
854 /// \param Shareds Context with the list of shared variables from the \p
855 /// TaskFunction.
856 /// \param Data Additional data for task generation like tiednsee, final
857 /// state, list of privates etc.
858 TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
859 const OMPExecutableDirective &D,
860 llvm::Function *TaskFunction, QualType SharedsTy,
861 Address Shareds, const OMPTaskDataTy &Data);
862
863 /// Returns default address space for the constant firstprivates, 0 by
864 /// default.
/// NOTE(review): virtual so device-specific runtime subclasses can place
/// constant firstprivates in another address space — confirm at overrides.
865 virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
866
867 /// Emit code that pushes the trip count of loops associated with constructs
868 /// 'target teams distribute' and 'teams distribute parallel for'.
869 /// \param SizeEmitter Emits the int64 value for the number of iterations of
870 /// the associated loop.
871 void emitTargetNumIterationsCall(
872 CodeGenFunction &CGF, const OMPExecutableDirective &D,
873 llvm::Value *DeviceID,
874 llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
875 const OMPLoopDirective &D)>
876 SizeEmitter);
877
878 /// Emit update for lastprivate conditional data.
879 void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
880 StringRef UniqueDeclName, LValue LVal,
881 SourceLocation Loc);
882
883 /// Returns the number of the elements and the address of the depobj
884 /// dependency array.
885 /// \return Number of elements in depobj array and the pointer to the array of
886 /// dependencies.
887 std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
888 LValue DepobjLVal,
889 SourceLocation Loc);
890
891public:
/// Constructs the host OpenMP runtime, delegating with "." for both string
/// arguments (presumably the platform name separators used by getName — the
/// delegated constructor is declared outside this view; confirm there).
892 explicit CGOpenMPRuntime(CodeGenModule &CGM)
893 : CGOpenMPRuntime(CGM, ".", ".") {}
/// Virtual destructor: CGOpenMPRuntime is a polymorphic base, as shown by
/// the many virtual emit* hooks declared below.
894 virtual ~CGOpenMPRuntime() {}
895 virtual void clear();
896
897 /// Emits code for OpenMP 'if' clause using specified \a CodeGen
898 /// function. Here is the logic:
899 /// if (Cond) {
900 /// ThenGen();
901 /// } else {
902 /// ElseGen();
903 /// }
904 void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
905 const RegionCodeGenTy &ThenGen,
906 const RegionCodeGenTy &ElseGen);
907
908 /// Checks if the \p Body is the \a CompoundStmt and returns its child
909 /// statement iff there is only one that is not evaluatable at the compile
910 /// time.
911 static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
912
913 /// Get the platform-specific name separator.
914 std::string getName(ArrayRef<StringRef> Parts) const;
915
916 /// Emit code for the specified user defined reduction construct.
917 virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
918 const OMPDeclareReductionDecl *D);
919 /// Get combiner/initializer for the specified user-defined reduction, if any.
920 virtual std::pair<llvm::Function *, llvm::Function *>
921 getUserDefinedReduction(const OMPDeclareReductionDecl *D);
922
923 /// Emit the function for the user defined mapper construct.
924 void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
925 CodeGenFunction *CGF = nullptr);
926 /// Get the function for the specified user-defined mapper. If it does not
927 /// exist, create one.
928 llvm::Function *
929 getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D);
930
931 /// Emits outlined function for the specified OpenMP parallel directive
932 /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
933 /// kmp_int32 BoundID, struct context_vars*).
934 /// \param D OpenMP directive.
935 /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
936 /// \param InnermostKind Kind of innermost directive (for simple directives it
937 /// is a directive itself, for combined - its innermost directive).
938 /// \param CodeGen Code generation sequence for the \a D directive.
939 virtual llvm::Function *emitParallelOutlinedFunction(
940 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
941 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
942
943 /// Emits outlined function for the specified OpenMP teams directive
944 /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
945 /// kmp_int32 BoundID, struct context_vars*).
946 /// \param D OpenMP directive.
947 /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
948 /// \param InnermostKind Kind of innermost directive (for simple directives it
949 /// is a directive itself, for combined - its innermost directive).
950 /// \param CodeGen Code generation sequence for the \a D directive.
951 virtual llvm::Function *emitTeamsOutlinedFunction(
952 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
953 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
954
955 /// Emits outlined function for the OpenMP task directive \a D. This
956 /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
957 /// TaskT).
958 /// \param D OpenMP directive.
959 /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
960 /// \param PartIDVar Variable for partition id in the current OpenMP untied
961 /// task region.
962 /// \param TaskTVar Variable for task_t argument.
963 /// \param InnermostKind Kind of innermost directive (for simple directives it
964 /// is a directive itself, for combined - its innermost directive).
965 /// \param CodeGen Code generation sequence for the \a D directive.
966 /// \param Tied true if task is generated for tied task, false otherwise.
967 /// \param NumberOfParts Number of parts in untied task. Ignored for tied
968 /// tasks.
969 ///
970 virtual llvm::Function *emitTaskOutlinedFunction(
971 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
972 const VarDecl *PartIDVar, const VarDecl *TaskTVar,
973 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
974 bool Tied, unsigned &NumberOfParts);
975
976 /// Cleans up references to the objects in finished function.
977 ///
978 virtual void functionFinished(CodeGenFunction &CGF);
979
980 /// Emits code for parallel or serial call of the \a OutlinedFn with
981 /// variables captured in a record which address is stored in \a
982 /// CapturedStruct.
983 /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
984 /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
985 /// \param CapturedVars A pointer to the record with the references to
986 /// variables used in \a OutlinedFn function.
987 /// \param IfCond Condition in the associated 'if' clause, if it was
988 /// specified, nullptr otherwise.
989 ///
990 virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
991 llvm::Function *OutlinedFn,
992 ArrayRef<llvm::Value *> CapturedVars,
993 const Expr *IfCond);
994
995 /// Emits a critical region.
996 /// \param CriticalName Name of the critical region.
997 /// \param CriticalOpGen Generator for the statement associated with the given
998 /// critical region.
999 /// \param Hint Value of the 'hint' clause (optional).
1000 virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
1001 const RegionCodeGenTy &CriticalOpGen,
1002 SourceLocation Loc,
1003 const Expr *Hint = nullptr);
1004
1005 /// Emits a master region.
1006 /// \param MasterOpGen Generator for the statement associated with the given
1007 /// master region.
1008 virtual void emitMasterRegion(CodeGenFunction &CGF,
1009 const RegionCodeGenTy &MasterOpGen,
1010 SourceLocation Loc);
1011
1012 /// Emits code for a taskyield directive.
1013 virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
1014
1015 /// Emit a taskgroup region.
1016 /// \param TaskgroupOpGen Generator for the statement associated with the
1017 /// given taskgroup region.
1018 virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
1019 const RegionCodeGenTy &TaskgroupOpGen,
1020 SourceLocation Loc);
1021
1022 /// Emits a single region.
1023 /// \param SingleOpGen Generator for the statement associated with the given
1024 /// single region.
1025 virtual void emitSingleRegion(CodeGenFunction &CGF,
1026 const RegionCodeGenTy &SingleOpGen,
1027 SourceLocation Loc,
1028 ArrayRef<const Expr *> CopyprivateVars,
1029 ArrayRef<const Expr *> DestExprs,
1030 ArrayRef<const Expr *> SrcExprs,
1031 ArrayRef<const Expr *> AssignmentOps);
1032
1033 /// Emit an ordered region.
1034 /// \param OrderedOpGen Generator for the statement associated with the given
1035 /// ordered region.
1036 virtual void emitOrderedRegion(CodeGenFunction &CGF,
1037 const RegionCodeGenTy &OrderedOpGen,
1038 SourceLocation Loc, bool IsThreads);
1039
1040 /// Emit an implicit/explicit barrier for OpenMP threads.
1041 /// \param Kind Directive for which this implicit barrier call must be
1042 /// generated. Must be OMPD_barrier for explicit barrier generation.
1043 /// \param EmitChecks true if need to emit checks for cancellation barriers.
1044 /// \param ForceSimpleCall true simple barrier call must be emitted, false if
1045 /// runtime class decides which one to emit (simple or with cancellation
1046 /// checks).
1047 ///
1048 virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
1049 OpenMPDirectiveKind Kind,
1050 bool EmitChecks = true,
1051 bool ForceSimpleCall = false);
1052
1053 /// Check if the specified \a ScheduleKind is static non-chunked.
1054 /// This kind of worksharing directive is emitted without outer loop.
1055 /// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
1056 /// \param Chunked True if chunk is specified in the clause.
1057 ///
1058 virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
1059 bool Chunked) const;
1060
1061 /// Check if the specified \a ScheduleKind is static non-chunked.
1062 /// This kind of distribute directive is emitted without outer loop.
1063 /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
1064 /// \param Chunked True if chunk is specified in the clause.
1065 ///
1066 virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
1067 bool Chunked) const;
1068
1069 /// Check if the specified \a ScheduleKind is static chunked.
1070 /// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
1071 /// \param Chunked True if chunk is specified in the clause.
1072 ///
1073 virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
1074 bool Chunked) const;
1075
1076 /// Check if the specified \a ScheduleKind is static non-chunked.
1077 /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
1078 /// \param Chunked True if chunk is specified in the clause.
1079 ///
1080 virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
1081 bool Chunked) const;
1082
1083 /// Check if the specified \a ScheduleKind is dynamic.
1084 /// This kind of worksharing directive is emitted without outer loop.
1085 /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
1086 ///
1087 virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
1088
1089 /// struct with the values to be passed to the dispatch runtime function
1090 struct DispatchRTInput {
1091 /// Loop lower bound
1092 llvm::Value *LB = nullptr;
1093 /// Loop upper bound
1094 llvm::Value *UB = nullptr;
1095 /// Chunk size specified using 'schedule' clause (nullptr if chunk
1096 /// was not specified)
1097 llvm::Value *Chunk = nullptr;
1098 DispatchRTInput() = default;
1099 DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
1100 : LB(LB), UB(UB), Chunk(Chunk) {}
1101 };
1102
1103 /// Call the appropriate runtime routine to initialize it before start
1104 /// of loop.
1105
1106 /// This is used for non static scheduled types and when the ordered
1107 /// clause is present on the loop construct.
1108 /// Depending on the loop schedule, it is necessary to call some runtime
1109 /// routine before start of the OpenMP loop to get the loop upper / lower
1110 /// bounds \a LB and \a UB and stride \a ST.
1111 ///
1112 /// \param CGF Reference to current CodeGenFunction.
1113 /// \param Loc Clang source location.
1114 /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
1115 /// \param IVSize Size of the iteration variable in bits.
1116 /// \param IVSigned Sign of the iteration variable.
1117 /// \param Ordered true if loop is ordered, false otherwise.
1118 /// \param DispatchValues struct containing llvm values for lower bound, upper
1119 /// bound, and chunk expression.
1120 /// For the default (nullptr) value, the chunk 1 will be used.
1121 ///
1122 virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
1123 const OpenMPScheduleTy &ScheduleKind,
1124 unsigned IVSize, bool IVSigned, bool Ordered,
1125 const DispatchRTInput &DispatchValues);
1126
1127 /// Struct with the values to be passed to the static runtime function
1128 struct StaticRTInput {
1129 /// Size of the iteration variable in bits.
1130 unsigned IVSize = 0;
1131 /// Sign of the iteration variable.
1132 bool IVSigned = false;
1133 /// true if loop is ordered, false otherwise.
1134 bool Ordered = false;
1135 /// Address of the output variable in which the flag of the last iteration
1136 /// is returned.
1137 Address IL = Address::invalid();
1138 /// Address of the output variable in which the lower iteration number is
1139 /// returned.
1140 Address LB = Address::invalid();
1141 /// Address of the output variable in which the upper iteration number is
1142 /// returned.
1143 Address UB = Address::invalid();
1144 /// Address of the output variable in which the stride value is returned
1145 /// necessary to generated the static_chunked scheduled loop.
1146 Address ST = Address::invalid();
1147 /// Value of the chunk for the static_chunked scheduled loop. For the
1148 /// default (nullptr) value, the chunk 1 will be used.
1149 llvm::Value *Chunk = nullptr;
1150 StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
1151 Address LB, Address UB, Address ST,
1152 llvm::Value *Chunk = nullptr)
1153 : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
1154 UB(UB), ST(ST), Chunk(Chunk) {}
1155 };
1156 /// Call the appropriate runtime routine to initialize it before start
1157 /// of loop.
1158 ///
1159 /// This is used only in case of static schedule, when the user did not
1160 /// specify a ordered clause on the loop construct.
1161 /// Depending on the loop schedule, it is necessary to call some runtime
1162 /// routine before start of the OpenMP loop to get the loop upper / lower
1163 /// bounds LB and UB and stride ST.
1164 ///
1165 /// \param CGF Reference to current CodeGenFunction.
1166 /// \param Loc Clang source location.
1167 /// \param DKind Kind of the directive.
1168 /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
1169 /// \param Values Input arguments for the construct.
1170 ///
1171 virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
1172 OpenMPDirectiveKind DKind,
1173 const OpenMPScheduleTy &ScheduleKind,
1174 const StaticRTInput &Values);
1175
1176 ///
1177 /// \param CGF Reference to current CodeGenFunction.
1178 /// \param Loc Clang source location.
1179 /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
1180 /// \param Values Input arguments for the construct.
1181 ///
1182 virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
1183 SourceLocation Loc,
1184 OpenMPDistScheduleClauseKind SchedKind,
1185 const StaticRTInput &Values);
1186
1187 /// Call the appropriate runtime routine to notify that we finished
1188 /// iteration of the ordered loop with the dynamic scheduling.
1189 ///
1190 /// \param CGF Reference to current CodeGenFunction.
1191 /// \param Loc Clang source location.
1192 /// \param IVSize Size of the iteration variable in bits.
1193 /// \param IVSigned Sign of the iteration variable.
1194 ///
1195 virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
1196 SourceLocation Loc, unsigned IVSize,
1197 bool IVSigned);
1198
1199 /// Call the appropriate runtime routine to notify that we finished
1200 /// all the work with current loop.
1201 ///
1202 /// \param CGF Reference to current CodeGenFunction.
1203 /// \param Loc Clang source location.
1204 /// \param DKind Kind of the directive for which the static finish is emitted.
1205 ///
1206 virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
1207 OpenMPDirectiveKind DKind);
1208
1209 /// Call __kmpc_dispatch_next(
1210 /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
1211 /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
1212 /// kmp_int[32|64] *p_stride);
1213 /// \param IVSize Size of the iteration variable in bits.
1214 /// \param IVSigned Sign of the iteration variable.
1215 /// \param IL Address of the output variable in which the flag of the
1216 /// last iteration is returned.
1217 /// \param LB Address of the output variable in which the lower iteration
1218 /// number is returned.
1219 /// \param UB Address of the output variable in which the upper iteration
1220 /// number is returned.
1221 /// \param ST Address of the output variable in which the stride value is
1222 /// returned.
1223 virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
1224 unsigned IVSize, bool IVSigned,
1225 Address IL, Address LB,
1226 Address UB, Address ST);
1227
1228 /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
1229 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
1230 /// clause.
1231 /// \param NumThreads An integer value of threads.
1232 virtual void emitNumThreadsClause(CodeGenFunction &CGF,
1233 llvm::Value *NumThreads,
1234 SourceLocation Loc);
1235
1236 /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
1237 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
1238 virtual void emitProcBindClause(CodeGenFunction &CGF,
1239 llvm::omp::ProcBindKind ProcBind,
1240 SourceLocation Loc);
1241
1242 /// Returns address of the threadprivate variable for the current
1243 /// thread.
1244 /// \param VD Threadprivate variable.
1245 /// \param VDAddr Address of the global variable \a VD.
1246 /// \param Loc Location of the reference to threadprivate var.
1247 /// \return Address of the threadprivate variable for the current thread.
1248 virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
1249 const VarDecl *VD,
1250 Address VDAddr,
1251 SourceLocation Loc);
1252
1253 /// Returns the address of the variable marked as declare target with link
1254 /// clause OR as declare target with to clause and unified memory.
1255 virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
1256
1257 /// Emit a code for initialization of threadprivate variable. It emits
1258 /// a call to runtime library which adds initial value to the newly created
1259 /// threadprivate variable (if it is not constant) and registers destructor
1260 /// for the variable (if any).
1261 /// \param VD Threadprivate variable.
1262 /// \param VDAddr Address of the global variable \a VD.
1263 /// \param Loc Location of threadprivate declaration.
1264 /// \param PerformInit true if initialization expression is not constant.
1265 virtual llvm::Function *
1266 emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
1267 SourceLocation Loc, bool PerformInit,
1268 CodeGenFunction *CGF = nullptr);
1269
1270 /// Emit a code for initialization of declare target variable.
1271 /// \param VD Declare target variable.
1272 /// \param Addr Address of the global variable \a VD.
1273 /// \param PerformInit true if initialization expression is not constant.
1274 virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
1275 llvm::GlobalVariable *Addr,
1276 bool PerformInit);
1277
1278 /// Creates artificial threadprivate variable with name \p Name and type \p
1279 /// VarType.
1280 /// \param VarType Type of the artificial threadprivate variable.
1281 /// \param Name Name of the artificial threadprivate variable.
1282 virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
1283 QualType VarType,
1284 StringRef Name);
1285
1286 /// Emit flush of the variables specified in 'omp flush' directive.
1287 /// \param Vars List of variables to flush.
1288 virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
1289 SourceLocation Loc, llvm::AtomicOrdering AO);
1290
1291 /// Emit task region for the task directive. The task region is
1292 /// emitted in several steps:
1293 /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
1294 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1295 /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
1296 /// function:
1297 /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
1298 /// TaskFunction(gtid, tt->part_id, tt->shareds);
1299 /// return 0;
1300 /// }
1301 /// 2. Copy a list of shared variables to field shareds of the resulting
1302 /// structure kmp_task_t returned by the previous call (if any).
1303 /// 3. Copy a pointer to destructions function to field destructions of the
1304 /// resulting structure kmp_task_t.
1305 /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
1306 /// kmp_task_t *new_task), where new_task is a resulting structure from
1307 /// previous items.
1308 /// \param D Current task directive.
1309 /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
1310 /// /*part_id*/, captured_struct */*__context*/);
1311 /// \param SharedsTy A type which contains references the shared variables.
1312 /// \param Shareds Context with the list of shared variables from the \p
1313 /// TaskFunction.
1314 /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
1315 /// otherwise.
1316 /// \param Data Additional data for task generation like tiedness, final
1317 /// state, list of privates etc.
1318 virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
1319 const OMPExecutableDirective &D,
1320 llvm::Function *TaskFunction, QualType SharedsTy,
1321 Address Shareds, const Expr *IfCond,
1322 const OMPTaskDataTy &Data);
1323
1324 /// Emit task region for the taskloop directive. The taskloop region is
1325 /// emitted in several steps:
1326 /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
1327 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1328 /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
1329 /// function:
1330 /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
1331 /// TaskFunction(gtid, tt->part_id, tt->shareds);
1332 /// return 0;
1333 /// }
1334 /// 2. Copy a list of shared variables to field shareds of the resulting
1335 /// structure kmp_task_t returned by the previous call (if any).
1336 /// 3. Copy a pointer to destructions function to field destructions of the
1337 /// resulting structure kmp_task_t.
1338 /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
1339 /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
1340 /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
1341 /// is a resulting structure from
1342 /// previous items.
1343 /// \param D Current task directive.
1344 /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
1345 /// /*part_id*/, captured_struct */*__context*/);
1346 /// \param SharedsTy A type which contains references the shared variables.
1347 /// \param Shareds Context with the list of shared variables from the \p
1348 /// TaskFunction.
1349 /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
1350 /// otherwise.
1351 /// \param Data Additional data for task generation like tiedness, final
1352 /// state, list of privates etc.
1353 virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
1354 const OMPLoopDirective &D,
1355 llvm::Function *TaskFunction,
1356 QualType SharedsTy, Address Shareds,
1357 const Expr *IfCond, const OMPTaskDataTy &Data);
1358
1359 /// Emit code for the directive that does not require outlining.
1360 ///
1361 /// \param InnermostKind Kind of innermost directive (for simple directives it
1362 /// is a directive itself, for combined - its innermost directive).
1363 /// \param CodeGen Code generation sequence for the \a D directive.
1364 /// \param HasCancel true if region has inner cancel directive, false
1365 /// otherwise.
1366 virtual void emitInlinedDirective(CodeGenFunction &CGF,
1367 OpenMPDirectiveKind InnermostKind,
1368 const RegionCodeGenTy &CodeGen,
1369 bool HasCancel = false);
1370
1371 /// Emits reduction function.
1372 /// \param ArgsType Array type containing pointers to reduction variables.
1373 /// \param Privates List of private copies for original reduction arguments.
1374 /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
1375 /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
1376 /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
1377 /// or 'operator binop(LHS, RHS)'.
1378 llvm::Function *emitReductionFunction(SourceLocation Loc,
1379 llvm::Type *ArgsType,
1380 ArrayRef<const Expr *> Privates,
1381 ArrayRef<const Expr *> LHSExprs,
1382 ArrayRef<const Expr *> RHSExprs,
1383 ArrayRef<const Expr *> ReductionOps);
1384
1385 /// Emits single reduction combiner
1386 void emitSingleReductionCombiner(CodeGenFunction &CGF,
1387 const Expr *ReductionOp,
1388 const Expr *PrivateRef,
1389 const DeclRefExpr *LHS,
1390 const DeclRefExpr *RHS);
1391
1392 struct ReductionOptionsTy {
1393 bool WithNowait;
1394 bool SimpleReduction;
1395 OpenMPDirectiveKind ReductionKind;
1396 };
1397 /// Emit a code for reduction clause. Next code should be emitted for
1398 /// reduction:
1399 /// \code
1400 ///
1401 /// static kmp_critical_name lock = { 0 };
1402 ///
1403 /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
1404 /// ...
1405 /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
1406 /// ...
1407 /// }
1408 ///
1409 /// ...
1410 /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
1411 /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
1412 /// RedList, reduce_func, &<lock>)) {
1413 /// case 1:
1414 /// ...
1415 /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
1416 /// ...
1417 /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
1418 /// break;
1419 /// case 2:
1420 /// ...
1421 /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
1422 /// ...
1423 /// break;
1424 /// default:;
1425 /// }
1426 /// \endcode
1427 ///
1428 /// \param Privates List of private copies for original reduction arguments.
1429 /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
1430 /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
1431 /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
1432 /// or 'operator binop(LHS, RHS)'.
1433 /// \param Options List of options for reduction codegen:
1434 /// WithNowait true if parent directive has also nowait clause, false
1435 /// otherwise.
1436 /// SimpleReduction Emit reduction operation only. Used for omp simd
1437 /// directive on the host.
1438 /// ReductionKind The kind of reduction to perform.
1439 virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
1440 ArrayRef<const Expr *> Privates,
1441 ArrayRef<const Expr *> LHSExprs,
1442 ArrayRef<const Expr *> RHSExprs,
1443 ArrayRef<const Expr *> ReductionOps,
1444 ReductionOptionsTy Options);
1445
1446 /// Emit a code for initialization of task reduction clause. Next code
1447 /// should be emitted for reduction:
1448 /// \code
1449 ///
1450 /// _taskred_item_t red_data[n];
1451 /// ...
1452 /// red_data[i].shar = &shareds[i];
1453 /// red_data[i].orig = &origs[i];
1454 /// red_data[i].size = sizeof(origs[i]);
1455 /// red_data[i].f_init = (void*)RedInit<i>;
1456 /// red_data[i].f_fini = (void*)RedDest<i>;
1457 /// red_data[i].f_comb = (void*)RedOp<i>;
1458 /// red_data[i].flags = <Flag_i>;
1459 /// ...
1460 /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
1461 /// \endcode
1462 /// For reduction clause with task modifier it emits the next call:
1463 /// \code
1464 ///
1465 /// _taskred_item_t red_data[n];
1466 /// ...
1467 /// red_data[i].shar = &shareds[i];
1468 /// red_data[i].orig = &origs[i];
1469 /// red_data[i].size = sizeof(origs[i]);
1470 /// red_data[i].f_init = (void*)RedInit<i>;
1471 /// red_data[i].f_fini = (void*)RedDest<i>;
1472 /// red_data[i].f_comb = (void*)RedOp<i>;
1473 /// red_data[i].flags = <Flag_i>;
1474 /// ...
1475 /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
1476 /// red_data);
1477 /// \endcode
1478 /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
1479 /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
1480 /// \param Data Additional data for task generation like tiedness, final
1481 /// state, list of privates, reductions etc.
1482 virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
1483 SourceLocation Loc,
1484 ArrayRef<const Expr *> LHSExprs,
1485 ArrayRef<const Expr *> RHSExprs,
1486 const OMPTaskDataTy &Data);
1487
1488 /// Emits the following code for reduction clause with task modifier:
1489 /// \code
1490 /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
1491 /// \endcode
1492 virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
1493 bool IsWorksharingReduction);
1494
1495 /// Required to resolve existing problems in the runtime. Emits threadprivate
1496 /// variables to store the size of the VLAs/array sections for
1497 /// initializer/combiner/finalizer functions.
1498 /// \param RCG Allows to reuse an existing data for the reductions.
1499 /// \param N Reduction item for which fixups must be emitted.
1500 virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
1501 ReductionCodeGen &RCG, unsigned N);
1502
1503 /// Get the address of `void *` type of the private copy of the reduction
1504 /// item specified by the \p SharedLVal.
1505 /// \param ReductionsPtr Pointer to the reduction data returned by the
1506 /// emitTaskReductionInit function.
1507 /// \param SharedLVal Address of the original reduction item.
1508 virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
1509 llvm::Value *ReductionsPtr,
1510 LValue SharedLVal);
1511
1512 /// Emit code for 'taskwait' directive.
1513 virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
1514
1515 /// Emit code for 'cancellation point' construct.
1516 /// \param CancelRegion Region kind for which the cancellation point must be
1517 /// emitted.
1518 ///
1519 virtual void emitCancellationPointCall(CodeGenFunction &CGF,
1520 SourceLocation Loc,
1521 OpenMPDirectiveKind CancelRegion);
1522
1523 /// Emit code for 'cancel' construct.
1524 /// \param IfCond Condition in the associated 'if' clause, if it was
1525 /// specified, nullptr otherwise.
1526 /// \param CancelRegion Region kind for which the cancel must be emitted.
1527 ///
1528 virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
1529 const Expr *IfCond,
1530 OpenMPDirectiveKind CancelRegion);
1531
1532 /// Emit outlined function for 'target' directive.
1533 /// \param D Directive to emit.
1534 /// \param ParentName Name of the function that encloses the target region.
1535 /// \param OutlinedFn Outlined function value to be defined by this call.
1536 /// \param OutlinedFnID Outlined function ID value to be defined by this call.
1537 /// \param IsOffloadEntry True if the outlined function is an offload entry.
1538 /// \param CodeGen Code generation sequence for the \a D directive.
1539 /// An outlined function may not be an entry if, e.g. the if clause always
1540 /// evaluates to false.
1541 virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
1542 StringRef ParentName,
1543 llvm::Function *&OutlinedFn,
1544 llvm::Constant *&OutlinedFnID,
1545 bool IsOffloadEntry,
1546 const RegionCodeGenTy &CodeGen);
1547
1548 /// Emit the target offloading code associated with \a D. The emitted
1549 /// code attempts offloading the execution to the device; in the event of
1550 /// a failure it executes the host version outlined in \a OutlinedFn.
1551 /// \param D Directive to emit.
1552 /// \param OutlinedFn Host version of the code to be offloaded.
1553 /// \param OutlinedFnID ID of host version of the code to be offloaded.
1554 /// \param IfCond Expression evaluated in if clause associated with the target
1555 /// directive, or null if no if clause is used.
1556 /// \param Device Expression evaluated in device clause associated with the
1557 /// target directive (or null if no device clause is used), together with the
1557 /// device clause modifier.
1558 /// \param SizeEmitter Callback to emit number of iterations for loop-based
1559 /// directives.
1560 virtual void emitTargetCall(
1561 CodeGenFunction &CGF, const OMPExecutableDirective &D,
1562 llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
1563 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
1564 llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
1565 const OMPLoopDirective &D)>
1566 SizeEmitter);
1567
1568 /// Emit the target regions enclosed in \a GD function definition or
1569 /// the function itself in case it is a valid device function. Returns true if
1570 /// \a GD was dealt with successfully.
1571 /// \param GD Function to scan.
1572 virtual bool emitTargetFunctions(GlobalDecl GD);
1573
1574 /// Emit the global variable if it is a valid device global variable.
1575 /// Returns true if \a GD was dealt with successfully.
1576 /// \param GD Variable declaration to emit.
1577 virtual bool emitTargetGlobalVariable(GlobalDecl GD);
1578
1579 /// Checks if the provided global decl \a GD is a declare target variable and
1580 /// registers it when emitting code for the host.
1581 virtual void registerTargetGlobalVariable(const VarDecl *VD,
1582 llvm::Constant *Addr);
1583
1584 /// Registers provided target firstprivate variable as global on the
1585 /// target.
1586 llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
1587 const VarDecl *VD);
1588
1589 /// Emit the global \a GD if it is meaningful for the target. Returns
1590 /// if it was emitted successfully.
1591 /// \param GD Global to scan.
1592 virtual bool emitTargetGlobal(GlobalDecl GD);
1593
1594 /// Creates and returns a registration function for when at least one
1595 /// requires directives was used in the current module.
1596 llvm::Function *emitRequiresDirectiveRegFun();
1597
1598 /// Creates all the offload entries in the current compilation unit
1599 /// along with the associated metadata.
1600 void createOffloadEntriesAndInfoMetadata();
1601
1602 /// Emits code for teams call of the \a OutlinedFn with
1603 /// variables captured in a record which address is stored in \a
1604 /// CapturedStruct.
1605 /// \param OutlinedFn Outlined function to be run by team masters. Type of
1606 /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
1607 /// \param CapturedVars A pointer to the record with the references to
1608 /// variables used in \a OutlinedFn function.
1609 ///
1610 virtual void emitTeamsCall(CodeGenFunction &CGF,
1611 const OMPExecutableDirective &D,
1612 SourceLocation Loc, llvm::Function *OutlinedFn,
1613 ArrayRef<llvm::Value *> CapturedVars);
1614
1615 /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
1616 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
1617 /// for num_teams clause.
1618 /// \param NumTeams An integer expression of teams.
1619 /// \param ThreadLimit An integer expression of threads.
1620 virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
1621 const Expr *ThreadLimit, SourceLocation Loc);
1622
1623 /// Struct that keeps all the relevant information that should be kept
1624 /// throughout a 'target data' region.
1625 class TargetDataInfo {
1626 /// Set to true if device pointer information have to be obtained.
1627 bool RequiresDevicePointerInfo = false;
1628 /// Set to true if Clang emits separate runtime calls for the beginning and
1629 /// end of the region. These calls might have separate map type arrays.
1630 bool SeparateBeginEndCalls = false;
1631
1632 public:
1633 /// The array of base pointer passed to the runtime library.
1634 llvm::Value *BasePointersArray = nullptr;
1635 /// The array of section pointers passed to the runtime library.
1636 llvm::Value *PointersArray = nullptr;
1637 /// The array of sizes passed to the runtime library.
1638 llvm::Value *SizesArray = nullptr;
1639 /// The array of map types passed to the runtime library for the beginning
1640 /// of the region or for the entire region if there are no separate map
1641 /// types for the region end.
1642 llvm::Value *MapTypesArray = nullptr;
1643 /// The array of map types passed to the runtime library for the end of the
1644 /// region, or nullptr if there are no separate map types for the region
1645 /// end.
1646 llvm::Value *MapTypesArrayEnd = nullptr;
1647 /// The array of user-defined mappers passed to the runtime library.
1648 llvm::Value *MappersArray = nullptr;
1649 /// Indicate whether any user-defined mapper exists.
1650 bool HasMapper = false;
1651 /// The total number of pointers passed to the runtime library.
1652 unsigned NumberOfPtrs = 0u;
1653 /// Map between the a declaration of a capture and the corresponding base
1654 /// pointer address where the runtime returns the device pointers.
1655 llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
1656
1657 explicit TargetDataInfo() {}
1658 explicit TargetDataInfo(bool RequiresDevicePointerInfo,
1659 bool SeparateBeginEndCalls)
1660 : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
1661 SeparateBeginEndCalls(SeparateBeginEndCalls) {}
1662 /// Clear information about the data arrays.
1663 void clearArrayInfo() {
1664 BasePointersArray = nullptr;
1665 PointersArray = nullptr;
1666 SizesArray = nullptr;
1667 MapTypesArray = nullptr;
1668 MapTypesArrayEnd = nullptr;
1669 MappersArray = nullptr;
1670 HasMapper = false;
1671 NumberOfPtrs = 0u;
1672 }
1673 /// Return true if the current target data information has valid arrays.
1674 bool isValid() {
1675 return BasePointersArray && PointersArray && SizesArray &&
1676 MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs;
1677 }
1678 bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
1679 bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
1680 };
1681
1682 /// Emit the target data mapping code associated with \a D.
1683 /// \param D Directive to emit.
1684 /// \param IfCond Expression evaluated in if clause associated with the
1685 /// target directive, or null if no if clause is used.
1686 /// \param Device Expression evaluated in device clause associated with the
1687 /// target directive, or null if no device clause is used.
1688 /// \param Info A record used to store information that needs to be preserved
1689 /// until the region is closed.
1690 virtual void emitTargetDataCalls(CodeGenFunction &CGF,
1691 const OMPExecutableDirective &D,
1692 const Expr *IfCond, const Expr *Device,
1693 const RegionCodeGenTy &CodeGen,
1694 TargetDataInfo &Info);
1695
1696 /// Emit the data mapping/movement code associated with the directive
1697 /// \a D that should be of the form 'target [{enter|exit} data | update]'.
1698 /// \param D Directive to emit.
1699 /// \param IfCond Expression evaluated in if clause associated with the target
1700 /// directive, or null if no if clause is used.
1701 /// \param Device Expression evaluated in device clause associated with the
1702 /// target directive, or null if no device clause is used.
1703 virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
1704 const OMPExecutableDirective &D,
1705 const Expr *IfCond,
1706 const Expr *Device);
1707
1708 /// Marks function \a Fn with properly mangled versions of vector functions.
1709 /// \param FD Function marked as 'declare simd'.
1710 /// \param Fn LLVM function that must be marked with 'declare simd'
1711 /// attributes.
1712 virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
1713 llvm::Function *Fn);
1714
1715 /// Emit initialization for doacross loop nesting support.
1716 /// \param D Loop-based construct used in doacross nesting construct.
1717 virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
1718 ArrayRef<Expr *> NumIterations);
1719
1720 /// Emit code for doacross ordered directive with 'depend' clause.
1721 /// \param C 'depend' clause with 'sink|source' dependency kind.
1722 virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
1723 const OMPDependClause *C);
1724
  /// Translates the native parameter of outlined function if this is required
  /// for target.
  /// \param FD Field decl from captured record for the parameter.
  /// \param NativeParam Parameter itself.
  /// \return The declaration to use for the parameter. This default
  /// implementation performs no translation and returns \p NativeParam
  /// unchanged; device-specific runtimes may override it to substitute a
  /// target-adjusted declaration.
  virtual const VarDecl *translateParameter(const FieldDecl *FD,
                                            const VarDecl *NativeParam) const {
    return NativeParam;
  }
1733
1734 /// Gets the address of the native argument basing on the address of the
1735 /// target-specific parameter.
1736 /// \param NativeParam Parameter itself.
1737 /// \param TargetParam Corresponding target-specific parameter.
1738 virtual Address getParameterAddress(CodeGenFunction &CGF,
1739 const VarDecl *NativeParam,
1740 const VarDecl *TargetParam) const;
1741
  /// Choose default schedule type and chunk value for the
  /// dist_schedule clause.
  /// \param S Loop-based directive to pick the defaults for.
  /// \param ScheduleKind [out] Chosen dist_schedule kind; this default
  /// implementation is a no-op and leaves it untouched.
  /// \param Chunk [out] Chosen chunk value; likewise left untouched here.
  /// Device-specific runtimes may override this to select schedules suited
  /// to the target.
  virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
      const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
      llvm::Value *&Chunk) const {}
1747
1748 /// Choose default schedule type and chunk value for the
1749 /// schedule clause.
1750 virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
1751 const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
1752 const Expr *&ChunkExpr) const;
1753
1754 /// Emits call of the outlined function with the provided arguments,
1755 /// translating these arguments to correct target-specific arguments.
1756 virtual void
1757 emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
1758 llvm::FunctionCallee OutlinedFn,
1759 ArrayRef<llvm::Value *> Args = llvm::None) const;
1760
1761 /// Emits OpenMP-specific function prolog.
1762 /// Required for device constructs.
1763 virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
1764
1765 /// Gets the OpenMP-specific address of the local variable.
1766 virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
1767 const VarDecl *VD);
1768
1769 /// Marks the declaration as already emitted for the device code and returns
1770 /// true, if it was marked already, and false, otherwise.
1771 bool markAsGlobalTarget(GlobalDecl GD);
1772
1773 /// Emit deferred declare target variables marked for deferred emission.
1774 void emitDeferredTargetDecls() const;
1775
1776 /// Adjust some parameters for the target-based directives, like addresses of
1777 /// the variables captured by reference in lambdas.
1778 virtual void
1779 adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
1780 const OMPExecutableDirective &D) const;
1781
1782 /// Perform check on requires decl to ensure that target architecture
1783 /// supports unified addressing
1784 virtual void processRequiresDirective(const OMPRequiresDecl *D);
1785
1786 /// Gets default memory ordering as specified in requires directive.
1787 llvm::AtomicOrdering getDefaultMemoryOrdering() const;
1788
1789 /// Checks if the variable has associated OMPAllocateDeclAttr attribute with
1790 /// the predefined allocator and translates it into the corresponding address
1791 /// space.
1792 virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
1793
1794 /// Return whether the unified_shared_memory has been specified.
1795 bool hasRequiresUnifiedSharedMemory() const;
1796
1797 /// Checks if the \p VD variable is marked as nontemporal declaration in
1798 /// current context.
1799 bool isNontemporalDecl(const ValueDecl *VD) const;
1800
1801 /// Create specialized alloca to handle lastprivate conditionals.
1802 Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
1803 const VarDecl *VD);
1804
1805 /// Checks if the provided \p LVal is lastprivate conditional and emits the
1806 /// code to update the value of the original variable.
1807 /// \code
1808 /// lastprivate(conditional: a)
1809 /// ...
1810 /// <type> a;
1811 /// lp_a = ...;
1812 /// #pragma omp critical(a)
1813 /// if (last_iv_a <= iv) {
1814 /// last_iv_a = iv;
1815 /// global_a = lp_a;
1816 /// }
1817 /// \endcode
1818 virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
1819 const Expr *LHS);
1820
1821 /// Checks if the lastprivate conditional was updated in inner region and
1822 /// writes the value.
1823 /// \code
1824 /// lastprivate(conditional: a)
1825 /// ...
1826 /// <type> a;bool Fired = false;
1827 /// #pragma omp ... shared(a)
1828 /// {
1829 /// lp_a = ...;
1830 /// Fired = true;
1831 /// }
1832 /// if (Fired) {
1833 /// #pragma omp critical(a)
1834 /// if (last_iv_a <= iv) {
1835 /// last_iv_a = iv;
1836 /// global_a = lp_a;
1837 /// }
1838 /// Fired = false;
1839 /// }
1840 /// \endcode
1841 virtual void checkAndEmitSharedLastprivateConditional(
1842 CodeGenFunction &CGF, const OMPExecutableDirective &D,
1843 const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
1844
1845 /// Gets the address of the global copy used for lastprivate conditional
1846 /// update, if any.
1847 /// \param PrivLVal LValue for the private copy.
1848 /// \param VD Original lastprivate declaration.
1849 virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
1850 LValue PrivLVal,
1851 const VarDecl *VD,
1852 SourceLocation Loc);
1853
1854 /// Emits list of dependencies based on the provided data (array of
1855 /// dependence/expression pairs).
1856 /// \returns Pointer to the first element of the array casted to VoidPtr type.
1857 std::pair<llvm::Value *, Address>
1858 emitDependClause(CodeGenFunction &CGF,
1859 ArrayRef<OMPTaskDataTy::DependData> Dependencies,
1860 SourceLocation Loc);
1861
1862 /// Emits list of dependencies based on the provided data (array of
1863 /// dependence/expression pairs) for depobj construct. In this case, the
1864 /// variable is allocated in dynamically. \returns Pointer to the first
1865 /// element of the array casted to VoidPtr type.
1866 Address emitDepobjDependClause(CodeGenFunction &CGF,
1867 const OMPTaskDataTy::DependData &Dependencies,
1868 SourceLocation Loc);
1869
1870 /// Emits the code to destroy the dependency object provided in depobj
1871 /// directive.
1872 void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
1873 SourceLocation Loc);
1874
1875 /// Updates the dependency kind in the specified depobj object.
1876 /// \param DepobjLVal LValue for the main depobj object.
1877 /// \param NewDepKind New dependency kind.
1878 void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
1879 OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
1880
1881 /// Initializes user defined allocators specified in the uses_allocators
1882 /// clauses.
1883 void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
1884 const Expr *AllocatorTraits);
1885
1886 /// Destroys user defined allocators specified in the uses_allocators clause.
1887 void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
1888
1889 /// Returns true if the variable is a local variable in untied task.
1890 bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const;
1891};
1892
1893/// Class supports emission of SIMD-only code.
1894class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
1895public:
1896 explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
1897 ~CGOpenMPSIMDRuntime() override {}
1898
1899 /// Emits outlined function for the specified OpenMP parallel directive
1900 /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
1901 /// kmp_int32 BoundID, struct context_vars*).
1902 /// \param D OpenMP directive.
1903 /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
1904 /// \param InnermostKind Kind of innermost directive (for simple directives it
1905 /// is a directive itself, for combined - its innermost directive).
1906 /// \param CodeGen Code generation sequence for the \a D directive.
1907 llvm::Function *
1908 emitParallelOutlinedFunction(const OMPExecutableDirective &D,
1909 const VarDecl *ThreadIDVar,
1910 OpenMPDirectiveKind InnermostKind,
1911 const RegionCodeGenTy &CodeGen) override;
1912
1913 /// Emits outlined function for the specified OpenMP teams directive
1914 /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
1915 /// kmp_int32 BoundID, struct context_vars*).
1916 /// \param D OpenMP directive.
1917 /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
1918 /// \param InnermostKind Kind of innermost directive (for simple directives it
1919 /// is a directive itself, for combined - its innermost directive).
1920 /// \param CodeGen Code generation sequence for the \a D directive.
1921 llvm::Function *
1922 emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
1923 const VarDecl *ThreadIDVar,
1924 OpenMPDirectiveKind InnermostKind,
1925 const RegionCodeGenTy &CodeGen) override;
1926
1927 /// Emits outlined function for the OpenMP task directive \a D. This
1928 /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
1929 /// TaskT).
1930 /// \param D OpenMP directive.
1931 /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
1932 /// \param PartIDVar Variable for partition id in the current OpenMP untied
1933 /// task region.
1934 /// \param TaskTVar Variable for task_t argument.
1935 /// \param InnermostKind Kind of innermost directive (for simple directives it
1936 /// is a directive itself, for combined - its innermost directive).
1937 /// \param CodeGen Code generation sequence for the \a D directive.
1938 /// \param Tied true if task is generated for tied task, false otherwise.
1939 /// \param NumberOfParts Number of parts in untied task. Ignored for tied
1940 /// tasks.
1941 ///
1942 llvm::Function *emitTaskOutlinedFunction(
1943 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1944 const VarDecl *PartIDVar, const VarDecl *TaskTVar,
1945 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1946 bool Tied, unsigned &NumberOfParts) override;
1947
1948 /// Emits code for parallel or serial call of the \a OutlinedFn with
1949 /// variables captured in a record which address is stored in \a
1950 /// CapturedStruct.
1951 /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
1952 /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
1953 /// \param CapturedVars A pointer to the record with the references to
1954 /// variables used in \a OutlinedFn function.
1955 /// \param IfCond Condition in the associated 'if' clause, if it was
1956 /// specified, nullptr otherwise.
1957 ///
1958 void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
1959 llvm::Function *OutlinedFn,
1960 ArrayRef<llvm::Value *> CapturedVars,
1961 const Expr *IfCond) override;
1962
1963 /// Emits a critical region.
1964 /// \param CriticalName Name of the critical region.
1965 /// \param CriticalOpGen Generator for the statement associated with the given
1966 /// critical region.
1967 /// \param Hint Value of the 'hint' clause (optional).
1968 void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
1969 const RegionCodeGenTy &CriticalOpGen,
1970 SourceLocation Loc,
1971 const Expr *Hint = nullptr) override;
1972
1973 /// Emits a master region.
1974 /// \param MasterOpGen Generator for the statement associated with the given
1975 /// master region.
1976 void emitMasterRegion(CodeGenFunction &CGF,
1977 const RegionCodeGenTy &MasterOpGen,
1978 SourceLocation Loc) override;
1979
1980 /// Emits code for a taskyield directive.
1981 void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
1982
1983 /// Emit a taskgroup region.
1984 /// \param TaskgroupOpGen Generator for the statement associated with the
1985 /// given taskgroup region.
1986 void emitTaskgroupRegion(CodeGenFunction &CGF,
1987 const RegionCodeGenTy &TaskgroupOpGen,
1988 SourceLocation Loc) override;
1989
1990 /// Emits a single region.
1991 /// \param SingleOpGen Generator for the statement associated with the given
1992 /// single region.
1993 void emitSingleRegion(CodeGenFunction &CGF,
1994 const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
1995 ArrayRef<const Expr *> CopyprivateVars,
1996 ArrayRef<const Expr *> DestExprs,
1997 ArrayRef<const Expr *> SrcExprs,
1998 ArrayRef<const Expr *> AssignmentOps) override;
1999
2000 /// Emit an ordered region.
2001 /// \param OrderedOpGen Generator for the statement associated with the given
2002 /// ordered region.
2003 void emitOrderedRegion(CodeGenFunction &CGF,
2004 const RegionCodeGenTy &OrderedOpGen,
2005 SourceLocation Loc, bool IsThreads) override;
2006
2007 /// Emit an implicit/explicit barrier for OpenMP threads.
2008 /// \param Kind Directive for which this implicit barrier call must be
2009 /// generated. Must be OMPD_barrier for explicit barrier generation.
2010 /// \param EmitChecks true if need to emit checks for cancellation barriers.
2011 /// \param ForceSimpleCall true simple barrier call must be emitted, false if
2012 /// runtime class decides which one to emit (simple or with cancellation
2013 /// checks).
2014 ///
2015 void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
2016 OpenMPDirectiveKind Kind, bool EmitChecks = true,
2017 bool ForceSimpleCall = false) override;
2018
2019 /// This is used for non static scheduled types and when the ordered
2020 /// clause is present on the loop construct.
2021 /// Depending on the loop schedule, it is necessary to call some runtime
2022 /// routine before start of the OpenMP loop to get the loop upper / lower
2023 /// bounds \a LB and \a UB and stride \a ST.
2024 ///
2025 /// \param CGF Reference to current CodeGenFunction.
2026 /// \param Loc Clang source location.
2027 /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
2028 /// \param IVSize Size of the iteration variable in bits.
2029 /// \param IVSigned Sign of the iteration variable.
2030 /// \param Ordered true if loop is ordered, false otherwise.
2031 /// \param DispatchValues struct containing llvm values for lower bound, upper
2032 /// bound, and chunk expression.
2033 /// For the default (nullptr) value, the chunk 1 will be used.
2034 ///
2035 void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
2036 const OpenMPScheduleTy &ScheduleKind,
2037 unsigned IVSize, bool IVSigned, bool Ordered,
2038 const DispatchRTInput &DispatchValues) override;
2039
2040 /// Call the appropriate runtime routine to initialize it before start
2041 /// of loop.
2042 ///
2043 /// This is used only in case of static schedule, when the user did not
2044 /// specify a ordered clause on the loop construct.
2045 /// Depending on the loop schedule, it is necessary to call some runtime
2046 /// routine before start of the OpenMP loop to get the loop upper / lower
2047 /// bounds LB and UB and stride ST.
2048 ///
2049 /// \param CGF Reference to current CodeGenFunction.
2050 /// \param Loc Clang source location.
2051 /// \param DKind Kind of the directive.
2052 /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
2053 /// \param Values Input arguments for the construct.
2054 ///
2055 void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
2056 OpenMPDirectiveKind DKind,
2057 const OpenMPScheduleTy &ScheduleKind,
2058 const StaticRTInput &Values) override;
2059
2060 ///
2061 /// \param CGF Reference to current CodeGenFunction.
2062 /// \param Loc Clang source location.
2063 /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
2064 /// \param Values Input arguments for the construct.
2065 ///
2066 void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
2067 OpenMPDistScheduleClauseKind SchedKind,
2068 const StaticRTInput &Values) override;
2069
2070 /// Call the appropriate runtime routine to notify that we finished
2071 /// iteration of the ordered loop with the dynamic scheduling.
2072 ///
2073 /// \param CGF Reference to current CodeGenFunction.
2074 /// \param Loc Clang source location.
2075 /// \param IVSize Size of the iteration variable in bits.
2076 /// \param IVSigned Sign of the iteration variable.
2077 ///
2078 void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
2079 unsigned IVSize, bool IVSigned) override;
2080
2081 /// Call the appropriate runtime routine to notify that we finished
2082 /// all the work with current loop.
2083 ///
2084 /// \param CGF Reference to current CodeGenFunction.
2085 /// \param Loc Clang source location.
2086 /// \param DKind Kind of the directive for which the static finish is emitted.
2087 ///
2088 void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
2089 OpenMPDirectiveKind DKind) override;
2090
2091 /// Call __kmpc_dispatch_next(
2092 /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
2093 /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
2094 /// kmp_int[32|64] *p_stride);
2095 /// \param IVSize Size of the iteration variable in bits.
2096 /// \param IVSigned Sign of the iteration variable.
2097 /// \param IL Address of the output variable in which the flag of the
2098 /// last iteration is returned.
2099 /// \param LB Address of the output variable in which the lower iteration
2100 /// number is returned.
2101 /// \param UB Address of the output variable in which the upper iteration
2102 /// number is returned.
2103 /// \param ST Address of the output variable in which the stride value is
2104 /// returned.
2105 llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
2106 unsigned IVSize, bool IVSigned, Address IL,
2107 Address LB, Address UB, Address ST) override;
2108
2109 /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
2110 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
2111 /// clause.
2112 /// \param NumThreads An integer value of threads.
2113 void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
2114 SourceLocation Loc) override;
2115
2116 /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
2117 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
2118 void emitProcBindClause(CodeGenFunction &CGF,
2119 llvm::omp::ProcBindKind ProcBind,
2120 SourceLocation Loc) override;
2121
2122 /// Returns address of the threadprivate variable for the current
2123 /// thread.
2124 /// \param VD Threadprivate variable.
2125 /// \param VDAddr Address of the global variable \a VD.
2126 /// \param Loc Location of the reference to threadprivate var.
2127 /// \return Address of the threadprivate variable for the current thread.
2128 Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
2129 Address VDAddr, SourceLocation Loc) override;
2130
2131 /// Emit a code for initialization of threadprivate variable. It emits
2132 /// a call to runtime library which adds initial value to the newly created
2133 /// threadprivate variable (if it is not constant) and registers destructor
2134 /// for the variable (if any).
2135 /// \param VD Threadprivate variable.
2136 /// \param VDAddr Address of the global variable \a VD.
2137 /// \param Loc Location of threadprivate declaration.
2138 /// \param PerformInit true if initialization expression is not constant.
2139 llvm::Function *
2140 emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
2141 SourceLocation Loc, bool PerformInit,
2142 CodeGenFunction *CGF = nullptr) override;
2143
2144 /// Creates artificial threadprivate variable with name \p Name and type \p
2145 /// VarType.
2146 /// \param VarType Type of the artificial threadprivate variable.
2147 /// \param Name Name of the artificial threadprivate variable.
2148 Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
2149 QualType VarType,
2150 StringRef Name) override;
2151
2152 /// Emit flush of the variables specified in 'omp flush' directive.
2153 /// \param Vars List of variables to flush.
2154 void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
2155 SourceLocation Loc, llvm::AtomicOrdering AO) override;
2156
2157 /// Emit task region for the task directive. The task region is
2158 /// emitted in several steps:
2159 /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
2160 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
2161 /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
2162 /// function:
2163 /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
2164 /// TaskFunction(gtid, tt->part_id, tt->shareds);
2165 /// return 0;
2166 /// }
2167 /// 2. Copy a list of shared variables to field shareds of the resulting
2168 /// structure kmp_task_t returned by the previous call (if any).
2169 /// 3. Copy a pointer to destructions function to field destructions of the
2170 /// resulting structure kmp_task_t.
2171 /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
2172 /// kmp_task_t *new_task), where new_task is a resulting structure from
2173 /// previous items.
2174 /// \param D Current task directive.
2175 /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
2176 /// /*part_id*/, captured_struct */*__context*/);
2177 /// \param SharedsTy A type which contains references the shared variables.
2178 /// \param Shareds Context with the list of shared variables from the \p
2179 /// TaskFunction.
2180 /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
2181 /// otherwise.
2182 /// \param Data Additional data for task generation like tiednsee, final
2183 /// state, list of privates etc.
2184 void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
2185 const OMPExecutableDirective &D,
2186 llvm::Function *TaskFunction, QualType SharedsTy,
2187 Address Shareds, const Expr *IfCond,
2188 const OMPTaskDataTy &Data) override;
2189
2190 /// Emit task region for the taskloop directive. The taskloop region is
2191 /// emitted in several steps:
2192 /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
2193 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
2194 /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
2195 /// function:
2196 /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
2197 /// TaskFunction(gtid, tt->part_id, tt->shareds);
2198 /// return 0;
2199 /// }
2200 /// 2. Copy a list of shared variables to field shareds of the resulting
2201 /// structure kmp_task_t returned by the previous call (if any).
2202 /// 3. Copy a pointer to destructions function to field destructions of the
2203 /// resulting structure kmp_task_t.
2204 /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
2205 /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
2206 /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
2207 /// is a resulting structure from
2208 /// previous items.
2209 /// \param D Current task directive.
2210 /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
2211 /// /*part_id*/, captured_struct */*__context*/);
2212 /// \param SharedsTy A type which contains references the shared variables.
2213 /// \param Shareds Context with the list of shared variables from the \p
2214 /// TaskFunction.
2215 /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
2216 /// otherwise.
2217 /// \param Data Additional data for task generation like tiednsee, final
2218 /// state, list of privates etc.
2219 void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
2220 const OMPLoopDirective &D, llvm::Function *TaskFunction,
2221 QualType SharedsTy, Address Shareds, const Expr *IfCond,
2222 const OMPTaskDataTy &Data) override;
2223
2224 /// Emit a code for reduction clause. Next code should be emitted for
2225 /// reduction:
2226 /// \code
2227 ///
2228 /// static kmp_critical_name lock = { 0 };
2229 ///
2230 /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
2231 /// ...
2232 /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
2233 /// ...
2234 /// }
2235 ///
2236 /// ...
2237 /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
2238 /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
2239 /// RedList, reduce_func, &<lock>)) {
2240 /// case 1:
2241 /// ...
2242 /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
2243 /// ...
2244 /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
2245 /// break;
2246 /// case 2:
2247 /// ...
2248 /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
2249 /// ...
2250 /// break;
2251 /// default:;
2252 /// }
2253 /// \endcode
2254 ///
2255 /// \param Privates List of private copies for original reduction arguments.
2256 /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
2257 /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
2258 /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
2259 /// or 'operator binop(LHS, RHS)'.
2260 /// \param Options List of options for reduction codegen:
2261 /// WithNowait true if parent directive has also nowait clause, false
2262 /// otherwise.
2263 /// SimpleReduction Emit reduction operation only. Used for omp simd
2264 /// directive on the host.
2265 /// ReductionKind The kind of reduction to perform.
2266 void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
2267 ArrayRef<const Expr *> Privates,
2268 ArrayRef<const Expr *> LHSExprs,
2269 ArrayRef<const Expr *> RHSExprs,
2270 ArrayRef<const Expr *> ReductionOps,
2271 ReductionOptionsTy Options) override;
2272
2273 /// Emit a code for initialization of task reduction clause. Next code
2274 /// should be emitted for reduction:
2275 /// \code
2276 ///
2277 /// _taskred_item_t red_data[n];
2278 /// ...
2279 /// red_data[i].shar = &shareds[i];
2280 /// red_data[i].orig = &origs[i];
2281 /// red_data[i].size = sizeof(origs[i]);
2282 /// red_data[i].f_init = (void*)RedInit<i>;
2283 /// red_data[i].f_fini = (void*)RedDest<i>;
2284 /// red_data[i].f_comb = (void*)RedOp<i>;
2285 /// red_data[i].flags = <Flag_i>;
2286 /// ...
2287 /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
2288 /// \endcode
2289 /// For reduction clause with task modifier it emits the next call:
2290 /// \code
2291 ///
2292 /// _taskred_item_t red_data[n];
2293 /// ...
2294 /// red_data[i].shar = &shareds[i];
2295 /// red_data[i].orig = &origs[i];
2296 /// red_data[i].size = sizeof(origs[i]);
2297 /// red_data[i].f_init = (void*)RedInit<i>;
2298 /// red_data[i].f_fini = (void*)RedDest<i>;
2299 /// red_data[i].f_comb = (void*)RedOp<i>;
2300 /// red_data[i].flags = <Flag_i>;
2301 /// ...
2302 /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
2303 /// red_data);
2304 /// \endcode
2305 /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
2306 /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
2307 /// \param Data Additional data for task generation like tiedness, final
2308 /// state, list of privates, reductions etc.
2309 llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
2310 ArrayRef<const Expr *> LHSExprs,
2311 ArrayRef<const Expr *> RHSExprs,
2312 const OMPTaskDataTy &Data) override;
2313
2314 /// Emits the following code for reduction clause with task modifier:
2315 /// \code
2316 /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
2317 /// \endcode
2318 void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
2319 bool IsWorksharingReduction) override;
2320
2321 /// Required to resolve existing problems in the runtime. Emits threadprivate
2322 /// variables to store the size of the VLAs/array sections for
2323 /// initializer/combiner/finalizer functions + emits threadprivate variable to
2324 /// store the pointer to the original reduction item for the custom
2325 /// initializer defined by declare reduction construct.
2326 /// \param RCG Allows to reuse an existing data for the reductions.
2327 /// \param N Reduction item for which fixups must be emitted.
2328 void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
2329 ReductionCodeGen &RCG, unsigned N) override;
2330
2331 /// Get the address of `void *` type of the privatue copy of the reduction
2332 /// item specified by the \p SharedLVal.
2333 /// \param ReductionsPtr Pointer to the reduction data returned by the
2334 /// emitTaskReductionInit function.
2335 /// \param SharedLVal Address of the original reduction item.
2336 Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
2337 llvm::Value *ReductionsPtr,
2338 LValue SharedLVal) override;
2339
2340 /// Emit code for 'taskwait' directive.
2341 void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
2342
2343 /// Emit code for 'cancellation point' construct.
2344 /// \param CancelRegion Region kind for which the cancellation point must be
2345 /// emitted.
2346 ///
2347 void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
2348 OpenMPDirectiveKind CancelRegion) override;
2349
2350 /// Emit code for 'cancel' construct.
2351 /// \param IfCond Condition in the associated 'if' clause, if it was
2352 /// specified, nullptr otherwise.
2353 /// \param CancelRegion Region kind for which the cancel must be emitted.
2354 ///
2355 void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
2356 const Expr *IfCond,
2357 OpenMPDirectiveKind CancelRegion) override;
2358
2359 /// Emit outilined function for 'target' directive.
2360 /// \param D Directive to emit.
2361 /// \param ParentName Name of the function that encloses the target region.
2362 /// \param OutlinedFn Outlined function value to be defined by this call.
2363 /// \param OutlinedFnID Outlined function ID value to be defined by this call.
2364 /// \param IsOffloadEntry True if the outlined function is an offload entry.
2365 /// \param CodeGen Code generation sequence for the \a D directive.
2366 /// An outlined function may not be an entry if, e.g. the if clause always
2367 /// evaluates to false.
2368 void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
2369 StringRef ParentName,
2370 llvm::Function *&OutlinedFn,
2371 llvm::Constant *&OutlinedFnID,
2372 bool IsOffloadEntry,
2373 const RegionCodeGenTy &CodeGen) override;
2374
2375 /// Emit the target offloading code associated with \a D. The emitted
2376 /// code attempts offloading the execution to the device, an the event of
2377 /// a failure it executes the host version outlined in \a OutlinedFn.
2378 /// \param D Directive to emit.
2379 /// \param OutlinedFn Host version of the code to be offloaded.
2380 /// \param OutlinedFnID ID of host version of the code to be offloaded.
2381 /// \param IfCond Expression evaluated in if clause associated with the target
2382 /// directive, or null if no if clause is used.
2383 /// \param Device Expression evaluated in device clause associated with the
2384 /// target directive, or null if no device clause is used and device modifier.
2385 void emitTargetCall(
2386 CodeGenFunction &CGF, const OMPExecutableDirective &D,
2387 llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
2388 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
2389 llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
2390 const OMPLoopDirective &D)>
2391 SizeEmitter) override;
2392
2393 /// Emit the target regions enclosed in \a GD function definition or
2394 /// the function itself in case it is a valid device function. Returns true if
2395 /// \a GD was dealt with successfully.
2396 /// \param GD Function to scan.
2397 bool emitTargetFunctions(GlobalDecl GD) override;
2398
2399 /// Emit the global variable if it is a valid device global variable.
2400 /// Returns true if \a GD was dealt with successfully.
2401 /// \param GD Variable declaration to emit.
2402 bool emitTargetGlobalVariable(GlobalDecl GD) override;
2403
2404 /// Emit the global \a GD if it is meaningful for the target. Returns
2405 /// if it was emitted successfully.
2406 /// \param GD Global to scan.
2407 bool emitTargetGlobal(GlobalDecl GD) override;
2408
2409 /// Emits code for teams call of the \a OutlinedFn with
2410 /// variables captured in a record which address is stored in \a
2411 /// CapturedStruct.
2412 /// \param OutlinedFn Outlined function to be run by team masters. Type of
2413 /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
2414 /// \param CapturedVars A pointer to the record with the references to
2415 /// variables used in \a OutlinedFn function.
2416 ///
2417 void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
2418 SourceLocation Loc, llvm::Function *OutlinedFn,
2419 ArrayRef<llvm::Value *> CapturedVars) override;
2420
2421 /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
2422 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
2423 /// for num_teams clause.
2424 /// \param NumTeams An integer expression of teams.
2425 /// \param ThreadLimit An integer expression of threads.
2426 void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
2427 const Expr *ThreadLimit, SourceLocation Loc) override;
2428
2429 /// Emit the target data mapping code associated with \a D.
2430 /// \param D Directive to emit.
2431 /// \param IfCond Expression evaluated in if clause associated with the
2432 /// target directive, or null if no device clause is used.
2433 /// \param Device Expression evaluated in device clause associated with the
2434 /// target directive, or null if no device clause is used.
2435 /// \param Info A record used to store information that needs to be preserved
2436 /// until the region is closed.
2437 void emitTargetDataCalls(CodeGenFunction &CGF,
2438 const OMPExecutableDirective &D, const Expr *IfCond,
2439 const Expr *Device, const RegionCodeGenTy &CodeGen,
2440 TargetDataInfo &Info) override;
2441
2442 /// Emit the data mapping/movement code associated with the directive
2443 /// \a D that should be of the form 'target [{enter|exit} data | update]'.
2444 /// \param D Directive to emit.
2445 /// \param IfCond Expression evaluated in if clause associated with the target
2446 /// directive, or null if no if clause is used.
2447 /// \param Device Expression evaluated in device clause associated with the
2448 /// target directive, or null if no device clause is used.
2449 void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
2450 const OMPExecutableDirective &D,
2451 const Expr *IfCond,
2452 const Expr *Device) override;
2453
2454 /// Emit initialization for doacross loop nesting support.
2455 /// \param D Loop-based construct used in doacross nesting construct.
2456 void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
2457 ArrayRef<Expr *> NumIterations) override;
2458
2459 /// Emit code for doacross ordered directive with 'depend' clause.
2460 /// \param C 'depend' clause with 'sink|source' dependency kind.
2461 void emitDoacrossOrdered(CodeGenFunction &CGF,
2462 const OMPDependClause *C) override;
2463
2464 /// Translates the native parameter of outlined function if this is required
2465 /// for target.
2466 /// \param FD Field decl from captured record for the parameter.
2467 /// \param NativeParam Parameter itself.
2468 const VarDecl *translateParameter(const FieldDecl *FD,
2469 const VarDecl *NativeParam) const override;
2470
2471 /// Gets the address of the native argument basing on the address of the
2472 /// target-specific parameter.
2473 /// \param NativeParam Parameter itself.
2474 /// \param TargetParam Corresponding target-specific parameter.
2475 Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
2476 const VarDecl *TargetParam) const override;
2477
2478 /// Gets the OpenMP-specific address of the local variable.
2479 Address getAddressOfLocalVariable(CodeGenFunction &CGF,
2480 const VarDecl *VD) override {
2481 return Address::invalid();
2482 }
2483};
2484
2485} // namespace CodeGen
2486} // namespace clang
2487
2488#endif