Bug Summary

File: clang/lib/CodeGen/CGOpenMPRuntime.cpp
Warning: line 8114, column 9
2nd function call argument is an uninitialized value
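For context, this diagnostic comes from the analyzer's core checkers (enabled via -analyzer-checker=core in the invocation below): it is reported when a call passes an argument whose value may still be uninitialized on some execution path. Below is a minimal, self-contained C++ sketch of the pattern being flagged; the function names are illustrative and are not taken from CGOpenMPRuntime.cpp.

#include <cstdio>

// Hypothetical example only; not the code at CGOpenMPRuntime.cpp:8114.
static void consume(int tag, int value) { std::printf("%d %d\n", tag, value); }

static void example(bool cond) {
  int value;          // 'value' is assigned only when 'cond' is true
  if (cond)
    value = 42;
  consume(0, value);  // when 'cond' is false the analyzer reports:
                      // "2nd function call argument is an uninitialized value"
}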

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGOpenMPRuntime.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/tools/clang/lib/CodeGen -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D CLANG_ROUND_TRIP_CC1_ARGS=ON -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/clang/include -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-13-111025-38230-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/clang/lib/CodeGen/CGOpenMPRuntime.cpp

/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/clang/lib/CodeGen/CGOpenMPRuntime.cpp

1//===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides a class for OpenMP runtime code generation.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGOpenMPRuntime.h"
14#include "CGCXXABI.h"
15#include "CGCleanup.h"
16#include "CGRecordLayout.h"
17#include "CodeGenFunction.h"
18#include "clang/AST/APValue.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/Decl.h"
21#include "clang/AST/OpenMPClause.h"
22#include "clang/AST/StmtOpenMP.h"
23#include "clang/AST/StmtVisitor.h"
24#include "clang/Basic/BitmaskEnum.h"
25#include "clang/Basic/FileManager.h"
26#include "clang/Basic/OpenMPKinds.h"
27#include "clang/Basic/SourceManager.h"
28#include "clang/CodeGen/ConstantInitBuilder.h"
29#include "llvm/ADT/ArrayRef.h"
30#include "llvm/ADT/SetOperations.h"
31#include "llvm/ADT/StringExtras.h"
32#include "llvm/Bitcode/BitcodeReader.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DerivedTypes.h"
35#include "llvm/IR/GlobalValue.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/AtomicOrdering.h"
38#include "llvm/Support/Format.h"
39#include "llvm/Support/raw_ostream.h"
40#include <cassert>
41#include <numeric>
42
43using namespace clang;
44using namespace CodeGen;
45using namespace llvm::omp;
46
47namespace {
48/// Base class for handling code generation inside OpenMP regions.
49class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
50public:
51 /// Kinds of OpenMP regions used in codegen.
52 enum CGOpenMPRegionKind {
53 /// Region with outlined function for standalone 'parallel'
54 /// directive.
55 ParallelOutlinedRegion,
56 /// Region with outlined function for standalone 'task' directive.
57 TaskOutlinedRegion,
58 /// Region for constructs that do not require function outlining,
59 /// like 'for', 'sections', 'atomic' etc. directives.
60 InlinedRegion,
61 /// Region with outlined function for standalone 'target' directive.
62 TargetRegion,
63 };
64
65 CGOpenMPRegionInfo(const CapturedStmt &CS,
66 const CGOpenMPRegionKind RegionKind,
67 const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
68 bool HasCancel)
69 : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
70 CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
71
72 CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
73 const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
74 bool HasCancel)
75 : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
76 Kind(Kind), HasCancel(HasCancel) {}
77
78 /// Get a variable or parameter for storing global thread id
79 /// inside OpenMP construct.
80 virtual const VarDecl *getThreadIDVariable() const = 0;
81
82 /// Emit the captured statement body.
83 void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
84
85 /// Get an LValue for the current ThreadID variable.
86 /// \return LValue for thread id variable. This LValue always has type int32*.
87 virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
88
89 virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}
90
91 CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
92
93 OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
94
95 bool hasCancel() const { return HasCancel; }
96
97 static bool classof(const CGCapturedStmtInfo *Info) {
98 return Info->getKind() == CR_OpenMP;
99 }
100
101 ~CGOpenMPRegionInfo() override = default;
102
103protected:
104 CGOpenMPRegionKind RegionKind;
105 RegionCodeGenTy CodeGen;
106 OpenMPDirectiveKind Kind;
107 bool HasCancel;
108};
109
110/// API for captured statement code generation in OpenMP constructs.
111class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
112public:
113 CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
114 const RegionCodeGenTy &CodeGen,
115 OpenMPDirectiveKind Kind, bool HasCancel,
116 StringRef HelperName)
117 : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
118 HasCancel),
119 ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
120 assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
121 }
122
123 /// Get a variable or parameter for storing global thread id
124 /// inside OpenMP construct.
125 const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
126
127 /// Get the name of the capture helper.
128 StringRef getHelperName() const override { return HelperName; }
129
130 static bool classof(const CGCapturedStmtInfo *Info) {
131 return CGOpenMPRegionInfo::classof(Info) &&
132 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
133 ParallelOutlinedRegion;
134 }
135
136private:
137 /// A variable or parameter storing global thread id for OpenMP
138 /// constructs.
139 const VarDecl *ThreadIDVar;
140 StringRef HelperName;
141};
142
143/// API for captured statement code generation in OpenMP constructs.
144class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
145public:
146 class UntiedTaskActionTy final : public PrePostActionTy {
147 bool Untied;
148 const VarDecl *PartIDVar;
149 const RegionCodeGenTy UntiedCodeGen;
150 llvm::SwitchInst *UntiedSwitch = nullptr;
151
152 public:
153 UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
154 const RegionCodeGenTy &UntiedCodeGen)
155 : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
156 void Enter(CodeGenFunction &CGF) override {
157 if (Untied) {
158 // Emit task switching point.
159 LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
160 CGF.GetAddrOfLocalVar(PartIDVar),
161 PartIDVar->getType()->castAs<PointerType>());
162 llvm::Value *Res =
163 CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
164 llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
165 UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
166 CGF.EmitBlock(DoneBB);
167 CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
168 CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
169 UntiedSwitch->addCase(CGF.Builder.getInt32(0),
170 CGF.Builder.GetInsertBlock());
171 emitUntiedSwitch(CGF);
172 }
173 }
174 void emitUntiedSwitch(CodeGenFunction &CGF) const {
175 if (Untied) {
176 LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
177 CGF.GetAddrOfLocalVar(PartIDVar),
178 PartIDVar->getType()->castAs<PointerType>());
179 CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
180 PartIdLVal);
181 UntiedCodeGen(CGF);
182 CodeGenFunction::JumpDest CurPoint =
183 CGF.getJumpDestInCurrentScope(".untied.next.");
184 CGF.EmitBranch(CGF.ReturnBlock.getBlock());
185 CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
186 UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
187 CGF.Builder.GetInsertBlock());
188 CGF.EmitBranchThroughCleanup(CurPoint);
189 CGF.EmitBlock(CurPoint.getBlock());
190 }
191 }
192 unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
193 };
194 CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
195 const VarDecl *ThreadIDVar,
196 const RegionCodeGenTy &CodeGen,
197 OpenMPDirectiveKind Kind, bool HasCancel,
198 const UntiedTaskActionTy &Action)
199 : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
200 ThreadIDVar(ThreadIDVar), Action(Action) {
201 assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
202 }
203
204 /// Get a variable or parameter for storing global thread id
205 /// inside OpenMP construct.
206 const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
207
208 /// Get an LValue for the current ThreadID variable.
209 LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
210
211 /// Get the name of the capture helper.
212 StringRef getHelperName() const override { return ".omp_outlined."; }
213
214 void emitUntiedSwitch(CodeGenFunction &CGF) override {
215 Action.emitUntiedSwitch(CGF);
216 }
217
218 static bool classof(const CGCapturedStmtInfo *Info) {
219 return CGOpenMPRegionInfo::classof(Info) &&
220 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
221 TaskOutlinedRegion;
222 }
223
224private:
225 /// A variable or parameter storing global thread id for OpenMP
226 /// constructs.
227 const VarDecl *ThreadIDVar;
228 /// Action for emitting code for untied tasks.
229 const UntiedTaskActionTy &Action;
230};
231
232/// API for inlined captured statement code generation in OpenMP
233/// constructs.
234class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
235public:
236 CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
237 const RegionCodeGenTy &CodeGen,
238 OpenMPDirectiveKind Kind, bool HasCancel)
239 : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
240 OldCSI(OldCSI),
241 OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
242
243 // Retrieve the value of the context parameter.
244 llvm::Value *getContextValue() const override {
245 if (OuterRegionInfo)
246 return OuterRegionInfo->getContextValue();
247 llvm_unreachable("No context value for inlined OpenMP region");
248 }
249
250 void setContextValue(llvm::Value *V) override {
251 if (OuterRegionInfo) {
252 OuterRegionInfo->setContextValue(V);
253 return;
254 }
255 llvm_unreachable("No context value for inlined OpenMP region");
256 }
257
258 /// Lookup the captured field decl for a variable.
259 const FieldDecl *lookup(const VarDecl *VD) const override {
260 if (OuterRegionInfo)
261 return OuterRegionInfo->lookup(VD);
262 // If there is no outer outlined region, there is no need to look it up in
263 // the list of captured variables; we can use the original one.
264 return nullptr;
265 }
266
267 FieldDecl *getThisFieldDecl() const override {
268 if (OuterRegionInfo)
269 return OuterRegionInfo->getThisFieldDecl();
270 return nullptr;
271 }
272
273 /// Get a variable or parameter for storing global thread id
274 /// inside OpenMP construct.
275 const VarDecl *getThreadIDVariable() const override {
276 if (OuterRegionInfo)
277 return OuterRegionInfo->getThreadIDVariable();
278 return nullptr;
279 }
280
281 /// Get an LValue for the current ThreadID variable.
282 LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
283 if (OuterRegionInfo)
284 return OuterRegionInfo->getThreadIDVariableLValue(CGF);
285 llvm_unreachable("No LValue for inlined OpenMP construct");
286 }
287
288 /// Get the name of the capture helper.
289 StringRef getHelperName() const override {
290 if (auto *OuterRegionInfo = getOldCSI())
291 return OuterRegionInfo->getHelperName();
292 llvm_unreachable("No helper name for inlined OpenMP construct");
293 }
294
295 void emitUntiedSwitch(CodeGenFunction &CGF) override {
296 if (OuterRegionInfo)
297 OuterRegionInfo->emitUntiedSwitch(CGF);
298 }
299
300 CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
301
302 static bool classof(const CGCapturedStmtInfo *Info) {
303 return CGOpenMPRegionInfo::classof(Info) &&
304 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
305 }
306
307 ~CGOpenMPInlinedRegionInfo() override = default;
308
309private:
310 /// CodeGen info about outer OpenMP region.
311 CodeGenFunction::CGCapturedStmtInfo *OldCSI;
312 CGOpenMPRegionInfo *OuterRegionInfo;
313};
314
315/// API for captured statement code generation in OpenMP target
316/// constructs. For these captures, implicit parameters are used instead of the
317/// captured fields. The name of the target region has to be unique in a given
318/// application so it is provided by the client, because only the client has
319/// the information to generate that.
320class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
321public:
322 CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
323 const RegionCodeGenTy &CodeGen, StringRef HelperName)
324 : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
325 /*HasCancel=*/false),
326 HelperName(HelperName) {}
327
328 /// This is unused for target regions because each starts executing
329 /// with a single thread.
330 const VarDecl *getThreadIDVariable() const override { return nullptr; }
331
332 /// Get the name of the capture helper.
333 StringRef getHelperName() const override { return HelperName; }
334
335 static bool classof(const CGCapturedStmtInfo *Info) {
336 return CGOpenMPRegionInfo::classof(Info) &&
337 cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
338 }
339
340private:
341 StringRef HelperName;
342};
343
344static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
345 llvm_unreachable("No codegen for expressions");
346}
347/// API for generation of expressions captured in an innermost OpenMP
348/// region.
349class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
350public:
351 CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
352 : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
353 OMPD_unknown,
354 /*HasCancel=*/false),
355 PrivScope(CGF) {
356 // Make sure the globals captured in the provided statement are local by
357 // using the privatization logic. We assume the same variable is not
358 // captured more than once.
359 for (const auto &C : CS.captures()) {
360 if (!C.capturesVariable() && !C.capturesVariableByCopy())
361 continue;
362
363 const VarDecl *VD = C.getCapturedVar();
364 if (VD->isLocalVarDeclOrParm())
365 continue;
366
367 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
368 /*RefersToEnclosingVariableOrCapture=*/false,
369 VD->getType().getNonReferenceType(), VK_LValue,
370 C.getLocation());
371 PrivScope.addPrivate(
372 VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(CGF); });
373 }
374 (void)PrivScope.Privatize();
375 }
376
377 /// Lookup the captured field decl for a variable.
378 const FieldDecl *lookup(const VarDecl *VD) const override {
379 if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
380 return FD;
381 return nullptr;
382 }
383
384 /// Emit the captured statement body.
385 void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
386 llvm_unreachable("No body for expressions");
387 }
388
389 /// Get a variable or parameter for storing global thread id
390 /// inside OpenMP construct.
391 const VarDecl *getThreadIDVariable() const override {
392 llvm_unreachable("No thread id for expressions");
393 }
394
395 /// Get the name of the capture helper.
396 StringRef getHelperName() const override {
397 llvm_unreachable("No helper name for expressions");
398 }
399
400 static bool classof(const CGCapturedStmtInfo *Info) { return false; }
401
402private:
403 /// Private scope to capture global variables.
404 CodeGenFunction::OMPPrivateScope PrivScope;
405};
406
407/// RAII for emitting code of OpenMP constructs.
408class InlinedOpenMPRegionRAII {
409 CodeGenFunction &CGF;
410 llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
411 FieldDecl *LambdaThisCaptureField = nullptr;
412 const CodeGen::CGBlockInfo *BlockInfo = nullptr;
413 bool NoInheritance = false;
414
415public:
416 /// Constructs region for combined constructs.
417 /// \param CodeGen Code generation sequence for combined directives. Includes
418 /// a list of functions used for code generation of implicitly inlined
419 /// regions.
420 InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
421 OpenMPDirectiveKind Kind, bool HasCancel,
422 bool NoInheritance = true)
423 : CGF(CGF), NoInheritance(NoInheritance) {
424 // Start emission for the construct.
425 CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
426 CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
427 if (NoInheritance) {
428 std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
429 LambdaThisCaptureField = CGF.LambdaThisCaptureField;
430 CGF.LambdaThisCaptureField = nullptr;
431 BlockInfo = CGF.BlockInfo;
432 CGF.BlockInfo = nullptr;
433 }
434 }
435
436 ~InlinedOpenMPRegionRAII() {
437 // Restore original CapturedStmtInfo only if we're done with code emission.
438 auto *OldCSI =
439 cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
440 delete CGF.CapturedStmtInfo;
441 CGF.CapturedStmtInfo = OldCSI;
442 if (NoInheritance) {
443 std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
444 CGF.LambdaThisCaptureField = LambdaThisCaptureField;
445 CGF.BlockInfo = BlockInfo;
446 }
447 }
448};
449
450/// Values for bit flags used in the ident_t to describe the fields.
451/// All enumerators are named and described in accordance with the code
452/// from https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
453enum OpenMPLocationFlags : unsigned {
454 /// Use trampoline for internal microtask.
455 OMP_IDENT_IMD = 0x01,
456 /// Use c-style ident structure.
457 OMP_IDENT_KMPC = 0x02,
458 /// Atomic reduction option for kmpc_reduce.
459 OMP_ATOMIC_REDUCE = 0x10,
460 /// Explicit 'barrier' directive.
461 OMP_IDENT_BARRIER_EXPL = 0x20,
462 /// Implicit barrier in code.
463 OMP_IDENT_BARRIER_IMPL = 0x40,
464 /// Implicit barrier in 'for' directive.
465 OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
466 /// Implicit barrier in 'sections' directive.
467 OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
468 /// Implicit barrier in 'single' directive.
469 OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
470 /// Call of __kmp_for_static_init for static loop.
471 OMP_IDENT_WORK_LOOP = 0x200,
472 /// Call of __kmp_for_static_init for sections.
473 OMP_IDENT_WORK_SECTIONS = 0x400,
474 /// Call of __kmp_for_static_init for distribute.
475 OMP_IDENT_WORK_DISTRIBUTE = 0x800,
476 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
477};
478
479namespace {
480 LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
481/// Values for bit flags for marking which requires clauses have been used.
482enum OpenMPOffloadingRequiresDirFlags : int64_t {
483 /// flag undefined.
484 OMP_REQ_UNDEFINED = 0x000,
485 /// no requires clause present.
486 OMP_REQ_NONE = 0x001,
487 /// reverse_offload clause.
488 OMP_REQ_REVERSE_OFFLOAD = 0x002,
489 /// unified_address clause.
490 OMP_REQ_UNIFIED_ADDRESS = 0x004,
491 /// unified_shared_memory clause.
492 OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
493 /// dynamic_allocators clause.
494 OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
495 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
496};
497
498enum OpenMPOffloadingReservedDeviceIDs {
499 /// Device ID if the device was not defined, runtime should get it
500 /// from environment variables in the spec.
501 OMP_DEVICEID_UNDEF = -1,
502};
503} // anonymous namespace
504
505/// Describes ident structure that describes a source location.
506/// All descriptions are taken from
507/// https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
508/// Original structure:
509/// typedef struct ident {
510/// kmp_int32 reserved_1; /**< might be used in Fortran;
511/// see above */
512/// kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags;
513/// KMP_IDENT_KMPC identifies this union
514/// member */
515/// kmp_int32 reserved_2; /**< not really used in Fortran any more;
516/// see above */
517///#if USE_ITT_BUILD
518/// /* but currently used for storing
519/// region-specific ITT */
520/// /* contextual information. */
521///#endif /* USE_ITT_BUILD */
522/// kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for
523/// C++ */
524/// char const *psource; /**< String describing the source location.
525/// The string is composed of semi-colon separated
526// fields which describe the source file,
527/// the function and a pair of line numbers that
528/// delimit the construct.
529/// */
530/// } ident_t;
531enum IdentFieldIndex {
532 /// might be used in Fortran
533 IdentField_Reserved_1,
534 /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
535 IdentField_Flags,
536 /// Not really used in Fortran any more
537 IdentField_Reserved_2,
538 /// Source[4] in Fortran, do not use for C++
539 IdentField_Reserved_3,
540 /// String describing the source location. The string is composed of
541 /// semi-colon separated fields which describe the source file, the function
542 /// and a pair of line numbers that delimit the construct.
543 IdentField_PSource
544};
545
546/// Schedule types for 'omp for' loops (these enumerators are taken from
547/// the enum sched_type in kmp.h).
548enum OpenMPSchedType {
549 /// Lower bound for default (unordered) versions.
550 OMP_sch_lower = 32,
551 OMP_sch_static_chunked = 33,
552 OMP_sch_static = 34,
553 OMP_sch_dynamic_chunked = 35,
554 OMP_sch_guided_chunked = 36,
555 OMP_sch_runtime = 37,
556 OMP_sch_auto = 38,
557 /// static with chunk adjustment (e.g., simd)
558 OMP_sch_static_balanced_chunked = 45,
559 /// Lower bound for 'ordered' versions.
560 OMP_ord_lower = 64,
561 OMP_ord_static_chunked = 65,
562 OMP_ord_static = 66,
563 OMP_ord_dynamic_chunked = 67,
564 OMP_ord_guided_chunked = 68,
565 OMP_ord_runtime = 69,
566 OMP_ord_auto = 70,
567 OMP_sch_default = OMP_sch_static,
568 /// dist_schedule types
569 OMP_dist_sch_static_chunked = 91,
570 OMP_dist_sch_static = 92,
571 /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
572 /// Set if the monotonic schedule modifier was present.
573 OMP_sch_modifier_monotonic = (1 << 29),
574 /// Set if the nonmonotonic schedule modifier was present.
575 OMP_sch_modifier_nonmonotonic = (1 << 30),
576};
577
578/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
579/// region.
580class CleanupTy final : public EHScopeStack::Cleanup {
581 PrePostActionTy *Action;
582
583public:
584 explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
585 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
586 if (!CGF.HaveInsertPoint())
587 return;
588 Action->Exit(CGF);
589 }
590};
591
592} // anonymous namespace
593
594void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
595 CodeGenFunction::RunCleanupsScope Scope(CGF);
596 if (PrePostAction) {
597 CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
598 Callback(CodeGen, CGF, *PrePostAction);
599 } else {
600 PrePostActionTy Action;
601 Callback(CodeGen, CGF, Action);
602 }
603}
604
605/// Check if the combiner is a call to UDR combiner and if it is so return the
606/// UDR decl used for reduction.
607static const OMPDeclareReductionDecl *
608getReductionInit(const Expr *ReductionOp) {
609 if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
610 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
611 if (const auto *DRE =
612 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
613 if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
614 return DRD;
615 return nullptr;
616}
617
618static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
619 const OMPDeclareReductionDecl *DRD,
620 const Expr *InitOp,
621 Address Private, Address Original,
622 QualType Ty) {
623 if (DRD->getInitializer()) {
624 std::pair<llvm::Function *, llvm::Function *> Reduction =
625 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
626 const auto *CE = cast<CallExpr>(InitOp);
627 const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
628 const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
629 const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
630 const auto *LHSDRE =
631 cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
632 const auto *RHSDRE =
633 cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
634 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
635 PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
636 [=]() { return Private; });
637 PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
638 [=]() { return Original; });
639 (void)PrivateScope.Privatize();
640 RValue Func = RValue::get(Reduction.second);
641 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
642 CGF.EmitIgnoredExpr(InitOp);
643 } else {
644 llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
645 std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
646 auto *GV = new llvm::GlobalVariable(
647 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
648 llvm::GlobalValue::PrivateLinkage, Init, Name);
649 LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
650 RValue InitRVal;
651 switch (CGF.getEvaluationKind(Ty)) {
652 case TEK_Scalar:
653 InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
654 break;
655 case TEK_Complex:
656 InitRVal =
657 RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
658 break;
659 case TEK_Aggregate: {
660 OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_LValue);
661 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, LV);
662 CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
663 /*IsInitializer=*/false);
664 return;
665 }
666 }
667 OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_PRValue);
668 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
669 CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
670 /*IsInitializer=*/false);
671 }
672}
673
674/// Emit initialization of arrays of complex types.
675/// \param DestAddr Address of the array.
676/// \param Type Type of array.
677/// \param Init Initial expression of array.
678/// \param SrcAddr Address of the original array.
679static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
680 QualType Type, bool EmitDeclareReductionInit,
681 const Expr *Init,
682 const OMPDeclareReductionDecl *DRD,
683 Address SrcAddr = Address::invalid()) {
684 // Perform element-by-element initialization.
685 QualType ElementTy;
686
687 // Drill down to the base element type on both arrays.
688 const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
689 llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
690 DestAddr =
691 CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
692 if (DRD)
693 SrcAddr =
694 CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
695
696 llvm::Value *SrcBegin = nullptr;
697 if (DRD)
698 SrcBegin = SrcAddr.getPointer();
699 llvm::Value *DestBegin = DestAddr.getPointer();
700 // Cast from pointer to array type to pointer to single element.
701 llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
702 // The basic structure here is a while-do loop.
703 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
704 llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
705 llvm::Value *IsEmpty =
706 CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
707 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
708
709 // Enter the loop body, making that address the current address.
710 llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
711 CGF.EmitBlock(BodyBB);
712
713 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
714
715 llvm::PHINode *SrcElementPHI = nullptr;
716 Address SrcElementCurrent = Address::invalid();
717 if (DRD) {
718 SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
719 "omp.arraycpy.srcElementPast");
720 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
721 SrcElementCurrent =
722 Address(SrcElementPHI,
723 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
724 }
725 llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
726 DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
727 DestElementPHI->addIncoming(DestBegin, EntryBB);
728 Address DestElementCurrent =
729 Address(DestElementPHI,
730 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
731
732 // Emit copy.
733 {
734 CodeGenFunction::RunCleanupsScope InitScope(CGF);
735 if (EmitDeclareReductionInit) {
736 emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
737 SrcElementCurrent, ElementTy);
738 } else
739 CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
740 /*IsInitializer=*/false);
741 }
742
743 if (DRD) {
744 // Shift the address forward by one element.
745 llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
746 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
747 SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
748 }
749
750 // Shift the address forward by one element.
751 llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
752 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
753 // Check whether we've reached the end.
754 llvm::Value *Done =
755 CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
756 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
757 DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
758
759 // Done.
760 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
761}
762
763LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
764 return CGF.EmitOMPSharedLValue(E);
765}
766
767LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
768 const Expr *E) {
769 if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
770 return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
771 return LValue();
772}
773
774void ReductionCodeGen::emitAggregateInitialization(
775 CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
776 const OMPDeclareReductionDecl *DRD) {
777 // Emit VarDecl with copy init for arrays.
778 // Get the address of the original variable captured in current
779 // captured region.
780 const auto *PrivateVD =
781 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
782 bool EmitDeclareReductionInit =
783 DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
784 EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
785 EmitDeclareReductionInit,
786 EmitDeclareReductionInit ? ClausesData[N].ReductionOp
787 : PrivateVD->getInit(),
788 DRD, SharedLVal.getAddress(CGF));
789}
790
791ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
792 ArrayRef<const Expr *> Origs,
793 ArrayRef<const Expr *> Privates,
794 ArrayRef<const Expr *> ReductionOps) {
795 ClausesData.reserve(Shareds.size());
796 SharedAddresses.reserve(Shareds.size());
797 Sizes.reserve(Shareds.size());
798 BaseDecls.reserve(Shareds.size());
799 const auto *IOrig = Origs.begin();
800 const auto *IPriv = Privates.begin();
801 const auto *IRed = ReductionOps.begin();
802 for (const Expr *Ref : Shareds) {
803 ClausesData.emplace_back(Ref, *IOrig, *IPriv, *IRed);
804 std::advance(IOrig, 1);
805 std::advance(IPriv, 1);
806 std::advance(IRed, 1);
807 }
808}
809
810void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) {
811 assert(SharedAddresses.size() == N && OrigAddresses.size() == N &&
812        "Number of generated lvalues must be exactly N.");
813 LValue First = emitSharedLValue(CGF, ClausesData[N].Shared);
814 LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Shared);
815 SharedAddresses.emplace_back(First, Second);
816 if (ClausesData[N].Shared == ClausesData[N].Ref) {
817 OrigAddresses.emplace_back(First, Second);
818 } else {
819 LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
820 LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
821 OrigAddresses.emplace_back(First, Second);
822 }
823}
824
825void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
826 const auto *PrivateVD =
827 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
828 QualType PrivateType = PrivateVD->getType();
829 bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
830 if (!PrivateType->isVariablyModifiedType()) {
831 Sizes.emplace_back(
832 CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()),
833 nullptr);
834 return;
835 }
836 llvm::Value *Size;
837 llvm::Value *SizeInChars;
838 auto *ElemType =
839 cast<llvm::PointerType>(OrigAddresses[N].first.getPointer(CGF)->getType())
840 ->getElementType();
841 auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
842 if (AsArraySection) {
843 Size = CGF.Builder.CreatePtrDiff(OrigAddresses[N].second.getPointer(CGF),
844 OrigAddresses[N].first.getPointer(CGF));
845 Size = CGF.Builder.CreateNUWAdd(
846 Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
847 SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
848 } else {
849 SizeInChars =
850 CGF.getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType());
851 Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
852 }
853 Sizes.emplace_back(SizeInChars, Size);
854 CodeGenFunction::OpaqueValueMapping OpaqueMap(
855 CGF,
856 cast<OpaqueValueExpr>(
857 CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
858 RValue::get(Size));
859 CGF.EmitVariablyModifiedType(PrivateType);
860}
861
862void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
863 llvm::Value *Size) {
864 const auto *PrivateVD =
865 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
866 QualType PrivateType = PrivateVD->getType();
867 if (!PrivateType->isVariablyModifiedType()) {
868 assert(!Size && !Sizes[N].second &&
869        "Size should be nullptr for non-variably modified reduction "
870        "items.");
871 return;
872 }
873 CodeGenFunction::OpaqueValueMapping OpaqueMap(
874 CGF,
875 cast<OpaqueValueExpr>(
876 CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
877 RValue::get(Size));
878 CGF.EmitVariablyModifiedType(PrivateType);
879}
880
881void ReductionCodeGen::emitInitialization(
882 CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
883 llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
884 assert(SharedAddresses.size() > N && "No variable was generated");
885 const auto *PrivateVD =
886 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
887 const OMPDeclareReductionDecl *DRD =
888 getReductionInit(ClausesData[N].ReductionOp);
889 QualType PrivateType = PrivateVD->getType();
890 PrivateAddr = CGF.Builder.CreateElementBitCast(
891 PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
892 QualType SharedType = SharedAddresses[N].first.getType();
893 SharedLVal = CGF.MakeAddrLValue(
894 CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(CGF),
895 CGF.ConvertTypeForMem(SharedType)),
896 SharedType, SharedAddresses[N].first.getBaseInfo(),
897 CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
898 if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
899 if (DRD && DRD->getInitializer())
900 (void)DefaultInit(CGF);
901 emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
902 } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
903 (void)DefaultInit(CGF);
904 emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
905 PrivateAddr, SharedLVal.getAddress(CGF),
906 SharedLVal.getType());
907 } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
908 !CGF.isTrivialInitializer(PrivateVD->getInit())) {
909 CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
910 PrivateVD->getType().getQualifiers(),
911 /*IsInitializer=*/false);
912 }
913}
914
915bool ReductionCodeGen::needCleanups(unsigned N) {
916 const auto *PrivateVD =
917 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
918 QualType PrivateType = PrivateVD->getType();
919 QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
920 return DTorKind != QualType::DK_none;
921}
922
923void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
924 Address PrivateAddr) {
925 const auto *PrivateVD =
926 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
927 QualType PrivateType = PrivateVD->getType();
928 QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
929 if (needCleanups(N)) {
930 PrivateAddr = CGF.Builder.CreateElementBitCast(
931 PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
932 CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
933 }
934}
935
936static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
937 LValue BaseLV) {
938 BaseTy = BaseTy.getNonReferenceType();
939 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
940 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
941 if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
942 BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(CGF), PtrTy);
943 } else {
944 LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(CGF), BaseTy);
945 BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
946 }
947 BaseTy = BaseTy->getPointeeType();
948 }
949 return CGF.MakeAddrLValue(
950 CGF.Builder.CreateElementBitCast(BaseLV.getAddress(CGF),
951 CGF.ConvertTypeForMem(ElTy)),
952 BaseLV.getType(), BaseLV.getBaseInfo(),
953 CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
954}
955
956static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
957 llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
958 llvm::Value *Addr) {
959 Address Tmp = Address::invalid();
960 Address TopTmp = Address::invalid();
961 Address MostTopTmp = Address::invalid();
962 BaseTy = BaseTy.getNonReferenceType();
963 while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
964 !CGF.getContext().hasSameType(BaseTy, ElTy)) {
965 Tmp = CGF.CreateMemTemp(BaseTy);
966 if (TopTmp.isValid())
967 CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
968 else
969 MostTopTmp = Tmp;
970 TopTmp = Tmp;
971 BaseTy = BaseTy->getPointeeType();
972 }
973 llvm::Type *Ty = BaseLVType;
974 if (Tmp.isValid())
975 Ty = Tmp.getElementType();
976 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
977 if (Tmp.isValid()) {
978 CGF.Builder.CreateStore(Addr, Tmp);
979 return MostTopTmp;
980 }
981 return Address(Addr, BaseLVAlignment);
982}
983
984static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
985 const VarDecl *OrigVD = nullptr;
986 if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
987 const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
988 while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
989 Base = TempOASE->getBase()->IgnoreParenImpCasts();
990 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
991 Base = TempASE->getBase()->IgnoreParenImpCasts();
992 DE = cast<DeclRefExpr>(Base);
993 OrigVD = cast<VarDecl>(DE->getDecl());
994 } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
995 const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
996 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
997 Base = TempASE->getBase()->IgnoreParenImpCasts();
998 DE = cast<DeclRefExpr>(Base);
999 OrigVD = cast<VarDecl>(DE->getDecl());
1000 }
1001 return OrigVD;
1002}
1003
1004Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
1005 Address PrivateAddr) {
1006 const DeclRefExpr *DE;
1007 if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
1008 BaseDecls.emplace_back(OrigVD);
1009 LValue OriginalBaseLValue = CGF.EmitLValue(DE);
1010 LValue BaseLValue =
1011 loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
1012 OriginalBaseLValue);
1013 llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
1014 BaseLValue.getPointer(CGF), SharedAddresses[N].first.getPointer(CGF));
1015 llvm::Value *PrivatePointer =
1016 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1017 PrivateAddr.getPointer(),
1018 SharedAddresses[N].first.getAddress(CGF).getType());
1019 llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
1020 return castToBase(CGF, OrigVD->getType(),
1021 SharedAddresses[N].first.getType(),
1022 OriginalBaseLValue.getAddress(CGF).getType(),
1023 OriginalBaseLValue.getAlignment(), Ptr);
1024 }
1025 BaseDecls.emplace_back(
1026 cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
1027 return PrivateAddr;
1028}
1029
1030bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
1031 const OMPDeclareReductionDecl *DRD =
1032 getReductionInit(ClausesData[N].ReductionOp);
1033 return DRD && DRD->getInitializer();
1034}
1035
1036LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
1037 return CGF.EmitLoadOfPointerLValue(
1038 CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1039 getThreadIDVariable()->getType()->castAs<PointerType>());
1040}
1041
1042void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt *S) {
1043 if (!CGF.HaveInsertPoint())
1044 return;
1045 // 1.2.2 OpenMP Language Terminology
1046 // Structured block - An executable statement with a single entry at the
1047 // top and a single exit at the bottom.
1048 // The point of exit cannot be a branch out of the structured block.
1049 // longjmp() and throw() must not violate the entry/exit criteria.
1050 CGF.EHStack.pushTerminate();
1051 if (S)
1052 CGF.incrementProfileCounter(S);
1053 CodeGen(CGF);
1054 CGF.EHStack.popTerminate();
1055}
1056
1057LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1058 CodeGenFunction &CGF) {
1059 return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1060 getThreadIDVariable()->getType(),
1061 AlignmentSource::Decl);
1062}
1063
1064static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
1065 QualType FieldTy) {
1066 auto *Field = FieldDecl::Create(
1067 C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
1068 C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
1069 /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
1070 Field->setAccess(AS_public);
1071 DC->addDecl(Field);
1072 return Field;
1073}
1074
1075CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
1076 StringRef Separator)
1077 : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
1078 OMPBuilder(CGM.getModule()), OffloadEntriesInfoManager(CGM) {
1079 KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
1080
1081 // Initialize Types used in OpenMPIRBuilder from OMPKinds.def
1082 OMPBuilder.initialize();
1083 loadOffloadInfoMetadata();
1084}
1085
1086void CGOpenMPRuntime::clear() {
1087 InternalVars.clear();
1088 // Clean non-target variable declarations possibly used only in debug info.
1089 for (const auto &Data : EmittedNonTargetVariables) {
1090 if (!Data.getValue().pointsToAliveValue())
1091 continue;
1092 auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
1093 if (!GV)
1094 continue;
1095 if (!GV->isDeclaration() || GV->getNumUses() > 0)
1096 continue;
1097 GV->eraseFromParent();
1098 }
1099}
1100
1101std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
1102 SmallString<128> Buffer;
1103 llvm::raw_svector_ostream OS(Buffer);
1104 StringRef Sep = FirstSeparator;
1105 for (StringRef Part : Parts) {
1106 OS << Sep << Part;
1107 Sep = Separator;
1108 }
1109 return std::string(OS.str());
1110}
1111
1112static llvm::Function *
1113emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
1114 const Expr *CombinerInitializer, const VarDecl *In,
1115 const VarDecl *Out, bool IsCombiner) {
1116 // void .omp_combiner.(Ty *in, Ty *out);
1117 ASTContext &C = CGM.getContext();
1118 QualType PtrTy = C.getPointerType(Ty).withRestrict();
1119 FunctionArgList Args;
1120 ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
1121 /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1122 ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
1123 /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1124 Args.push_back(&OmpOutParm);
1125 Args.push_back(&OmpInParm);
1126 const CGFunctionInfo &FnInfo =
1127 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1128 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
1129 std::string Name = CGM.getOpenMPRuntime().getName(
1130 {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
1131 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
1132 Name, &CGM.getModule());
1133 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
1134 if (CGM.getLangOpts().Optimize) {
1135 Fn->removeFnAttr(llvm::Attribute::NoInline);
1136 Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
1137 Fn->addFnAttr(llvm::Attribute::AlwaysInline);
1138 }
1139 CodeGenFunction CGF(CGM);
1140 // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
1141 // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
1142 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
1143 Out->getLocation());
1144 CodeGenFunction::OMPPrivateScope Scope(CGF);
1145 Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
1146 Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
1147 return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
1148 .getAddress(CGF);
1149 });
1150 Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
1151 Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
1152 return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
1153 .getAddress(CGF);
1154 });
1155 (void)Scope.Privatize();
1156 if (!IsCombiner && Out->hasInit() &&
1157 !CGF.isTrivialInitializer(Out->getInit())) {
1158 CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
1159 Out->getType().getQualifiers(),
1160 /*IsInitializer=*/true);
1161 }
1162 if (CombinerInitializer)
1163 CGF.EmitIgnoredExpr(CombinerInitializer);
1164 Scope.ForceCleanup();
1165 CGF.FinishFunction();
1166 return Fn;
1167}
1168
1169void CGOpenMPRuntime::emitUserDefinedReduction(
1170 CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
1171 if (UDRMap.count(D) > 0)
1172 return;
1173 llvm::Function *Combiner = emitCombinerOrInitializer(
1174 CGM, D->getType(), D->getCombiner(),
1175 cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerIn())->getDecl()),
1176 cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerOut())->getDecl()),
1177 /*IsCombiner=*/true);
1178 llvm::Function *Initializer = nullptr;
1179 if (const Expr *Init = D->getInitializer()) {
1180 Initializer = emitCombinerOrInitializer(
1181 CGM, D->getType(),
1182 D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
1183 : nullptr,
1184 cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
1185 cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
1186 /*IsCombiner=*/false);
1187 }
1188 UDRMap.try_emplace(D, Combiner, Initializer);
1189 if (CGF) {
1190 auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
1191 Decls.second.push_back(D);
1192 }
1193}
1194
1195std::pair<llvm::Function *, llvm::Function *>
1196CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
1197 auto I = UDRMap.find(D);
1198 if (I != UDRMap.end())
1199 return I->second;
1200 emitUserDefinedReduction(/*CGF=*/nullptr, D);
1201 return UDRMap.lookup(D);
1202}
1203
1204namespace {
1205// Temporary RAII solution to perform a push/pop stack event on the OpenMP IR
1206// Builder if one is present.
1207struct PushAndPopStackRAII {
1208 PushAndPopStackRAII(llvm::OpenMPIRBuilder *OMPBuilder, CodeGenFunction &CGF,
1209 bool HasCancel, llvm::omp::Directive Kind)
1210 : OMPBuilder(OMPBuilder) {
1211 if (!OMPBuilder)
1212 return;
1213
1214 // The following callback is the crucial part of clang's cleanup process.
1215 //
1216 // NOTE:
1217 // Once the OpenMPIRBuilder is used to create parallel regions (and
1218 // similar), the cancellation destination (Dest below) is determined via
1219 // IP. That means if we have variables to finalize we split the block at IP,
1220 // use the new block (=BB) as destination to build a JumpDest (via
1221 // getJumpDestInCurrentScope(BB)) which then is fed to
1222 // EmitBranchThroughCleanup. Furthermore, there will not be the need
1223 // to push & pop a FinalizationInfo object.
1224 // The FiniCB will still be needed but at the point where the
1225 // OpenMPIRBuilder is asked to construct a parallel (or similar) construct.
1226 auto FiniCB = [&CGF](llvm::OpenMPIRBuilder::InsertPointTy IP) {
1227 assert(IP.getBlock()->end() == IP.getPoint() &&
1228        "Clang CG should cause non-terminated block!");
1229 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1230 CGF.Builder.restoreIP(IP);
1231 CodeGenFunction::JumpDest Dest =
1232 CGF.getOMPCancelDestination(OMPD_parallel);
1233 CGF.EmitBranchThroughCleanup(Dest);
1234 };
1235
1236 // TODO: Remove this once we emit parallel regions through the
1237 // OpenMPIRBuilder as it can do this setup internally.
1238 llvm::OpenMPIRBuilder::FinalizationInfo FI({FiniCB, Kind, HasCancel});
1239 OMPBuilder->pushFinalizationCB(std::move(FI));
1240 }
1241 ~PushAndPopStackRAII() {
1242 if (OMPBuilder)
1243 OMPBuilder->popFinalizationCB();
1244 }
1245 llvm::OpenMPIRBuilder *OMPBuilder;
1246};
1247} // namespace
1248
1249static llvm::Function *emitParallelOrTeamsOutlinedFunction(
1250 CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
1251 const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
1252 const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
1253 assert(ThreadIDVar->getType()->isPointerType() &&
1254        "thread id variable must be of type kmp_int32 *");
1255 CodeGenFunction CGF(CGM, true);
1256 bool HasCancel = false;
1257 if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
1258 HasCancel = OPD->hasCancel();
1259 else if (const auto *OPD = dyn_cast<OMPTargetParallelDirective>(&D))
1260 HasCancel = OPD->hasCancel();
1261 else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
1262 HasCancel = OPSD->hasCancel();
1263 else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
1264 HasCancel = OPFD->hasCancel();
1265 else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
1266 HasCancel = OPFD->hasCancel();
1267 else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
1268 HasCancel = OPFD->hasCancel();
1269 else if (const auto *OPFD =
1270 dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
1271 HasCancel = OPFD->hasCancel();
1272 else if (const auto *OPFD =
1273 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
1274 HasCancel = OPFD->hasCancel();
1275
1276 // TODO: Temporarily inform the OpenMPIRBuilder, if any, about the new
1277 // parallel region to make cancellation barriers work properly.
1278 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1279 PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel, InnermostKind);
1280 CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
1281 HasCancel, OutlinedHelperName);
1282 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1283 return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc());
1284}
1285
1286llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
1287 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1288 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1289 const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1290 return emitParallelOrTeamsOutlinedFunction(
1291 CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1292}
1293
1294llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction(
1295 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1296 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1297 const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1298 return emitParallelOrTeamsOutlinedFunction(
1299 CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1300}
1301
1302llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
1303 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1304 const VarDecl *PartIDVar, const VarDecl *TaskTVar,
1305 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1306 bool Tied, unsigned &NumberOfParts) {
1307 auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
1308 PrePostActionTy &) {
1309 llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc());
1310 llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc());
1311 llvm::Value *TaskArgs[] = {
1312 UpLoc, ThreadID,
1313 CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
1314 TaskTVar->getType()->castAs<PointerType>())
1315 .getPointer(CGF)};
1316 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1317 CGM.getModule(), OMPRTL___kmpc_omp_task),
1318 TaskArgs);
1319 };
1320 CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
1321 UntiedCodeGen);
1322 CodeGen.setAction(Action);
1323 assert(!ThreadIDVar->getType()->isPointerType() &&
1324        "thread id variable must be of type kmp_int32 for tasks");
1325 const OpenMPDirectiveKind Region =
1326 isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
1327 : OMPD_task;
1328 const CapturedStmt *CS = D.getCapturedStmt(Region);
1329 bool HasCancel = false;
1330 if (const auto *TD = dyn_cast<OMPTaskDirective>(&D))
1331 HasCancel = TD->hasCancel();
1332 else if (const auto *TD = dyn_cast<OMPTaskLoopDirective>(&D))
1333 HasCancel = TD->hasCancel();
1334 else if (const auto *TD = dyn_cast<OMPMasterTaskLoopDirective>(&D))
1335 HasCancel = TD->hasCancel();
1336 else if (const auto *TD = dyn_cast<OMPParallelMasterTaskLoopDirective>(&D))
1337 HasCancel = TD->hasCancel();
1338
1339 CodeGenFunction CGF(CGM, true);
1340 CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
1341 InnermostKind, HasCancel, Action);
1342 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1343 llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
1344 if (!Tied)
1345 NumberOfParts = Action.getNumberOfParts();
1346 return Res;
1347}
1348
1349static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
1350 const RecordDecl *RD, const CGRecordLayout &RL,
1351 ArrayRef<llvm::Constant *> Data) {
1352 llvm::StructType *StructTy = RL.getLLVMType();
1353 unsigned PrevIdx = 0;
1354 ConstantInitBuilder CIBuilder(CGM);
1355 auto DI = Data.begin();
1356 for (const FieldDecl *FD : RD->fields()) {
1357 unsigned Idx = RL.getLLVMFieldNo(FD);
1358 // Fill in null values for padding fields added for alignment.
1359 for (unsigned I = PrevIdx; I < Idx; ++I)
1360 Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
1361 PrevIdx = Idx + 1;
1362 Fields.add(*DI);
1363 ++DI;
1364 }
1365}
1366
1367template <class... As>
1368static llvm::GlobalVariable *
1369createGlobalStruct(CodeGenModule &CGM, QualType Ty, bool IsConstant,
1370 ArrayRef<llvm::Constant *> Data, const Twine &Name,
1371 As &&... Args) {
1372 const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1373 const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1374 ConstantInitBuilder CIBuilder(CGM);
1375 ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
1376 buildStructValue(Fields, CGM, RD, RL, Data);
1377 return Fields.finishAndCreateGlobal(
1378 Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty), IsConstant,
1379 std::forward<As>(Args)...);
1380}
1381
1382template <typename T>
1383static void
1384createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
1385 ArrayRef<llvm::Constant *> Data,
1386 T &Parent) {
1387 const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1388 const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1389 ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
1390 buildStructValue(Fields, CGM, RD, RL, Data);
1391 Fields.finishAndAddTo(Parent);
1392}
1393
1394void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
1395 bool AtCurrentPoint) {
1396 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1397 assert(!Elem.second.ServiceInsertPt && "Insert point is set already.");
1398
1399 llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
1400 if (AtCurrentPoint) {
1401 Elem.second.ServiceInsertPt = new llvm::BitCastInst(
1402 Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock());
1403 } else {
1404 Elem.second.ServiceInsertPt =
1405 new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
1406 Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
1407 }
1408}
1409
1410void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
1411 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1412 if (Elem.second.ServiceInsertPt) {
1413 llvm::Instruction *Ptr = Elem.second.ServiceInsertPt;
1414 Elem.second.ServiceInsertPt = nullptr;
1415 Ptr->eraseFromParent();
1416 }
1417}
1418
1419static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF,
1420 SourceLocation Loc,
1421 SmallString<128> &Buffer) {
1422 llvm::raw_svector_ostream OS(Buffer);
1423 // Build debug location
1424 PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1425 OS << ";" << PLoc.getFilename() << ";";
1426 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
1427 OS << FD->getQualifiedNameAsString();
1428 OS << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
1429 return OS.str();
1430}
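For a call site at test.cpp:42:7 inside a function foo() (hypothetical names), the buffer assembled above would contain:

    ";test.cpp;foo;42;7;;"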
1431
1432llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
1433 SourceLocation Loc,
1434 unsigned Flags) {
1435 llvm::Constant *SrcLocStr;
1436 if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
1437 Loc.isInvalid()) {
1438 SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr();
1439 } else {
1440 std::string FunctionName = "";
1441 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
1442 FunctionName = FD->getQualifiedNameAsString();
1443 PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1444 const char *FileName = PLoc.getFilename();
1445 unsigned Line = PLoc.getLine();
1446 unsigned Column = PLoc.getColumn();
1447 SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName.c_str(), FileName,
1448 Line, Column);
1449 }
1450 unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
1451 return OMPBuilder.getOrCreateIdent(SrcLocStr, llvm::omp::IdentFlag(Flags),
1452 Reserved2Flags);
1453}
1454
1455llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
1456 SourceLocation Loc) {
1457 assert(CGF.CurFn && "No function in current CodeGenFunction.");
1458 // If the OpenMPIRBuilder is used we need to use it for all thread id calls as
1459 // the clang invariants used below might be broken.
1460 if (CGM.getLangOpts().OpenMPIRBuilder) {
1461 SmallString<128> Buffer;
1462 OMPBuilder.updateToLocation(CGF.Builder.saveIP());
1463 auto *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(
1464 getIdentStringFromSourceLocation(CGF, Loc, Buffer));
1465 return OMPBuilder.getOrCreateThreadID(
1466 OMPBuilder.getOrCreateIdent(SrcLocStr));
1467 }
1468
1469 llvm::Value *ThreadID = nullptr;
1470 // Check whether we've already cached a load of the thread id in this
1471 // function.
1472 auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1473 if (I != OpenMPLocThreadIDMap.end()) {
1474 ThreadID = I->second.ThreadID;
1475 if (ThreadID != nullptr)
1476 return ThreadID;
1477 }
1478 // If exceptions are enabled, do not use the parameter to avoid a possible crash.
1479 if (auto *OMPRegionInfo =
1480 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
1481 if (OMPRegionInfo->getThreadIDVariable()) {
1482 // Check if this is an outlined function with the thread id passed as an argument.
1483 LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
1484 llvm::BasicBlock *TopBlock = CGF.AllocaInsertPt->getParent();
1485 if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
1486 !CGF.getLangOpts().CXXExceptions ||
1487 CGF.Builder.GetInsertBlock() == TopBlock ||
1488 !isa<llvm::Instruction>(LVal.getPointer(CGF)) ||
1489 cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
1490 TopBlock ||
1491 cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
1492 CGF.Builder.GetInsertBlock()) {
1493 ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
1494 // If the value was loaded in the entry block, cache it and use it
1495 // everywhere in the function.
1496 if (CGF.Builder.GetInsertBlock() == TopBlock) {
1497 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1498 Elem.second.ThreadID = ThreadID;
1499 }
1500 return ThreadID;
1501 }
1502 }
1503 }
1504
1505 // This is not an outlined function region - need to call kmp_int32
1506 // __kmpc_global_thread_num(ident_t *loc).
1507 // Generate thread id value and cache this value for use across the
1508 // function.
1509 auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1510 if (!Elem.second.ServiceInsertPt)
1511 setLocThreadIdInsertPt(CGF);
1512 CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1513 CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
1514 llvm::CallInst *Call = CGF.Builder.CreateCall(
1515 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
1516 OMPRTL___kmpc_global_thread_num),
1517 emitUpdateLocation(CGF, Loc));
1518 Call->setCallingConv(CGF.getRuntimeCC());
1519 Elem.second.ThreadID = Call;
1520 return Call;
1521}
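The fallback path above boils down to a call to the runtime entry point with, in effect, this prototype (a sketch; kmp_int32 is a 32-bit integer):

    kmp_int32 __kmpc_global_thread_num(ident_t *loc);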
1522
1523void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
1524 assert(CGF.CurFn && "No function in current CodeGenFunction.");
1525 if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
1526 clearLocThreadIdInsertPt(CGF);
1527 OpenMPLocThreadIDMap.erase(CGF.CurFn);
1528 }
1529 if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1530 for(const auto *D : FunctionUDRMap[CGF.CurFn])
1531 UDRMap.erase(D);
1532 FunctionUDRMap.erase(CGF.CurFn);
1533 }
1534 auto I = FunctionUDMMap.find(CGF.CurFn);
1535 if (I != FunctionUDMMap.end()) {
1536 for(const auto *D : I->second)
1537 UDMMap.erase(D);
1538 FunctionUDMMap.erase(I);
1539 }
1540 LastprivateConditionalToTypes.erase(CGF.CurFn);
1541 FunctionToUntiedTaskStackMap.erase(CGF.CurFn);
1542}
1543
1544llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
1545 return OMPBuilder.IdentPtr;
1546}
1547
1548llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
1549 if (!Kmpc_MicroTy) {
1550 // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1551 llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1552 llvm::PointerType::getUnqual(CGM.Int32Ty)};
1553 Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1554 }
1555 return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1556}
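Spelled out as a C declaration, the pointer type built here corresponds to (a sketch matching the comment above):

    typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);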
1557
1558llvm::FunctionCallee
1559CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned) {
1560 assert((IVSize == 32 || IVSize == 64) &&
1561        "IV size is not compatible with the omp runtime");
1562 StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
1563 : "__kmpc_for_static_init_4u")
1564 : (IVSigned ? "__kmpc_for_static_init_8"
1565 : "__kmpc_for_static_init_8u");
1566 llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
1567 auto *PtrTy = llvm::PointerType::getUnqual(ITy);
1568 llvm::Type *TypeParams[] = {
1569 getIdentTyPointerTy(), // loc
1570 CGM.Int32Ty, // tid
1571 CGM.Int32Ty, // schedtype
1572 llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
1573 PtrTy, // p_lower
1574 PtrTy, // p_upper
1575 PtrTy, // p_stride
1576 ITy, // incr
1577 ITy // chunk
1578 };
1579 auto *FnTy =
1580 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1581 return CGM.CreateRuntimeFunction(FnTy, Name);
1582}
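For the IVSize == 32, signed case, the declared runtime function has, in effect, this prototype (a sketch derived from TypeParams above; parameter names follow the inline comments):

    void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 tid,
                                  kmp_int32 schedtype, kmp_int32 *p_lastiter,
                                  kmp_int32 *p_lower, kmp_int32 *p_upper,
                                  kmp_int32 *p_stride, kmp_int32 incr,
                                  kmp_int32 chunk);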
1583
1584llvm::FunctionCallee
1585CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, bool IVSigned) {
1586 assert((IVSize == 32 || IVSize == 64) &&
1587        "IV size is not compatible with the omp runtime");
1588 StringRef Name =
1589 IVSize == 32
1590 ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
1591 : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
1592 llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
1593 llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
1594 CGM.Int32Ty, // tid
1595 CGM.Int32Ty, // schedtype
1596 ITy, // lower
1597 ITy, // upper
1598 ITy, // stride
1599 ITy // chunk
1600 };
1601 auto *FnTy =
1602 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1603 return CGM.CreateRuntimeFunction(FnTy, Name);
1604}
1605
1606llvm::FunctionCallee
1607CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, bool IVSigned) {
1608 assert((IVSize == 32 || IVSize == 64) &&
1609        "IV size is not compatible with the omp runtime");
1610 StringRef Name =
1611 IVSize == 32
1612 ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
1613 : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
1614 llvm::Type *TypeParams[] = {
1615 getIdentTyPointerTy(), // loc
1616 CGM.Int32Ty, // tid
1617 };
1618 auto *FnTy =
1619 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1620 return CGM.CreateRuntimeFunction(FnTy, Name);
1621}
1622
1623llvm::FunctionCallee
1624CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) {
1625 assert((IVSize == 32 || IVSize == 64) &&
1626        "IV size is not compatible with the omp runtime");
1627 StringRef Name =
1628 IVSize == 32
1629 ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
1630 : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
1631 llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
1632 auto *PtrTy = llvm::PointerType::getUnqual(ITy);
1633 llvm::Type *TypeParams[] = {
1634 getIdentTyPointerTy(), // loc
1635 CGM.Int32Ty, // tid
1636 llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
1637 PtrTy, // p_lower
1638 PtrTy, // p_upper
1639 PtrTy // p_stride
1640 };
1641 auto *FnTy =
1642 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1643 return CGM.CreateRuntimeFunction(FnTy, Name);
1644}
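A typical driving loop for the dispatch entry points, as generated code would use them (a sketch, not emitted verbatim here; body() stands for the loop body and the pointer arguments follow the TypeParams comments):

    while (__kmpc_dispatch_next_4(loc, tid, &last, &lb, &ub, &st)) {
      for (kmp_int32 i = lb; i <= ub; i += st)
        body(i);
    }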
1645
1646/// Obtain information that uniquely identifies a target entry. This
1647/// consists of the file and device IDs as well as line number associated with
1648/// the relevant entry source location.
1649static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
1650 unsigned &DeviceID, unsigned &FileID,
1651 unsigned &LineNum) {
1652 SourceManager &SM = C.getSourceManager();
1653
1654 // The loc should always be valid and have a file ID (the user cannot use
1655 // #pragma directives in macros).
1656
1657 assert(Loc.isValid() && "Source location is expected to be always valid.");
1658
1659 PresumedLoc PLoc = SM.getPresumedLoc(Loc);
1660 assert(PLoc.isValid() && "Source location is expected to be always valid.");
1661
1662 llvm::sys::fs::UniqueID ID;
1663 if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
1664 PLoc = SM.getPresumedLoc(Loc, /*UseLineDirectives=*/false);
1665 assert(PLoc.isValid() && "Source location is expected to be always valid.");
1666 if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
1667 SM.getDiagnostics().Report(diag::err_cannot_open_file)
1668 << PLoc.getFilename() << EC.message();
1669 }
1670
1671 DeviceID = ID.getDevice();
1672 FileID = ID.getFile();
1673 LineNum = PLoc.getLine();
1674}
1675
1676Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
1677 if (CGM.getLangOpts().OpenMPSimd)
1678 return Address::invalid();
1679 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
1680 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
1681 if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link ||
1682 (*Res == OMPDeclareTargetDeclAttr::MT_To &&
1683 HasRequiresUnifiedSharedMemory))) {
1684 SmallString<64> PtrName;
1685 {
1686 llvm::raw_svector_ostream OS(PtrName);
1687 OS << CGM.getMangledName(GlobalDecl(VD));
1688 if (!VD->isExternallyVisible()) {
1689 unsigned DeviceID, FileID, Line;
1690 getTargetEntryUniqueInfo(CGM.getContext(),
1691 VD->getCanonicalDecl()->getBeginLoc(),
1692 DeviceID, FileID, Line);
1693 OS << llvm::format("_%x", FileID);
1694 }
1695 OS << "_decl_tgt_ref_ptr";
1696 }
1697 llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
1698 if (!Ptr) {
1699 QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
1700 Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
1701 PtrName);
1702
1703 auto *GV = cast<llvm::GlobalVariable>(Ptr);
1704 GV->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
1705
1706 if (!CGM.getLangOpts().OpenMPIsDevice)
1707 GV->setInitializer(CGM.GetAddrOfGlobal(VD));
1708 registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
1709 }
1710 return Address(Ptr, CGM.getContext().getDeclAlign(VD));
1711 }
1712 return Address::invalid();
1713}
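A user-level declaration that takes this path (illustrative only): a variable marked declare target link, whose accesses in target regions go through the *_decl_tgt_ref_ptr indirection pointer created above:

    int gvar;
    #pragma omp declare target link(gvar)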
1714
1715llvm::Constant *
1716CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
1717 assert(!CGM.getLangOpts().OpenMPUseTLS ||
1718        !CGM.getContext().getTargetInfo().isTLSSupported());
1719 // Lookup the entry, lazily creating it if necessary.
1720 std::string Suffix = getName({"cache", ""});
1721 return getOrCreateInternalVariable(
1722 CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
1723}
1724
1725Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
1726 const VarDecl *VD,
1727 Address VDAddr,
1728 SourceLocation Loc) {
1729 if (CGM.getLangOpts().OpenMPUseTLS &&
1730 CGM.getContext().getTargetInfo().isTLSSupported())
1731 return VDAddr;
1732
1733 llvm::Type *VarTy = VDAddr.getElementType();
1734 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
1735 CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
1736 CGM.Int8PtrTy),
1737 CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
1738 getOrCreateThreadPrivateCache(VD)};
1739 return Address(CGF.EmitRuntimeCall(
1740 OMPBuilder.getOrCreateRuntimeFunction(
1741 CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
1742 Args),
1743 VDAddr.getAlignment());
1744}
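The runtime call built above corresponds to this libomp entry point (a sketch; the returned pointer is the address of the per-thread copy):

    void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 gtid,
                                      void *data, size_t size, void ***cache);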
1745
1746void CGOpenMPRuntime::emitThreadPrivateVarInit(
1747 CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
1748 llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
1749 // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
1750 // library.
1751 llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
1752 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1753 CGM.getModule(), OMPRTL___kmpc_global_thread_num),
1754 OMPLoc);
1755 // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
1756 // to register constructor/destructor for variable.
1757 llvm::Value *Args[] = {
1758 OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
1759 Ctor, CopyCtor, Dtor};
1760 CGF.EmitRuntimeCall(
1761 OMPBuilder.getOrCreateRuntimeFunction(
1762 CGM.getModule(), OMPRTL___kmpc_threadprivate_register),
1763 Args);
1764}
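And the registration call reduces to (a sketch; kmpc_ctor, kmpc_cctor and kmpc_dtor name the constructor, copy-constructor and destructor function-pointer types, with null passed where no helper is needed):

    void __kmpc_threadprivate_register(ident_t *loc, void *data,
                                       kmpc_ctor ctor, kmpc_cctor cctor,
                                       kmpc_dtor dtor);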
1765
1766llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
1767 const VarDecl *VD, Address VDAddr, SourceLocation Loc,
1768 bool PerformInit, CodeGenFunction *CGF) {
1769 if (CGM.getLangOpts().OpenMPUseTLS &&
1770 CGM.getContext().getTargetInfo().isTLSSupported())
1771 return nullptr;
1772
1773 VD = VD->getDefinition(CGM.getContext());
1774 if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
1775 QualType ASTTy = VD->getType();
1776
1777 llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
1778 const Expr *Init = VD->getAnyInitializer();
1779 if (CGM.getLangOpts().CPlusPlus && PerformInit) {
1780 // Generate function that re-emits the declaration's initializer into the
1781 // threadprivate copy of the variable VD
1782 CodeGenFunction CtorCGF(CGM);
1783 FunctionArgList Args;
1784 ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
1785 /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
1786 ImplicitParamDecl::Other);
1787 Args.push_back(&Dst);
1788
1789 const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
1790 CGM.getContext().VoidPtrTy, Args);
1791 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
1792 std::string Name = getName({"__kmpc_global_ctor_", ""});
1793 llvm::Function *Fn =
1794 CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
1795 CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
1796 Args, Loc, Loc);
1797 llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
1798 CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
1799 CGM.getContext().VoidPtrTy, Dst.getLocation());
1800 Address Arg = Address(ArgVal, VDAddr.getAlignment());
1801 Arg = CtorCGF.Builder.CreateElementBitCast(
1802 Arg, CtorCGF.ConvertTypeForMem(ASTTy));
1803 CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
1804 /*IsInitializer=*/true);
1805 ArgVal = CtorCGF.EmitLoadOfScalar(
1806 CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
1807 CGM.getContext().VoidPtrTy, Dst.getLocation());
1808 CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
1809 CtorCGF.FinishFunction();
1810 Ctor = Fn;
1811 }
1812 if (VD->getType().isDestructedType() != QualType::DK_none) {
1813 // Generate function that emits destructor call for the threadprivate copy
1814 // of the variable VD
1815 CodeGenFunction DtorCGF(CGM);
1816 FunctionArgList Args;
1817 ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
1818 /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
1819 ImplicitParamDecl::Other);
1820 Args.push_back(&Dst);
1821
1822 const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
1823 CGM.getContext().VoidTy, Args);
1824 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
1825 std::string Name = getName({"__kmpc_global_dtor_", ""});
1826 llvm::Function *Fn =
1827 CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
1828 auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
1829 DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
1830 Loc, Loc);
1831 // Create a scope with an artificial location for the body of this function.
1832 auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
1833 llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
1834 DtorCGF.GetAddrOfLocalVar(&Dst),
1835 /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
1836 DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
1837 DtorCGF.getDestroyer(ASTTy.isDestructedType()),
1838 DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
1839 DtorCGF.FinishFunction();
1840 Dtor = Fn;
1841 }
1842 // Do not emit init function if it is not required.
1843 if (!Ctor && !Dtor)
1844 return nullptr;
1845
1846 llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1847 auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
1848 /*isVarArg=*/false)
1849 ->getPointerTo();
1850 // Copying constructor for the threadprivate variable.
1851 // Must be NULL - reserved by the runtime, which currently requires that this
1852 // parameter always be NULL; otherwise it fires an assertion.
1853 CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
1854 if (Ctor == nullptr) {
1855 auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
1856 /*isVarArg=*/false)
1857 ->getPointerTo();
1858 Ctor = llvm::Constant::getNullValue(CtorTy);
1859 }
1860 if (Dtor == nullptr) {
1861 auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
1862 /*isVarArg=*/false)
1863 ->getPointerTo();
1864 Dtor = llvm::Constant::getNullValue(DtorTy);
1865 }
1866 if (!CGF) {
1867 auto *InitFunctionTy =
1868 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
1869 std::string Name = getName({"__omp_threadprivate_init_", ""});
1870 llvm::Function *InitFunction = CGM.CreateGlobalInitOrCleanUpFunction(
1871 InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
1872 CodeGenFunction InitCGF(CGM);
1873 FunctionArgList ArgList;
1874 InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
1875 CGM.getTypes().arrangeNullaryFunction(), ArgList,
1876 Loc, Loc);
1877 emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
1878 InitCGF.FinishFunction();
1879 return InitFunction;
1880 }
1881 emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
1882 }
1883 return nullptr;
1884}
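A C++ source pattern that requires both the ctor and dtor helpers generated above (illustrative only):

    struct S { S(); ~S(); };
    S tp;
    #pragma omp threadprivate(tp)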
1885
1886bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
1887 llvm::GlobalVariable *Addr,
1888 bool PerformInit) {
1889 if (CGM.getLangOpts().OMPTargetTriples.empty() &&
1890 !CGM.getLangOpts().OpenMPIsDevice)
1891 return false;
1892 Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
1893 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
1894 if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
1895 (*Res == OMPDeclareTargetDeclAttr::MT_To &&
1896 HasRequiresUnifiedSharedMemory))
1897 return CGM.getLangOpts().OpenMPIsDevice;
1898 VD = VD->getDefinition(CGM.getContext());
1899 assert(VD && "Unknown VarDecl");
1900
1901 if (!DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
1902 return CGM.getLangOpts().OpenMPIsDevice;
1903
1904 QualType ASTTy = VD->getType();
1905 SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();
1906
1907 // Produce the unique prefix to identify the new target regions. We use
1908 // the source location of the variable declaration which we know to not
1909 // conflict with any target region.
1910 unsigned DeviceID;
1911 unsigned FileID;
1912 unsigned Line;
1913 getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
1914 SmallString<128> Buffer, Out;
1915 {
1916 llvm::raw_svector_ostream OS(Buffer);
1917 OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
1918 << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
1919 }
1920
1921 const Expr *Init = VD->getAnyInitializer();
1922 if (CGM.getLangOpts().CPlusPlus && PerformInit) {
1923 llvm::Constant *Ctor;
1924 llvm::Constant *ID;
1925 if (CGM.getLangOpts().OpenMPIsDevice) {
1926 // Generate function that re-emits the declaration's initializer into
1927 // the threadprivate copy of the variable VD
1928 CodeGenFunction CtorCGF(CGM);
1929
1930 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
1931 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
1932 llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
1933 FTy, Twine(Buffer, "_ctor"), FI, Loc);
1934 auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
1935 CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
1936 FunctionArgList(), Loc, Loc);
1937 auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
1938 CtorCGF.EmitAnyExprToMem(Init,
1939 Address(Addr, CGM.getContext().getDeclAlign(VD)),
1940 Init->getType().getQualifiers(),
1941 /*IsInitializer=*/true);
1942 CtorCGF.FinishFunction();
1943 Ctor = Fn;
1944 ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
1945 CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
1946 } else {
1947 Ctor = new llvm::GlobalVariable(
1948 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
1949 llvm::GlobalValue::PrivateLinkage,
1950 llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
1951 ID = Ctor;
1952 }
1953
1954 // Register the information for the entry associated with the constructor.
1955 Out.clear();
1956 OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
1957 DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
1958 ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
1959 }
1960 if (VD->getType().isDestructedType() != QualType::DK_none) {
1961 llvm::Constant *Dtor;
1962 llvm::Constant *ID;
1963 if (CGM.getLangOpts().OpenMPIsDevice) {
1964 // Generate function that emits destructor call for the threadprivate
1965 // copy of the variable VD
1966 CodeGenFunction DtorCGF(CGM);
1967
1968 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
1969 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
1970 llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
1971 FTy, Twine(Buffer, "_dtor"), FI, Loc);
1972 auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
1973 DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
1974 FunctionArgList(), Loc, Loc);
1975 // Create a scope with an artificial location for the body of this
1976 // function.
1977 auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
1978 DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
1979 ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
1980 DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
1981 DtorCGF.FinishFunction();
1982 Dtor = Fn;
1983 ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
1984 CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
1985 } else {
1986 Dtor = new llvm::GlobalVariable(
1987 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
1988 llvm::GlobalValue::PrivateLinkage,
1989 llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
1990 ID = Dtor;
1991 }
1992 // Register the information for the entry associated with the destructor.
1993 Out.clear();
1994 OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
1995 DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
1996 ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
1997 }
1998 return CGM.getLangOpts().OpenMPIsDevice;
1999}
2000
2001Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
2002 QualType VarType,
2003 StringRef Name) {
2004 std::string Suffix = getName({"artificial", ""});
2005 llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
2006 llvm::Value *GAddr =
2007 getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
2008 if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPUseTLS &&
2009 CGM.getTarget().isTLSSupported()) {
2010 cast<llvm::GlobalVariable>(GAddr)->setThreadLocal(/*Val=*/true);
2011 return Address(GAddr, CGM.getContext().getTypeAlignInChars(VarType));
2012 }
2013 std::string CacheSuffix = getName({"cache", ""});
2014 llvm::Value *Args[] = {
2015 emitUpdateLocation(CGF, SourceLocation()),
2016 getThreadID(CGF, SourceLocation()),
2017 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
2018 CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
2019 /*isSigned=*/false),
2020 getOrCreateInternalVariable(
2021 CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
2022 return Address(
2023 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2024 CGF.EmitRuntimeCall(
2025 OMPBuilder.getOrCreateRuntimeFunction(
2026 CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
2027 Args),
2028 VarLVType->getPointerTo(/*AddrSpace=*/0)),
2029 CGM.getContext().getTypeAlignInChars(VarType));
2030}
2031
2032void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
2033 const RegionCodeGenTy &ThenGen,
2034 const RegionCodeGenTy &ElseGen) {
2035 CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
2036
2037 // If the condition constant folds and can be elided, try to avoid emitting
2038 // the condition and the dead arm of the if/else.
2039 bool CondConstant;
2040 if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
2041 if (CondConstant)
2042 ThenGen(CGF);
2043 else
2044 ElseGen(CGF);
2045 return;
2046 }
2047
2048 // Otherwise, the condition did not fold, or we couldn't elide it. Just
2049 // emit the conditional branch.
2050 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
2051 llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
2052 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
2053 CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
2054
2055 // Emit the 'then' code.
2056 CGF.EmitBlock(ThenBlock);
2057 ThenGen(CGF);
2058 CGF.EmitBranch(ContBlock);
2059 // Emit the 'else' code if present.
2060 // There is no need to emit line number for unconditional branch.
2061 (void)ApplyDebugLocation::CreateEmpty(CGF);
2062 CGF.EmitBlock(ElseBlock);
2063 ElseGen(CGF);
2064 // There is no need to emit line number for unconditional branch.
2065 (void)ApplyDebugLocation::CreateEmpty(CGF);
2066 CGF.EmitBranch(ContBlock);
2067 // Emit the continuation block for code after the if.
2068 CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
2069}
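When the condition does not fold, the emitted control flow has this shape (a C-like sketch of the three blocks created above):

    if (cond) goto omp_if_then; else goto omp_if_else;
    omp_if_then: ThenGen(CGF); goto omp_if_end;
    omp_if_else: ElseGen(CGF); goto omp_if_end;
    omp_if_end:  /* continuation */;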
2070
2071void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
2072 llvm::Function *OutlinedFn,
2073 ArrayRef<llvm::Value *> CapturedVars,
2074 const Expr *IfCond) {
2075 if (!CGF.HaveInsertPoint())
2076 return;
2077 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2078 auto &M = CGM.getModule();
2079 auto &&ThenGen = [&M, OutlinedFn, CapturedVars, RTLoc,
2080 this](CodeGenFunction &CGF, PrePostActionTy &) {
2081 // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
2082 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2083 llvm::Value *Args[] = {
2084 RTLoc,
2085 CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
2086 CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
2087 llvm::SmallVector<llvm::Value *, 16> RealArgs;
2088 RealArgs.append(std::begin(Args), std::end(Args));
2089 RealArgs.append(CapturedVars.begin(), CapturedVars.end());
2090
2091 llvm::FunctionCallee RTLFn =
2092 OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_fork_call);
2093 CGF.EmitRuntimeCall(RTLFn, RealArgs);
2094 };
2095 auto &&ElseGen = [&M, OutlinedFn, CapturedVars, RTLoc, Loc,
2096 this](CodeGenFunction &CGF, PrePostActionTy &) {
2097 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2098 llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
2099 // Build calls:
2100 // __kmpc_serialized_parallel(&Loc, GTid);
2101 llvm::Value *Args[] = {RTLoc, ThreadID};
2102 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2103 M, OMPRTL___kmpc_serialized_parallel),
2104 Args);
2105
2106 // OutlinedFn(&GTid, &zero_bound, CapturedStruct);
2107 Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
2108 Address ZeroAddrBound =
2109 CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2110 /*Name=*/".bound.zero.addr");
2111 CGF.InitTempAlloca(ZeroAddrBound, CGF.Builder.getInt32(/*C*/ 0));
2112 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2113 // ThreadId for serialized parallels is 0.
2114 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2115 OutlinedFnArgs.push_back(ZeroAddrBound.getPointer());
2116 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2117
2118 // Ensure we do not inline the function. This is trivially true for the ones
2119 // passed to __kmpc_fork_call but the ones called in serialized regions
2120 // could be inlined. This is not perfect but it is closer to the invariant
2121 // we want, namely, every data environment starts with a new function.
2122 // TODO: We should pass the if condition to the runtime function and do the
2123 // handling there. Much cleaner code.
2124 OutlinedFn->addFnAttr(llvm::Attribute::NoInline);
2125 RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2126
2127 // __kmpc_end_serialized_parallel(&Loc, GTid);
2128 llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
2129 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2130 M, OMPRTL___kmpc_end_serialized_parallel),
2131 EndArgs);
2132 };
2133 if (IfCond) {
2134 emitIfClause(CGF, IfCond, ThenGen, ElseGen);
2135 } else {
2136 RegionCodeGenTy ThenRCG(ThenGen);
2137 ThenRCG(CGF);
2138 }
2139}
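Put side by side, the two arms above amount to the following call sequences (a sketch; outlined stands for OutlinedFn and captured... for CapturedVars):

    /* IfCond true (or absent): */
    __kmpc_fork_call(&loc, n, (kmpc_micro)outlined, var1, ..., varn);

    /* IfCond false (serialized): */
    __kmpc_serialized_parallel(&loc, gtid);
    outlined(&gtid, &bound_zero, captured...);
    __kmpc_end_serialized_parallel(&loc, gtid);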
2140
2141// If we're inside an (outlined) parallel region, use the region info's
2142 // thread-ID variable (it is passed as the first argument of the outlined function,
2143 // "kmp_int32 *gtid"). Otherwise, if we're not inside a parallel region but in a
2144 // regular serial code region, get the thread ID by calling kmp_int32
2145 // __kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and
2146// return the address of that temp.
2147Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
2148 SourceLocation Loc) {
2149 if (auto *OMPRegionInfo =
2150 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2151 if (OMPRegionInfo->getThreadIDVariable())
2152 return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(CGF);
2153
2154 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2155 QualType Int32Ty =
2156 CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
2157 Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
2158 CGF.EmitStoreOfScalar(ThreadID,
2159 CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
2160
2161 return ThreadIDTemp;
2162}
2163
2164llvm::Constant *CGOpenMPRuntime::getOrCreateInternalVariable(
2165 llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
2166 SmallString<256> Buffer;
2167 llvm::raw_svector_ostream Out(Buffer);
2168 Out << Name;
2169 StringRef RuntimeName = Out.str();
2170 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
2171 if (Elem.second) {
2172 assert(Elem.second->getType()->getPointerElementType() == Ty &&
2173        "OMP internal variable has different type than requested");
2174 return &*Elem.second;
2175 }
2176
2177 return Elem.second = new llvm::GlobalVariable(
2178 CGM.getModule(), Ty, /*IsConstant*/ false,
2179 llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
2180 Elem.first(), /*InsertBefore=*/nullptr,
2181 llvm::GlobalValue::NotThreadLocal, AddressSpace);
2182}
2183
2184llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
2185 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
2186 std::string Name = getName({Prefix, "var"});
2187 return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
2188}
2189
2190namespace {
2191/// Common pre(post)-action for different OpenMP constructs.
2192class CommonActionTy final : public PrePostActionTy {
2193 llvm::FunctionCallee EnterCallee;
2194 ArrayRef<llvm::Value *> EnterArgs;
2195 llvm::FunctionCallee ExitCallee;
2196 ArrayRef<llvm::Value *> ExitArgs;
2197 bool Conditional;
2198 llvm::BasicBlock *ContBlock = nullptr;
2199
2200public:
2201 CommonActionTy(llvm::FunctionCallee EnterCallee,
2202 ArrayRef<llvm::Value *> EnterArgs,
2203 llvm::FunctionCallee ExitCallee,
2204 ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
2205 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
2206 ExitArgs(ExitArgs), Conditional(Conditional) {}
2207 void Enter(CodeGenFunction &CGF) override {
2208 llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
2209 if (Conditional) {
2210 llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
2211 auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
2212 ContBlock = CGF.createBasicBlock("omp_if.end");
2213 // Generate the branch (If-stmt)
2214 CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
2215 CGF.EmitBlock(ThenBlock);
2216 }
2217 }
2218 void Done(CodeGenFunction &CGF) {
2219 // Emit the rest of blocks/branches
2220 CGF.EmitBranch(ContBlock);
2221 CGF.EmitBlock(ContBlock, true);
2222 }
2223 void Exit(CodeGenFunction &CGF) override {
2224 CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
2225 }
2226};
2227} // anonymous namespace
2228
2229void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
2230 StringRef CriticalName,
2231 const RegionCodeGenTy &CriticalOpGen,
2232 SourceLocation Loc, const Expr *Hint) {
2233 // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
2234 // CriticalOpGen();
2235 // __kmpc_end_critical(ident_t *, gtid, Lock);
2236 // Prepare arguments and build a call to __kmpc_critical
2237 if (!CGF.HaveInsertPoint())
2238 return;
2239 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2240 getCriticalRegionLock(CriticalName)};
2241 llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
2242 std::end(Args));
2243 if (Hint) {
2244 EnterArgs.push_back(CGF.Builder.CreateIntCast(
2245 CGF.EmitScalarExpr(Hint), CGM.Int32Ty, /*isSigned=*/false));
2246 }
2247 CommonActionTy Action(
2248 OMPBuilder.getOrCreateRuntimeFunction(
2249 CGM.getModule(),
2250 Hint ? OMPRTL___kmpc_critical_with_hint : OMPRTL___kmpc_critical),
2251 EnterArgs,
2252 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
2253 OMPRTL___kmpc_end_critical),
2254 Args);
2255 CriticalOpGen.setAction(Action);
2256 emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
2257}
2258
2259void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
2260 const RegionCodeGenTy &MasterOpGen,
2261 SourceLocation Loc) {
2262 if (!CGF.HaveInsertPoint())
2263 return;
2264 // if(__kmpc_master(ident_t *, gtid)) {
2265 // MasterOpGen();
2266 // __kmpc_end_master(ident_t *, gtid);
2267 // }
2268 // Prepare arguments and build a call to __kmpc_master
2269 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2270 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2271 CGM.getModule(), OMPRTL___kmpc_master),
2272 Args,
2273 OMPBuilder.getOrCreateRuntimeFunction(
2274 CGM.getModule(), OMPRTL___kmpc_end_master),
2275 Args,
2276 /*Conditional=*/true);
2277 MasterOpGen.setAction(Action);
2278 emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
2279 Action.Done(CGF);
2280}
2281
2282void CGOpenMPRuntime::emitMaskedRegion(CodeGenFunction &CGF,
2283 const RegionCodeGenTy &MaskedOpGen,
2284 SourceLocation Loc, const Expr *Filter) {
2285 if (!CGF.HaveInsertPoint())
2286 return;
2287 // if(__kmpc_masked(ident_t *, gtid, filter)) {
2288 // MaskedOpGen();
2289 // __kmpc_end_masked(ident_t *, gtid);
2290 // }
2291 // Prepare arguments and build a call to __kmpc_masked
2292 llvm::Value *FilterVal = Filter
2293 ? CGF.EmitScalarExpr(Filter, CGF.Int32Ty)
2294 : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0);
2295 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2296 FilterVal};
2297 llvm::Value *ArgsEnd[] = {emitUpdateLocation(CGF, Loc),
2298 getThreadID(CGF, Loc)};
2299 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2300 CGM.getModule(), OMPRTL___kmpc_masked),
2301 Args,
2302 OMPBuilder.getOrCreateRuntimeFunction(
2303 CGM.getModule(), OMPRTL___kmpc_end_masked),
2304 ArgsEnd,
2305 /*Conditional=*/true);
2306 MaskedOpGen.setAction(Action);
2307 emitInlinedDirective(CGF, OMPD_masked, MaskedOpGen);
2308 Action.Done(CGF);
2309}
2310
2311void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
2312 SourceLocation Loc) {
2313 if (!CGF.HaveInsertPoint())
2314 return;
2315 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
2316 OMPBuilder.createTaskyield(CGF.Builder);
2317 } else {
2318 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
2319 llvm::Value *Args[] = {
2320 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2321 llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
2322 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2323 CGM.getModule(), OMPRTL___kmpc_omp_taskyield),
2324 Args);
2325 }
2326
2327 if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2328 Region->emitUntiedSwitch(CGF);
2329}
2330
2331void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
2332 const RegionCodeGenTy &TaskgroupOpGen,
2333 SourceLocation Loc) {
2334 if (!CGF.HaveInsertPoint())
2335 return;
2336 // __kmpc_taskgroup(ident_t *, gtid);
2337 // TaskgroupOpGen();
2338 // __kmpc_end_taskgroup(ident_t *, gtid);
2339 // Prepare arguments and build a call to __kmpc_taskgroup
2340 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2341 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2342 CGM.getModule(), OMPRTL___kmpc_taskgroup),
2343 Args,
2344 OMPBuilder.getOrCreateRuntimeFunction(
2345 CGM.getModule(), OMPRTL___kmpc_end_taskgroup),
2346 Args);
2347 TaskgroupOpGen.setAction(Action);
2348 emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
2349}
2350
2351/// Given an array of pointers to variables, project the address of a
2352/// given variable.
2353static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
2354 unsigned Index, const VarDecl *Var) {
2355 // Pull out the pointer to the variable.
2356 Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
2357 llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
2358
2359 Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
2360 Addr = CGF.Builder.CreateElementBitCast(
2361 Addr, CGF.ConvertTypeForMem(Var->getType()));
2362 return Addr;
2363}
2364
2365static llvm::Value *emitCopyprivateCopyFunction(
2366 CodeGenModule &CGM, llvm::Type *ArgsType,
2367 ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
2368 ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
2369 SourceLocation Loc) {
2370 ASTContext &C = CGM.getContext();
2371 // void copy_func(void *LHSArg, void *RHSArg);
2372 FunctionArgList Args;
2373 ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
2374 ImplicitParamDecl::Other);
2375 ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
2376 ImplicitParamDecl::Other);
2377 Args.push_back(&LHSArg);
2378 Args.push_back(&RHSArg);
2379 const auto &CGFI =
2380 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2381 std::string Name =
2382 CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
2383 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
2384 llvm::GlobalValue::InternalLinkage, Name,
2385 &CGM.getModule());
2386 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2387 Fn->setDoesNotRecurse();
2388 CodeGenFunction CGF(CGM);
2389 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2390 // Dest = (void*[n])(LHSArg);
2391 // Src = (void*[n])(RHSArg);
2392 Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2393 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
2394 ArgsType), CGF.getPointerAlign());
2395 Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2396 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
2397 ArgsType), CGF.getPointerAlign());
2398 // *(Type0*)Dst[0] = *(Type0*)Src[0];
2399 // *(Type1*)Dst[1] = *(Type1*)Src[1];
2400 // ...
2401 // *(Typen*)Dst[n] = *(Typen*)Src[n];
2402 for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
2403 const auto *DestVar =
2404 cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
2405 Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
2406
2407 const auto *SrcVar =
2408 cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
2409 Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
2410
2411 const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
2412 QualType Type = VD->getType();
2413 CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
2414 }
2415 CGF.FinishFunction();
2416 return Fn;
2417}
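
The helper emitted above has the shape sketched below. This is an illustrative reconstruction in plain C++ (the name, the element types, and the number of slots are hypothetical); the real function is generated IR with one assignment per copyprivate variable.

static void omp_copyprivate_copy_func_sketch(void *LHSArg, void *RHSArg) {
  // Both arguments point to arrays of void*, one slot per copyprivate variable.
  void **Dst = static_cast<void **>(LHSArg);
  void **Src = static_cast<void **>(RHSArg);
  // *(Type_i*)Dst[i] = *(Type_i*)Src[i]; for every variable, e.g. for an int
  // and a double pair of copyprivate variables:
  *static_cast<int *>(Dst[0]) = *static_cast<int *>(Src[0]);
  *static_cast<double *>(Dst[1]) = *static_cast<double *>(Src[1]);
}
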
2418
2419void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
2420 const RegionCodeGenTy &SingleOpGen,
2421 SourceLocation Loc,
2422 ArrayRef<const Expr *> CopyprivateVars,
2423 ArrayRef<const Expr *> SrcExprs,
2424 ArrayRef<const Expr *> DstExprs,
2425 ArrayRef<const Expr *> AssignmentOps) {
2426 if (!CGF.HaveInsertPoint())
2427 return;
2428 assert(CopyprivateVars.size() == SrcExprs.size() &&
2429 CopyprivateVars.size() == DstExprs.size() &&
2430 CopyprivateVars.size() == AssignmentOps.size());
2431 ASTContext &C = CGM.getContext();
2432 // int32 did_it = 0;
2433 // if(__kmpc_single(ident_t *, gtid)) {
2434 // SingleOpGen();
2435 // __kmpc_end_single(ident_t *, gtid);
2436 // did_it = 1;
2437 // }
2438 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2439 // <copy_func>, did_it);
2440
2441 Address DidIt = Address::invalid();
2442 if (!CopyprivateVars.empty()) {
2443 // int32 did_it = 0;
2444 QualType KmpInt32Ty =
2445 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
2446 DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
2447 CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
2448 }
2449 // Prepare arguments and build a call to __kmpc_single
2450 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2451 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2452 CGM.getModule(), OMPRTL___kmpc_single),
2453 Args,
2454 OMPBuilder.getOrCreateRuntimeFunction(
2455 CGM.getModule(), OMPRTL___kmpc_end_single),
2456 Args,
2457 /*Conditional=*/true);
2458 SingleOpGen.setAction(Action);
2459 emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
2460 if (DidIt.isValid()) {
2461 // did_it = 1;
2462 CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
2463 }
2464 Action.Done(CGF);
2465 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
2466 // <copy_func>, did_it);
2467 if (DidIt.isValid()) {
2468 llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
2469 QualType CopyprivateArrayTy = C.getConstantArrayType(
2470 C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
2471 /*IndexTypeQuals=*/0);
2472 // Create a list of all private variables for copyprivate.
2473 Address CopyprivateList =
2474 CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
2475 for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
2476 Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I);
2477 CGF.Builder.CreateStore(
2478 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2479 CGF.EmitLValue(CopyprivateVars[I]).getPointer(CGF),
2480 CGF.VoidPtrTy),
2481 Elem);
2482 }
2483 // Build function that copies private values from single region to all other
2484 // threads in the corresponding parallel region.
2485 llvm::Value *CpyFn = emitCopyprivateCopyFunction(
2486 CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
2487 CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
2488 llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
2489 Address CL =
2490 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
2491 CGF.VoidPtrTy);
2492 llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
2493 llvm::Value *Args[] = {
2494 emitUpdateLocation(CGF, Loc), // ident_t *<loc>
2495 getThreadID(CGF, Loc), // i32 <gtid>
2496 BufSize, // size_t <buf_size>
2497 CL.getPointer(), // void *<copyprivate list>
2498 CpyFn, // void (*) (void *, void *) <copy_func>
2499 DidItVal // i32 did_it
2500 };
2501 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2502 CGM.getModule(), OMPRTL___kmpc_copyprivate),
2503 Args);
2504 }
2505}
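
A user-level construct that exercises the copyprivate path above looks roughly like the hedged example below (names invented). Without a copyprivate clause, DidIt stays invalid and only the __kmpc_single / __kmpc_end_single pair is emitted.

int config;
#pragma omp threadprivate(config)

void single_example() {
#pragma omp parallel
  {
    // One thread produces the value; __kmpc_copyprivate broadcasts it to the
    // threadprivate copies of all other threads, guarded by did_it.
#pragma omp single copyprivate(config)
    config = 42;
  }
}
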
2506
2507void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
2508 const RegionCodeGenTy &OrderedOpGen,
2509 SourceLocation Loc, bool IsThreads) {
2510 if (!CGF.HaveInsertPoint())
2511 return;
2512 // __kmpc_ordered(ident_t *, gtid);
2513 // OrderedOpGen();
2514 // __kmpc_end_ordered(ident_t *, gtid);
2515 // Prepare arguments and build a call to __kmpc_ordered
2516 if (IsThreads) {
2517 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2518 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
2519 CGM.getModule(), OMPRTL___kmpc_ordered),
2520 Args,
2521 OMPBuilder.getOrCreateRuntimeFunction(
2522 CGM.getModule(), OMPRTL___kmpc_end_ordered),
2523 Args);
2524 OrderedOpGen.setAction(Action);
2525 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2526 return;
2527 }
2528 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
2529}
2530
2531unsigned CGOpenMPRuntime::getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind) {
2532 unsigned Flags;
2533 if (Kind == OMPD_for)
2534 Flags = OMP_IDENT_BARRIER_IMPL_FOR;
2535 else if (Kind == OMPD_sections)
2536 Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
2537 else if (Kind == OMPD_single)
2538 Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
2539 else if (Kind == OMPD_barrier)
2540 Flags = OMP_IDENT_BARRIER_EXPL;
2541 else
2542 Flags = OMP_IDENT_BARRIER_IMPL;
2543 return Flags;
2544}
2545
2546void CGOpenMPRuntime::getDefaultScheduleAndChunk(
2547 CodeGenFunction &CGF, const OMPLoopDirective &S,
2548 OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const {
2549 // Check if the loop directive is actually a doacross loop directive. In this
2550 // case choose static, 1 schedule.
2551 if (llvm::any_of(
2552 S.getClausesOfKind<OMPOrderedClause>(),
2553 [](const OMPOrderedClause *C) { return C->getNumForLoops(); })) {
2554 ScheduleKind = OMPC_SCHEDULE_static;
2555 // Chunk size is 1 in this case.
2556 llvm::APInt ChunkSize(32, 1);
2557 ChunkExpr = IntegerLiteral::Create(
2558 CGF.getContext(), ChunkSize,
2559 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
2560 SourceLocation());
2561 }
2562}
2563
2564void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
2565 OpenMPDirectiveKind Kind, bool EmitChecks,
2566 bool ForceSimpleCall) {
2567 // Check if we should use the OMPBuilder
2568 auto *OMPRegionInfo =
2569 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo);
2570 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
2571 CGF.Builder.restoreIP(OMPBuilder.createBarrier(
2572 CGF.Builder, Kind, ForceSimpleCall, EmitChecks));
2573 return;
2574 }
2575
2576 if (!CGF.HaveInsertPoint())
2577 return;
2578 // Build call __kmpc_cancel_barrier(loc, thread_id);
2579 // Build call __kmpc_barrier(loc, thread_id);
2580 unsigned Flags = getDefaultFlagsForBarriers(Kind);
2581 // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
2582 // thread_id);
2583 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2584 getThreadID(CGF, Loc)};
2585 if (OMPRegionInfo) {
2586 if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
2587 llvm::Value *Result = CGF.EmitRuntimeCall(
2588 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
2589 OMPRTL___kmpc_cancel_barrier),
2590 Args);
2591 if (EmitChecks) {
2592 // if (__kmpc_cancel_barrier()) {
2593 // exit from construct;
2594 // }
2595 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
2596 llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
2597 llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
2598 CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
2599 CGF.EmitBlock(ExitBB);
2600 // exit from construct;
2601 CodeGenFunction::JumpDest CancelDestination =
2602 CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
2603 CGF.EmitBranchThroughCleanup(CancelDestination);
2604 CGF.EmitBlock(ContBB, /*IsFinished=*/true);
2605 }
2606 return;
2607 }
2608 }
2609 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2610 CGM.getModule(), OMPRTL___kmpc_barrier),
2611 Args);
2612}
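
The cancellation branch above matters for regions such as the following illustrative sketch (names invented): the implicit barrier of the worksharing loop is emitted as __kmpc_cancel_barrier, and a non-zero result branches to the cancellation exit.

void cancel_example(int *data, int n) {
#pragma omp parallel
  {
#pragma omp for
    for (int i = 0; i < n; ++i) {
      data[i] *= 2;
      if (data[i] < 0) {
#pragma omp cancel for              // makes the worksharing region cancellable
      }
    }
    // The implicit barrier of the loop above is emitted as
    // __kmpc_cancel_barrier plus a branch to the cancellation exit.
  }
}
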
2613
2614/// Map the OpenMP loop schedule to the runtime enumeration.
2615static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
2616 bool Chunked, bool Ordered) {
2617 switch (ScheduleKind) {
2618 case OMPC_SCHEDULE_static:
2619 return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
2620 : (Ordered ? OMP_ord_static : OMP_sch_static);
2621 case OMPC_SCHEDULE_dynamic:
2622 return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
2623 case OMPC_SCHEDULE_guided:
2624 return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
2625 case OMPC_SCHEDULE_runtime:
2626 return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
2627 case OMPC_SCHEDULE_auto:
2628 return Ordered ? OMP_ord_auto : OMP_sch_auto;
2629 case OMPC_SCHEDULE_unknown:
2630 assert(!Chunked && "chunk was specified but schedule kind not known");
2631 return Ordered ? OMP_ord_static : OMP_sch_static;
2632 }
2633 llvm_unreachable("Unexpected runtime schedule");
2634}
2635
2636/// Map the OpenMP distribute schedule to the runtime enumeration.
2637static OpenMPSchedType
2638getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
2639 // only static is allowed for dist_schedule
2640 return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
2641}
2642
2643bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
2644 bool Chunked) const {
2645 OpenMPSchedType Schedule =
2646 getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
2647 return Schedule == OMP_sch_static;
2648}
2649
2650bool CGOpenMPRuntime::isStaticNonchunked(
2651 OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
2652 OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
2653 return Schedule == OMP_dist_sch_static;
2654}
2655
2656bool CGOpenMPRuntime::isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
2657 bool Chunked) const {
2658 OpenMPSchedType Schedule =
2659 getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
2660 return Schedule == OMP_sch_static_chunked;
2661}
2662
2663bool CGOpenMPRuntime::isStaticChunked(
2664 OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
2665 OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
2666 return Schedule == OMP_dist_sch_static_chunked;
2667}
2668
2669bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
2670 OpenMPSchedType Schedule =
2671 getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
2672 assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
2673 return Schedule != OMP_sch_static;
2674}
2675
2676static int addMonoNonMonoModifier(CodeGenModule &CGM, OpenMPSchedType Schedule,
2677 OpenMPScheduleClauseModifier M1,
2678 OpenMPScheduleClauseModifier M2) {
2679 int Modifier = 0;
2680 switch (M1) {
2681 case OMPC_SCHEDULE_MODIFIER_monotonic:
2682 Modifier = OMP_sch_modifier_monotonic;
2683 break;
2684 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
2685 Modifier = OMP_sch_modifier_nonmonotonic;
2686 break;
2687 case OMPC_SCHEDULE_MODIFIER_simd:
2688 if (Schedule == OMP_sch_static_chunked)
2689 Schedule = OMP_sch_static_balanced_chunked;
2690 break;
2691 case OMPC_SCHEDULE_MODIFIER_last:
2692 case OMPC_SCHEDULE_MODIFIER_unknown:
2693 break;
2694 }
2695 switch (M2) {
2696 case OMPC_SCHEDULE_MODIFIER_monotonic:
2697 Modifier = OMP_sch_modifier_monotonic;
2698 break;
2699 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
2700 Modifier = OMP_sch_modifier_nonmonotonic;
2701 break;
2702 case OMPC_SCHEDULE_MODIFIER_simd:
2703 if (Schedule == OMP_sch_static_chunked)
2704 Schedule = OMP_sch_static_balanced_chunked;
2705 break;
2706 case OMPC_SCHEDULE_MODIFIER_last:
2707 case OMPC_SCHEDULE_MODIFIER_unknown:
2708 break;
2709 }
2710 // OpenMP 5.0, 2.9.2 Worksharing-Loop Construct, Description.
2711 // If the static schedule kind is specified or if the ordered clause is
2712 // specified, and if the nonmonotonic modifier is not specified, the effect is
2713 // as if the monotonic modifier is specified. Otherwise, unless the monotonic
2714 // modifier is specified, the effect is as if the nonmonotonic modifier is
2715 // specified.
2716 if (CGM.getLangOpts().OpenMP >= 50 && Modifier == 0) {
2717 if (!(Schedule == OMP_sch_static_chunked || Schedule == OMP_sch_static ||
2718 Schedule == OMP_sch_static_balanced_chunked ||
2719 Schedule == OMP_ord_static_chunked || Schedule == OMP_ord_static ||
2720 Schedule == OMP_dist_sch_static_chunked ||
2721 Schedule == OMP_dist_sch_static))
2722 Modifier = OMP_sch_modifier_nonmonotonic;
2723 }
2724 return Schedule | Modifier;
2725}
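
To make the OpenMP 5.0 defaulting rule above concrete, the lines below illustrate how a few schedule clauses map to the value returned by addMonoNonMonoModifier, assuming -fopenmp-version=50 or newer (the selection of pragmas is illustrative, not exhaustive):

// #pragma omp for schedule(dynamic, 4)
//   -> OMP_sch_dynamic_chunked | OMP_sch_modifier_nonmonotonic
// #pragma omp for schedule(monotonic: dynamic, 4)
//   -> OMP_sch_dynamic_chunked | OMP_sch_modifier_monotonic
// #pragma omp for schedule(static, 4)
//   -> OMP_sch_static_chunked          (static stays unmodified, i.e. monotonic)
// #pragma omp for schedule(simd: static, 4)
//   -> OMP_sch_static_balanced_chunked
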
2726
2727void CGOpenMPRuntime::emitForDispatchInit(
2728 CodeGenFunction &CGF, SourceLocation Loc,
2729 const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
2730 bool Ordered, const DispatchRTInput &DispatchValues) {
2731 if (!CGF.HaveInsertPoint())
2732 return;
2733 OpenMPSchedType Schedule = getRuntimeSchedule(
2734 ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
2735 assert(Ordered ||
2736 (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
2737 Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
2738 Schedule != OMP_sch_static_balanced_chunked));
2739 // Call __kmpc_dispatch_init(
2740 // ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
2741 // kmp_int[32|64] lower, kmp_int[32|64] upper,
2742 // kmp_int[32|64] stride, kmp_int[32|64] chunk);
2743
2744 // If the Chunk was not specified in the clause - use default value 1.
2745 llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
2746 : CGF.Builder.getIntN(IVSize, 1);
2747 llvm::Value *Args[] = {
2748 emitUpdateLocation(CGF, Loc),
2749 getThreadID(CGF, Loc),
2750 CGF.Builder.getInt32(addMonoNonMonoModifier(
2751 CGM, Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
2752 DispatchValues.LB, // Lower
2753 DispatchValues.UB, // Upper
2754 CGF.Builder.getIntN(IVSize, 1), // Stride
2755 Chunk // Chunk
2756 };
2757 CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
2758}
2759
2760static void emitForStaticInitCall(
2761 CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
2762 llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
2763 OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
2764 const CGOpenMPRuntime::StaticRTInput &Values) {
2765 if (!CGF.HaveInsertPoint())
2766 return;
2767
2768 assert(!Values.Ordered);
2769 assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
2770 Schedule == OMP_sch_static_balanced_chunked ||
2771 Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
2772 Schedule == OMP_dist_sch_static ||
2773 Schedule == OMP_dist_sch_static_chunked);
2774
2775 // Call __kmpc_for_static_init(
2776 // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
2777 // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
2778 // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
2779 // kmp_int[32|64] incr, kmp_int[32|64] chunk);
2780 llvm::Value *Chunk = Values.Chunk;
2781 if (Chunk == nullptr) {
2782 assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
2783 Schedule == OMP_dist_sch_static) &&
2784 "expected static non-chunked schedule");
2785 // If the Chunk was not specified in the clause - use default value 1.
2786 Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
2787 } else {
2788 assert((Schedule == OMP_sch_static_chunked ||
2789 Schedule == OMP_sch_static_balanced_chunked ||
2790 Schedule == OMP_ord_static_chunked ||
2791 Schedule == OMP_dist_sch_static_chunked) &&
2792 "expected static chunked schedule");
2793 }
2794 llvm::Value *Args[] = {
2795 UpdateLocation,
2796 ThreadId,
2797 CGF.Builder.getInt32(addMonoNonMonoModifier(CGF.CGM, Schedule, M1,
2798 M2)), // Schedule type
2799 Values.IL.getPointer(), // &isLastIter
2800 Values.LB.getPointer(), // &LB
2801 Values.UB.getPointer(), // &UB
2802 Values.ST.getPointer(), // &Stride
2803 CGF.Builder.getIntN(Values.IVSize, 1), // Incr
2804 Chunk // Chunk
2805 };
2806 CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
2807}
2808
2809void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
2810 SourceLocation Loc,
2811 OpenMPDirectiveKind DKind,
2812 const OpenMPScheduleTy &ScheduleKind,
2813 const StaticRTInput &Values) {
2814 OpenMPSchedType ScheduleNum = getRuntimeSchedule(
2815 ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
2816 assert(isOpenMPWorksharingDirective(DKind) &&
2817 "Expected loop-based or sections-based directive.");
2818 llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
2819 isOpenMPLoopDirective(DKind)
2820 ? OMP_IDENT_WORK_LOOP
2821 : OMP_IDENT_WORK_SECTIONS);
2822 llvm::Value *ThreadId = getThreadID(CGF, Loc);
2823 llvm::FunctionCallee StaticInitFunction =
2824 createForStaticInitFunction(Values.IVSize, Values.IVSigned);
2825 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
2826 emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
2827 ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
2828}
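
For orientation, a statically scheduled worksharing loop like the hedged example below is what ends up in emitForStaticInit / emitForStaticFinish: the init call computes this thread's bounds slice, and the generated loop then only iterates over it.

void saxpy(int n, float a, const float *x, float *y) {
#pragma omp parallel for schedule(static)
  for (int i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
  // Lowered (per thread) to __kmpc_for_static_init_4, the loop over the
  // returned bounds, then __kmpc_for_static_fini.
}
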
2829
2830void CGOpenMPRuntime::emitDistributeStaticInit(
2831 CodeGenFunction &CGF, SourceLocation Loc,
2832 OpenMPDistScheduleClauseKind SchedKind,
2833 const CGOpenMPRuntime::StaticRTInput &Values) {
2834 OpenMPSchedType ScheduleNum =
2835 getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
2836 llvm::Value *UpdatedLocation =
2837 emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
2838 llvm::Value *ThreadId = getThreadID(CGF, Loc);
2839 llvm::FunctionCallee StaticInitFunction =
2840 createForStaticInitFunction(Values.IVSize, Values.IVSigned);
2841 emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
2842 ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
2843 OMPC_SCHEDULE_MODIFIER_unknown, Values);
2844}
2845
2846void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
2847 SourceLocation Loc,
2848 OpenMPDirectiveKind DKind) {
2849 if (!CGF.HaveInsertPoint())
2850 return;
2851 // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
2852 llvm::Value *Args[] = {
2853 emitUpdateLocation(CGF, Loc,
2854 isOpenMPDistributeDirective(DKind)
2855 ? OMP_IDENT_WORK_DISTRIBUTE
2856 : isOpenMPLoopDirective(DKind)
2857 ? OMP_IDENT_WORK_LOOP
2858 : OMP_IDENT_WORK_SECTIONS),
2859 getThreadID(CGF, Loc)};
2860 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
2861 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2862 CGM.getModule(), OMPRTL___kmpc_for_static_fini),
2863 Args);
2864}
2865
2866void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
2867 SourceLocation Loc,
2868 unsigned IVSize,
2869 bool IVSigned) {
2870 if (!CGF.HaveInsertPoint())
2871 return;
2872 // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
2873 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2874 CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
2875}
2876
2877llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
2878 SourceLocation Loc, unsigned IVSize,
2879 bool IVSigned, Address IL,
2880 Address LB, Address UB,
2881 Address ST) {
2882 // Call __kmpc_dispatch_next(
2883 // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
2884 // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
2885 // kmp_int[32|64] *p_stride);
2886 llvm::Value *Args[] = {
2887 emitUpdateLocation(CGF, Loc),
2888 getThreadID(CGF, Loc),
2889 IL.getPointer(), // &isLastIter
2890 LB.getPointer(), // &Lower
2891 UB.getPointer(), // &Upper
2892 ST.getPointer() // &Stride
2893 };
2894 llvm::Value *Call =
2895 CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
2896 return CGF.EmitScalarConversion(
2897 Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
2898 CGF.getContext().BoolTy, Loc);
2899}
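
emitForDispatchInit and emitForNext together drive a dynamically scheduled loop. The sketch below shows that cooperation in plain C++; the extern declaration mirrors the argument comments above for the 32-bit signed case, and all other names are hypothetical.

extern "C" {
struct ident_t;
int __kmpc_dispatch_next_4(ident_t *loc, int gtid, int *p_lastiter,
                           int *p_lower, int *p_upper, int *p_stride);
}

static void dispatch_driven_loop(ident_t *Loc, int GTid, void (*Body)(int)) {
  int Last = 0, LB = 0, UB = 0, ST = 0;
  // __kmpc_dispatch_init_4(...) was already emitted by emitForDispatchInit.
  // Each successful dispatch_next call hands this thread another chunk.
  while (__kmpc_dispatch_next_4(Loc, GTid, &Last, &LB, &UB, &ST)) {
    for (int I = LB; I <= UB; I += ST)
      Body(I);
  }
}
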
2900
2901void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
2902 llvm::Value *NumThreads,
2903 SourceLocation Loc) {
2904 if (!CGF.HaveInsertPoint())
2905 return;
2906 // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
2907 llvm::Value *Args[] = {
2908 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2909 CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
2910 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2911 CGM.getModule(), OMPRTL___kmpc_push_num_threads),
2912 Args);
2913}
2914
2915void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
2916 ProcBindKind ProcBind,
2917 SourceLocation Loc) {
2918 if (!CGF.HaveInsertPoint())
2919 return;
2920 assert(ProcBind != OMP_PROC_BIND_unknown && "Unsupported proc_bind value.");
2921 // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
2922 llvm::Value *Args[] = {
2923 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2924 llvm::ConstantInt::get(CGM.IntTy, unsigned(ProcBind), /*isSigned=*/true)};
2925 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2926 CGM.getModule(), OMPRTL___kmpc_push_proc_bind),
2927 Args);
2928}
2929
2930void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
2931 SourceLocation Loc, llvm::AtomicOrdering AO) {
2932 if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
2933 OMPBuilder.createFlush(CGF.Builder);
2934 } else {
2935 if (!CGF.HaveInsertPoint())
2936 return;
2937 // Build call void __kmpc_flush(ident_t *loc)
2938 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
2939 CGM.getModule(), OMPRTL___kmpc_flush),
2940 emitUpdateLocation(CGF, Loc));
2941 }
2942}
2943
2944namespace {
2945/// Indexes of fields for type kmp_task_t.
2946enum KmpTaskTFields {
2947 /// List of shared variables.
2948 KmpTaskTShareds,
2949 /// Task routine.
2950 KmpTaskTRoutine,
2951 /// Partition id for the untied tasks.
2952 KmpTaskTPartId,
2953 /// Function with call of destructors for private variables.
2954 Data1,
2955 /// Task priority.
2956 Data2,
2957 /// (Taskloops only) Lower bound.
2958 KmpTaskTLowerBound,
2959 /// (Taskloops only) Upper bound.
2960 KmpTaskTUpperBound,
2961 /// (Taskloops only) Stride.
2962 KmpTaskTStride,
2963 /// (Taskloops only) Is last iteration flag.
2964 KmpTaskTLastIter,
2965 /// (Taskloops only) Reduction data.
2966 KmpTaskTReductions,
2967};
2968} // anonymous namespace
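
These indexes address fields of the kmp_task_t record that codegen builds later in this file. As rough orientation only, the layout corresponds to something like the sketch below; the field names, types, and packing are illustrative, not the runtime's actual definition.

struct kmp_task_t_sketch {
  void *shareds;      // KmpTaskTShareds: list of shared variables
  void *routine;      // KmpTaskTRoutine: task entry point
  int part_id;        // KmpTaskTPartId: partition id for untied tasks
  void *destructors;  // Data1: destructor thunk for private variables
  int priority;       // Data2: task priority
  long lower_bound;   // KmpTaskTLowerBound (taskloops only)
  long upper_bound;   // KmpTaskTUpperBound (taskloops only)
  long stride;        // KmpTaskTStride (taskloops only)
  int last_iter;      // KmpTaskTLastIter (taskloops only)
  void *reductions;   // KmpTaskTReductions (taskloops only)
};
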
2969
2970bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
2971 return OffloadEntriesTargetRegion.empty() &&
2972 OffloadEntriesDeviceGlobalVar.empty();
2973}
2974
2975/// Initialize target region entry.
2976void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
2977 initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
2978 StringRef ParentName, unsigned LineNum,
2979 unsigned Order) {
2980 assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
2981 "only required for the device "
2982 "code generation.");
2983 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
2984 OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
2985 OMPTargetRegionEntryTargetRegion);
2986 ++OffloadingEntriesNum;
2987}
2988
2989void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
2990 registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
2991 StringRef ParentName, unsigned LineNum,
2992 llvm::Constant *Addr, llvm::Constant *ID,
2993 OMPTargetRegionEntryKind Flags) {
2994 // If we are emitting code for a target, the entry is already initialized,
2995 // it only has to be registered.
2996 if (CGM.getLangOpts().OpenMPIsDevice) {
2997 // This could happen if the device compilation is invoked standalone.
2998 if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum))
2999 return;
3000 auto &Entry =
3001 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
3002 Entry.setAddress(Addr);
3003 Entry.setID(ID);
3004 Entry.setFlags(Flags);
3005 } else {
3006 if (Flags ==
3007 OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion &&
3008 hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum,
3009 /*IgnoreAddressId*/ true))
3010 return;
3011 assert(!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum) &&
3012 "Target region entry already registered!");
3013 OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
3014 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
3015 ++OffloadingEntriesNum;
3016 }
3017}
3018
3019bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
3020 unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum,
3021 bool IgnoreAddressId) const {
3022 auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
3023 if (PerDevice == OffloadEntriesTargetRegion.end())
3024 return false;
3025 auto PerFile = PerDevice->second.find(FileID);
3026 if (PerFile == PerDevice->second.end())
3027 return false;
3028 auto PerParentName = PerFile->second.find(ParentName);
3029 if (PerParentName == PerFile->second.end())
3030 return false;
3031 auto PerLine = PerParentName->second.find(LineNum);
3032 if (PerLine == PerParentName->second.end())
3033 return false;
3034 // Fail if this entry is already registered.
3035 if (!IgnoreAddressId &&
3036 (PerLine->second.getAddress() || PerLine->second.getID()))
3037 return false;
3038 return true;
3039}
3040
3041void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
3042 const OffloadTargetRegionEntryInfoActTy &Action) {
3043 // Scan all target region entries and perform the provided action.
3044 for (const auto &D : OffloadEntriesTargetRegion)
3045 for (const auto &F : D.second)
3046 for (const auto &P : F.second)
3047 for (const auto &L : P.second)
3048 Action(D.first, F.first, P.first(), L.first, L.second);
3049}
3050
3051void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3052 initializeDeviceGlobalVarEntryInfo(StringRef Name,
3053 OMPTargetGlobalVarEntryKind Flags,
3054 unsigned Order) {
3055 assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3056 "only required for the device "
3057 "code generation.");
3058 OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
3059 ++OffloadingEntriesNum;
3060}
3061
3062void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3063 registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
3064 CharUnits VarSize,
3065 OMPTargetGlobalVarEntryKind Flags,
3066 llvm::GlobalValue::LinkageTypes Linkage) {
3067 if (CGM.getLangOpts().OpenMPIsDevice) {
3068 // This could happen if the device compilation is invoked standalone.
3069 if (!hasDeviceGlobalVarEntryInfo(VarName))
3070 return;
3071 auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
3072 if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) {
3073 if (Entry.getVarSize().isZero()) {
3074 Entry.setVarSize(VarSize);
3075 Entry.setLinkage(Linkage);
3076 }
3077 return;
3078 }
3079 Entry.setVarSize(VarSize);
3080 Entry.setLinkage(Linkage);
3081 Entry.setAddress(Addr);
3082 } else {
3083 if (hasDeviceGlobalVarEntryInfo(VarName)) {
3084 auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
3085 assert(Entry.isValid() && Entry.getFlags() == Flags &&
3086 "Entry not initialized!");
3087 if (Entry.getVarSize().isZero()) {
3088 Entry.setVarSize(VarSize);
3089 Entry.setLinkage(Linkage);
3090 }
3091 return;
3092 }
3093 OffloadEntriesDeviceGlobalVar.try_emplace(
3094 VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
3095 ++OffloadingEntriesNum;
3096 }
3097}
3098
3099void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3100 actOnDeviceGlobalVarEntriesInfo(
3101 const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
3102 // Scan all target region entries and perform the provided action.
3103 for (const auto &E : OffloadEntriesDeviceGlobalVar)
3104 Action(E.getKey(), E.getValue());
3105}
3106
3107void CGOpenMPRuntime::createOffloadEntry(
3108 llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
3109 llvm::GlobalValue::LinkageTypes Linkage) {
3110 StringRef Name = Addr->getName();
3111 llvm::Module &M = CGM.getModule();
3112 llvm::LLVMContext &C = M.getContext();
3113
3114 // Create constant string with the name.
3115 llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
3116
3117 std::string StringName = getName({"omp_offloading", "entry_name"});
3118 auto *Str = new llvm::GlobalVariable(
3119 M, StrPtrInit->getType(), /*isConstant=*/true,
3120 llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
3121 Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3122
3123 llvm::Constant *Data[] = {
3124 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(ID, CGM.VoidPtrTy),
3125 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(Str, CGM.Int8PtrTy),
3126 llvm::ConstantInt::get(CGM.SizeTy, Size),
3127 llvm::ConstantInt::get(CGM.Int32Ty, Flags),
3128 llvm::ConstantInt::get(CGM.Int32Ty, 0)};
3129 std::string EntryName = getName({"omp_offloading", "entry", ""});
3130 llvm::GlobalVariable *Entry = createGlobalStruct(
3131 CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data,
3132 Twine(EntryName).concat(Name), llvm::GlobalValue::WeakAnyLinkage);
3133
3134 // The entry has to be created in the section the linker expects it to be.
3135 Entry->setSection("omp_offloading_entries");
3136}
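
The five constants collected in Data above initialize a record of roughly the shape sketched below. This is inferred from the initializer order only; the authoritative type comes from getTgtOffloadEntryQTy(), and the struct name here is invented.

#include <cstddef>
#include <cstdint>

struct tgt_offload_entry_sketch {
  void *addr;       // ID of the target region or address of the global
  char *name;       // pointer to the omp_offloading.entry_name string
  size_t size;      // 0 for target regions, size in bytes for globals
  int32_t flags;
  int32_t reserved; // currently always 0
};
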
3137
3138void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
3139 // Emit the offloading entries and metadata so that the device codegen side
3140 // can easily figure out what to emit. The produced metadata looks like
3141 // this:
3142 //
3143 // !omp_offload.info = !{!1, ...}
3144 //
3145 // Right now we only generate metadata for functions that contain target
3146 // regions.
3147
3148 // If we are in simd mode or there are no entries, we don't need to do
3149 // anything.
3150 if (CGM.getLangOpts().OpenMPSimd || OffloadEntriesInfoManager.empty())
3151 return;
3152
3153 llvm::Module &M = CGM.getModule();
3154 llvm::LLVMContext &C = M.getContext();
3155 SmallVector<std::tuple<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *,
3156 SourceLocation, StringRef>,
3157 16>
3158 OrderedEntries(OffloadEntriesInfoManager.size());
3159 llvm::SmallVector<StringRef, 16> ParentFunctions(
3160 OffloadEntriesInfoManager.size());
3161
3162 // Auxiliary methods to create metadata values and strings.
3163 auto &&GetMDInt = [this](unsigned V) {
3164 return llvm::ConstantAsMetadata::get(
3165 llvm::ConstantInt::get(CGM.Int32Ty, V));
3166 };
3167
3168 auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
3169
3170 // Create the offloading info metadata node.
3171 llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
3172
3173 // Create function that emits metadata for each target region entry;
3174 auto &&TargetRegionMetadataEmitter =
3175 [this, &C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt,
3176 &GetMDString](
3177 unsigned DeviceID, unsigned FileID, StringRef ParentName,
3178 unsigned Line,
3179 const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
3180 // Generate metadata for target regions. Each entry of this metadata
3181 // contains:
3182 // - Entry 0 -> Kind of this type of metadata (0).
3183 // - Entry 1 -> Device ID of the file where the entry was identified.
3184 // - Entry 2 -> File ID of the file where the entry was identified.
3185 // - Entry 3 -> Mangled name of the function where the entry was
3186 // identified.
3187 // - Entry 4 -> Line in the file where the entry was identified.
3188 // - Entry 5 -> Order the entry was created.
3189 // The first element of the metadata node is the kind.
3190 llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
3191 GetMDInt(FileID), GetMDString(ParentName),
3192 GetMDInt(Line), GetMDInt(E.getOrder())};
3193
3194 SourceLocation Loc;
3195 for (auto I = CGM.getContext().getSourceManager().fileinfo_begin(),
3196 E = CGM.getContext().getSourceManager().fileinfo_end();
3197 I != E; ++I) {
3198 if (I->getFirst()->getUniqueID().getDevice() == DeviceID &&
3199 I->getFirst()->getUniqueID().getFile() == FileID) {
3200 Loc = CGM.getContext().getSourceManager().translateFileLineCol(
3201 I->getFirst(), Line, 1);
3202 break;
3203 }
3204 }
3205 // Save this entry in the right position of the ordered entries array.
3206 OrderedEntries[E.getOrder()] = std::make_tuple(&E, Loc, ParentName);
3207 ParentFunctions[E.getOrder()] = ParentName;
3208
3209 // Add metadata to the named metadata node.
3210 MD->addOperand(llvm::MDNode::get(C, Ops));
3211 };
3212
3213 OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
3214 TargetRegionMetadataEmitter);
3215
3216 // Create function that emits metadata for each device global variable entry;
3217 auto &&DeviceGlobalVarMetadataEmitter =
3218 [&C, &OrderedEntries, &GetMDInt, &GetMDString,
3219 MD](StringRef MangledName,
3220 const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar
3221 &E) {
3222 // Generate metadata for global variables. Each entry of this metadata
3223 // contains:
3224 // - Entry 0 -> Kind of this type of metadata (1).
3225 // - Entry 1 -> Mangled name of the variable.
3226 // - Entry 2 -> Declare target kind.
3227 // - Entry 3 -> Order the entry was created.
3228 // The first element of the metadata node is the kind.
3229 llvm::Metadata *Ops[] = {
3230 GetMDInt(E.getKind()), GetMDString(MangledName),
3231 GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
3232
3233 // Save this entry in the right position of the ordered entries array.
3234 OrderedEntries[E.getOrder()] =
3235 std::make_tuple(&E, SourceLocation(), MangledName);
3236
3237 // Add metadata to the named metadata node.
3238 MD->addOperand(llvm::MDNode::get(C, Ops));
3239 };
3240
3241 OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
3242 DeviceGlobalVarMetadataEmitter);
3243
3244 for (const auto &E : OrderedEntries) {
3245 assert(std::get<0>(E) && "All ordered entries must exist!");
3246 if (const auto *CE =
3247 dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
3248 std::get<0>(E))) {
3249 if (!CE->getID() || !CE->getAddress()) {
3250 // Do not blame the entry if the parent function is not emitted.
3251 StringRef FnName = ParentFunctions[CE->getOrder()];
3252 if (!CGM.GetGlobalValue(FnName))
3253 continue;
3254 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3255 DiagnosticsEngine::Error,
3256 "Offloading entry for target region in %0 is incorrect: either the "
3257 "address or the ID is invalid.");
3258 CGM.getDiags().Report(std::get<1>(E), DiagID) << FnName;
3259 continue;
3260 }
3261 createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
3262 CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
3263 } else if (const auto *CE = dyn_cast<OffloadEntriesInfoManagerTy::
3264 OffloadEntryInfoDeviceGlobalVar>(
3265 std::get<0>(E))) {
3266 OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags =
3267 static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
3268 CE->getFlags());
3269 switch (Flags) {
3270 case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: {
3271 if (CGM.getLangOpts().OpenMPIsDevice &&
3272 CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())
3273 continue;
3274 if (!CE->getAddress()) {
3275 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3276 DiagnosticsEngine::Error, "Offloading entry for declare target "
3277 "variable %0 is incorrect: the "
3278 "address is invalid.");
3279 CGM.getDiags().Report(std::get<1>(E), DiagID) << std::get<2>(E);
3280 continue;
3281 }
3282 // The variable has no definition - no need to add the entry.
3283 if (CE->getVarSize().isZero())
3284 continue;
3285 break;
3286 }
3287 case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink:
3288 assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
3289 (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
3290 "Declare target link address is set.");
3291 if (CGM.getLangOpts().OpenMPIsDevice)
3292 continue;
3293 if (!CE->getAddress()) {
3294 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3295 DiagnosticsEngine::Error,
3296 "Offloading entry for declare target variable is incorrect: the "
3297 "address is invalid.");
3298 CGM.getDiags().Report(DiagID);
3299 continue;
3300 }
3301 break;
3302 }
3303 createOffloadEntry(CE->getAddress(), CE->getAddress(),
3304 CE->getVarSize().getQuantity(), Flags,
3305 CE->getLinkage());
3306 } else {
3307      llvm_unreachable("Unsupported entry kind.");
3308 }
3309 }
3310}
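
A minimal source-level sketch (hypothetical, not taken from this report) of the kind of input that reaches the device-global-variable branch above: a 'declare target' variable that is defined in the translation unit produces an OMPTargetGlobalVarEntryTo entry with a non-zero size, so createOffloadEntry is invoked for it.

  // Hypothetical example: a defined 'declare target' variable for which the
  // loop above emits an offload entry (VarSize != 0, address is valid).
  #pragma omp declare target
  int DeviceCounter = 0;
  #pragma omp end declare target
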
3311
3312/// Loads all the offload entries information from the host IR
3313/// metadata.
3314void CGOpenMPRuntime::loadOffloadInfoMetadata() {
3315 // If we are in target mode, load the metadata from the host IR. This code has
3316  // to match the metadata creation in createOffloadEntriesAndInfoMetadata().
3317
3318 if (!CGM.getLangOpts().OpenMPIsDevice)
3319 return;
3320
3321 if (CGM.getLangOpts().OMPHostIRFile.empty())
3322 return;
3323
3324 auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
3325 if (auto EC = Buf.getError()) {
3326 CGM.getDiags().Report(diag::err_cannot_open_file)
3327 << CGM.getLangOpts().OMPHostIRFile << EC.message();
3328 return;
3329 }
3330
3331 llvm::LLVMContext C;
3332 auto ME = expectedToErrorOrAndEmitErrors(
3333 C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
3334
3335 if (auto EC = ME.getError()) {
3336 unsigned DiagID = CGM.getDiags().getCustomDiagID(
3337 DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
3338 CGM.getDiags().Report(DiagID)
3339 << CGM.getLangOpts().OMPHostIRFile << EC.message();
3340 return;
3341 }
3342
3343 llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
3344 if (!MD)
3345 return;
3346
3347 for (llvm::MDNode *MN : MD->operands()) {
3348 auto &&GetMDInt = [MN](unsigned Idx) {
3349 auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
3350 return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
3351 };
3352
3353 auto &&GetMDString = [MN](unsigned Idx) {
3354 auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
3355 return V->getString();
3356 };
3357
3358 switch (GetMDInt(0)) {
3359 default:
3360      llvm_unreachable("Unexpected metadata!");
3361 break;
3362 case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
3363 OffloadingEntryInfoTargetRegion:
3364 OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
3365 /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
3366 /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
3367 /*Order=*/GetMDInt(5));
3368 break;
3369 case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
3370 OffloadingEntryInfoDeviceGlobalVar:
3371 OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo(
3372 /*MangledName=*/GetMDString(1),
3373 static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
3374 /*Flags=*/GetMDInt(2)),
3375 /*Order=*/GetMDInt(3));
3376 break;
3377 }
3378 }
3379}
3380
3381void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
3382 if (!KmpRoutineEntryPtrTy) {
3383 // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
3384 ASTContext &C = CGM.getContext();
3385 QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
3386 FunctionProtoType::ExtProtoInfo EPI;
3387 KmpRoutineEntryPtrQTy = C.getPointerType(
3388 C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
3389 KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
3390 }
3391}
3392
3393QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
3394 // Make sure the type of the entry is already created. This is the type we
3395 // have to create:
3396 // struct __tgt_offload_entry{
3397 // void *addr; // Pointer to the offload entry info.
3398 // // (function or global)
3399 // char *name; // Name of the function or global.
3400  // char *name; // Name of the function or global.
3401  // size_t size; // Size of the entry info (0 if it is a function).
3401 // int32_t flags; // Flags associated with the entry, e.g. 'link'.
3402  // int32_t reserved; // Reserved, to be used by the runtime library.
3403 // };
3404 if (TgtOffloadEntryQTy.isNull()) {
3405 ASTContext &C = CGM.getContext();
3406 RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
3407 RD->startDefinition();
3408 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3409 addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
3410 addFieldToRecordDecl(C, RD, C.getSizeType());
3411 addFieldToRecordDecl(
3412 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3413 addFieldToRecordDecl(
3414 C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
3415 RD->completeDefinition();
3416 RD->addAttr(PackedAttr::CreateImplicit(C));
3417 TgtOffloadEntryQTy = C.getRecordType(RD);
3418 }
3419 return TgtOffloadEntryQTy;
3420}
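
For reference, a minimal C++ sketch of the packed record that getTgtOffloadEntryQTy() builds, mirroring the comment and the addFieldToRecordDecl calls above; the struct spelling below is illustrative only.

  #include <cstddef>
  #include <cstdint>

  struct __attribute__((packed)) __tgt_offload_entry {
    void *addr;       // Pointer to the offload entry info (function or global).
    char *name;       // Name of the function or global.
    size_t size;      // Size of the entry info (0 if it is a function).
    int32_t flags;    // Flags associated with the entry, e.g. 'link'.
    int32_t reserved; // Reserved, to be used by the runtime library.
  };
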
3421
3422namespace {
3423struct PrivateHelpersTy {
3424 PrivateHelpersTy(const Expr *OriginalRef, const VarDecl *Original,
3425 const VarDecl *PrivateCopy, const VarDecl *PrivateElemInit)
3426 : OriginalRef(OriginalRef), Original(Original), PrivateCopy(PrivateCopy),
3427 PrivateElemInit(PrivateElemInit) {}
3428 PrivateHelpersTy(const VarDecl *Original) : Original(Original) {}
3429 const Expr *OriginalRef = nullptr;
3430 const VarDecl *Original = nullptr;
3431 const VarDecl *PrivateCopy = nullptr;
3432 const VarDecl *PrivateElemInit = nullptr;
3433 bool isLocalPrivate() const {
3434 return !OriginalRef && !PrivateCopy && !PrivateElemInit;
3435 }
3436};
3437typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
3438} // anonymous namespace
3439
3440static bool isAllocatableDecl(const VarDecl *VD) {
3441 const VarDecl *CVD = VD->getCanonicalDecl();
3442 if (!CVD->hasAttr<OMPAllocateDeclAttr>())
3443 return false;
3444 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
3445 // Use the default allocation.
3446 return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
3447 AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
3448 !AA->getAllocator());
3449}
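
A hypothetical OpenMP fragment for which isAllocatableDecl() would return true: the allocate directive names a non-default allocator, so the OMPAllocateDeclAttr carries an explicit allocator. The function and variable names are made up for illustration.

  #include <omp.h>

  void use_high_bw_buffer() {
    double buf[64];
    // A non-default allocator is requested, so isAllocatableDecl() returns
    // true for 'buf' and the task-related records store a pointer to it.
    #pragma omp allocate(buf) allocator(omp_high_bw_mem_alloc)
    buf[0] = 1.0;
  }
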
3450
3451static RecordDecl *
3452createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
3453 if (!Privates.empty()) {
3454 ASTContext &C = CGM.getContext();
3455 // Build struct .kmp_privates_t. {
3456 // /* private vars */
3457 // };
3458 RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
3459 RD->startDefinition();
3460 for (const auto &Pair : Privates) {
3461 const VarDecl *VD = Pair.second.Original;
3462 QualType Type = VD->getType().getNonReferenceType();
3463 // If the private variable is a local variable with lvalue ref type,
3464 // allocate the pointer instead of the pointee type.
3465 if (Pair.second.isLocalPrivate()) {
3466 if (VD->getType()->isLValueReferenceType())
3467 Type = C.getPointerType(Type);
3468 if (isAllocatableDecl(VD))
3469 Type = C.getPointerType(Type);
3470 }
3471 FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
3472 if (VD->hasAttrs()) {
3473 for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
3474 E(VD->getAttrs().end());
3475 I != E; ++I)
3476 FD->addAttr(*I);
3477 }
3478 }
3479 RD->completeDefinition();
3480 return RD;
3481 }
3482 return nullptr;
3483}
3484
3485static RecordDecl *
3486createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
3487 QualType KmpInt32Ty,
3488 QualType KmpRoutineEntryPointerQTy) {
3489 ASTContext &C = CGM.getContext();
3490 // Build struct kmp_task_t {
3491 // void * shareds;
3492 // kmp_routine_entry_t routine;
3493 // kmp_int32 part_id;
3494 // kmp_cmplrdata_t data1;
3495 // kmp_cmplrdata_t data2;
3496 // For taskloops additional fields:
3497 // kmp_uint64 lb;
3498 // kmp_uint64 ub;
3499 // kmp_int64 st;
3500 // kmp_int32 liter;
3501 // void * reductions;
3502 // };
3503 RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
3504 UD->startDefinition();
3505 addFieldToRecordDecl(C, UD, KmpInt32Ty);
3506 addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
3507 UD->completeDefinition();
3508 QualType KmpCmplrdataTy = C.getRecordType(UD);
3509 RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
3510 RD->startDefinition();
3511 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3512 addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
3513 addFieldToRecordDecl(C, RD, KmpInt32Ty);
3514 addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
3515 addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
3516 if (isOpenMPTaskLoopDirective(Kind)) {
3517 QualType KmpUInt64Ty =
3518 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
3519 QualType KmpInt64Ty =
3520 CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
3521 addFieldToRecordDecl(C, RD, KmpUInt64Ty);
3522 addFieldToRecordDecl(C, RD, KmpUInt64Ty);
3523 addFieldToRecordDecl(C, RD, KmpInt64Ty);
3524 addFieldToRecordDecl(C, RD, KmpInt32Ty);
3525 addFieldToRecordDecl(C, RD, C.VoidPtrTy);
3526 }
3527 RD->completeDefinition();
3528 return RD;
3529}
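
For readability, a C++ sketch of the records this function assembles, assuming the taskloop-only fields are appended exactly as in the code above; the union member names are assumptions (the code only adds a kmp_int32 field and a routine-entry field).

  #include <cstdint>

  typedef int32_t kmp_int32;
  typedef int64_t kmp_int64;
  typedef uint64_t kmp_uint64;
  typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32, void *);

  union kmp_cmplrdata_t {        // member names are illustrative
    kmp_int32 priority;
    kmp_routine_entry_t destructors;
  };

  struct kmp_task_t {
    void *shareds;
    kmp_routine_entry_t routine;
    kmp_int32 part_id;
    kmp_cmplrdata_t data1;
    kmp_cmplrdata_t data2;
    // Present only for taskloop directives:
    kmp_uint64 lb;
    kmp_uint64 ub;
    kmp_int64 st;
    kmp_int32 liter;
    void *reductions;
  };
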
3530
3531static RecordDecl *
3532createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
3533 ArrayRef<PrivateDataTy> Privates) {
3534 ASTContext &C = CGM.getContext();
3535 // Build struct kmp_task_t_with_privates {
3536 // kmp_task_t task_data;
3537 // .kmp_privates_t. privates;
3538 // };
3539 RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
3540 RD->startDefinition();
3541 addFieldToRecordDecl(C, RD, KmpTaskTQTy);
3542 if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
3543 addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
3544 RD->completeDefinition();
3545 return RD;
3546}
3547
3548/// Emit a proxy function which accepts kmp_task_t as the second
3549/// argument.
3550/// \code
3551/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
3552/// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
3553/// For taskloops:
3554/// tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3555/// tt->reductions, tt->shareds);
3556/// return 0;
3557/// }
3558/// \endcode
3559static llvm::Function *
3560emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
3561 OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
3562 QualType KmpTaskTWithPrivatesPtrQTy,
3563 QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
3564 QualType SharedsPtrTy, llvm::Function *TaskFunction,
3565 llvm::Value *TaskPrivatesMap) {
3566 ASTContext &C = CGM.getContext();
3567 FunctionArgList Args;
3568 ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
3569 ImplicitParamDecl::Other);
3570 ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3571 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
3572 ImplicitParamDecl::Other);
3573 Args.push_back(&GtidArg);
3574 Args.push_back(&TaskTypeArg);
3575 const auto &TaskEntryFnInfo =
3576 CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
3577 llvm::FunctionType *TaskEntryTy =
3578 CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
3579 std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
3580 auto *TaskEntry = llvm::Function::Create(
3581 TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
3582 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
3583 TaskEntry->setDoesNotRecurse();
3584 CodeGenFunction CGF(CGM);
3585 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
3586 Loc, Loc);
3587
3588 // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
3589 // tt,
3590 // For taskloops:
3591 // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
3592 // tt->task_data.shareds);
3593 llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
3594 CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
3595 LValue TDBase = CGF.EmitLoadOfPointerLValue(
3596 CGF.GetAddrOfLocalVar(&TaskTypeArg),
3597 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3598 const auto *KmpTaskTWithPrivatesQTyRD =
3599 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
3600 LValue Base =
3601 CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
3602 const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
3603 auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
3604 LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
3605 llvm::Value *PartidParam = PartIdLVal.getPointer(CGF);
3606
3607 auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
3608 LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
3609 llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3610 CGF.EmitLoadOfScalar(SharedsLVal, Loc),
3611 CGF.ConvertTypeForMem(SharedsPtrTy));
3612
3613 auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
3614 llvm::Value *PrivatesParam;
3615 if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
3616 LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
3617 PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3618 PrivatesLVal.getPointer(CGF), CGF.VoidPtrTy);
3619 } else {
3620 PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
3621 }
3622
3623 llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
3624 TaskPrivatesMap,
3625 CGF.Builder
3626 .CreatePointerBitCastOrAddrSpaceCast(
3627 TDBase.getAddress(CGF), CGF.VoidPtrTy)
3628 .getPointer()};
3629 SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
3630 std::end(CommonArgs));
3631 if (isOpenMPTaskLoopDirective(Kind)) {
3632 auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
3633 LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
3634 llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
3635 auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
3636 LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
3637 llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
3638 auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
3639 LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
3640 llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
3641 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
3642 LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
3643 llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
3644 auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
3645 LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
3646 llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
3647 CallArgs.push_back(LBParam);
3648 CallArgs.push_back(UBParam);
3649 CallArgs.push_back(StParam);
3650 CallArgs.push_back(LIParam);
3651 CallArgs.push_back(RParam);
3652 }
3653 CallArgs.push_back(SharedsParam);
3654
3655 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
3656 CallArgs);
3657 CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
3658 CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
3659 CGF.FinishFunction();
3660 return TaskEntry;
3661}
3662
3663static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
3664 SourceLocation Loc,
3665 QualType KmpInt32Ty,
3666 QualType KmpTaskTWithPrivatesPtrQTy,
3667 QualType KmpTaskTWithPrivatesQTy) {
3668 ASTContext &C = CGM.getContext();
3669 FunctionArgList Args;
3670 ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
3671 ImplicitParamDecl::Other);
3672 ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3673 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
3674 ImplicitParamDecl::Other);
3675 Args.push_back(&GtidArg);
3676 Args.push_back(&TaskTypeArg);
3677 const auto &DestructorFnInfo =
3678 CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
3679 llvm::FunctionType *DestructorFnTy =
3680 CGM.getTypes().GetFunctionType(DestructorFnInfo);
3681 std::string Name =
3682 CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
3683 auto *DestructorFn =
3684 llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
3685 Name, &CGM.getModule());
3686 CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
3687 DestructorFnInfo);
3688 DestructorFn->setDoesNotRecurse();
3689 CodeGenFunction CGF(CGM);
3690 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
3691 Args, Loc, Loc);
3692
3693 LValue Base = CGF.EmitLoadOfPointerLValue(
3694 CGF.GetAddrOfLocalVar(&TaskTypeArg),
3695 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
3696 const auto *KmpTaskTWithPrivatesQTyRD =
3697 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
3698 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
3699 Base = CGF.EmitLValueForField(Base, *FI);
3700 for (const auto *Field :
3701 cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
3702 if (QualType::DestructionKind DtorKind =
3703 Field->getType().isDestructedType()) {
3704 LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
3705 CGF.pushDestroy(DtorKind, FieldLValue.getAddress(CGF), Field->getType());
3706 }
3707 }
3708 CGF.FinishFunction();
3709 return DestructorFn;
3710}
3711
3712/// Emit a privates mapping function for correct handling of private and
3713/// firstprivate variables.
3714/// \code
3715/// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
3716/// **noalias priv1,..., <tyn> **noalias privn) {
3717/// *priv1 = &.privates.priv1;
3718/// ...;
3719/// *privn = &.privates.privn;
3720/// }
3721/// \endcode
3722static llvm::Value *
3723emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
3724 const OMPTaskDataTy &Data, QualType PrivatesQTy,
3725 ArrayRef<PrivateDataTy> Privates) {
3726 ASTContext &C = CGM.getContext();
3727 FunctionArgList Args;
3728 ImplicitParamDecl TaskPrivatesArg(
3729 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3730 C.getPointerType(PrivatesQTy).withConst().withRestrict(),
3731 ImplicitParamDecl::Other);
3732 Args.push_back(&TaskPrivatesArg);
3733 llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, unsigned> PrivateVarsPos;
3734 unsigned Counter = 1;
3735 for (const Expr *E : Data.PrivateVars) {
3736 Args.push_back(ImplicitParamDecl::Create(
3737 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3738 C.getPointerType(C.getPointerType(E->getType()))
3739 .withConst()
3740 .withRestrict(),
3741 ImplicitParamDecl::Other));
3742 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3743 PrivateVarsPos[VD] = Counter;
3744 ++Counter;
3745 }
3746 for (const Expr *E : Data.FirstprivateVars) {
3747 Args.push_back(ImplicitParamDecl::Create(
3748 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3749 C.getPointerType(C.getPointerType(E->getType()))
3750 .withConst()
3751 .withRestrict(),
3752 ImplicitParamDecl::Other));
3753 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3754 PrivateVarsPos[VD] = Counter;
3755 ++Counter;
3756 }
3757 for (const Expr *E : Data.LastprivateVars) {
3758 Args.push_back(ImplicitParamDecl::Create(
3759 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3760 C.getPointerType(C.getPointerType(E->getType()))
3761 .withConst()
3762 .withRestrict(),
3763 ImplicitParamDecl::Other));
3764 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3765 PrivateVarsPos[VD] = Counter;
3766 ++Counter;
3767 }
3768 for (const VarDecl *VD : Data.PrivateLocals) {
3769 QualType Ty = VD->getType().getNonReferenceType();
3770 if (VD->getType()->isLValueReferenceType())
3771 Ty = C.getPointerType(Ty);
3772 if (isAllocatableDecl(VD))
3773 Ty = C.getPointerType(Ty);
3774 Args.push_back(ImplicitParamDecl::Create(
3775 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3776 C.getPointerType(C.getPointerType(Ty)).withConst().withRestrict(),
3777 ImplicitParamDecl::Other));
3778 PrivateVarsPos[VD] = Counter;
3779 ++Counter;
3780 }
3781 const auto &TaskPrivatesMapFnInfo =
3782 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3783 llvm::FunctionType *TaskPrivatesMapTy =
3784 CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
3785 std::string Name =
3786 CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
3787 auto *TaskPrivatesMap = llvm::Function::Create(
3788 TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
3789 &CGM.getModule());
3790 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
3791 TaskPrivatesMapFnInfo);
3792 if (CGM.getLangOpts().Optimize) {
3793 TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
3794 TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
3795 TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
3796 }
3797 CodeGenFunction CGF(CGM);
3798 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
3799 TaskPrivatesMapFnInfo, Args, Loc, Loc);
3800
3801 // *privi = &.privates.privi;
3802 LValue Base = CGF.EmitLoadOfPointerLValue(
3803 CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
3804 TaskPrivatesArg.getType()->castAs<PointerType>());
3805 const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
3806 Counter = 0;
3807 for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
3808 LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
3809 const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
3810 LValue RefLVal =
3811 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
3812 LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
3813 RefLVal.getAddress(CGF), RefLVal.getType()->castAs<PointerType>());
3814 CGF.EmitStoreOfScalar(FieldLVal.getPointer(CGF), RefLoadLVal);
3815 ++Counter;
3816 }
3817 CGF.FinishFunction();
3818 return TaskPrivatesMap;
3819}
3820
3821/// Emit initialization for private variables in task-based directives.
3822static void emitPrivatesInit(CodeGenFunction &CGF,
3823 const OMPExecutableDirective &D,
3824 Address KmpTaskSharedsPtr, LValue TDBase,
3825 const RecordDecl *KmpTaskTWithPrivatesQTyRD,
3826 QualType SharedsTy, QualType SharedsPtrTy,
3827 const OMPTaskDataTy &Data,
3828 ArrayRef<PrivateDataTy> Privates, bool ForDup) {
3829 ASTContext &C = CGF.getContext();
3830 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
3831 LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
3832 OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
3833 ? OMPD_taskloop
3834 : OMPD_task;
3835 const CapturedStmt &CS = *D.getCapturedStmt(Kind);
3836 CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
3837 LValue SrcBase;
3838 bool IsTargetTask =
3839 isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
3840 isOpenMPTargetExecutionDirective(D.getDirectiveKind());
3841 // For target-based directives skip 4 firstprivate arrays BasePointersArray,
3842 // PointersArray, SizesArray, and MappersArray. The original variables for
3843 // these arrays are not captured and we get their addresses explicitly.
3844 if ((!IsTargetTask && !Data.FirstprivateVars.empty() && ForDup) ||
3845 (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
3846 SrcBase = CGF.MakeAddrLValue(
3847 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3848 KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
3849 SharedsTy);
3850 }
3851 FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
3852 for (const PrivateDataTy &Pair : Privates) {
3853 // Do not initialize private locals.
3854 if (Pair.second.isLocalPrivate()) {
3855 ++FI;
3856 continue;
3857 }
3858 const VarDecl *VD = Pair.second.PrivateCopy;
3859 const Expr *Init = VD->getAnyInitializer();
3860 if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
3861 !CGF.isTrivialInitializer(Init)))) {
3862 LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
3863 if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
3864 const VarDecl *OriginalVD = Pair.second.Original;
3865 // Check if the variable is the target-based BasePointersArray,
3866 // PointersArray, SizesArray, or MappersArray.
3867 LValue SharedRefLValue;
3868 QualType Type = PrivateLValue.getType();
3869 const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
3870 if (IsTargetTask && !SharedField) {
3871          assert(isa<ImplicitParamDecl>(OriginalVD) &&
3872                 isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
3873                 cast<CapturedDecl>(OriginalVD->getDeclContext())
3874                         ->getNumParams() == 0 &&
3875                 isa<TranslationUnitDecl>(
3876                     cast<CapturedDecl>(OriginalVD->getDeclContext())
3877                         ->getDeclContext()) &&
3878                 "Expected artificial target data variable.");
3879 SharedRefLValue =
3880 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
3881 } else if (ForDup) {
3882 SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
3883 SharedRefLValue = CGF.MakeAddrLValue(
3884 Address(SharedRefLValue.getPointer(CGF),
3885 C.getDeclAlign(OriginalVD)),
3886 SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
3887 SharedRefLValue.getTBAAInfo());
3888 } else if (CGF.LambdaCaptureFields.count(
3889 Pair.second.Original->getCanonicalDecl()) > 0 ||
3890 dyn_cast_or_null<BlockDecl>(CGF.CurCodeDecl)) {
3891 SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
3892 } else {
3893 // Processing for implicitly captured variables.
3894 InlinedOpenMPRegionRAII Region(
3895 CGF, [](CodeGenFunction &, PrePostActionTy &) {}, OMPD_unknown,
3896 /*HasCancel=*/false, /*NoInheritance=*/true);
3897 SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
3898 }
3899 if (Type->isArrayType()) {
3900 // Initialize firstprivate array.
3901 if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
3902 // Perform simple memcpy.
3903 CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
3904 } else {
3905 // Initialize firstprivate array using element-by-element
3906 // initialization.
3907 CGF.EmitOMPAggregateAssign(
3908 PrivateLValue.getAddress(CGF), SharedRefLValue.getAddress(CGF),
3909 Type,
3910 [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
3911 Address SrcElement) {
3912 // Clean up any temporaries needed by the initialization.
3913 CodeGenFunction::OMPPrivateScope InitScope(CGF);
3914 InitScope.addPrivate(
3915 Elem, [SrcElement]() -> Address { return SrcElement; });
3916 (void)InitScope.Privatize();
3917 // Emit initialization for single element.
3918 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
3919 CGF, &CapturesInfo);
3920 CGF.EmitAnyExprToMem(Init, DestElement,
3921 Init->getType().getQualifiers(),
3922 /*IsInitializer=*/false);
3923 });
3924 }
3925 } else {
3926 CodeGenFunction::OMPPrivateScope InitScope(CGF);
3927 InitScope.addPrivate(Elem, [SharedRefLValue, &CGF]() -> Address {
3928 return SharedRefLValue.getAddress(CGF);
3929 });
3930 (void)InitScope.Privatize();
3931 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
3932 CGF.EmitExprAsInit(Init, VD, PrivateLValue,
3933 /*capturedByInit=*/false);
3934 }
3935 } else {
3936 CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
3937 }
3938 }
3939 ++FI;
3940 }
3941}
3942
3943/// Check if duplication function is required for taskloops.
3944static bool checkInitIsRequired(CodeGenFunction &CGF,
3945 ArrayRef<PrivateDataTy> Privates) {
3946 bool InitRequired = false;
3947 for (const PrivateDataTy &Pair : Privates) {
3948 if (Pair.second.isLocalPrivate())
3949 continue;
3950 const VarDecl *VD = Pair.second.PrivateCopy;
3951 const Expr *Init = VD->getAnyInitializer();
3952 InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
3953 !CGF.isTrivialInitializer(Init));
3954 if (InitRequired)
3955 break;
3956 }
3957 return InitRequired;
3958}
3959
3960
3961/// Emit task_dup function (for initialization of
3962/// private/firstprivate/lastprivate vars and last_iter flag)
3963/// \code
3964/// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
3965/// lastpriv) {
3966/// // setup lastprivate flag
3967/// task_dst->last = lastpriv;
3968/// // could be constructor calls here...
3969/// }
3970/// \endcode
3971static llvm::Value *
3972emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
3973 const OMPExecutableDirective &D,
3974 QualType KmpTaskTWithPrivatesPtrQTy,
3975 const RecordDecl *KmpTaskTWithPrivatesQTyRD,
3976 const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
3977 QualType SharedsPtrTy, const OMPTaskDataTy &Data,
3978 ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
3979 ASTContext &C = CGM.getContext();
3980 FunctionArgList Args;
3981 ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3982 KmpTaskTWithPrivatesPtrQTy,
3983 ImplicitParamDecl::Other);
3984 ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3985 KmpTaskTWithPrivatesPtrQTy,
3986 ImplicitParamDecl::Other);
3987 ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3988 ImplicitParamDecl::Other);
3989 Args.push_back(&DstArg);
3990 Args.push_back(&SrcArg);
3991 Args.push_back(&LastprivArg);
3992 const auto &TaskDupFnInfo =
3993 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3994 llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
3995 std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
3996 auto *TaskDup = llvm::Function::Create(
3997 TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
3998 CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
3999 TaskDup->setDoesNotRecurse();
4000 CodeGenFunction CGF(CGM);
4001 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
4002 Loc);
4003
4004 LValue TDBase = CGF.EmitLoadOfPointerLValue(
4005 CGF.GetAddrOfLocalVar(&DstArg),
4006 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4007 // task_dst->liter = lastpriv;
4008 if (WithLastIter) {
4009 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4010 LValue Base = CGF.EmitLValueForField(
4011 TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4012 LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
4013 llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
4014 CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
4015 CGF.EmitStoreOfScalar(Lastpriv, LILVal);
4016 }
4017
4018 // Emit initial values for private copies (if any).
4019  assert(!Privates.empty());
4020 Address KmpTaskSharedsPtr = Address::invalid();
4021 if (!Data.FirstprivateVars.empty()) {
4022 LValue TDBase = CGF.EmitLoadOfPointerLValue(
4023 CGF.GetAddrOfLocalVar(&SrcArg),
4024 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4025 LValue Base = CGF.EmitLValueForField(
4026 TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4027 KmpTaskSharedsPtr = Address(
4028 CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
4029 Base, *std::next(KmpTaskTQTyRD->field_begin(),
4030 KmpTaskTShareds)),
4031 Loc),
4032 CGM.getNaturalTypeAlignment(SharedsTy));
4033 }
4034 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
4035 SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
4036 CGF.FinishFunction();
4037 return TaskDup;
4038}
4039
4040/// Checks if destructor function is required to be generated.
4041/// \return true if cleanups are required, false otherwise.
4042static bool
4043checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD,
4044 ArrayRef<PrivateDataTy> Privates) {
4045 for (const PrivateDataTy &P : Privates) {
4046 if (P.second.isLocalPrivate())
4047 continue;
4048 QualType Ty = P.second.Original->getType().getNonReferenceType();
4049 if (Ty.isDestructedType())
4050 return true;
4051 }
4052 return false;
4053}
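
A hypothetical task for which checkDestructorsRequired() returns true: the firstprivate copy of a std::string has a non-trivial destructor, so the DestructorsFlag is set when the task is allocated in emitTaskInit() below.

  #include <string>

  void emit_with_cleanup(std::string msg) {
    // std::string is a destructed type, so the task needs the destructor thunk.
    #pragma omp task firstprivate(msg)
    { (void)msg.size(); }
  }
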
4054
4055namespace {
4056/// Loop generator for OpenMP iterator expression.
4057class OMPIteratorGeneratorScope final
4058 : public CodeGenFunction::OMPPrivateScope {
4059 CodeGenFunction &CGF;
4060 const OMPIteratorExpr *E = nullptr;
4061 SmallVector<CodeGenFunction::JumpDest, 4> ContDests;
4062 SmallVector<CodeGenFunction::JumpDest, 4> ExitDests;
4063 OMPIteratorGeneratorScope() = delete;
4064 OMPIteratorGeneratorScope(OMPIteratorGeneratorScope &) = delete;
4065
4066public:
4067 OMPIteratorGeneratorScope(CodeGenFunction &CGF, const OMPIteratorExpr *E)
4068 : CodeGenFunction::OMPPrivateScope(CGF), CGF(CGF), E(E) {
4069 if (!E)
4070 return;
4071 SmallVector<llvm::Value *, 4> Uppers;
4072 for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
4073 Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
4074 const auto *VD = cast<VarDecl>(E->getIteratorDecl(I));
4075 addPrivate(VD, [&CGF, VD]() {
4076 return CGF.CreateMemTemp(VD->getType(), VD->getName());
4077 });
4078 const OMPIteratorHelperData &HelperData = E->getHelper(I);
4079 addPrivate(HelperData.CounterVD, [&CGF, &HelperData]() {
4080 return CGF.CreateMemTemp(HelperData.CounterVD->getType(),
4081 "counter.addr");
4082 });
4083 }
4084 Privatize();
4085
4086 for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
4087 const OMPIteratorHelperData &HelperData = E->getHelper(I);
4088 LValue CLVal =
4089 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(HelperData.CounterVD),
4090 HelperData.CounterVD->getType());
4091 // Counter = 0;
4092 CGF.EmitStoreOfScalar(
4093 llvm::ConstantInt::get(CLVal.getAddress(CGF).getElementType(), 0),
4094 CLVal);
4095 CodeGenFunction::JumpDest &ContDest =
4096 ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
4097 CodeGenFunction::JumpDest &ExitDest =
4098 ExitDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.exit"));
4099 // N = <number-of_iterations>;
4100 llvm::Value *N = Uppers[I];
4101 // cont:
4102 // if (Counter < N) goto body; else goto exit;
4103 CGF.EmitBlock(ContDest.getBlock());
4104 auto *CVal =
4105 CGF.EmitLoadOfScalar(CLVal, HelperData.CounterVD->getLocation());
4106 llvm::Value *Cmp =
4107 HelperData.CounterVD->getType()->isSignedIntegerOrEnumerationType()
4108 ? CGF.Builder.CreateICmpSLT(CVal, N)
4109 : CGF.Builder.CreateICmpULT(CVal, N);
4110 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("iter.body");
4111 CGF.Builder.CreateCondBr(Cmp, BodyBB, ExitDest.getBlock());
4112 // body:
4113 CGF.EmitBlock(BodyBB);
4114 // Iteri = Begini + Counter * Stepi;
4115 CGF.EmitIgnoredExpr(HelperData.Update);
4116 }
4117 }
4118 ~OMPIteratorGeneratorScope() {
4119 if (!E)
4120 return;
4121 for (unsigned I = E->numOfIterators(); I > 0; --I) {
4122 // Counter = Counter + 1;
4123 const OMPIteratorHelperData &HelperData = E->getHelper(I - 1);
4124 CGF.EmitIgnoredExpr(HelperData.CounterUpdate);
4125 // goto cont;
4126 CGF.EmitBranchThroughCleanup(ContDests[I - 1]);
4127 // exit:
4128 CGF.EmitBlock(ExitDests[I - 1].getBlock(), /*IsFinished=*/I == 1);
4129 }
4130 }
4131};
4132} // namespace
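
A hypothetical affinity clause with an iterator modifier, the kind of construct for which OMPIteratorGeneratorScope emits the counter initialization, bounds check, and update shown above (the same scope is used for iterator-based depend clauses).

  void warm_cache(int n, float *a) {
    // 'i' iterates over [0, n); the generator scope produces the counter loop
    // that walks the locator list a[i] for each value of i.
    #pragma omp task affinity(iterator(i = 0 : n) : a[i])
    { /* ... */ }
  }
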
4133
4134static std::pair<llvm::Value *, llvm::Value *>
4135getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
4136 const auto *OASE = dyn_cast<OMPArrayShapingExpr>(E);
4137 llvm::Value *Addr;
4138 if (OASE) {
4139 const Expr *Base = OASE->getBase();
4140 Addr = CGF.EmitScalarExpr(Base);
4141 } else {
4142 Addr = CGF.EmitLValue(E).getPointer(CGF);
4143 }
4144 llvm::Value *SizeVal;
4145 QualType Ty = E->getType();
4146 if (OASE) {
4147 SizeVal = CGF.getTypeSize(OASE->getBase()->getType()->getPointeeType());
4148 for (const Expr *SE : OASE->getDimensions()) {
4149 llvm::Value *Sz = CGF.EmitScalarExpr(SE);
4150 Sz = CGF.EmitScalarConversion(
4151 Sz, SE->getType(), CGF.getContext().getSizeType(), SE->getExprLoc());
4152 SizeVal = CGF.Builder.CreateNUWMul(SizeVal, Sz);
4153 }
4154 } else if (const auto *ASE =
4155 dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
4156 LValue UpAddrLVal =
4157 CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
4158 llvm::Value *UpAddr =
4159 CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(CGF), /*Idx0=*/1);
4160 llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
4161 llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
4162 SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
4163 } else {
4164 SizeVal = CGF.getTypeSize(Ty);
4165 }
4166 return std::make_pair(Addr, SizeVal);
4167}
4168
4169/// Builds kmp_task_affinity_info_t, if it is not built yet, and builds flags type.
4170static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
4171 QualType FlagsTy = C.getIntTypeForBitwidth(32, /*Signed=*/false);
4172 if (KmpTaskAffinityInfoTy.isNull()) {
4173 RecordDecl *KmpAffinityInfoRD =
4174 C.buildImplicitRecord("kmp_task_affinity_info_t");
4175 KmpAffinityInfoRD->startDefinition();
4176 addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getIntPtrType());
4177 addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
4178 addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
4179 KmpAffinityInfoRD->completeDefinition();
4180 KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
4181 }
4182}
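
A C++ sketch of the record built here, with field names taken from the RTLAffinityInfoFieldsTy enum used below (BaseAddr, Len, Flags); the exact runtime spelling is an assumption.

  #include <cstddef>
  #include <cstdint>

  struct kmp_task_affinity_info_t {
    intptr_t base_addr; // address of the affinity locator, stored as an integer
    size_t len;         // size in bytes of the locator
    uint32_t flags;     // reserved flag bits
  };
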
4183
4184CGOpenMPRuntime::TaskResultTy
4185CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
4186 const OMPExecutableDirective &D,
4187 llvm::Function *TaskFunction, QualType SharedsTy,
4188 Address Shareds, const OMPTaskDataTy &Data) {
4189 ASTContext &C = CGM.getContext();
4190 llvm::SmallVector<PrivateDataTy, 4> Privates;
4191 // Aggregate privates and sort them by the alignment.
4192 const auto *I = Data.PrivateCopies.begin();
4193 for (const Expr *E : Data.PrivateVars) {
4194 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4195 Privates.emplace_back(
4196 C.getDeclAlign(VD),
4197 PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4198 /*PrivateElemInit=*/nullptr));
4199 ++I;
4200 }
4201 I = Data.FirstprivateCopies.begin();
4202 const auto *IElemInitRef = Data.FirstprivateInits.begin();
4203 for (const Expr *E : Data.FirstprivateVars) {
4204 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4205 Privates.emplace_back(
4206 C.getDeclAlign(VD),
4207 PrivateHelpersTy(
4208 E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4209 cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
4210 ++I;
4211 ++IElemInitRef;
4212 }
4213 I = Data.LastprivateCopies.begin();
4214 for (const Expr *E : Data.LastprivateVars) {
4215 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4216 Privates.emplace_back(
4217 C.getDeclAlign(VD),
4218 PrivateHelpersTy(E, VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4219 /*PrivateElemInit=*/nullptr));
4220 ++I;
4221 }
4222 for (const VarDecl *VD : Data.PrivateLocals) {
4223 if (isAllocatableDecl(VD))
4224 Privates.emplace_back(CGM.getPointerAlign(), PrivateHelpersTy(VD));
4225 else
4226 Privates.emplace_back(C.getDeclAlign(VD), PrivateHelpersTy(VD));
4227 }
4228 llvm::stable_sort(Privates,
4229 [](const PrivateDataTy &L, const PrivateDataTy &R) {
4230 return L.first > R.first;
4231 });
4232 QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
4233 // Build type kmp_routine_entry_t (if not built yet).
4234 emitKmpRoutineEntryT(KmpInt32Ty);
4235 // Build type kmp_task_t (if not built yet).
4236 if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
4237 if (SavedKmpTaskloopTQTy.isNull()) {
4238 SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4239 CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4240 }
4241 KmpTaskTQTy = SavedKmpTaskloopTQTy;
4242 } else {
4243    assert((D.getDirectiveKind() == OMPD_task ||
4244            isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
4245            isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
4246           "Expected taskloop, task or target directive");
4247 if (SavedKmpTaskTQTy.isNull()) {
4248 SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4249 CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4250 }
4251 KmpTaskTQTy = SavedKmpTaskTQTy;
4252 }
4253 const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
4254 // Build particular struct kmp_task_t for the given task.
4255 const RecordDecl *KmpTaskTWithPrivatesQTyRD =
4256 createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
4257 QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
4258 QualType KmpTaskTWithPrivatesPtrQTy =
4259 C.getPointerType(KmpTaskTWithPrivatesQTy);
4260 llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
4261 llvm::Type *KmpTaskTWithPrivatesPtrTy =
4262 KmpTaskTWithPrivatesTy->getPointerTo();
4263 llvm::Value *KmpTaskTWithPrivatesTySize =
4264 CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
4265 QualType SharedsPtrTy = C.getPointerType(SharedsTy);
4266
4267 // Emit initial values for private copies (if any).
4268 llvm::Value *TaskPrivatesMap = nullptr;
4269 llvm::Type *TaskPrivatesMapTy =
4270 std::next(TaskFunction->arg_begin(), 3)->getType();
4271 if (!Privates.empty()) {
4272 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4273 TaskPrivatesMap =
4274 emitTaskPrivateMappingFunction(CGM, Loc, Data, FI->getType(), Privates);
4275 TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4276 TaskPrivatesMap, TaskPrivatesMapTy);
4277 } else {
4278 TaskPrivatesMap = llvm::ConstantPointerNull::get(
4279 cast<llvm::PointerType>(TaskPrivatesMapTy));
4280 }
4281 // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
4282 // kmp_task_t *tt);
4283 llvm::Function *TaskEntry = emitProxyTaskFunction(
4284 CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4285 KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
4286 TaskPrivatesMap);
4287
4288 // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
4289 // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
4290 // kmp_routine_entry_t *task_entry);
4291 // Task flags. Format is taken from
4292 // https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h,
4293 // description of kmp_tasking_flags struct.
4294 enum {
4295 TiedFlag = 0x1,
4296 FinalFlag = 0x2,
4297 DestructorsFlag = 0x8,
4298 PriorityFlag = 0x20,
4299 DetachableFlag = 0x40,
4300 };
4301 unsigned Flags = Data.Tied ? TiedFlag : 0;
4302 bool NeedsCleanup = false;
4303 if (!Privates.empty()) {
4304 NeedsCleanup =
4305 checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD, Privates);
4306 if (NeedsCleanup)
4307 Flags = Flags | DestructorsFlag;
4308 }
4309 if (Data.Priority.getInt())
4310 Flags = Flags | PriorityFlag;
4311 if (D.hasClausesOfKind<OMPDetachClause>())
4312 Flags = Flags | DetachableFlag;
4313 llvm::Value *TaskFlags =
4314 Data.Final.getPointer()
4315 ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
4316 CGF.Builder.getInt32(FinalFlag),
4317 CGF.Builder.getInt32(/*C=*/0))
4318 : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
4319 TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
4320 llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
4321 SmallVector<llvm::Value *, 8> AllocArgs = {emitUpdateLocation(CGF, Loc),
4322 getThreadID(CGF, Loc), TaskFlags, KmpTaskTWithPrivatesTySize,
4323 SharedsSize, CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4324 TaskEntry, KmpRoutineEntryPtrTy)};
4325 llvm::Value *NewTask;
4326 if (D.hasClausesOfKind<OMPNowaitClause>()) {
4327 // Check if we have any device clause associated with the directive.
4328 const Expr *Device = nullptr;
4329 if (auto *C = D.getSingleClause<OMPDeviceClause>())
4330 Device = C->getDevice();
4331 // Emit device ID if any otherwise use default value.
4332 llvm::Value *DeviceID;
4333 if (Device)
4334 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
4335 CGF.Int64Ty, /*isSigned=*/true);
4336 else
4337 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
4338 AllocArgs.push_back(DeviceID);
4339 NewTask = CGF.EmitRuntimeCall(
4340 OMPBuilder.getOrCreateRuntimeFunction(
4341 CGM.getModule(), OMPRTL___kmpc_omp_target_task_alloc),
4342 AllocArgs);
4343 } else {
4344 NewTask =
4345 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
4346 CGM.getModule(), OMPRTL___kmpc_omp_task_alloc),
4347 AllocArgs);
4348 }
4349 // Emit detach clause initialization.
4350 // evt = (typeof(evt))__kmpc_task_allow_completion_event(loc, tid,
4351 // task_descriptor);
4352 if (const auto *DC = D.getSingleClause<OMPDetachClause>()) {
4353 const Expr *Evt = DC->getEventHandler()->IgnoreParenImpCasts();
4354 LValue EvtLVal = CGF.EmitLValue(Evt);
4355
4356 // Build kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
4357 // int gtid, kmp_task_t *task);
4358 llvm::Value *Loc = emitUpdateLocation(CGF, DC->getBeginLoc());
4359 llvm::Value *Tid = getThreadID(CGF, DC->getBeginLoc());
4360 Tid = CGF.Builder.CreateIntCast(Tid, CGF.IntTy, /*isSigned=*/false);
4361 llvm::Value *EvtVal = CGF.EmitRuntimeCall(
4362 OMPBuilder.getOrCreateRuntimeFunction(
4363 CGM.getModule(), OMPRTL___kmpc_task_allow_completion_event),
4364 {Loc, Tid, NewTask});
4365 EvtVal = CGF.EmitScalarConversion(EvtVal, C.VoidPtrTy, Evt->getType(),
4366 Evt->getExprLoc());
4367 CGF.EmitStoreOfScalar(EvtVal, EvtLVal);
4368 }
4369 // Process affinity clauses.
4370 if (D.hasClausesOfKind<OMPAffinityClause>()) {
4371 // Process list of affinity data.
4372 ASTContext &C = CGM.getContext();
4373 Address AffinitiesArray = Address::invalid();
4374 // Calculate number of elements to form the array of affinity data.
4375 llvm::Value *NumOfElements = nullptr;
4376 unsigned NumAffinities = 0;
4377 for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
4378 if (const Expr *Modifier = C->getModifier()) {
4379 const auto *IE = cast<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts());
4380 for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
4381 llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
4382 Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
4383 NumOfElements =
4384 NumOfElements ? CGF.Builder.CreateNUWMul(NumOfElements, Sz) : Sz;
4385 }
4386 } else {
4387 NumAffinities += C->varlist_size();
4388 }
4389 }
4390 getKmpAffinityType(CGM.getContext(), KmpTaskAffinityInfoTy);
4391    // Field ids in kmp_task_affinity_info record.
4392 enum RTLAffinityInfoFieldsTy { BaseAddr, Len, Flags };
4393
4394 QualType KmpTaskAffinityInfoArrayTy;
4395 if (NumOfElements) {
4396 NumOfElements = CGF.Builder.CreateNUWAdd(
4397 llvm::ConstantInt::get(CGF.SizeTy, NumAffinities), NumOfElements);
4398 OpaqueValueExpr OVE(
4399 Loc,
4400 C.getIntTypeForBitwidth(C.getTypeSize(C.getSizeType()), /*Signed=*/0),
4401 VK_PRValue);
4402 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
4403 RValue::get(NumOfElements));
4404 KmpTaskAffinityInfoArrayTy =
4405 C.getVariableArrayType(KmpTaskAffinityInfoTy, &OVE, ArrayType::Normal,
4406 /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
4407 // Properly emit variable-sized array.
4408 auto *PD = ImplicitParamDecl::Create(C, KmpTaskAffinityInfoArrayTy,
4409 ImplicitParamDecl::Other);
4410 CGF.EmitVarDecl(*PD);
4411 AffinitiesArray = CGF.GetAddrOfLocalVar(PD);
4412 NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
4413 /*isSigned=*/false);
4414 } else {
4415 KmpTaskAffinityInfoArrayTy = C.getConstantArrayType(
4416 KmpTaskAffinityInfoTy,
4417 llvm::APInt(C.getTypeSize(C.getSizeType()), NumAffinities), nullptr,
4418 ArrayType::Normal, /*IndexTypeQuals=*/0);
4419 AffinitiesArray =
4420 CGF.CreateMemTemp(KmpTaskAffinityInfoArrayTy, ".affs.arr.addr");
4421 AffinitiesArray = CGF.Builder.CreateConstArrayGEP(AffinitiesArray, 0);
4422 NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumAffinities,
4423 /*isSigned=*/false);
4424 }
4425
4426 const auto *KmpAffinityInfoRD = KmpTaskAffinityInfoTy->getAsRecordDecl();
4427 // Fill array by elements without iterators.
4428 unsigned Pos = 0;
4429 bool HasIterator = false;
4430 for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
4431 if (C->getModifier()) {
4432 HasIterator = true;
4433 continue;
4434 }
4435 for (const Expr *E : C->varlists()) {
4436 llvm::Value *Addr;
4437 llvm::Value *Size;
4438 std::tie(Addr, Size) = getPointerAndSize(CGF, E);
4439 LValue Base =
4440 CGF.MakeAddrLValue(CGF.Builder.CreateConstGEP(AffinitiesArray, Pos),
4441 KmpTaskAffinityInfoTy);
4442 // affs[i].base_addr = &<Affinities[i].second>;
4443 LValue BaseAddrLVal = CGF.EmitLValueForField(
4444 Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
4445 CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
4446 BaseAddrLVal);
4447 // affs[i].len = sizeof(<Affinities[i].second>);
4448 LValue LenLVal = CGF.EmitLValueForField(
4449 Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
4450 CGF.EmitStoreOfScalar(Size, LenLVal);
4451 ++Pos;
4452 }
4453 }
4454 LValue PosLVal;
4455 if (HasIterator) {
4456 PosLVal = CGF.MakeAddrLValue(
4457 CGF.CreateMemTemp(C.getSizeType(), "affs.counter.addr"),
4458 C.getSizeType());
4459 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
4460 }
4461 // Process elements with iterators.
4462 for (const auto *C : D.getClausesOfKind<OMPAffinityClause>()) {
4463 const Expr *Modifier = C->getModifier();
4464 if (!Modifier)
4465 continue;
4466 OMPIteratorGeneratorScope IteratorScope(
4467 CGF, cast_or_null<OMPIteratorExpr>(Modifier->IgnoreParenImpCasts()));
4468 for (const Expr *E : C->varlists()) {
4469 llvm::Value *Addr;
4470 llvm::Value *Size;
4471 std::tie(Addr, Size) = getPointerAndSize(CGF, E);
4472 llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
4473 LValue Base = CGF.MakeAddrLValue(
4474 Address(CGF.Builder.CreateGEP(AffinitiesArray.getPointer(), Idx),
4475 AffinitiesArray.getAlignment()),
4476 KmpTaskAffinityInfoTy);
4477 // affs[i].base_addr = &<Affinities[i].second>;
4478 LValue BaseAddrLVal = CGF.EmitLValueForField(
4479 Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
4480 CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
4481 BaseAddrLVal);
4482 // affs[i].len = sizeof(<Affinities[i].second>);
4483 LValue LenLVal = CGF.EmitLValueForField(
4484 Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
4485 CGF.EmitStoreOfScalar(Size, LenLVal);
4486 Idx = CGF.Builder.CreateNUWAdd(
4487 Idx, llvm::ConstantInt::get(Idx->getType(), 1));
4488 CGF.EmitStoreOfScalar(Idx, PosLVal);
4489 }
4490 }
4491 // Call to kmp_int32 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref,
4492 // kmp_int32 gtid, kmp_task_t *new_task, kmp_int32
4493 // naffins, kmp_task_affinity_info_t *affin_list);
4494 llvm::Value *LocRef = emitUpdateLocation(CGF, Loc);
4495 llvm::Value *GTid = getThreadID(CGF, Loc);
4496 llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4497 AffinitiesArray.getPointer(), CGM.VoidPtrTy);
4498 // FIXME: Emit the function and ignore its result for now unless the
4499 // runtime function is properly implemented.
4500 (void)CGF.EmitRuntimeCall(
4501 OMPBuilder.getOrCreateRuntimeFunction(
4502 CGM.getModule(), OMPRTL___kmpc_omp_reg_task_with_affinity),
4503 {LocRef, GTid, NewTask, NumOfElements, AffinListPtr});
4504 }
4505 llvm::Value *NewTaskNewTaskTTy =
4506 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4507 NewTask, KmpTaskTWithPrivatesPtrTy);
4508 LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
4509 KmpTaskTWithPrivatesQTy);
4510 LValue TDBase =
4511 CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
4512 // Fill the data in the resulting kmp_task_t record.
4513 // Copy shareds if there are any.
4514 Address KmpTaskSharedsPtr = Address::invalid();
4515 if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
4516 KmpTaskSharedsPtr =
4517 Address(CGF.EmitLoadOfScalar(
4518 CGF.EmitLValueForField(
4519 TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
4520 KmpTaskTShareds)),
4521 Loc),
4522 CGM.getNaturalTypeAlignment(SharedsTy));
4523 LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
4524 LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
4525 CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
4526 }
4527 // Emit initial values for private copies (if any).
4528 TaskResultTy Result;
4529 if (!Privates.empty()) {
4530 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
4531 SharedsTy, SharedsPtrTy, Data, Privates,
4532 /*ForDup=*/false);
4533 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
4534 (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
4535 Result.TaskDupFn = emitTaskDupFunction(
4536 CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
4537 KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
4538 /*WithLastIter=*/!Data.LastprivateVars.empty());
4539 }
4540 }
4541 // Fields of union "kmp_cmplrdata_t" for destructors and priority.
4542 enum { Priority = 0, Destructors = 1 };
4543 // Provide pointer to function with destructors for privates.
4544 auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
4545 const RecordDecl *KmpCmplrdataUD =
4546 (*FI)->getType()->getAsUnionType()->getDecl();
4547 if (NeedsCleanup) {
4548 llvm::Value *DestructorFn = emitDestructorsFunction(
4549 CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4550 KmpTaskTWithPrivatesQTy);
4551 LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
4552 LValue DestructorsLV = CGF.EmitLValueForField(
4553 Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
4554 CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4555 DestructorFn, KmpRoutineEntryPtrTy),
4556 DestructorsLV);
4557 }
4558 // Set priority.
4559 if (Data.Priority.getInt()) {
4560 LValue Data2LV = CGF.EmitLValueForField(
4561 TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
4562 LValue PriorityLV = CGF.EmitLValueForField(
4563 Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
4564 CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
4565 }
4566 Result.NewTask = NewTask;
4567 Result.TaskEntry = TaskEntry;
4568 Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
4569 Result.TDBase = TDBase;
4570 Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
4571 return Result;
4572}
4573
4574namespace {
4575/// Dependence kind for RTL.
4576enum RTLDependenceKindTy {
4577 DepIn = 0x01,
4578 DepInOut = 0x3,
4579 DepMutexInOutSet = 0x4
4580};
4581/// Fields ids in kmp_depend_info record.
4582enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
4583} // namespace
4584
4585/// Translates internal dependency kind into the runtime kind.
4586static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K) {
4587 RTLDependenceKindTy DepKind;
4588 switch (K) {
4589 case OMPC_DEPEND_in:
4590 DepKind = DepIn;
4591 break;
4592 // Out and InOut dependencies must use the same code.
4593 case OMPC_DEPEND_out:
4594 case OMPC_DEPEND_inout:
4595 DepKind = DepInOut;
4596 break;
4597 case OMPC_DEPEND_mutexinoutset:
4598 DepKind = DepMutexInOutSet;
4599 break;
4600 case OMPC_DEPEND_source:
4601 case OMPC_DEPEND_sink:
4602 case OMPC_DEPEND_depobj:
4603 case OMPC_DEPEND_unknown:
4604    llvm_unreachable("Unknown task dependence type");
4605 }
4606 return DepKind;
4607}
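
For reference, a minimal standalone sketch (not part of the file above) of the flag values this translation hands to the runtime. The numeric values come straight from the RTLDependenceKindTy enum above; note that 'out' and 'inout' deliberately share one encoding.

#include <cstdio>

// Re-spelled here only to keep the sketch self-contained; mirrors
// RTLDependenceKindTy in the file above.
enum DepFlagSketch { DepIn = 0x01, DepInOut = 0x3, DepMutexInOutSet = 0x4 };

int main() {
  // OMPC_DEPEND_out and OMPC_DEPEND_inout both lower to DepInOut (0x3).
  std::printf("in=%#x out/inout=%#x mutexinoutset=%#x\n",
              (unsigned)DepIn, (unsigned)DepInOut, (unsigned)DepMutexInOutSet);
  return 0;
}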
4608
4609/// Builds kmp_depend_info, if it is not built yet, and builds flags type.
4610static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy,
4611 QualType &FlagsTy) {
4612 FlagsTy = C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
4613 if (KmpDependInfoTy.isNull()) {
4614 RecordDecl *KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
4615 KmpDependInfoRD->startDefinition();
4616 addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
4617 addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
4618 addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
4619 KmpDependInfoRD->completeDefinition();
4620 KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
4621 }
4622}
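
As a rough aid, the following is a plain C++ mirror of the record getDependTypes() builds implicitly: an intptr_t base address, a size_t length, and a bool-width unsigned flags field. The struct and field names here are assumptions for illustration only; the authoritative layout is whatever kmp_depend_info the OpenMP runtime itself defines.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical mirror of the implicit "kmp_depend_info" record built above.
struct KmpDependInfoSketch {
  intptr_t base_addr; // address of the dependent storage
  size_t   len;       // its size in bytes
  uint8_t  flags;     // dependence kind (FlagsTy is bool-width, unsigned)
};

int main() {
  std::printf("size=%zu base_addr@%zu len@%zu flags@%zu\n",
              sizeof(KmpDependInfoSketch),
              offsetof(KmpDependInfoSketch, base_addr),
              offsetof(KmpDependInfoSketch, len),
              offsetof(KmpDependInfoSketch, flags));
  return 0;
}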
4623
4624std::pair<llvm::Value *, LValue>
4625CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
4626 SourceLocation Loc) {
4627 ASTContext &C = CGM.getContext();
4628 QualType FlagsTy;
4629 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4630 RecordDecl *KmpDependInfoRD =
4631 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4632 LValue Base = CGF.EmitLoadOfPointerLValue(
4633 DepobjLVal.getAddress(CGF),
4634 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
4635 QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
4636 Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4637 Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
4638 Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
4639 Base.getTBAAInfo());
4640 llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
4641 Addr.getPointer(),
4642 llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
4643 LValue NumDepsBase = CGF.MakeAddrLValue(
4644 Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
4645 Base.getBaseInfo(), Base.getTBAAInfo());
4646 // NumDeps = deps[i].base_addr;
4647 LValue BaseAddrLVal = CGF.EmitLValueForField(
4648 NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
4649 llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, Loc);
4650 return std::make_pair(NumDeps, Base);
4651}
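
A minimal sketch, assuming the layout this routine decodes: the pointer stored in a depobj variable points one element past a header slot, and that header slot keeps the dependency count in its base_addr field (hence the GEP by -1 above). All names here are illustrative, not the runtime's API.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct DepInfoSketch { intptr_t base_addr; size_t len; uint8_t flags; };

// Read the count the way getDepobjElements() does: deps[-1].base_addr.
static size_t numDepsFromHandle(const DepInfoSketch *Handle) {
  return static_cast<size_t>(Handle[-1].base_addr);
}

int main() {
  DepInfoSketch Storage[3] = {};
  Storage[0].base_addr = 2;                   // header slot: two entries follow
  const DepInfoSketch *Handle = &Storage[1];  // what the depobj variable holds
  std::printf("ndeps = %zu\n", numDepsFromHandle(Handle));
  return 0;
}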
4652
4653static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
4654 llvm::PointerUnion<unsigned *, LValue *> Pos,
4655 const OMPTaskDataTy::DependData &Data,
4656 Address DependenciesArray) {
4657 CodeGenModule &CGM = CGF.CGM;
4658 ASTContext &C = CGM.getContext();
4659 QualType FlagsTy;
4660 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4661 RecordDecl *KmpDependInfoRD =
4662 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4663 llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
4664
4665 OMPIteratorGeneratorScope IteratorScope(
4666 CGF, cast_or_null<OMPIteratorExpr>(
4667 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
4668 : nullptr));
4669 for (const Expr *E : Data.DepExprs) {
4670 llvm::Value *Addr;
4671 llvm::Value *Size;
4672 std::tie(Addr, Size) = getPointerAndSize(CGF, E);
4673 LValue Base;
4674 if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
4675 Base = CGF.MakeAddrLValue(
4676 CGF.Builder.CreateConstGEP(DependenciesArray, *P), KmpDependInfoTy);
4677 } else {
4678 LValue &PosLVal = *Pos.get<LValue *>();
4679 llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
4680 Base = CGF.MakeAddrLValue(
4681 Address(CGF.Builder.CreateGEP(DependenciesArray.getPointer(), Idx),
4682 DependenciesArray.getAlignment()),
4683 KmpDependInfoTy);
4684 }
4685 // deps[i].base_addr = &<Dependencies[i].second>;
4686 LValue BaseAddrLVal = CGF.EmitLValueForField(
4687 Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
4688 CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
4689 BaseAddrLVal);
4690 // deps[i].len = sizeof(<Dependencies[i].second>);
4691 LValue LenLVal = CGF.EmitLValueForField(
4692 Base, *std::next(KmpDependInfoRD->field_begin(), Len));
4693 CGF.EmitStoreOfScalar(Size, LenLVal);
4694 // deps[i].flags = <Dependencies[i].first>;
4695 RTLDependenceKindTy DepKind = translateDependencyKind(Data.DepKind);
4696 LValue FlagsLVal = CGF.EmitLValueForField(
4697 Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
4698 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
4699 FlagsLVal);
4700 if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
4701 ++(*P);
4702 } else {
4703 LValue &PosLVal = *Pos.get<LValue *>();
4704 llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
4705 Idx = CGF.Builder.CreateNUWAdd(Idx,
4706 llvm::ConstantInt::get(Idx->getType(), 1));
4707 CGF.EmitStoreOfScalar(Idx, PosLVal);
4708 }
4709 }
4710}
4711
4712static SmallVector<llvm::Value *, 4>
4713emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
4714 const OMPTaskDataTy::DependData &Data) {
4715   assert(Data.DepKind == OMPC_DEPEND_depobj &&
4716          "Expected depobj dependecy kind.");
4717 SmallVector<llvm::Value *, 4> Sizes;
4718 SmallVector<LValue, 4> SizeLVals;
4719 ASTContext &C = CGF.getContext();
4720 QualType FlagsTy;
4721 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4722 RecordDecl *KmpDependInfoRD =
4723 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4724 QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
4725 llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
4726 {
4727 OMPIteratorGeneratorScope IteratorScope(
4728 CGF, cast_or_null<OMPIteratorExpr>(
4729 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
4730 : nullptr));
4731 for (const Expr *E : Data.DepExprs) {
4732 LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
4733 LValue Base = CGF.EmitLoadOfPointerLValue(
4734 DepobjLVal.getAddress(CGF),
4735 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
4736 Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4737 Base.getAddress(CGF), KmpDependInfoPtrT);
4738 Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
4739 Base.getTBAAInfo());
4740 llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
4741 Addr.getPointer(),
4742 llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
4743 LValue NumDepsBase = CGF.MakeAddrLValue(
4744 Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
4745 Base.getBaseInfo(), Base.getTBAAInfo());
4746 // NumDeps = deps[i].base_addr;
4747 LValue BaseAddrLVal = CGF.EmitLValueForField(
4748 NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
4749 llvm::Value *NumDeps =
4750 CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
4751 LValue NumLVal = CGF.MakeAddrLValue(
4752 CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
4753 C.getUIntPtrType());
4754 CGF.InitTempAlloca(NumLVal.getAddress(CGF),
4755 llvm::ConstantInt::get(CGF.IntPtrTy, 0));
4756 llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
4757 llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
4758 CGF.EmitStoreOfScalar(Add, NumLVal);
4759 SizeLVals.push_back(NumLVal);
4760 }
4761 }
4762 for (unsigned I = 0, E = SizeLVals.size(); I < E; ++I) {
4763 llvm::Value *Size =
4764 CGF.EmitLoadOfScalar(SizeLVals[I], Data.DepExprs[I]->getExprLoc());
4765 Sizes.push_back(Size);
4766 }
4767 return Sizes;
4768}
4769
4770static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
4771 LValue PosLVal,
4772 const OMPTaskDataTy::DependData &Data,
4773 Address DependenciesArray) {
4774   assert(Data.DepKind == OMPC_DEPEND_depobj &&
4775          "Expected depobj dependecy kind.");
4776 ASTContext &C = CGF.getContext();
4777 QualType FlagsTy;
4778 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4779 RecordDecl *KmpDependInfoRD =
4780 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4781 QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
4782 llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
4783 llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy);
4784 {
4785 OMPIteratorGeneratorScope IteratorScope(
4786 CGF, cast_or_null<OMPIteratorExpr>(
4787 Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
4788 : nullptr));
4789 for (unsigned I = 0, End = Data.DepExprs.size(); I < End; ++I) {
4790 const Expr *E = Data.DepExprs[I];
4791 LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
4792 LValue Base = CGF.EmitLoadOfPointerLValue(
4793 DepobjLVal.getAddress(CGF),
4794 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
4795 Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4796 Base.getAddress(CGF), KmpDependInfoPtrT);
4797 Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
4798 Base.getTBAAInfo());
4799
4800 // Get number of elements in a single depobj.
4801 llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
4802 Addr.getPointer(),
4803 llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
4804 LValue NumDepsBase = CGF.MakeAddrLValue(
4805 Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
4806 Base.getBaseInfo(), Base.getTBAAInfo());
4807 // NumDeps = deps[i].base_addr;
4808 LValue BaseAddrLVal = CGF.EmitLValueForField(
4809 NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
4810 llvm::Value *NumDeps =
4811 CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
4812
4813 // memcopy dependency data.
4814 llvm::Value *Size = CGF.Builder.CreateNUWMul(
4815 ElSize,
4816 CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
4817 llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
4818 Address DepAddr =
4819 Address(CGF.Builder.CreateGEP(DependenciesArray.getPointer(), Pos),
4820 DependenciesArray.getAlignment());
4821 CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
4822
4823 // Increase pos.
4824 // pos += size;
4825 llvm::Value *Add = CGF.Builder.CreateNUWAdd(Pos, NumDeps);
4826 CGF.EmitStoreOfScalar(Add, PosLVal);
4827 }
4828 }
4829}
4830
4831std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
4832 CodeGenFunction &CGF, ArrayRef<OMPTaskDataTy::DependData> Dependencies,
4833 SourceLocation Loc) {
4834 if (llvm::all_of(Dependencies, [](const OMPTaskDataTy::DependData &D) {
4835 return D.DepExprs.empty();
4836 }))
4837 return std::make_pair(nullptr, Address::invalid());
4838 // Process list of dependencies.
4839 ASTContext &C = CGM.getContext();
4840 Address DependenciesArray = Address::invalid();
4841 llvm::Value *NumOfElements = nullptr;
4842 unsigned NumDependencies = std::accumulate(
4843 Dependencies.begin(), Dependencies.end(), 0,
4844 [](unsigned V, const OMPTaskDataTy::DependData &D) {
4845 return D.DepKind == OMPC_DEPEND_depobj
4846 ? V
4847 : (V + (D.IteratorExpr ? 0 : D.DepExprs.size()));
4848 });
4849 QualType FlagsTy;
4850 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4851 bool HasDepobjDeps = false;
4852 bool HasRegularWithIterators = false;
4853 llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
4854 llvm::Value *NumOfRegularWithIterators =
4855 llvm::ConstantInt::get(CGF.IntPtrTy, 1);
4856 // Calculate the number of depobj dependencies and regular deps with iterators.
4857 for (const OMPTaskDataTy::DependData &D : Dependencies) {
4858 if (D.DepKind == OMPC_DEPEND_depobj) {
4859 SmallVector<llvm::Value *, 4> Sizes =
4860 emitDepobjElementsSizes(CGF, KmpDependInfoTy, D);
4861 for (llvm::Value *Size : Sizes) {
4862 NumOfDepobjElements =
4863 CGF.Builder.CreateNUWAdd(NumOfDepobjElements, Size);
4864 }
4865 HasDepobjDeps = true;
4866 continue;
4867 }
4868 // Include number of iterations, if any.
4869 if (const auto *IE = cast_or_null<OMPIteratorExpr>(D.IteratorExpr)) {
4870 for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
4871 llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
4872 Sz = CGF.Builder.CreateIntCast(Sz, CGF.IntPtrTy, /*isSigned=*/false);
4873 NumOfRegularWithIterators =
4874 CGF.Builder.CreateNUWMul(NumOfRegularWithIterators, Sz);
4875 }
4876 HasRegularWithIterators = true;
4877 continue;
4878 }
4879 }
4880
4881 QualType KmpDependInfoArrayTy;
4882 if (HasDepobjDeps || HasRegularWithIterators) {
4883 NumOfElements = llvm::ConstantInt::get(CGM.IntPtrTy, NumDependencies,
4884 /*isSigned=*/false);
4885 if (HasDepobjDeps) {
4886 NumOfElements =
4887 CGF.Builder.CreateNUWAdd(NumOfDepobjElements, NumOfElements);
4888 }
4889 if (HasRegularWithIterators) {
4890 NumOfElements =
4891 CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumOfElements);
4892 }
4893 OpaqueValueExpr OVE(Loc,
4894 C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0),
4895 VK_PRValue);
4896 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
4897 RValue::get(NumOfElements));
4898 KmpDependInfoArrayTy =
4899 C.getVariableArrayType(KmpDependInfoTy, &OVE, ArrayType::Normal,
4900 /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
4901 // CGF.EmitVariablyModifiedType(KmpDependInfoArrayTy);
4902 // Properly emit variable-sized array.
4903 auto *PD = ImplicitParamDecl::Create(C, KmpDependInfoArrayTy,
4904 ImplicitParamDecl::Other);
4905 CGF.EmitVarDecl(*PD);
4906 DependenciesArray = CGF.GetAddrOfLocalVar(PD);
4907 NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
4908 /*isSigned=*/false);
4909 } else {
4910 KmpDependInfoArrayTy = C.getConstantArrayType(
4911 KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies), nullptr,
4912 ArrayType::Normal, /*IndexTypeQuals=*/0);
4913 DependenciesArray =
4914 CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
4915 DependenciesArray = CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0);
4916 NumOfElements = llvm::ConstantInt::get(CGM.Int32Ty, NumDependencies,
4917 /*isSigned=*/false);
4918 }
4919 unsigned Pos = 0;
4920 for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
4921 if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
4922 Dependencies[I].IteratorExpr)
4923 continue;
4924 emitDependData(CGF, KmpDependInfoTy, &Pos, Dependencies[I],
4925 DependenciesArray);
4926 }
4927 // Copy regular dependencies with iterators.
4928 LValue PosLVal = CGF.MakeAddrLValue(
4929 CGF.CreateMemTemp(C.getSizeType(), "dep.counter.addr"), C.getSizeType());
4930 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
4931 for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
4932 if (Dependencies[I].DepKind == OMPC_DEPEND_depobj ||
4933 !Dependencies[I].IteratorExpr)
4934 continue;
4935 emitDependData(CGF, KmpDependInfoTy, &PosLVal, Dependencies[I],
4936 DependenciesArray);
4937 }
4938 // Copy final depobj arrays without iterators.
4939 if (HasDepobjDeps) {
4940 for (unsigned I = 0, End = Dependencies.size(); I < End; ++I) {
4941 if (Dependencies[I].DepKind != OMPC_DEPEND_depobj)
4942 continue;
4943 emitDepobjElements(CGF, KmpDependInfoTy, PosLVal, Dependencies[I],
4944 DependenciesArray);
4945 }
4946 }
4947 DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4948 DependenciesArray, CGF.VoidPtrTy);
4949 return std::make_pair(NumOfElements, DependenciesArray);
4950}
4951
4952Address CGOpenMPRuntime::emitDepobjDependClause(
4953 CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies,
4954 SourceLocation Loc) {
4955 if (Dependencies.DepExprs.empty())
4956 return Address::invalid();
4957 // Process list of dependencies.
4958 ASTContext &C = CGM.getContext();
4959 Address DependenciesArray = Address::invalid();
4960 unsigned NumDependencies = Dependencies.DepExprs.size();
4961 QualType FlagsTy;
4962 getDependTypes(C, KmpDependInfoTy, FlagsTy);
4963 RecordDecl *KmpDependInfoRD =
4964 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4965
4966 llvm::Value *Size;
4967 // Define type kmp_depend_info[<Dependencies.size()>];
4968 // For depobj reserve one extra element to store the number of elements.
4969 // It is required to handle depobj(x) update(in) construct.
4970 // kmp_depend_info[<Dependencies.size()>] deps;
4971 llvm::Value *NumDepsVal;
4972 CharUnits Align = C.getTypeAlignInChars(KmpDependInfoTy);
4973 if (const auto *IE =
4974 cast_or_null<OMPIteratorExpr>(Dependencies.IteratorExpr)) {
4975 NumDepsVal = llvm::ConstantInt::get(CGF.SizeTy, 1);
4976 for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
4977 llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
4978 Sz = CGF.Builder.CreateIntCast(Sz, CGF.SizeTy, /*isSigned=*/false);
4979 NumDepsVal = CGF.Builder.CreateNUWMul(NumDepsVal, Sz);
4980 }
4981 Size = CGF.Builder.CreateNUWAdd(llvm::ConstantInt::get(CGF.SizeTy, 1),
4982 NumDepsVal);
4983 CharUnits SizeInBytes =
4984 C.getTypeSizeInChars(KmpDependInfoTy).alignTo(Align);
4985 llvm::Value *RecSize = CGM.getSize(SizeInBytes);
4986 Size = CGF.Builder.CreateNUWMul(Size, RecSize);
4987 NumDepsVal =
4988 CGF.Builder.CreateIntCast(NumDepsVal, CGF.IntPtrTy, /*isSigned=*/false);
4989 } else {
4990 QualType KmpDependInfoArrayTy = C.getConstantArrayType(
4991 KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies + 1),
4992 nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
4993 CharUnits Sz = C.getTypeSizeInChars(KmpDependInfoArrayTy);
4994 Size = CGM.getSize(Sz.alignTo(Align));
4995 NumDepsVal = llvm::ConstantInt::get(CGF.IntPtrTy, NumDependencies);
4996 }
4997 // Need to allocate on the dynamic memory.
4998 llvm::Value *ThreadID = getThreadID(CGF, Loc);
4999 // Use default allocator.
5000 llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5001 llvm::Value *Args[] = {ThreadID, Size, Allocator};
5002
5003 llvm::Value *Addr =
5004 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
5005 CGM.getModule(), OMPRTL___kmpc_alloc),
5006 Args, ".dep.arr.addr");
5007 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5008 Addr, CGF.ConvertTypeForMem(KmpDependInfoTy)->getPointerTo());
5009 DependenciesArray = Address(Addr, Align);
5010 // Write number of elements in the first element of array for depobj.
5011 LValue Base = CGF.MakeAddrLValue(DependenciesArray, KmpDependInfoTy);
5012 // deps[i].base_addr = NumDependencies;
5013 LValue BaseAddrLVal = CGF.EmitLValueForField(
5014 Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
5015 CGF.EmitStoreOfScalar(NumDepsVal, BaseAddrLVal);
5016 llvm::PointerUnion<unsigned *, LValue *> Pos;
5017 unsigned Idx = 1;
5018 LValue PosLVal;
5019 if (Dependencies.IteratorExpr) {
5020 PosLVal = CGF.MakeAddrLValue(
5021 CGF.CreateMemTemp(C.getSizeType(), "iterator.counter.addr"),
5022 C.getSizeType());
5023 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Idx), PosLVal,
5024 /*IsInit=*/true);
5025 Pos = &PosLVal;
5026 } else {
5027 Pos = &Idx;
5028 }
5029 emitDependData(CGF, KmpDependInfoTy, Pos, Dependencies, DependenciesArray);
5030 DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5031 CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy);
5032 return DependenciesArray;
5033}
5034
5035void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
5036 SourceLocation Loc) {
5037 ASTContext &C = CGM.getContext();
5038 QualType FlagsTy;
5039 getDependTypes(C, KmpDependInfoTy, FlagsTy);
5040 LValue Base = CGF.EmitLoadOfPointerLValue(
5041 DepobjLVal.getAddress(CGF),
5042 C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5043 QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
5044 Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5045 Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
5046 llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
5047 Addr.getPointer(),
5048 llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
5049 DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr,
5050 CGF.VoidPtrTy);
5051 llvm::Value *ThreadID = getThreadID(CGF, Loc);
5052 // Use default allocator.
5053 llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5054 llvm::Value *Args[] = {ThreadID, DepObjAddr, Allocator};
5055
5056 // _kmpc_free(gtid, addr, nullptr);
5057 (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
5058 CGM.getModule(), OMPRTL___kmpc_free),
5059 Args);
5060}
5061
5062void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
5063 OpenMPDependClauseKind NewDepKind,
5064 SourceLocation Loc) {
5065 ASTContext &C = CGM.getContext();
5066 QualType FlagsTy;
5067 getDependTypes(C, KmpDependInfoTy, FlagsTy);
5068 RecordDecl *KmpDependInfoRD =
5069 cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
5070 llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
5071 llvm::Value *NumDeps;
5072 LValue Base;
5073 std::tie(NumDeps, Base) = getDepobjElements(CGF, DepobjLVal, Loc);
5074
5075 Address Begin = Base.getAddress(CGF);
5076 // Cast from pointer to array type to pointer to single element.
5077 llvm::Value *End = CGF.Builder.CreateGEP(Begin.getPointer(), NumDeps);
5078 // The basic structure here is a while-do loop.
5079 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body");
5080 llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done");
5081 llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
5082 CGF.EmitBlock(BodyBB);
5083 llvm::PHINode *ElementPHI =
5084 CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
5085 ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
5086 Begin = Address(ElementPHI, Begin.getAlignment());
5087 Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
5088 Base.getTBAAInfo());
5089 // deps[i].flags = NewDepKind;
5090 RTLDependenceKindTy DepKind = translateDependencyKind(NewDepKind);
5091 LValue FlagsLVal = CGF.EmitLValueForField(
5092 Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
5093 CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
5094 FlagsLVal);
5095
5096 // Shift the address forward by one element.
5097 Address ElementNext =
5098 CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext");
5099 ElementPHI->addIncoming(ElementNext.getPointer(),
5100 CGF.Builder.GetInsertBlock());
5101 llvm::Value *IsEmpty =
5102 CGF.Builder.CreateICmpEQ(ElementNext.getPointer(), End, "omp.isempty");
5103 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
5104 // Done.
5105 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
5106}
5107
5108void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
5109 const OMPExecutableDirective &D,
5110 llvm::Function *TaskFunction,
5111 QualType SharedsTy, Address Shareds,
5112 const Expr *IfCond,
5113 const OMPTaskDataTy &Data) {
5114 if (!CGF.HaveInsertPoint())
5115 return;
5116
5117 TaskResultTy Result =
5118 emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
5119 llvm::Value *NewTask = Result.NewTask;
5120 llvm::Function *TaskEntry = Result.TaskEntry;
5121 llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
5122 LValue TDBase = Result.TDBase;
5123 const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
5124 // Process list of dependences.
5125 Address DependenciesArray = Address::invalid();
5126 llvm::Value *NumOfElements;
5127 std::tie(NumOfElements, DependenciesArray) =
5128 emitDependClause(CGF, Data.Dependences, Loc);
5129
5130 // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
5131 // libcall.
5132 // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
5133 // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
5134 // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence
5135 // list is not empty
5136 llvm::Value *ThreadID = getThreadID(CGF, Loc);
5137 llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
5138 llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
5139 llvm::Value *DepTaskArgs[7];
5140 if (!Data.Dependences.empty()) {
5141 DepTaskArgs[0] = UpLoc;
5142 DepTaskArgs[1] = ThreadID;
5143 DepTaskArgs[2] = NewTask;
5144 DepTaskArgs[3] = NumOfElements;
5145 DepTaskArgs[4] = DependenciesArray.getPointer();
5146 DepTaskArgs[5] = CGF.Builder.getInt32(0);
5147 DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5148 }
5149 auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, &TaskArgs,
5150 &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
5151 if (!Data.Tied) {
5152 auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
5153 LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
5154 CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
5155 }
5156 if (!Data.Dependences.empty()) {
5157 CGF.EmitRuntimeCall(
5158 OMPBuilder.getOrCreateRuntimeFunction(
5159 CGM.getModule(), OMPRTL___kmpc_omp_task_with_deps),
5160 DepTaskArgs);
5161 } else {
5162 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
5163 CGM.getModule(), OMPRTL___kmpc_omp_task),
5164 TaskArgs);
5165 }
5166 // Check if parent region is untied and build return for untied task;
5167 if (auto *Region =
5168 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
5169 Region->emitUntiedSwitch(CGF);
5170 };
5171
5172 llvm::Value *DepWaitTaskArgs[6];
5173 if (!Data.Dependences.empty()) {
5174 DepWaitTaskArgs[0] = UpLoc;
5175 DepWaitTaskArgs[1] = ThreadID;
5176 DepWaitTaskArgs[2] = NumOfElements;
5177 DepWaitTaskArgs[3] = DependenciesArray.getPointer();
5178 DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
5179 DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5180 }
5181 auto &M = CGM.getModule();
5182 auto &&ElseCodeGen = [this, &M, &TaskArgs, ThreadID, NewTaskNewTaskTTy,
5183 TaskEntry, &Data, &DepWaitTaskArgs,
5184 Loc](CodeGenFunction &CGF, PrePostActionTy &) {
5185 CodeGenFunction::RunCleanupsScope LocalScope(CGF);
5186 // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
5187 // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
5188 // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
5189 // is specified.
5190 if (!Data.Dependences.empty())
5191 CGF.EmitRuntimeCall(
5192 OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_wait_deps),
5193 DepWaitTaskArgs);
5194 // Call proxy_task_entry(gtid, new_task);
5195 auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
5196 Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
5197 Action.Enter(CGF);
5198 llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
5199 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
5200 OutlinedFnArgs);
5201 };
5202
5203 // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
5204 // kmp_task_t *new_task);
5205 // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
5206 // kmp_task_t *new_task);
5207 RegionCodeGenTy RCG(CodeGen);
5208 CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
5209 M, OMPRTL___kmpc_omp_task_begin_if0),
5210 TaskArgs,
5211 OMPBuilder.getOrCreateRuntimeFunction(
5212 M, OMPRTL___kmpc_omp_task_complete_if0),
5213 TaskArgs);
5214 RCG.setAction(Action);
5215 RCG(CGF);
5216 };
5217
5218 if (IfCond) {
5219 emitIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
5220 } else {
5221 RegionCodeGenTy ThenRCG(ThenCodeGen);
5222 ThenRCG(CGF);
5223 }
5224}
5225
5226void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
5227 const OMPLoopDirective &D,
5228 llvm::Function *TaskFunction,
5229 QualType SharedsTy, Address Shareds,
5230 const Expr *IfCond,
5231 const OMPTaskDataTy &Data) {
5232 if (!CGF.HaveInsertPoint())
5233 return;
5234 TaskResultTy Result =
5235 emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
5236 // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
5237 // libcall.
5238 // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
5239 // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
5240 // sched, kmp_uint64 grainsize, void *task_dup);
5241 llvm::Value *ThreadID = getThreadID(CGF, Loc);
5242 llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
5243 llvm::Value *IfVal;
5244 if (IfCond) {
5245 IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
5246 /*isSigned=*/true);
5247 } else {
5248 IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
5249 }
5250
5251 LValue LBLVal = CGF.EmitLValueForField(
5252 Result.TDBase,
5253 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
5254 const auto *LBVar =
5255 cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
5256 CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(CGF),
5257 LBLVal.getQuals(),
5258 /*IsInitializer=*/true);
5259 LValue UBLVal = CGF.EmitLValueForField(
5260 Result.TDBase,
5261 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
5262 const auto *UBVar =
5263 cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
5264 CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(CGF),
5265 UBLVal.getQuals(),
5266 /*IsInitializer=*/true);
5267 LValue StLVal = CGF.EmitLValueForField(
5268 Result.TDBase,
5269 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
5270 const auto *StVar =
5271 cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
5272 CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(CGF),
5273 StLVal.getQuals(),
5274 /*IsInitializer=*/true);
5275 // Store reductions address.
5276 LValue RedLVal = CGF.EmitLValueForField(
5277 Result.TDBase,
5278 *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
5279 if (Data.Reductions) {
5280 CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
5281 } else {
5282 CGF.EmitNullInitialization(RedLVal.getAddress(CGF),
5283 CGF.getContext().VoidPtrTy);
5284 }
5285 enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
5286 llvm::Value *TaskArgs[] = {
5287 UpLoc,
5288 ThreadID,
5289 Result.NewTask,
5290 IfVal,
5291 LBLVal.getPointer(CGF),
5292 UBLVal.getPointer(CGF),
5293 CGF.EmitLoadOfScalar(StLVal, Loc),
5294 llvm::ConstantInt::getSigned(
5295 CGF.IntTy, 1), // Always 1 because taskgroup emitted by the compiler
5296 llvm::ConstantInt::getSigned(
5297 CGF.IntTy, Data.Schedule.getPointer()
5298 ? Data.Schedule.getInt() ? NumTasks : Grainsize
5299 : NoSchedule),
5300 Data.Schedule.getPointer()
5301 ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
5302 /*isSigned=*/false)
5303 : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
5304 Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5305 Result.TaskDupFn, CGF.VoidPtrTy)
5306 : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
5307 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
5308 CGM.getModule(), OMPRTL___kmpc_taskloop),
5309 TaskArgs);
5310}
5311
5312/// Emit reduction operation for each element of array (required for
5313/// array sections) LHS op = RHS.
5314/// \param Type Type of array.
5315/// \param LHSVar Variable on the left side of the reduction operation
5316/// (references element of array in original variable).
5317/// \param RHSVar Variable on the right side of the reduction operation
5318/// (references element of array in original variable).
5319/// \param RedOpGen Generator of reduction operation with use of LHSVar and
5320/// RHSVar.
5321static void EmitOMPAggregateReduction(
5322 CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
5323 const VarDecl *RHSVar,
5324 const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
5325 const Expr *, const Expr *)> &RedOpGen,
5326 const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
5327 const Expr *UpExpr = nullptr) {
5328 // Perform element-by-element initialization.
5329 QualType ElementTy;
5330 Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
5331 Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
5332
5333 // Drill down to the base element type on both arrays.
5334 const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
5335 llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
5336
5337 llvm::Value *RHSBegin = RHSAddr.getPointer();
5338 llvm::Value *LHSBegin = LHSAddr.getPointer();
5339 // Cast from pointer to array type to pointer to single element.
5340 llvm::Value *LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
5341 // The basic structure here is a while-do loop.
5342 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
5343 llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
5344 llvm::Value *IsEmpty =
5345 CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
5346 CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
5347
5348 // Enter the loop body, making that address the current address.
5349 llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
5350 CGF.EmitBlock(BodyBB);
5351
5352 CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
5353
5354 llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
5355 RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
5356 RHSElementPHI->addIncoming(RHSBegin, EntryBB);
5357 Address RHSElementCurrent =
5358 Address(RHSElementPHI,
5359 RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
5360
5361 llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
5362 LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
5363 LHSElementPHI->addIncoming(LHSBegin, EntryBB);
5364 Address LHSElementCurrent =
5365 Address(LHSElementPHI,
5366 LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
5367
5368 // Emit copy.
5369 CodeGenFunction::OMPPrivateScope Scope(CGF);
5370 Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; });
5371 Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; });
5372 Scope.Privatize();
5373 RedOpGen(CGF, XExpr, EExpr, UpExpr);
5374 Scope.ForceCleanup();
5375
5376 // Shift the address forward by one element.
5377 llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
5378 LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
5379 llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
5380 RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
5381 // Check whether we've reached the end.
5382 llvm::Value *Done =
5383 CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
5384 CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
5385 LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
5386 RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());
5387
5388 // Done.
5389 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
5390}
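
A plain-C++ analogue of the loop this helper emits may help: both arrays are walked element by element and the combiner runs on each LHS/RHS pair. This is an illustration of the emitted control flow only, not code from the file.

#include <cstddef>
#include <cstdio>

// Element-wise reduction over two arrays of equal length; RedOp plays the
// role of RedOpGen in the IR-emitting helper above.
template <typename T, typename RedOp>
void aggregateReduce(T *LHS, const T *RHS, std::size_t N, RedOp Op) {
  for (std::size_t I = 0; I < N; ++I)
    Op(LHS[I], RHS[I]); // e.g. LHS[I] += RHS[I]
}

int main() {
  int LHS[3] = {1, 2, 3};
  const int RHS[3] = {10, 20, 30};
  aggregateReduce(LHS, RHS, 3, [](int &L, int R) { L += R; });
  std::printf("%d %d %d\n", LHS[0], LHS[1], LHS[2]);
  return 0;
}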
5391
5392/// Emit reduction combiner. If the combiner is a simple expression emit it as
5393/// is, otherwise consider it as combiner of UDR decl and emit it as a call of
5394/// UDR combiner function.
5395static void emitReductionCombiner(CodeGenFunction &CGF,
5396 const Expr *ReductionOp) {
5397 if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
5398 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
5399 if (const auto *DRE =
5400 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
5401 if (const auto *DRD =
5402 dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
5403 std::pair<llvm::Function *, llvm::Function *> Reduction =
5404 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
5405 RValue Func = RValue::get(Reduction.first);
5406 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
5407 CGF.EmitIgnoredExpr(ReductionOp);
5408 return;
5409 }
5410 CGF.EmitIgnoredExpr(ReductionOp);
5411}
5412
5413llvm::Function *CGOpenMPRuntime::emitReductionFunction(
5414 SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates,
5415 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
5416 ArrayRef<const Expr *> ReductionOps) {
5417 ASTContext &C = CGM.getContext();
5418
5419 // void reduction_func(void *LHSArg, void *RHSArg);
5420 FunctionArgList Args;
5421 ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5422 ImplicitParamDecl::Other);
5423 ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5424 ImplicitParamDecl::Other);
5425 Args.push_back(&LHSArg);
5426 Args.push_back(&RHSArg);
5427 const auto &CGFI =
5428 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5429 std::string Name = getName({"omp", "reduction", "reduction_func"});
5430 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
5431 llvm::GlobalValue::InternalLinkage, Name,
5432 &CGM.getModule());
5433 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
5434 Fn->setDoesNotRecurse();
5435 CodeGenFunction CGF(CGM);
5436 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
5437
5438 // Dst = (void*[n])(LHSArg);
5439 // Src = (void*[n])(RHSArg);
5440 Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5441 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
5442 ArgsType), CGF.getPointerAlign());
5443 Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5444 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
5445 ArgsType), CGF.getPointerAlign());
5446
5447 // ...
5448 // *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
5449 // ...
5450 CodeGenFunction::OMPPrivateScope Scope(CGF);
5451 auto IPriv = Privates.begin();
5452 unsigned Idx = 0;
5453 for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
5454 const auto *RHSVar =
5455 cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
5456 Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() {
5457 return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
5458 });
5459 const auto *LHSVar =
5460 cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
5461 Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() {
5462 return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
5463 });
5464 QualType PrivTy = (*IPriv)->getType();
5465 if (PrivTy->isVariablyModifiedType()) {
5466 // Get array size and emit VLA type.
5467 ++Idx;
5468 Address Elem = CGF.Builder.CreateConstArrayGEP(LHS, Idx);
5469 llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
5470 const VariableArrayType *VLA =
5471 CGF.getContext().getAsVariableArrayType(PrivTy);
5472 const auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
5473 CodeGenFunction::OpaqueValueMapping OpaqueMap(
5474 CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
5475 CGF.EmitVariablyModifiedType(PrivTy);
5476 }
5477 }
5478 Scope.Privatize();
5479 IPriv = Privates.begin();
5480 auto ILHS = LHSExprs.begin();
5481 auto IRHS = RHSExprs.begin();
5482 for (const Expr *E : ReductionOps) {
5483 if ((*IPriv)->getType()->isArrayType()) {
5484 // Emit reduction for array section.
5485 const auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5486 const auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5487 EmitOMPAggregateReduction(
5488 CGF, (*IPriv)->getType(), LHSVar, RHSVar,
5489 [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5490 emitReductionCombiner(CGF, E);
5491 });
5492 } else {
5493 // Emit reduction for array subscript or single variable.
5494 emitReductionCombiner(CGF, E);
5495 }
5496 ++IPriv;
5497 ++ILHS;
5498 ++IRHS;
5499 }
5500 Scope.ForceCleanup();
5501 CGF.FinishFunction();
5502 return Fn;
5503}
5504
5505void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
5506 const Expr *ReductionOp,
5507 const Expr *PrivateRef,
5508 const DeclRefExpr *LHS,
5509 const DeclRefExpr *RHS) {
5510 if (PrivateRef->getType()->isArrayType()) {
5511 // Emit reduction for array section.
5512 const auto *LHSVar = cast<VarDecl>(LHS->getDecl());
5513 const auto *RHSVar = cast<VarDecl>(RHS->getDecl());
5514 EmitOMPAggregateReduction(
5515 CGF, PrivateRef->getType(), LHSVar, RHSVar,
5516 [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5517 emitReductionCombiner(CGF, ReductionOp);
5518 });
5519 } else {
5520 // Emit reduction for array subscript or single variable.
5521 emitReductionCombiner(CGF, ReductionOp);
5522 }
5523}
5524
5525void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
5526 ArrayRef<const Expr *> Privates,
5527 ArrayRef<const Expr *> LHSExprs,
5528 ArrayRef<const Expr *> RHSExprs,
5529 ArrayRef<const Expr *> ReductionOps,
5530 ReductionOptionsTy Options) {
5531 if (!CGF.HaveInsertPoint())
5532 return;
5533
5534 bool WithNowait = Options.WithNowait;
5535 bool SimpleReduction = Options.SimpleReduction;
5536
5537 // Next code should be emitted for reduction:
5538 //
5539 // static kmp_critical_name lock = { 0 };
5540 //
5541 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
5542 // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
5543 // ...
5544 // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
5545 // *(Type<n>-1*)rhs[<n>-1]);
5546 // }
5547 //
5548 // ...
5549 // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
5550 // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
5551 // RedList, reduce_func, &<lock>)) {
5552 // case 1:
5553 // ...
5554 // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5555 // ...
5556 // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5557 // break;
5558 // case 2:
5559 // ...
5560 // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
5561 // ...
5562 // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
5563 // break;
5564 // default:;
5565 // }
5566 //
5567 // if SimpleReduction is true, only the next code is generated:
5568 // ...
5569 // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5570 // ...
5571
5572 ASTContext &C = CGM.getContext();
5573
5574 if (SimpleReduction) {
5575 CodeGenFunction::RunCleanupsScope Scope(CGF);
5576 auto IPriv = Privates.begin();
5577 auto ILHS = LHSExprs.begin();
5578 auto IRHS = RHSExprs.begin();
5579 for (const Expr *E : ReductionOps) {
5580 emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
5581 cast<DeclRefExpr>(*IRHS));
5582 ++IPriv;
5583 ++ILHS;
5584 ++IRHS;
5585 }
5586 return;
5587 }
5588
5589 // 1. Build a list of reduction variables.
5590 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
5591 auto Size = RHSExprs.size();
5592 for (const Expr *E : Privates) {
5593 if (E->getType()->isVariablyModifiedType())
5594 // Reserve place for array size.
5595 ++Size;
5596 }
5597 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
5598 QualType ReductionArrayTy =
5599 C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
5600 /*IndexTypeQuals=*/0);
5601 Address ReductionList =
5602 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
5603 auto IPriv = Privates.begin();
5604 unsigned Idx = 0;
5605 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
5606 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
5607 CGF.Builder.CreateStore(
5608 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5609 CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
5610 Elem);
5611 if ((*IPriv)->getType()->isVariablyModifiedType()) {
5612 // Store array size.
5613 ++Idx;
5614 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
5615 llvm::Value *Size = CGF.Builder.CreateIntCast(
5616 CGF.getVLASize(
5617 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
5618 .NumElts,
5619 CGF.SizeTy, /*isSigned=*/false);
5620 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
5621 Elem);
5622 }
5623 }
5624
5625 // 2. Emit reduce_func().
5626 llvm::Function *ReductionFn = emitReductionFunction(
5627 Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
5628 LHSExprs, RHSExprs, ReductionOps);
5629
5630 // 3. Create static kmp_critical_name lock = { 0 };
5631 std::string Name = getName({"reduction"});
5632 llvm::Value *Lock = getCriticalRegionLock(Name);
5633
5634 // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
5635 // RedList, reduce_func, &<lock>);
5636 llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
5637 llvm::Value *ThreadId = getThreadID(CGF, Loc);
5638 llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
5639 llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5640 ReductionList.getPointer(), CGF.VoidPtrTy);
5641 llvm::Value *Args[] = {
5642 IdentTLoc, // ident_t *<loc>
5643 ThreadId, // i32 <gtid>
5644 CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
5645 ReductionArrayTySize, // size_type sizeof(RedList)
5646 RL, // void *RedList
5647 ReductionFn, // void (*) (void *, void *) <reduce_func>
5648 Lock // kmp_critical_name *&<lock>
5649 };
5650 llvm::Value *Res = CGF.EmitRuntimeCall(
5651 OMPBuilder.getOrCreateRuntimeFunction(
5652 CGM.getModule(),
5653 WithNowait ? OMPRTL___kmpc_reduce_nowait : OMPRTL___kmpc_reduce),
5654 Args);
5655
5656 // 5. Build switch(res)
5657 llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
5658 llvm::SwitchInst *SwInst =
5659 CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);
5660
5661 // 6. Build case 1:
5662 // ...
5663 // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5664 // ...
5665 // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5666 // break;
5667 llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
5668 SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
5669 CGF.EmitBlock(Case1BB);
5670
5671 // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5672 llvm::Value *EndArgs[] = {
5673 IdentTLoc, // ident_t *<loc>
5674 ThreadId, // i32 <gtid>
5675 Lock // kmp_critical_name *&<lock>
5676 };
5677 auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
5678 CodeGenFunction &CGF, PrePostActionTy &Action) {
5679 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
5680 auto IPriv = Privates.begin();
5681 auto ILHS = LHSExprs.begin();
5682 auto IRHS = RHSExprs.begin();
5683 for (const Expr *E : ReductionOps) {
5684 RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
5685 cast<DeclRefExpr>(*IRHS));
5686 ++IPriv;
5687 ++ILHS;
5688 ++IRHS;
5689 }
5690 };
5691 RegionCodeGenTy RCG(CodeGen);
5692 CommonActionTy Action(
5693 nullptr, llvm::None,
5694 OMPBuilder.getOrCreateRuntimeFunction(
5695 CGM.getModule(), WithNowait ? OMPRTL___kmpc_end_reduce_nowait
5696 : OMPRTL___kmpc_end_reduce),
5697 EndArgs);
5698 RCG.setAction(Action);
5699 RCG(CGF);
5700
5701 CGF.EmitBranch(DefaultBB);
5702
5703 // 7. Build case 2:
5704 // ...
5705 // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
5706 // ...
5707 // break;
5708 llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
5709 SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
5710 CGF.EmitBlock(Case2BB);
5711
5712 auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
5713 CodeGenFunction &CGF, PrePostActionTy &Action) {
5714 auto ILHS = LHSExprs.begin();
5715 auto IRHS = RHSExprs.begin();
5716 auto IPriv = Privates.begin();
5717 for (const Expr *E : ReductionOps) {
5718 const Expr *XExpr = nullptr;
5719 const Expr *EExpr = nullptr;
5720 const Expr *UpExpr = nullptr;
5721 BinaryOperatorKind BO = BO_Comma;
5722 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
5723 if (BO->getOpcode() == BO_Assign) {
5724 XExpr = BO->getLHS();
5725 UpExpr = BO->getRHS();
5726 }
5727 }
5728 // Try to emit update expression as a simple atomic.
5729 const Expr *RHSExpr = UpExpr;
5730 if (RHSExpr) {
5731 // Analyze RHS part of the whole expression.
5732 if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(
5733 RHSExpr->IgnoreParenImpCasts())) {
5734 // If this is a conditional operator, analyze its condition for
5735 // min/max reduction operator.
5736 RHSExpr = ACO->getCond();
5737 }
5738 if (const auto *BORHS =
5739 dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
5740 EExpr = BORHS->getRHS();
5741 BO = BORHS->getOpcode();
5742 }
5743 }
5744 if (XExpr) {
5745 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5746 auto &&AtomicRedGen = [BO, VD,
5747 Loc](CodeGenFunction &CGF, const Expr *XExpr,
5748 const Expr *EExpr, const Expr *UpExpr) {
5749 LValue X = CGF.EmitLValue(XExpr);
5750 RValue E;
5751 if (EExpr)
5752 E = CGF.EmitAnyExpr(EExpr);
5753 CGF.EmitOMPAtomicSimpleUpdateExpr(
5754 X, E, BO, /*IsXLHSInRHSPart=*/true,
5755 llvm::AtomicOrdering::Monotonic, Loc,
5756 [&CGF, UpExpr, VD, Loc](RValue XRValue) {
5757 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
5758 PrivateScope.addPrivate(
5759 VD, [&CGF, VD, XRValue, Loc]() {
5760 Address LHSTemp = CGF.CreateMemTemp(VD->getType());
5761 CGF.emitOMPSimpleStore(
5762 CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
5763 VD->getType().getNonReferenceType(), Loc);
5764 return LHSTemp;
5765 });
5766 (void)PrivateScope.Privatize();
5767 return CGF.EmitAnyExpr(UpExpr);
5768 });
5769 };
5770 if ((*IPriv)->getType()->isArrayType()) {
5771 // Emit atomic reduction for array section.
5772 const auto *RHSVar =
5773 cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5774 EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
5775 AtomicRedGen, XExpr, EExpr, UpExpr);
5776 } else {
5777 // Emit atomic reduction for array subscript or single variable.
5778 AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
5779 }
5780 } else {
5781 // Emit as a critical region.
5782 auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
5783 const Expr *, const Expr *) {
5784 CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
5785 std::string Name = RT.getName({"atomic_reduction"});
5786 RT.emitCriticalRegion(
5787 CGF, Name,
5788 [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
5789 Action.Enter(CGF);
5790 emitReductionCombiner(CGF, E);
5791 },
5792 Loc);
5793 };
5794 if ((*IPriv)->getType()->isArrayType()) {
5795 const auto *LHSVar =
5796 cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5797 const auto *RHSVar =
5798 cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5799 EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
5800 CritRedGen);
5801 } else {
5802 CritRedGen(CGF, nullptr, nullptr, nullptr);
5803 }
5804 }
5805 ++ILHS;
5806 ++IRHS;
5807 ++IPriv;
5808 }
5809 };
5810 RegionCodeGenTy AtomicRCG(AtomicCodeGen);
5811 if (!WithNowait) {
5812 // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
5813 llvm::Value *EndArgs[] = {
5814 IdentTLoc, // ident_t *<loc>
5815 ThreadId, // i32 <gtid>
5816 Lock