Bug Summary

File: build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
Warning: line 3452, column 8
Called C++ object pointer is null

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name CGOpenMPRuntimeGPU.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/clang/include -I tools/clang/include -I include -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-10-03-140002-15933-1 -x c++ 
/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
1//===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides a generalized class for OpenMP runtime code generation
10// specialized by GPU targets NVPTX and AMDGCN.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGOpenMPRuntimeGPU.h"
15#include "CodeGenFunction.h"
16#include "clang/AST/Attr.h"
17#include "clang/AST/DeclOpenMP.h"
18#include "clang/AST/StmtOpenMP.h"
19#include "clang/AST/StmtVisitor.h"
20#include "clang/Basic/Cuda.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/Frontend/OpenMP/OMPGridValues.h"
23#include "llvm/Support/MathExtras.h"
24
25using namespace clang;
26using namespace CodeGen;
27using namespace llvm::omp;
28
29namespace {
30/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
31class NVPTXActionTy final : public PrePostActionTy {
32 llvm::FunctionCallee EnterCallee = nullptr;
33 ArrayRef<llvm::Value *> EnterArgs;
34 llvm::FunctionCallee ExitCallee = nullptr;
35 ArrayRef<llvm::Value *> ExitArgs;
36 bool Conditional = false;
37 llvm::BasicBlock *ContBlock = nullptr;
38
39public:
40 NVPTXActionTy(llvm::FunctionCallee EnterCallee,
41 ArrayRef<llvm::Value *> EnterArgs,
42 llvm::FunctionCallee ExitCallee,
43 ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
44 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
45 ExitArgs(ExitArgs), Conditional(Conditional) {}
46 void Enter(CodeGenFunction &CGF) override {
47 llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
48 if (Conditional) {
49 llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
50 auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
51 ContBlock = CGF.createBasicBlock("omp_if.end");
52 // Generate the branch (If-stmt)
53 CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
54 CGF.EmitBlock(ThenBlock);
55 }
56 }
57 void Done(CodeGenFunction &CGF) {
58 // Emit the rest of blocks/branches
59 CGF.EmitBranch(ContBlock);
60 CGF.EmitBlock(ContBlock, true);
61 }
62 void Exit(CodeGenFunction &CGF) override {
63 CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
64 }
65};
66
67/// A class to track the execution mode when codegening directives within
68/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
69/// to the target region and used by containing directives such as 'parallel'
70/// to emit optimized code.
71class ExecutionRuntimeModesRAII {
72private:
73 CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
74 CGOpenMPRuntimeGPU::EM_Unknown;
75 CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
76
77public:
78 ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
79 CGOpenMPRuntimeGPU::ExecutionMode EntryMode)
80 : ExecMode(ExecMode) {
81 SavedExecMode = ExecMode;
82 ExecMode = EntryMode;
83 }
84 ~ExecutionRuntimeModesRAII() { ExecMode = SavedExecMode; }
85};
86
87/// GPU Configuration: This information can be derived from cuda registers,
88/// however, providing compile time constants helps generate more efficient
89/// code. For all practical purposes this is fine because the configuration
90/// is the same for all known NVPTX architectures.
91enum MachineConfiguration : unsigned {
92 /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
93 /// specific Grid Values like GV_Warp_Size, GV_Slot_Size
94
95 /// Global memory alignment for performance.
96 GlobalMemoryAlignment = 128,
97
98 /// Maximal size of the shared memory buffer.
99 SharedMemorySize = 128,
100};
101
102static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
103 RefExpr = RefExpr->IgnoreParens();
104 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
105 const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
106 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
107 Base = TempASE->getBase()->IgnoreParenImpCasts();
108 RefExpr = Base;
109 } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
110 const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
111 while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
112 Base = TempOASE->getBase()->IgnoreParenImpCasts();
113 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
114 Base = TempASE->getBase()->IgnoreParenImpCasts();
115 RefExpr = Base;
116 }
117 RefExpr = RefExpr->IgnoreParenImpCasts();
118 if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
119 return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
120 const auto *ME = cast<MemberExpr>(RefExpr);
121 return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
122}
123
124
125static RecordDecl *buildRecordForGlobalizedVars(
126 ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
127 ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
128 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
129 &MappedDeclsFields, int BufSize) {
130 using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
131 if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
132 return nullptr;
133 SmallVector<VarsDataTy, 4> GlobalizedVars;
134 for (const ValueDecl *D : EscapedDecls)
135 GlobalizedVars.emplace_back(
136 CharUnits::fromQuantity(std::max(
137 C.getDeclAlign(D).getQuantity(),
138 static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
139 D);
140 for (const ValueDecl *D : EscapedDeclsForTeams)
141 GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
142 llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
143 return L.first > R.first;
144 });
145
146 // Build struct _globalized_locals_ty {
147 // /* globalized vars */[WarpSize] align (max(decl_align,
148 // GlobalMemoryAlignment))
149 // /* globalized vars */ for EscapedDeclsForTeams
150 // };
151 RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
152 GlobalizedRD->startDefinition();
153 llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
154 EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
155 for (const auto &Pair : GlobalizedVars) {
156 const ValueDecl *VD = Pair.second;
157 QualType Type = VD->getType();
158 if (Type->isLValueReferenceType())
159 Type = C.getPointerType(Type.getNonReferenceType());
160 else
161 Type = Type.getNonReferenceType();
162 SourceLocation Loc = VD->getLocation();
163 FieldDecl *Field;
164 if (SingleEscaped.count(VD)) {
165 Field = FieldDecl::Create(
166 C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
167 C.getTrivialTypeSourceInfo(Type, SourceLocation()),
168 /*BW=*/nullptr, /*Mutable=*/false,
169 /*InitStyle=*/ICIS_NoInit);
170 Field->setAccess(AS_public);
171 if (VD->hasAttrs()) {
172 for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
173 E(VD->getAttrs().end());
174 I != E; ++I)
175 Field->addAttr(*I);
176 }
177 } else {
178 llvm::APInt ArraySize(32, BufSize);
179 Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
180 0);
181 Field = FieldDecl::Create(
182 C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
183 C.getTrivialTypeSourceInfo(Type, SourceLocation()),
184 /*BW=*/nullptr, /*Mutable=*/false,
185 /*InitStyle=*/ICIS_NoInit);
186 Field->setAccess(AS_public);
187 llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
188 static_cast<CharUnits::QuantityType>(
189 GlobalMemoryAlignment)));
190 Field->addAttr(AlignedAttr::CreateImplicit(
191 C, /*IsAlignmentExpr=*/true,
192 IntegerLiteral::Create(C, Align,
193 C.getIntTypeForBitwidth(32, /*Signed=*/0),
194 SourceLocation()),
195 {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
196 }
197 GlobalizedRD->addDecl(Field);
198 MappedDeclsFields.try_emplace(VD, Field);
199 }
200 GlobalizedRD->completeDefinition();
201 return GlobalizedRD;
202}
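// Illustrative sketch (not from the file under analysis): the record built by
// buildRecordForGlobalizedVars for a hypothetical escaped parallel-region
// variable 'int x', assuming the default warp size of 32, is shaped roughly
// like the struct below. BufSize becomes the array dimension and the field
// alignment is max(decl_align, GlobalMemoryAlignment), i.e. 128 here.
struct _globalized_locals_ty_sketch {
  int x[32] __attribute__((aligned(128)));
};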
203
204/// Get the list of variables that can escape their declaration context.
205class CheckVarsEscapingDeclContext final
206 : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
207 CodeGenFunction &CGF;
208 llvm::SetVector<const ValueDecl *> EscapedDecls;
209 llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
210 llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
211 RecordDecl *GlobalizedRD = nullptr;
212 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
213 bool AllEscaped = false;
214 bool IsForCombinedParallelRegion = false;
215
216 void markAsEscaped(const ValueDecl *VD) {
217 // Do not globalize declare target variables.
218 if (!isa<VarDecl>(VD) ||
219 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
220 return;
221 VD = cast<ValueDecl>(VD->getCanonicalDecl());
222 // Use user-specified allocation.
223 if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
224 return;
225 // Variables captured by value must be globalized.
226 if (auto *CSI = CGF.CapturedStmtInfo) {
227 if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
228 // Check if need to capture the variable that was already captured by
229 // value in the outer region.
230 if (!IsForCombinedParallelRegion) {
231 if (!FD->hasAttrs())
232 return;
233 const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
234 if (!Attr)
235 return;
236 if (((Attr->getCaptureKind() != OMPC_map) &&
237 !isOpenMPPrivate(Attr->getCaptureKind())) ||
238 ((Attr->getCaptureKind() == OMPC_map) &&
239 !FD->getType()->isAnyPointerType()))
240 return;
241 }
242 if (!FD->getType()->isReferenceType()) {
243 assert(!VD->getType()->isVariablyModifiedType() &&
244 "Parameter captured by value with variably modified type");
245 EscapedParameters.insert(VD);
246 } else if (!IsForCombinedParallelRegion) {
247 return;
248 }
249 }
250 }
251 if ((!CGF.CapturedStmtInfo ||
252 (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
253 VD->getType()->isReferenceType())
254 // Do not globalize variables with reference type.
255 return;
256 if (VD->getType()->isVariablyModifiedType())
257 EscapedVariableLengthDecls.insert(VD);
258 else
259 EscapedDecls.insert(VD);
260 }
261
262 void VisitValueDecl(const ValueDecl *VD) {
263 if (VD->getType()->isLValueReferenceType())
264 markAsEscaped(VD);
265 if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
266 if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
267 const bool SavedAllEscaped = AllEscaped;
268 AllEscaped = VD->getType()->isLValueReferenceType();
269 Visit(VarD->getInit());
270 AllEscaped = SavedAllEscaped;
271 }
272 }
273 }
274 void VisitOpenMPCapturedStmt(const CapturedStmt *S,
275 ArrayRef<OMPClause *> Clauses,
276 bool IsCombinedParallelRegion) {
277 if (!S)
278 return;
279 for (const CapturedStmt::Capture &C : S->captures()) {
280 if (C.capturesVariable() && !C.capturesVariableByCopy()) {
281 const ValueDecl *VD = C.getCapturedVar();
282 bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
283 if (IsCombinedParallelRegion) {
284 // Check if the variable is privatized in the combined construct and
285 // those private copies must be shared in the inner parallel
286 // directive.
287 IsForCombinedParallelRegion = false;
288 for (const OMPClause *C : Clauses) {
289 if (!isOpenMPPrivate(C->getClauseKind()) ||
290 C->getClauseKind() == OMPC_reduction ||
291 C->getClauseKind() == OMPC_linear ||
292 C->getClauseKind() == OMPC_private)
293 continue;
294 ArrayRef<const Expr *> Vars;
295 if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
296 Vars = PC->getVarRefs();
297 else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
298 Vars = PC->getVarRefs();
299 else
300 llvm_unreachable("Unexpected clause.");
301 for (const auto *E : Vars) {
302 const Decl *D =
303 cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
304 if (D == VD->getCanonicalDecl()) {
305 IsForCombinedParallelRegion = true;
306 break;
307 }
308 }
309 if (IsForCombinedParallelRegion)
310 break;
311 }
312 }
313 markAsEscaped(VD);
314 if (isa<OMPCapturedExprDecl>(VD))
315 VisitValueDecl(VD);
316 IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
317 }
318 }
319 }
320
321 void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
322 assert(!GlobalizedRD &&
323 "Record for globalized variables is built already.");
324 ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
325 unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
326 if (IsInTTDRegion)
327 EscapedDeclsForTeams = EscapedDecls.getArrayRef();
328 else
329 EscapedDeclsForParallel = EscapedDecls.getArrayRef();
330 GlobalizedRD = ::buildRecordForGlobalizedVars(
331 CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
332 MappedDeclsFields, WarpSize);
333 }
334
335public:
336 CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
337 ArrayRef<const ValueDecl *> TeamsReductions)
338 : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
339 }
340 virtual ~CheckVarsEscapingDeclContext() = default;
341 void VisitDeclStmt(const DeclStmt *S) {
342 if (!S)
343 return;
344 for (const Decl *D : S->decls())
345 if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
346 VisitValueDecl(VD);
347 }
348 void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
349 if (!D)
350 return;
351 if (!D->hasAssociatedStmt())
352 return;
353 if (const auto *S =
354 dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
355 // Do not analyze directives that do not actually require capturing,
356 // like `omp for` or `omp simd` directives.
357 llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
358 getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
359 if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
360 VisitStmt(S->getCapturedStmt());
361 return;
362 }
363 VisitOpenMPCapturedStmt(
364 S, D->clauses(),
365 CaptureRegions.back() == OMPD_parallel &&
366 isOpenMPDistributeDirective(D->getDirectiveKind()));
367 }
368 }
369 void VisitCapturedStmt(const CapturedStmt *S) {
370 if (!S)
371 return;
372 for (const CapturedStmt::Capture &C : S->captures()) {
373 if (C.capturesVariable() && !C.capturesVariableByCopy()) {
374 const ValueDecl *VD = C.getCapturedVar();
375 markAsEscaped(VD);
376 if (isa<OMPCapturedExprDecl>(VD))
377 VisitValueDecl(VD);
378 }
379 }
380 }
381 void VisitLambdaExpr(const LambdaExpr *E) {
382 if (!E)
383 return;
384 for (const LambdaCapture &C : E->captures()) {
385 if (C.capturesVariable()) {
386 if (C.getCaptureKind() == LCK_ByRef) {
387 const ValueDecl *VD = C.getCapturedVar();
388 markAsEscaped(VD);
389 if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
390 VisitValueDecl(VD);
391 }
392 }
393 }
394 }
395 void VisitBlockExpr(const BlockExpr *E) {
396 if (!E)
397 return;
398 for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
399 if (C.isByRef()) {
400 const VarDecl *VD = C.getVariable();
401 markAsEscaped(VD);
402 if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
403 VisitValueDecl(VD);
404 }
405 }
406 }
407 void VisitCallExpr(const CallExpr *E) {
408 if (!E)
409 return;
410 for (const Expr *Arg : E->arguments()) {
411 if (!Arg)
412 continue;
413 if (Arg->isLValue()) {
414 const bool SavedAllEscaped = AllEscaped;
415 AllEscaped = true;
416 Visit(Arg);
417 AllEscaped = SavedAllEscaped;
418 } else {
419 Visit(Arg);
420 }
421 }
422 Visit(E->getCallee());
423 }
424 void VisitDeclRefExpr(const DeclRefExpr *E) {
425 if (!E)
426 return;
427 const ValueDecl *VD = E->getDecl();
428 if (AllEscaped)
429 markAsEscaped(VD);
430 if (isa<OMPCapturedExprDecl>(VD))
431 VisitValueDecl(VD);
432 else if (VD->isInitCapture())
433 VisitValueDecl(VD);
434 }
435 void VisitUnaryOperator(const UnaryOperator *E) {
436 if (!E)
437 return;
438 if (E->getOpcode() == UO_AddrOf) {
439 const bool SavedAllEscaped = AllEscaped;
440 AllEscaped = true;
441 Visit(E->getSubExpr());
442 AllEscaped = SavedAllEscaped;
443 } else {
444 Visit(E->getSubExpr());
445 }
446 }
447 void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
448 if (!E)
449 return;
450 if (E->getCastKind() == CK_ArrayToPointerDecay) {
451 const bool SavedAllEscaped = AllEscaped;
452 AllEscaped = true;
453 Visit(E->getSubExpr());
454 AllEscaped = SavedAllEscaped;
455 } else {
456 Visit(E->getSubExpr());
457 }
458 }
459 void VisitExpr(const Expr *E) {
460 if (!E)
461 return;
462 bool SavedAllEscaped = AllEscaped;
463 if (!E->isLValue())
464 AllEscaped = false;
465 for (const Stmt *Child : E->children())
466 if (Child)
467 Visit(Child);
468 AllEscaped = SavedAllEscaped;
469 }
470 void VisitStmt(const Stmt *S) {
471 if (!S)
472 return;
473 for (const Stmt *Child : S->children())
474 if (Child)
475 Visit(Child);
476 }
477
478 /// Returns the record that handles all the escaped local variables and used
479 /// instead of their original storage.
480 const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
481 if (!GlobalizedRD)
482 buildRecordForGlobalizedVars(IsInTTDRegion);
483 return GlobalizedRD;
484 }
485
486 /// Returns the field in the globalized record for the escaped variable.
487 const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
488 assert(GlobalizedRD &&
489 "Record for globalized variables must be generated already.");
490 auto I = MappedDeclsFields.find(VD);
491 if (I == MappedDeclsFields.end())
492 return nullptr;
493 return I->getSecond();
494 }
495
496 /// Returns the list of the escaped local variables/parameters.
497 ArrayRef<const ValueDecl *> getEscapedDecls() const {
498 return EscapedDecls.getArrayRef();
499 }
500
501 /// Checks if the escaped local variable is actually a parameter passed by
502 /// value.
503 const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
504 return EscapedParameters;
505 }
506
507 /// Returns the list of the escaped variables with the variably modified
508 /// types.
509 ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
510 return EscapedVariableLengthDecls.getArrayRef();
511 }
512};
513} // anonymous namespace
514
515/// Get the id of the warp in the block.
516/// We assume that the warp size is 32, which is always the case
517/// on the NVPTX device, to generate more efficient code.
518static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
519 CGBuilderTy &Bld = CGF.Builder;
520 unsigned LaneIDBits =
521 llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
522 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
523 return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
524}
525
526/// Get the id of the current lane in the Warp.
527/// We assume that the warp size is 32, which is always the case
528/// on the NVPTX device, to generate more efficient code.
529static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
530 CGBuilderTy &Bld = CGF.Builder;
531 unsigned LaneIDBits =
532 llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
533 unsigned LaneIDMask = ~0u >> (32u - LaneIDBits);
534 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
535 return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
536 "nvptx_lane_id");
537}
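// Illustrative sketch (not from the file under analysis): the arithmetic the
// two helpers above emit, assuming GV_Warp_Size == 32 and a hypothetical
// in-block thread id of 70.
constexpr unsigned LaneIDBitsSketch = 5;                               // Log2_32(32)
constexpr unsigned LaneIDMaskSketch = ~0u >> (32u - LaneIDBitsSketch); // 0x1f
static_assert((70u >> LaneIDBitsSketch) == 2u, "warp id of thread 70");
static_assert((70u & LaneIDMaskSketch) == 6u, "lane id of thread 70");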
538
539CGOpenMPRuntimeGPU::ExecutionMode
540CGOpenMPRuntimeGPU::getExecutionMode() const {
541 return CurrentExecutionMode;
542}
543
544static CGOpenMPRuntimeGPU::DataSharingMode
545getDataSharingMode(CodeGenModule &CGM) {
546 return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
547 : CGOpenMPRuntimeGPU::Generic;
548}
549
550/// Check for inner (nested) SPMD construct, if any
551static bool hasNestedSPMDDirective(ASTContext &Ctx,
552 const OMPExecutableDirective &D) {
553 const auto *CS = D.getInnermostCapturedStmt();
554 const auto *Body =
555 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
556 const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
557
558 if (const auto *NestedDir =
559 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
560 OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
561 switch (D.getDirectiveKind()) {
562 case OMPD_target:
563 if (isOpenMPParallelDirective(DKind))
564 return true;
565 if (DKind == OMPD_teams) {
566 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
567 /*IgnoreCaptured=*/true);
568 if (!Body)
569 return false;
570 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
571 if (const auto *NND =
572 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
573 DKind = NND->getDirectiveKind();
574 if (isOpenMPParallelDirective(DKind))
575 return true;
576 }
577 }
578 return false;
579 case OMPD_target_teams:
580 return isOpenMPParallelDirective(DKind);
581 case OMPD_target_simd:
582 case OMPD_target_parallel:
583 case OMPD_target_parallel_for:
584 case OMPD_target_parallel_for_simd:
585 case OMPD_target_teams_distribute:
586 case OMPD_target_teams_distribute_simd:
587 case OMPD_target_teams_distribute_parallel_for:
588 case OMPD_target_teams_distribute_parallel_for_simd:
589 case OMPD_parallel:
590 case OMPD_for:
591 case OMPD_parallel_for:
592 case OMPD_parallel_master:
593 case OMPD_parallel_sections:
594 case OMPD_for_simd:
595 case OMPD_parallel_for_simd:
596 case OMPD_cancel:
597 case OMPD_cancellation_point:
598 case OMPD_ordered:
599 case OMPD_threadprivate:
600 case OMPD_allocate:
601 case OMPD_task:
602 case OMPD_simd:
603 case OMPD_sections:
604 case OMPD_section:
605 case OMPD_single:
606 case OMPD_master:
607 case OMPD_critical:
608 case OMPD_taskyield:
609 case OMPD_barrier:
610 case OMPD_taskwait:
611 case OMPD_taskgroup:
612 case OMPD_atomic:
613 case OMPD_flush:
614 case OMPD_depobj:
615 case OMPD_scan:
616 case OMPD_teams:
617 case OMPD_target_data:
618 case OMPD_target_exit_data:
619 case OMPD_target_enter_data:
620 case OMPD_distribute:
621 case OMPD_distribute_simd:
622 case OMPD_distribute_parallel_for:
623 case OMPD_distribute_parallel_for_simd:
624 case OMPD_teams_distribute:
625 case OMPD_teams_distribute_simd:
626 case OMPD_teams_distribute_parallel_for:
627 case OMPD_teams_distribute_parallel_for_simd:
628 case OMPD_target_update:
629 case OMPD_declare_simd:
630 case OMPD_declare_variant:
631 case OMPD_begin_declare_variant:
632 case OMPD_end_declare_variant:
633 case OMPD_declare_target:
634 case OMPD_end_declare_target:
635 case OMPD_declare_reduction:
636 case OMPD_declare_mapper:
637 case OMPD_taskloop:
638 case OMPD_taskloop_simd:
639 case OMPD_master_taskloop:
640 case OMPD_master_taskloop_simd:
641 case OMPD_parallel_master_taskloop:
642 case OMPD_parallel_master_taskloop_simd:
643 case OMPD_requires:
644 case OMPD_unknown:
645 default:
646 llvm_unreachable("Unexpected directive.");
647 }
648 }
649
650 return false;
651}
652
653static bool supportsSPMDExecutionMode(ASTContext &Ctx,
654 const OMPExecutableDirective &D) {
655 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
656 switch (DirectiveKind) {
657 case OMPD_target:
658 case OMPD_target_teams:
659 return hasNestedSPMDDirective(Ctx, D);
660 case OMPD_target_parallel:
661 case OMPD_target_parallel_for:
662 case OMPD_target_parallel_for_simd:
663 case OMPD_target_teams_distribute_parallel_for:
664 case OMPD_target_teams_distribute_parallel_for_simd:
665 case OMPD_target_simd:
666 case OMPD_target_teams_distribute_simd:
667 return true;
668 case OMPD_target_teams_distribute:
669 return false;
670 case OMPD_parallel:
671 case OMPD_for:
672 case OMPD_parallel_for:
673 case OMPD_parallel_master:
674 case OMPD_parallel_sections:
675 case OMPD_for_simd:
676 case OMPD_parallel_for_simd:
677 case OMPD_cancel:
678 case OMPD_cancellation_point:
679 case OMPD_ordered:
680 case OMPD_threadprivate:
681 case OMPD_allocate:
682 case OMPD_task:
683 case OMPD_simd:
684 case OMPD_sections:
685 case OMPD_section:
686 case OMPD_single:
687 case OMPD_master:
688 case OMPD_critical:
689 case OMPD_taskyield:
690 case OMPD_barrier:
691 case OMPD_taskwait:
692 case OMPD_taskgroup:
693 case OMPD_atomic:
694 case OMPD_flush:
695 case OMPD_depobj:
696 case OMPD_scan:
697 case OMPD_teams:
698 case OMPD_target_data:
699 case OMPD_target_exit_data:
700 case OMPD_target_enter_data:
701 case OMPD_distribute:
702 case OMPD_distribute_simd:
703 case OMPD_distribute_parallel_for:
704 case OMPD_distribute_parallel_for_simd:
705 case OMPD_teams_distribute:
706 case OMPD_teams_distribute_simd:
707 case OMPD_teams_distribute_parallel_for:
708 case OMPD_teams_distribute_parallel_for_simd:
709 case OMPD_target_update:
710 case OMPD_declare_simd:
711 case OMPD_declare_variant:
712 case OMPD_begin_declare_variant:
713 case OMPD_end_declare_variant:
714 case OMPD_declare_target:
715 case OMPD_end_declare_target:
716 case OMPD_declare_reduction:
717 case OMPD_declare_mapper:
718 case OMPD_taskloop:
719 case OMPD_taskloop_simd:
720 case OMPD_master_taskloop:
721 case OMPD_master_taskloop_simd:
722 case OMPD_parallel_master_taskloop:
723 case OMPD_parallel_master_taskloop_simd:
724 case OMPD_requires:
725 case OMPD_unknown:
726 default:
727 break;
728 }
729 llvm_unreachable(
730 "Unknown programming model for OpenMP directive on NVPTX target.");
731}
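// Illustrative sketch (not from the file under analysis): hypothetical user
// code that supportsSPMDExecutionMode classifies as SPMD-capable, because the
// single child of the 'target teams' region is a parallel worksharing loop.
void spmd_eligible_sketch(int n, double *a) {
#pragma omp target teams map(tofrom : a[0:n])
#pragma omp parallel for
  for (int i = 0; i < n; ++i)
    a[i] *= 2.0;
}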
732
733void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
734 StringRef ParentName,
735 llvm::Function *&OutlinedFn,
736 llvm::Constant *&OutlinedFnID,
737 bool IsOffloadEntry,
738 const RegionCodeGenTy &CodeGen) {
739 ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode, EM_NonSPMD);
740 EntryFunctionState EST;
741 WrapperFunctionsMap.clear();
742
743 // Emit target region as a standalone region.
744 class NVPTXPrePostActionTy : public PrePostActionTy {
745 CGOpenMPRuntimeGPU::EntryFunctionState &EST;
746
747 public:
748 NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST)
749 : EST(EST) {}
750 void Enter(CodeGenFunction &CGF) override {
751 auto &RT =
752 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
753 RT.emitKernelInit(CGF, EST, /* IsSPMD */ false);
754 // Skip target region initialization.
755 RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
756 }
757 void Exit(CodeGenFunction &CGF) override {
758 auto &RT =
759 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
760 RT.clearLocThreadIdInsertPt(CGF);
761 RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ false);
762 }
763 } Action(EST);
764 CodeGen.setAction(Action);
765 IsInTTDRegion = true;
766 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
767 IsOffloadEntry, CodeGen);
768 IsInTTDRegion = false;
769}
770
771void CGOpenMPRuntimeGPU::emitKernelInit(CodeGenFunction &CGF,
772 EntryFunctionState &EST, bool IsSPMD) {
773 CGBuilderTy &Bld = CGF.Builder;
774 Bld.restoreIP(OMPBuilder.createTargetInit(Bld, IsSPMD, true));
775 IsInTargetMasterThreadRegion = IsSPMD;
776 if (!IsSPMD)
777 emitGenericVarsProlog(CGF, EST.Loc);
778}
779
780void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
781 EntryFunctionState &EST,
782 bool IsSPMD) {
783 if (!IsSPMD)
784 emitGenericVarsEpilog(CGF);
785
786 CGBuilderTy &Bld = CGF.Builder;
787 OMPBuilder.createTargetDeinit(Bld, IsSPMD, true);
788}
789
790void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
791 StringRef ParentName,
792 llvm::Function *&OutlinedFn,
793 llvm::Constant *&OutlinedFnID,
794 bool IsOffloadEntry,
795 const RegionCodeGenTy &CodeGen) {
796 ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode, EM_SPMD);
797 EntryFunctionState EST;
798
799 // Emit target region as a standalone region.
800 class NVPTXPrePostActionTy : public PrePostActionTy {
801 CGOpenMPRuntimeGPU &RT;
802 CGOpenMPRuntimeGPU::EntryFunctionState &EST;
803
804 public:
805 NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
806 CGOpenMPRuntimeGPU::EntryFunctionState &EST)
807 : RT(RT), EST(EST) {}
808 void Enter(CodeGenFunction &CGF) override {
809 RT.emitKernelInit(CGF, EST, /* IsSPMD */ true);
810 // Skip target region initialization.
811 RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
812 }
813 void Exit(CodeGenFunction &CGF) override {
814 RT.clearLocThreadIdInsertPt(CGF);
815 RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ true);
816 }
817 } Action(*this, EST);
818 CodeGen.setAction(Action);
819 IsInTTDRegion = true;
820 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
821 IsOffloadEntry, CodeGen);
822 IsInTTDRegion = false;
823}
824
825// Create a unique global variable to indicate the execution mode of this target
826// region. The execution mode is either 'generic', or 'spmd' depending on the
827// target directive. This variable is picked up by the offload library to setup
828// the device appropriately before kernel launch. If the execution mode is
829// 'generic', the runtime reserves one warp for the master, otherwise, all
830// warps participate in parallel work.
831static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
832 bool Mode) {
833 auto *GVMode = new llvm::GlobalVariable(
834 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
835 llvm::GlobalValue::WeakAnyLinkage,
836 llvm::ConstantInt::get(CGM.Int8Ty, Mode ? OMP_TGT_EXEC_MODE_SPMD
837 : OMP_TGT_EXEC_MODE_GENERIC),
838 Twine(Name, "_exec_mode"));
839 CGM.addCompilerUsedGlobal(GVMode);
840}
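// Illustrative sketch (not from the file under analysis): a rough C++-level
// equivalent of the marker emitted above for an outlined kernel named "foo"
// (hypothetical name). The real emission is a weak, constant i8 global named
// "<kernel>_exec_mode", kept alive via the compiler.used list; the value 2 is
// only illustrative of OMP_TGT_EXEC_MODE_SPMD.
__attribute__((weak, used)) unsigned char foo_exec_mode = 2;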
841
842void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
843 llvm::Constant *Addr,
844 uint64_t Size, int32_t,
845 llvm::GlobalValue::LinkageTypes) {
846 // TODO: Add support for global variables on the device after declare target
847 // support.
848 llvm::Function *Fn = dyn_cast<llvm::Function>(Addr);
849 if (!Fn)
850 return;
851
852 llvm::Module &M = CGM.getModule();
853 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
854
855 // Get "nvvm.annotations" metadata node.
856 llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
857
858 llvm::Metadata *MDVals[] = {
859 llvm::ConstantAsMetadata::get(Fn), llvm::MDString::get(Ctx, "kernel"),
860 llvm::ConstantAsMetadata::get(
861 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
862 // Append metadata to nvvm.annotations.
863 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
864
865 // Add a function attribute for the kernel.
866 Fn->addFnAttr(llvm::Attribute::get(Ctx, "kernel"));
867}
868
869void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
870 const OMPExecutableDirective &D, StringRef ParentName,
871 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
872 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
873 if (!IsOffloadEntry) // Nothing to do.
874 return;
875
876 assert(!ParentName.empty() && "Invalid target region parent name!");
877
878 bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
879 if (Mode)
880 emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
881 CodeGen);
882 else
883 emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
884 CodeGen);
885
886 setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
887}
888
889namespace {
890LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
891/// Enum for accessing the reserved_2 field of the ident_t struct.
892enum ModeFlagsTy : unsigned {
893 /// Bit set to 1 when in SPMD mode.
894 KMP_IDENT_SPMD_MODE = 0x01,
895 /// Bit set to 1 when a simplified runtime is used.
896 KMP_IDENT_SIMPLE_RT_MODE = 0x02,
897 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
898};
899
900/// Special mode Undefined. Is the combination of Non-SPMD mode + SimpleRuntime.
901static const ModeFlagsTy UndefinedMode =
902 (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
903} // anonymous namespace
904
905unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
906 switch (getExecutionMode()) {
907 case EM_SPMD:
908 return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
909 case EM_NonSPMD:
910 return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
911 case EM_Unknown:
912 return UndefinedMode;
913 }
914 llvm_unreachable("Unknown flags are requested.");
915}
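// Illustrative sketch (not from the file under analysis) of the flag values
// returned above. LLVM's bitmask-enum helpers clamp operator~ to the enum's
// value mask (0x03 for this enum), modeled here with an explicit mask; the
// constant names are local stand-ins for the enumerators.
constexpr unsigned SpmdBit = 0x01, SimpleRTBit = 0x02, EnumMask = 0x03;
constexpr unsigned negateMasked(unsigned V) { return ~V & EnumMask; }
static_assert((SpmdBit & negateMasked(SimpleRTBit)) == 0x01, "EM_SPMD");
static_assert((negateMasked(SpmdBit) & negateMasked(SimpleRTBit)) == 0x00, "EM_NonSPMD");
static_assert((negateMasked(SpmdBit) & SimpleRTBit) == 0x02, "EM_Unknown (UndefinedMode)");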
916
917CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
918 : CGOpenMPRuntime(CGM, "_", "$") {
919 if (!CGM.getLangOpts().OpenMPIsDevice)
920 llvm_unreachable("OpenMP can only handle device code.");
921
922 llvm::OpenMPIRBuilder &OMPBuilder = getOMPBuilder();
923 if (CGM.getLangOpts().NoGPULib || CGM.getLangOpts().OMPHostIRFile.empty())
924 return;
925
926 OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTargetDebug,
927 "__omp_rtl_debug_kind");
928 OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTeamSubscription,
929 "__omp_rtl_assume_teams_oversubscription");
930 OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPThreadSubscription,
931 "__omp_rtl_assume_threads_oversubscription");
932 OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPNoThreadState,
933 "__omp_rtl_assume_no_thread_state");
934 OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPNoNestedParallelism,
935 "__omp_rtl_assume_no_nested_parallelism");
936}
937
938void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
939 ProcBindKind ProcBind,
940 SourceLocation Loc) {
941 // Do nothing in case of SPMD mode and L0 parallel.
942 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
943 return;
944
945 CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
946}
947
948void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
949 llvm::Value *NumThreads,
950 SourceLocation Loc) {
951 // Nothing to do.
952}
953
954void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
955 const Expr *NumTeams,
956 const Expr *ThreadLimit,
957 SourceLocation Loc) {}
958
959llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
960 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
961 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
962 // Emit target region as a standalone region.
963 class NVPTXPrePostActionTy : public PrePostActionTy {
964 bool &IsInParallelRegion;
965 bool PrevIsInParallelRegion;
966
967 public:
968 NVPTXPrePostActionTy(bool &IsInParallelRegion)
969 : IsInParallelRegion(IsInParallelRegion) {}
970 void Enter(CodeGenFunction &CGF) override {
971 PrevIsInParallelRegion = IsInParallelRegion;
972 IsInParallelRegion = true;
973 }
974 void Exit(CodeGenFunction &CGF) override {
975 IsInParallelRegion = PrevIsInParallelRegion;
976 }
977 } Action(IsInParallelRegion);
978 CodeGen.setAction(Action);
979 bool PrevIsInTTDRegion = IsInTTDRegion;
980 IsInTTDRegion = false;
981 bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
982 IsInTargetMasterThreadRegion = false;
983 auto *OutlinedFun =
984 cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
985 D, ThreadIDVar, InnermostKind, CodeGen));
986 IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
987 IsInTTDRegion = PrevIsInTTDRegion;
988 if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
989 !IsInParallelRegion) {
990 llvm::Function *WrapperFun =
991 createParallelDataSharingWrapper(OutlinedFun, D);
992 WrapperFunctionsMap[OutlinedFun] = WrapperFun;
993 }
994
995 return OutlinedFun;
996}
997
998/// Get list of lastprivate variables from the teams distribute ... or
999/// teams {distribute ...} directives.
1000static void
1001getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1002 llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1003 assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1004 "expected teams directive.");
1005 const OMPExecutableDirective *Dir = &D;
1006 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
1007 if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
1008 Ctx,
1009 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
1010 /*IgnoreCaptured=*/true))) {
1011 Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
1012 if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
1013 Dir = nullptr;
1014 }
1015 }
1016 if (!Dir)
1017 return;
1018 for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
1019 for (const Expr *E : C->getVarRefs())
1020 Vars.push_back(getPrivateItem(E));
1021 }
1022}
1023
1024/// Get list of reduction variables from the teams ... directives.
1025static void
1026getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1027 llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1028 assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1029 "expected teams directive.");
1030 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1031 for (const Expr *E : C->privates())
1032 Vars.push_back(getPrivateItem(E));
1033 }
1034}
1035
1036llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
1037 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1038 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1039 SourceLocation Loc = D.getBeginLoc();
1040
1041 const RecordDecl *GlobalizedRD = nullptr;
1042 llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
1043 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
1044 unsigned WarpSize = CGM.getTarget().getGridValue().GV_Warp_Size;
1045 // Globalize team reductions variable unconditionally in all modes.
1046 if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1047 getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
1048 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
1049 getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
1050 if (!LastPrivatesReductions.empty()) {
1051 GlobalizedRD = ::buildRecordForGlobalizedVars(
1052 CGM.getContext(), llvm::None, LastPrivatesReductions,
1053 MappedDeclsFields, WarpSize);
1054 }
1055 } else if (!LastPrivatesReductions.empty()) {
1056 assert(!TeamAndReductions.first &&
1057 "Previous team declaration is not expected.");
1058 TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
1059 std::swap(TeamAndReductions.second, LastPrivatesReductions);
1060 }
1061
1062 // Emit target region as a standalone region.
1063 class NVPTXPrePostActionTy : public PrePostActionTy {
1064 SourceLocation &Loc;
1065 const RecordDecl *GlobalizedRD;
1066 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1067 &MappedDeclsFields;
1068
1069 public:
1070 NVPTXPrePostActionTy(
1071 SourceLocation &Loc, const RecordDecl *GlobalizedRD,
1072 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1073 &MappedDeclsFields)
1074 : Loc(Loc), GlobalizedRD(GlobalizedRD),
1075 MappedDeclsFields(MappedDeclsFields) {}
1076 void Enter(CodeGenFunction &CGF) override {
1077 auto &Rt =
1078 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1079 if (GlobalizedRD) {
1080 auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
1081 I->getSecond().MappedParams =
1082 std::make_unique<CodeGenFunction::OMPMapVars>();
1083 DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
1084 for (const auto &Pair : MappedDeclsFields) {
1085 assert(Pair.getFirst()->isCanonicalDecl() &&
1086 "Expected canonical declaration");
1087 Data.insert(std::make_pair(Pair.getFirst(), MappedVarData()));
1088 }
1089 }
1090 Rt.emitGenericVarsProlog(CGF, Loc);
1091 }
1092 void Exit(CodeGenFunction &CGF) override {
1093 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
1094 .emitGenericVarsEpilog(CGF);
1095 }
1096 } Action(Loc, GlobalizedRD, MappedDeclsFields);
1097 CodeGen.setAction(Action);
1098 llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
1099 D, ThreadIDVar, InnermostKind, CodeGen);
1100
1101 return OutlinedFun;
1102}
1103
1104void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
1105 SourceLocation Loc,
1106 bool WithSPMDCheck) {
1107 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1108 getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1109 return;
1110
1111 CGBuilderTy &Bld = CGF.Builder;
1112
1113 const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1114 if (I == FunctionGlobalizedDecls.end())
1115 return;
1116
1117 for (auto &Rec : I->getSecond().LocalVarData) {
1118 const auto *VD = cast<VarDecl>(Rec.first);
1119 bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
1120 QualType VarTy = VD->getType();
1121
1122 // Get the local allocation of a firstprivate variable before sharing
1123 llvm::Value *ParValue;
1124 if (EscapedParam) {
1125 LValue ParLVal =
1126 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
1127 ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
1128 }
1129
1130 // Allocate space for the variable to be globalized
1131 llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
1132 llvm::CallBase *VoidPtr =
1133 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1134 CGM.getModule(), OMPRTL___kmpc_alloc_shared),
1135 AllocArgs, VD->getName());
1136 // FIXME: We should use the variables actual alignment as an argument.
1137 VoidPtr->addRetAttr(llvm::Attribute::get(
1138 CGM.getLLVMContext(), llvm::Attribute::Alignment,
1139 CGM.getContext().getTargetInfo().getNewAlign() / 8));
1140
1141 // Cast the void pointer and get the address of the globalized variable.
1142 llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
1143 llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1144 VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
1145 LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy);
1146 Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
1147 Rec.second.GlobalizedVal = VoidPtr;
1148
1149 // Assign the local allocation to the newly globalized location.
1150 if (EscapedParam) {
1151 CGF.EmitStoreOfScalar(ParValue, VarAddr);
1152 I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF));
1153 }
1154 if (auto *DI = CGF.getDebugInfo())
1155 VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
1156 }
1157 for (const auto *VD : I->getSecond().EscapedVariableLengthDecls) {
1158 // Use actual memory size of the VLA object including the padding
1159 // for alignment purposes.
1160 llvm::Value *Size = CGF.getTypeSize(VD->getType());
1161 CharUnits Align = CGM.getContext().getDeclAlign(VD);
1162 Size = Bld.CreateNUWAdd(
1163 Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
1164 llvm::Value *AlignVal =
1165 llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
1166
1167 Size = Bld.CreateUDiv(Size, AlignVal);
1168 Size = Bld.CreateNUWMul(Size, AlignVal);
1169
1170 // Allocate space for this VLA object to be globalized.
1171 llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
1172 llvm::CallBase *VoidPtr =
1173 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1174 CGM.getModule(), OMPRTL___kmpc_alloc_shared),
1175 AllocArgs, VD->getName());
1176 VoidPtr->addRetAttr(
1177 llvm::Attribute::get(CGM.getLLVMContext(), llvm::Attribute::Alignment,
1178 CGM.getContext().getTargetInfo().getNewAlign()));
1179
1180 I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(
1181 std::pair<llvm::Value *, llvm::Value *>(
1182 {VoidPtr, CGF.getTypeSize(VD->getType())}));
1183 LValue Base = CGF.MakeAddrLValue(VoidPtr, VD->getType(),
1184 CGM.getContext().getDeclAlign(VD),
1185 AlignmentSource::Decl);
1186 I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
1187 Base.getAddress(CGF));
1188 }
1189 I->getSecond().MappedParams->apply(CGF);
1190}
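// Illustrative sketch (not from the file under analysis): the VLA size
// rounding performed above, which pads the byte size up to the declaration's
// alignment before the __kmpc_alloc_shared call. For example, a 10-byte
// object with 8-byte alignment is globalized as 16 bytes.
constexpr unsigned long roundUpToAlign(unsigned long Size, unsigned long Align) {
  return (Size + Align - 1) / Align * Align;
}
static_assert(roundUpToAlign(10, 8) == 16, "padded VLA size");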
1191
1192void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
1193 bool WithSPMDCheck) {
1194 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1195 getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1196 return;
1197
1198 const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1199 if (I != FunctionGlobalizedDecls.end()) {
1200 // Deallocate the memory for each globalized VLA object
1201 for (auto AddrSizePair :
1202 llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
1203 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1204 CGM.getModule(), OMPRTL___kmpc_free_shared),
1205 {AddrSizePair.first, AddrSizePair.second});
1206 }
1207 // Deallocate the memory for each globalized value
1208 for (auto &Rec : llvm::reverse(I->getSecond().LocalVarData)) {
1209 const auto *VD = cast<VarDecl>(Rec.first);
1210 I->getSecond().MappedParams->restore(CGF);
1211
1212 llvm::Value *FreeArgs[] = {Rec.second.GlobalizedVal,
1213 CGF.getTypeSize(VD->getType())};
1214 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1215 CGM.getModule(), OMPRTL___kmpc_free_shared),
1216 FreeArgs);
1217 }
1218 }
1219}
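// Illustrative sketch (not from the file under analysis): for one hypothetical
// escaped 'int x' in generic mode, the prolog/epilog pair above amounts to a
// matched __kmpc_alloc_shared / __kmpc_free_shared bracket around the region.
// Prototypes are paraphrased from the call sites, not quoted from the device
// runtime headers.
extern "C" void *__kmpc_alloc_shared(unsigned long Size);
extern "C" void __kmpc_free_shared(void *Ptr, unsigned long Size);
inline void globalized_var_sketch() {
  void *Glob = __kmpc_alloc_shared(sizeof(int)); // prolog: globalize the storage
  int *XOnStack = static_cast<int *>(Glob);      // uses of 'x' are redirected here
  *XOnStack = 0;                                 // region body works on the shared copy
  __kmpc_free_shared(Glob, sizeof(int));         // epilog: freed in reverse order
}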
1220
1221void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
1222 const OMPExecutableDirective &D,
1223 SourceLocation Loc,
1224 llvm::Function *OutlinedFn,
1225 ArrayRef<llvm::Value *> CapturedVars) {
1226 if (!CGF.HaveInsertPoint())
1227 return;
1228
1229 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
1230 /*Name=*/".zero.addr");
1231 CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
1232 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
1233 OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
1234 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
1235 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
1236 emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
1237}
1238
1239void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
1240 SourceLocation Loc,
1241 llvm::Function *OutlinedFn,
1242 ArrayRef<llvm::Value *> CapturedVars,
1243 const Expr *IfCond,
1244 llvm::Value *NumThreads) {
1245 if (!CGF.HaveInsertPoint())
1246 return;
1247
1248 auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars, IfCond,
1249 NumThreads](CodeGenFunction &CGF,
1250 PrePostActionTy &Action) {
1251 CGBuilderTy &Bld = CGF.Builder;
1252 llvm::Value *NumThreadsVal = NumThreads;
1253 llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
1254 llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
1255 if (WFn)
1256 ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
1257 llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, CGM.Int8PtrTy);
1258
1259 // Create a private scope that will globalize the arguments
1260 // passed from the outside of the target region.
1261 // TODO: Is that needed?
1262 CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
1263
1264 Address CapturedVarsAddrs = CGF.CreateDefaultAlignTempAlloca(
1265 llvm::ArrayType::get(CGM.VoidPtrTy, CapturedVars.size()),
1266 "captured_vars_addrs");
1267 // There's something to share.
1268 if (!CapturedVars.empty()) {
1269 // Prepare for parallel region. Indicate the outlined function.
1270 ASTContext &Ctx = CGF.getContext();
1271 unsigned Idx = 0;
1272 for (llvm::Value *V : CapturedVars) {
1273 Address Dst = Bld.CreateConstArrayGEP(CapturedVarsAddrs, Idx);
1274 llvm::Value *PtrV;
1275 if (V->getType()->isIntegerTy())
1276 PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
1277 else
1278 PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
1279 CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
1280 Ctx.getPointerType(Ctx.VoidPtrTy));
1281 ++Idx;
1282 }
1283 }
1284
1285 llvm::Value *IfCondVal = nullptr;
1286 if (IfCond)
1287 IfCondVal = Bld.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.Int32Ty,
1288 /* isSigned */ false);
1289 else
1290 IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1);
1291
1292 if (!NumThreadsVal)
1293 NumThreadsVal = llvm::ConstantInt::get(CGF.Int32Ty, -1);
1294 else
1295 NumThreadsVal = Bld.CreateZExtOrTrunc(NumThreadsVal, CGF.Int32Ty),
1296
1297 assert(IfCondVal && "Expected a value");
1298 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
1299 llvm::Value *Args[] = {
1300 RTLoc,
1301 getThreadID(CGF, Loc),
1302 IfCondVal,
1303 NumThreadsVal,
1304 llvm::ConstantInt::get(CGF.Int32Ty, -1),
1305 FnPtr,
1306 ID,
1307 Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(),
1308 CGF.VoidPtrPtrTy),
1309 llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
1310 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1311 CGM.getModule(), OMPRTL___kmpc_parallel_51),
1312 Args);
1313 };
1314
1315 RegionCodeGenTy RCG(ParallelGen);
1316 RCG(CGF);
1317}
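
// Illustrative sketch (editor-added, not part of CGOpenMPRuntimeGPU.cpp):
// roughly the call sequence the ParallelGen lambda above emits, written as
// plain C++. The __kmpc_parallel_51 prototype is an assumption paraphrased
// from the OpenMP device runtime and is declared here only so the sketch
// compiles; the DeviceRTL headers are the authoritative source.
#include <cstddef>
#include <cstdint>

struct ident_t; // opaque source-location descriptor

extern "C" void __kmpc_parallel_51(ident_t *Loc, std::int32_t GTid,
                                   std::int32_t IfExpr, std::int32_t NumThreads,
                                   std::int32_t ProcBind, void *Fn,
                                   void *WrapperFn, void **Args,
                                   std::size_t NArgs);

static void parallelCallSketch(ident_t *Loc, std::int32_t GTid,
                               void *OutlinedFn, void *WrapperFn, void *A,
                               void *B) {
  void *CapturedVarsAddrs[2] = {A, B}; // mirrors "captured_vars_addrs"
  __kmpc_parallel_51(Loc, GTid, /*if_expr=*/1, /*num_threads=*/-1,
                     /*proc_bind=*/-1, OutlinedFn, WrapperFn,
                     CapturedVarsAddrs, /*nargs=*/2);
}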
1318
1319void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
1320 // Always emit simple barriers!
1321 if (!CGF.HaveInsertPoint())
1322 return;
1323 // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
1324 // This function does not use parameters, so we can emit just default values.
1325 llvm::Value *Args[] = {
1326 llvm::ConstantPointerNull::get(
1327 cast<llvm::PointerType>(getIdentTyPointerTy())),
1328 llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
1329 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1330 CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd),
1331 Args);
1332}
1333
1334void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
1335 SourceLocation Loc,
1336 OpenMPDirectiveKind Kind, bool,
1337 bool) {
1338 // Always emit simple barriers!
1339 if (!CGF.HaveInsertPoint())
1340 return;
1341 // Build call __kmpc_barrier(loc, thread_id);
1342 unsigned Flags = getDefaultFlagsForBarriers(Kind);
1343 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
1344 getThreadID(CGF, Loc)};
1345
1346 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1347 CGM.getModule(), OMPRTL___kmpc_barrier),
1348 Args);
1349}
1350
1351void CGOpenMPRuntimeGPU::emitCriticalRegion(
1352 CodeGenFunction &CGF, StringRef CriticalName,
1353 const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
1354 const Expr *Hint) {
1355 llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
1356 llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
1357 llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
1358 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
1359 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
1360
1361 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1362
1363 // Get the mask of active threads in the warp.
1364 llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1365 CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask));
1366 // Fetch team-local id of the thread.
1367 llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
1368
1369 // Get the width of the team.
1370 llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF);
1371
1372 // Initialize the counter variable for the loop.
1373 QualType Int32Ty =
1374 CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
1375 Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
1376 LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
1377 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
1378 /*isInit=*/true);
1379
1380 // Block checks if loop counter exceeds upper bound.
1381 CGF.EmitBlock(LoopBB);
1382 llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
1383 llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
1384 CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
1385
1386 // Block tests which single thread should execute region, and which threads
1387 // should go straight to synchronisation point.
1388 CGF.EmitBlock(TestBB);
1389 CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
1390 llvm::Value *CmpThreadToCounter =
1391 CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
1392 CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
1393
1394 // Block emits the body of the critical region.
1395 CGF.EmitBlock(BodyBB);
1396
1397 // Output the critical statement.
1398 CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
1399 Hint);
1400
1401 // After the body surrounded by the critical region, the single executing
1402 // thread will jump to the synchronisation point.
1403 // Block waits for all threads in current team to finish then increments the
1404 // counter variable and returns to the loop.
1405 CGF.EmitBlock(SyncBB);
1406 // Reconverge active threads in the warp.
1407 (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1408 CGM.getModule(), OMPRTL___kmpc_syncwarp),
1409 Mask);
1410
1411 llvm::Value *IncCounterVal =
1412 CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
1413 CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
1414 CGF.EmitBranch(LoopBB);
1415
1416 // Block that is reached when all threads in the team complete the region.
1417 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1418}
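
// Illustrative sketch (editor-added, not part of CGOpenMPRuntimeGPU.cpp): the
// serialization pattern the blocks above implement, written as plain C++.
// TeamWidth, ThreadId and the warp reconvergence call are stand-ins for the
// runtime queries and the __kmpc_syncwarp call actually emitted.
static void criticalRegionSketch(int TeamWidth, int ThreadId, void (*Body)()) {
  for (int Counter = 0; Counter < TeamWidth; ++Counter) {
    if (ThreadId == Counter)
      Body(); // exactly one thread of the team runs the critical body per trip
    // __kmpc_syncwarp(Mask) would reconverge the warp's active threads here.
  }
}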
1419
1420/// Cast value to the specified type.
1421static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
1422 QualType ValTy, QualType CastTy,
1423 SourceLocation Loc) {
1424 assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
1425 "Cast type must sized.");
1426 assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
1427 "Val type must sized.");
1428 llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
1429 if (ValTy == CastTy)
1430 return Val;
1431 if (CGF.getContext().getTypeSizeInChars(ValTy) ==
1432 CGF.getContext().getTypeSizeInChars(CastTy))
1433 return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
1434 if (CastTy->isIntegerType() && ValTy->isIntegerType())
1435 return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
1436 CastTy->hasSignedIntegerRepresentation());
1437 Address CastItem = CGF.CreateMemTemp(CastTy);
1438 Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1439 CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()),
1440 Val->getType());
1441 CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
1442 LValueBaseInfo(AlignmentSource::Type),
1443 TBAAAccessInfo());
1444 return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
1445 LValueBaseInfo(AlignmentSource::Type),
1446 TBAAAccessInfo());
1447}
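
// Illustrative sketch (editor-added, not part of CGOpenMPRuntimeGPU.cpp): the
// fallback path above round-trips the value through a stack temporary, storing
// it with its own type and reloading it as the cast type. In portable C++
// terms that resembles the "type pun through memory" below; the real code uses
// a direct bitcast when the sizes match, but the memory round trip is the
// general form.
#include <cstdint>
#include <cstring>

static std::int32_t bitsOfFloat(float F) {
  std::int32_t Bits;
  static_assert(sizeof(Bits) == sizeof(F), "same-size round trip");
  std::memcpy(&Bits, &F, sizeof(Bits)); // store as float, reload as int32
  return Bits;
}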
1448
1449/// This function creates calls to one of two shuffle functions to copy
1450/// variables between lanes in a warp.
1451static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
1452 llvm::Value *Elem,
1453 QualType ElemType,
1454 llvm::Value *Offset,
1455 SourceLocation Loc) {
1456 CodeGenModule &CGM = CGF.CGM;
1457 CGBuilderTy &Bld = CGF.Builder;
1458 CGOpenMPRuntimeGPU &RT =
1459 *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
1460 llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder();
1461
1462 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1463 assert(Size.getQuantity() <= 8 &&
1464 "Unsupported bitwidth in shuffle instruction.");
1465
1466 RuntimeFunction ShuffleFn = Size.getQuantity() <= 4
1467 ? OMPRTL___kmpc_shuffle_int32
1468 : OMPRTL___kmpc_shuffle_int64;
1469
1470 // Cast all types to 32- or 64-bit values before calling shuffle routines.
1471 QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
1472 Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
1473 llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
1474 llvm::Value *WarpSize =
1475 Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
1476
1477 llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
1478 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn),
1479 {ElemCast, Offset, WarpSize});
1480
1481 return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
1482}
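
// Illustrative sketch (editor-added, not part of CGOpenMPRuntimeGPU.cpp): the
// helper above lowers to a single device runtime call. The prototype below is
// an assumption paraphrased from the DeviceRTL and is declared only so the
// sketch compiles.
#include <cstdint>

extern "C" std::int32_t __kmpc_shuffle_int32(std::int32_t Val,
                                             std::int16_t Delta,
                                             std::int16_t Width);

static std::int32_t shuffleSketch(std::int32_t MyVal, std::int16_t Offset,
                                  std::int16_t WarpSize) {
  // Fetch the value held by the lane `Offset` positions away in the warp.
  return __kmpc_shuffle_int32(MyVal, Offset, WarpSize);
}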
1483
1484static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
1485 Address DestAddr, QualType ElemType,
1486 llvm::Value *Offset, SourceLocation Loc) {
1487 CGBuilderTy &Bld = CGF.Builder;
1488
1489 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1490 // Create the loop over the big sized data.
1491 // ptr = (void*)Elem;
1492 // ptrEnd = (void*) Elem + 1;
1493 // Step = 8;
1494 // while (ptr + Step < ptrEnd)
1495 // shuffle((int64_t)*ptr);
1496 // Step = 4;
1497 // while (ptr + Step < ptrEnd)
1498 // shuffle((int32_t)*ptr);
1499 // ...
1500 Address ElemPtr = DestAddr;
1501 Address Ptr = SrcAddr;
1502 Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
1503 Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy, CGF.Int8Ty);
1504 for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
1505 if (Size < CharUnits::fromQuantity(IntSize))
1506 continue;
1507 QualType IntType = CGF.getContext().getIntTypeForBitwidth(
1508 CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
1509 /*Signed=*/1);
1510 llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
1511 Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo(),
1512 IntTy);
1513 ElemPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1514 ElemPtr, IntTy->getPointerTo(), IntTy);
1515 if (Size.getQuantity() / IntSize > 1) {
1516 llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
1517 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
1518 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
1519 llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
1520 CGF.EmitBlock(PreCondBB);
1521 llvm::PHINode *PhiSrc =
1522 Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
1523 PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
1524 llvm::PHINode *PhiDest =
1525 Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
1526 PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
1527 Ptr = Address(PhiSrc, Ptr.getElementType(), Ptr.getAlignment());
1528 ElemPtr =
1529 Address(PhiDest, ElemPtr.getElementType(), ElemPtr.getAlignment());
1530 llvm::Value *PtrDiff = Bld.CreatePtrDiff(
1531 CGF.Int8Ty, PtrEnd.getPointer(),
1532 Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr.getPointer(),
1533 CGF.VoidPtrTy));
1534 Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
1535 ThenBB, ExitBB);
1536 CGF.EmitBlock(ThenBB);
1537 llvm::Value *Res = createRuntimeShuffleFunction(
1538 CGF,
1539 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
1540 LValueBaseInfo(AlignmentSource::Type),
1541 TBAAAccessInfo()),
1542 IntType, Offset, Loc);
1543 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
1544 LValueBaseInfo(AlignmentSource::Type),
1545 TBAAAccessInfo());
1546 Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
1547 Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
1548 PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
1549 PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
1550 CGF.EmitBranch(PreCondBB);
1551 CGF.EmitBlock(ExitBB);
1552 } else {
1553 llvm::Value *Res = createRuntimeShuffleFunction(
1554 CGF,
1555 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
1556 LValueBaseInfo(AlignmentSource::Type),
1557 TBAAAccessInfo()),
1558 IntType, Offset, Loc);
1559 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
1560 LValueBaseInfo(AlignmentSource::Type),
1561 TBAAAccessInfo());
1562 Ptr = Bld.CreateConstGEP(Ptr, 1);
1563 ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
1564 }
1565 Size = Size % IntSize;
1566 }
1567}
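
// Illustrative sketch (editor-added, not part of CGOpenMPRuntimeGPU.cpp): the
// size decomposition used by shuffleAndStore above, with the per-chunk shuffle
// replaced by a plain copy so the example is self-contained. A 13-byte element
// is moved as one 8-byte, one 4-byte and one 1-byte chunk, matching the
// IntSize = 8, 4, 2, 1 loop in the real code.
#include <cstddef>
#include <cstring>

static void copyInPowerOfTwoChunks(void *Dst, const void *Src,
                                   std::size_t Size) {
  auto *D = static_cast<unsigned char *>(Dst);
  const auto *S = static_cast<const unsigned char *>(Src);
  for (std::size_t Chunk = 8; Chunk >= 1; Chunk /= 2) {
    while (Size >= Chunk) { // the real code emits either a loop or one copy
      std::memcpy(D, S, Chunk);
      D += Chunk;
      S += Chunk;
      Size -= Chunk;
    }
  }
}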
1568
1569namespace {
1570enum CopyAction : unsigned {
1571 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
1572 // the warp using shuffle instructions.
1573 RemoteLaneToThread,
1574 // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
1575 ThreadCopy,
1576 // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
1577 ThreadToScratchpad,
1578 // ScratchpadToThread: Copy from a scratchpad array in global memory
1579 // containing team-reduced data to a thread's stack.
1580 ScratchpadToThread,
1581};
1582} // namespace
1583
1584struct CopyOptionsTy {
1585 llvm::Value *RemoteLaneOffset;
1586 llvm::Value *ScratchpadIndex;
1587 llvm::Value *ScratchpadWidth;
1588};
1589
1590/// Emit instructions to copy a Reduce list, which contains partially
1591/// aggregated values, in the specified direction.
1592static void emitReductionListCopy(
1593 CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
1594 ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
1595 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
1596
1597 CodeGenModule &CGM = CGF.CGM;
1598 ASTContext &C = CGM.getContext();
1599 CGBuilderTy &Bld = CGF.Builder;
1600
1601 llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
1602 llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
1603 llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
1604
1605 // Iterates, element-by-element, through the source Reduce list and
1606 // makes a copy.
1607 unsigned Idx = 0;
1608 unsigned Size = Privates.size();
1609 for (const Expr *Private : Privates) {
1610 Address SrcElementAddr = Address::invalid();
1611 Address DestElementAddr = Address::invalid();
1612 Address DestElementPtrAddr = Address::invalid();
1613 // Should we shuffle in an element from a remote lane?
1614 bool ShuffleInElement = false;
1615 // Set to true to update the pointer in the dest Reduce list to a
1616 // newly created element.
1617 bool UpdateDestListPtr = false;
1618 // Increment the src or dest pointer to the scratchpad, for each
1619 // new element.
1620 bool IncrScratchpadSrc = false;
1621 bool IncrScratchpadDest = false;
1622 QualType PrivatePtrType = C.getPointerType(Private->getType());
1623 llvm::Type *PrivateLlvmPtrType = CGF.ConvertType(PrivatePtrType);
1624
1625 switch (Action) {
1626 case RemoteLaneToThread: {
1627 // Step 1.1: Get the address for the src element in the Reduce list.
1628 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1629 SrcElementAddr =
1630 CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
1631 SrcElementPtrAddr, PrivateLlvmPtrType),
1632 PrivatePtrType->castAs<PointerType>());
1633
1634 // Step 1.2: Create a temporary to store the element in the destination
1635 // Reduce list.
1636 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1637 DestElementAddr =
1638 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
1639 ShuffleInElement = true;
1640 UpdateDestListPtr = true;
1641 break;
1642 }
1643 case ThreadCopy: {
1644 // Step 1.1: Get the address for the src element in the Reduce list.
1645 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1646 SrcElementAddr =
1647 CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
1648 SrcElementPtrAddr, PrivateLlvmPtrType),
1649 PrivatePtrType->castAs<PointerType>());
1650
1651 // Step 1.2: Get the address for dest element. The destination
1652 // element has already been created on the thread's stack.
1653 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1654 DestElementAddr =
1655 CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
1656 DestElementPtrAddr, PrivateLlvmPtrType),
1657 PrivatePtrType->castAs<PointerType>());
1658 break;
1659 }
1660 case ThreadToScratchpad: {
1661 // Step 1.1: Get the address for the src element in the Reduce list.
1662 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1663 SrcElementAddr =
1664 CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
1665 SrcElementPtrAddr, PrivateLlvmPtrType),
1666 PrivatePtrType->castAs<PointerType>());
1667
1668 // Step 1.2: Get the address for dest element:
1669 // address = base + index * ElementSizeInChars.
1670 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1671 llvm::Value *CurrentOffset =
1672 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
1673 llvm::Value *ScratchPadElemAbsolutePtrVal =
1674 Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
1675 ScratchPadElemAbsolutePtrVal =
1676 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
1677 DestElementAddr = Address(ScratchPadElemAbsolutePtrVal, CGF.Int8Ty,
1678 C.getTypeAlignInChars(Private->getType()));
1679 IncrScratchpadDest = true;
1680 break;
1681 }
1682 case ScratchpadToThread: {
1683 // Step 1.1: Get the address for the src element in the scratchpad.
1684 // address = base + index * ElementSizeInChars.
1685 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1686 llvm::Value *CurrentOffset =
1687 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
1688 llvm::Value *ScratchPadElemAbsolutePtrVal =
1689 Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
1690 ScratchPadElemAbsolutePtrVal =
1691 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
1692 SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal, CGF.Int8Ty,
1693 C.getTypeAlignInChars(Private->getType()));
1694 IncrScratchpadSrc = true;
1695
1696 // Step 1.2: Create a temporary to store the element in the destination
1697 // Reduce list.
1698 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1699 DestElementAddr =
1700 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
1701 UpdateDestListPtr = true;
1702 break;
1703 }
1704 }
1705
1706 // Regardless of src and dest of copy, we emit the load of src
1707 // element as this is required in all directions
1708 SrcElementAddr = Bld.CreateElementBitCast(
1709 SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
1710 DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
1711 SrcElementAddr.getElementType());
1712
1713 // Now that all active lanes have read the element in the
1714 // Reduce list, shuffle over the value from the remote lane.
1715 if (ShuffleInElement) {
1716 shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
1717 RemoteLaneOffset, Private->getExprLoc());
1718 } else {
1719 switch (CGF.getEvaluationKind(Private->getType())) {
1720 case TEK_Scalar: {
1721 llvm::Value *Elem = CGF.EmitLoadOfScalar(
1722 SrcElementAddr, /*Volatile=*/false, Private->getType(),
1723 Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
1724 TBAAAccessInfo());
1725 // Store the source element value to the dest element address.
1726 CGF.EmitStoreOfScalar(
1727 Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
1728 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
1729 break;
1730 }
1731 case TEK_Complex: {
1732 CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
1733 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
1734 Private->getExprLoc());
1735 CGF.EmitStoreOfComplex(
1736 Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
1737 /*isInit=*/false);
1738 break;
1739 }
1740 case TEK_Aggregate:
1741 CGF.EmitAggregateCopy(
1742 CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
1743 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
1744 Private->getType(), AggValueSlot::DoesNotOverlap);
1745 break;
1746 }
1747 }
1748
1749 // Step 3.1: Modify reference in dest Reduce list as needed.
1750 // Modifying the reference in Reduce list to point to the newly
1751 // created element. The element is live in the current function
1752 // scope and that of functions it invokes (i.e., reduce_function).
1753 // RemoteReduceData[i] = (void*)&RemoteElem
1754 if (UpdateDestListPtr) {
1755 CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
1756 DestElementAddr.getPointer(), CGF.VoidPtrTy),
1757 DestElementPtrAddr, /*Volatile=*/false,
1758 C.VoidPtrTy);
1759 }
1760
1761 // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
1762 // address of the next element in scratchpad memory, unless we're currently
1763 // processing the last one. Memory alignment is also taken care of here.
1764 if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
1765 // FIXME: This code doesn't make any sense, it's trying to perform
1766 // integer arithmetic on pointers.
1767 llvm::Value *ScratchpadBasePtr =
1768 IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
1769 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1770 ScratchpadBasePtr = Bld.CreateNUWAdd(
1771 ScratchpadBasePtr,
1772 Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
1773
1774 // Take care of global memory alignment for performance
1775 ScratchpadBasePtr = Bld.CreateNUWSub(
1776 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
1777 ScratchpadBasePtr = Bld.CreateUDiv(
1778 ScratchpadBasePtr,
1779 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
1780 ScratchpadBasePtr = Bld.CreateNUWAdd(
1781 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
1782 ScratchpadBasePtr = Bld.CreateNUWMul(
1783 ScratchpadBasePtr,
1784 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
1785
1786 if (IncrScratchpadDest)
1787 DestBase =
1788 Address(ScratchpadBasePtr, CGF.VoidPtrTy, CGF.getPointerAlign());
1789 else /* IncrScratchpadSrc = true */
1790 SrcBase =
1791 Address(ScratchpadBasePtr, CGF.VoidPtrTy, CGF.getPointerAlign());
1792 }
1793
1794 ++Idx;
1795 }
1796}
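
// Illustrative sketch (editor-added, not part of CGOpenMPRuntimeGPU.cpp): the
// "Reduce list" manipulated above is simply an array of void* where entry i
// points at the i-th private reduction variable. RemoteLaneToThread and
// ScratchpadToThread additionally repoint an entry to a freshly created
// temporary (the UpdateDestListPtr case).
static void buildReduceListSketch(void *ReduceList[], int *A, double *B) {
  ReduceList[0] = A; // element 0: pointer to the private copy of 'a'
  ReduceList[1] = B; // element 1: pointer to the private copy of 'b'
}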
1797
1798/// This function emits a helper that gathers Reduce lists from the first
1799/// lane of every active warp to lanes in the first warp.
1800///
1801/// void inter_warp_copy_func(void* reduce_data, num_warps)
1802/// shared smem[warp_size];
1803/// For all data entries D in reduce_data:
1804/// sync
1805/// If (I am the first lane in each warp)
1806/// Copy my local D to smem[warp_id]
1807/// sync
1808/// if (I am the first warp)
1809/// Copy smem[thread_id] to my local D
1810static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
1811 ArrayRef<const Expr *> Privates,
1812 QualType ReductionArrayTy,
1813 SourceLocation Loc) {
1814 ASTContext &C = CGM.getContext();
1815 llvm::Module &M = CGM.getModule();
1816
1817 // ReduceList: thread local Reduce list.
1818 // At the stage of the computation when this function is called, partially
1819 // aggregated values reside in the first lane of every active warp.
1820 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
1821 C.VoidPtrTy, ImplicitParamDecl::Other);
1822 // NumWarps: number of warps active in the parallel region. This could
1823 // be smaller than 32 (max warps in a CTA) for partial block reduction.
1824 ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
1825 C.getIntTypeForBitwidth(32, /* Signed */ true),
1826 ImplicitParamDecl::Other);
1827 FunctionArgList Args;
1828 Args.push_back(&ReduceListArg);
1829 Args.push_back(&NumWarpsArg);
1830
1831 const CGFunctionInfo &CGFI =
1832 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1833 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
1834 llvm::GlobalValue::InternalLinkage,
1835 "_omp_reduction_inter_warp_copy_func", &M);
1836 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
1837 Fn->setDoesNotRecurse();
1838 CodeGenFunction CGF(CGM);
1839 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
1840
1841 CGBuilderTy &Bld = CGF.Builder;
1842
1843 // This array is used as a medium to transfer, one reduce element at a time,
1844 // the data from the first lane of every warp to lanes in the first warp
1845 // in order to perform the final step of a reduction in a parallel region
1846 // (reduction across warps). The array is placed in NVPTX __shared__ memory
1847 // for reduced latency, as well as to have a distinct copy for concurrently
1848 // executing target regions. The array is declared with common linkage so
1849 // as to be shared across compilation units.
1850 StringRef TransferMediumName =
1851 "__openmp_nvptx_data_transfer_temporary_storage";
1852 llvm::GlobalVariable *TransferMedium =
1853 M.getGlobalVariable(TransferMediumName);
1854 unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
1855 if (!TransferMedium) {
1856 auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
1857 unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
1858 TransferMedium = new llvm::GlobalVariable(
1859 M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage,
1860 llvm::UndefValue::get(Ty), TransferMediumName,
1861 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
1862 SharedAddressSpace);
1863 CGM.addCompilerUsedGlobal(TransferMedium);
1864 }
1865
1866 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1867 // Get the CUDA thread id of the current OpenMP thread on the GPU.
1868 llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
1869 // nvptx_lane_id = nvptx_id % warpsize
1870 llvm::Value *LaneID = getNVPTXLaneID(CGF);
1871 // nvptx_warp_id = nvptx_id / warpsize
1872 llvm::Value *WarpID = getNVPTXWarpID(CGF);
1873
1874 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
1875 llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
1876 Address LocalReduceList(
1877 Bld.CreatePointerBitCastOrAddrSpaceCast(
1878 CGF.EmitLoadOfScalar(
1879 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
1880 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
1881 ElemTy->getPointerTo()),
1882 ElemTy, CGF.getPointerAlign());
1883
1884 unsigned Idx = 0;
1885 for (const Expr *Private : Privates) {
1886 //
1887 // Warp master copies reduce element to transfer medium in __shared__
1888 // memory.
1889 //
1890 unsigned RealTySize =
1891 C.getTypeSizeInChars(Private->getType())
1892 .alignTo(C.getTypeAlignInChars(Private->getType()))
1893 .getQuantity();
1894 for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /=2) {
1895 unsigned NumIters = RealTySize / TySize;
1896 if (NumIters == 0)
1897 continue;
1898 QualType CType = C.getIntTypeForBitwidth(
1899 C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
1900 llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
1901 CharUnits Align = CharUnits::fromQuantity(TySize);
1902 llvm::Value *Cnt = nullptr;
1903 Address CntAddr = Address::invalid();
1904 llvm::BasicBlock *PrecondBB = nullptr;
1905 llvm::BasicBlock *ExitBB = nullptr;
1906 if (NumIters > 1) {
1907 CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
1908 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
1909 /*Volatile=*/false, C.IntTy);
1910 PrecondBB = CGF.createBasicBlock("precond");
1911 ExitBB = CGF.createBasicBlock("exit");
1912 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
1913 // There is no need to emit line number for unconditional branch.
1914 (void)ApplyDebugLocation::CreateEmpty(CGF);
1915 CGF.EmitBlock(PrecondBB);
1916 Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
1917 llvm::Value *Cmp =
1918 Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
1919 Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
1920 CGF.EmitBlock(BodyBB);
1921 }
1922 // kmpc_barrier.
1923 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
1924 /*EmitChecks=*/false,
1925 /*ForceSimpleCall=*/true);
1926 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
1927 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
1928 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
1929
1930 // if (lane_id == 0)
1931 llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
1932 Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
1933 CGF.EmitBlock(ThenBB);
1934
1935 // Reduce element = LocalReduceList[i]
1936 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
1937 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
1938 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
1939 // elemptr = ((CopyType*)(elemptrptr)) + I
1940 Address ElemPtr(ElemPtrPtr, CGF.Int8Ty, Align);
1941 ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
1942 if (NumIters > 1)
1943 ElemPtr = Bld.CreateGEP(ElemPtr, Cnt);
1944
1945 // Get pointer to location in transfer medium.
1946 // MediumPtr = &medium[warp_id]
1947 llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
1948 TransferMedium->getValueType(), TransferMedium,
1949 {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
1950 // Casting to actual data type.
1951 // MediumPtr = (CopyType*)MediumPtrAddr;
1952 Address MediumPtr(
1953 Bld.CreateBitCast(
1954 MediumPtrVal,
1955 CopyType->getPointerTo(
1956 MediumPtrVal->getType()->getPointerAddressSpace())),
1957 CopyType, Align);
1958
1959 // elem = *elemptr
1960 //*MediumPtr = elem
1961 llvm::Value *Elem = CGF.EmitLoadOfScalar(
1962 ElemPtr, /*Volatile=*/false, CType, Loc,
1963 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
1964 // Store the source element value to the dest element address.
1965 CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType,
1966 LValueBaseInfo(AlignmentSource::Type),
1967 TBAAAccessInfo());
1968
1969 Bld.CreateBr(MergeBB);
1970
1971 CGF.EmitBlock(ElseBB);
1972 Bld.CreateBr(MergeBB);
1973
1974 CGF.EmitBlock(MergeBB);
1975
1976 // kmpc_barrier.
1977 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
1978 /*EmitChecks=*/false,
1979 /*ForceSimpleCall=*/true);
1980
1981 //
1982 // Warp 0 copies reduce element from transfer medium.
1983 //
1984 llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
1985 llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
1986 llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
1987
1988 Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
1989 llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
1990 AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
1991
1992 // Up to 32 threads in warp 0 are active.
1993 llvm::Value *IsActiveThread =
1994 Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
1995 Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
1996
1997 CGF.EmitBlock(W0ThenBB);
1998
1999 // SrcMediumPtr = &medium[tid]
2000 llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
2001 TransferMedium->getValueType(), TransferMedium,
2002 {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
2003 // SrcMediumVal = *SrcMediumPtr;
2004 Address SrcMediumPtr(
2005 Bld.CreateBitCast(
2006 SrcMediumPtrVal,
2007 CopyType->getPointerTo(
2008 SrcMediumPtrVal->getType()->getPointerAddressSpace())),
2009 CopyType, Align);
2010
2011 // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
2012 Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2013 llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
2014 TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
2015 Address TargetElemPtr(TargetElemPtrVal, CGF.Int8Ty, Align);
2016 TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
2017 if (NumIters > 1)
2018 TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt);
2019
2020 // *TargetElemPtr = SrcMediumVal;
2021 llvm::Value *SrcMediumValue =
2022 CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
2023 CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
2024 CType);
2025 Bld.CreateBr(W0MergeBB);
2026
2027 CGF.EmitBlock(W0ElseBB);
2028 Bld.CreateBr(W0MergeBB);
2029
2030 CGF.EmitBlock(W0MergeBB);
2031
2032 if (NumIters > 1) {
2033 Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
2034 CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
2035 CGF.EmitBranch(PrecondBB);
2036 (void)ApplyDebugLocation::CreateEmpty(CGF);
2037 CGF.EmitBlock(ExitBB);
2038 }
2039 RealTySize %= TySize;
2040 }
2041 ++Idx;
2042 }
2043
2044 CGF.FinishFunction();
2045 return Fn;
2046}
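
// Illustrative sketch (editor-added, not part of CGOpenMPRuntimeGPU.cpp): the
// per-chunk protocol the generated function performs, with the barrier calls
// shown as comments and the lane/warp/thread ids passed in as stand-ins for
// the GPU intrinsics actually used. Only the 32-bit chunk case is shown;
// larger elements simply repeat this step.
static void interWarpCopySketch(int LaneId, int WarpId, int ThreadId,
                                int NumWarps, int *MyElem,
                                volatile int *SharedMedium /* [warp_size] */) {
  // __kmpc_barrier(...);            // all threads reach the same chunk
  if (LaneId == 0)                   // each warp master publishes its value
    SharedMedium[WarpId] = *MyElem;
  // __kmpc_barrier(...);            // wait for every warp master's store
  if (ThreadId < NumWarps)           // lanes of warp 0 gather one value each
    *MyElem = SharedMedium[ThreadId];
}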
2047
2048/// Emit a helper that reduces data across two OpenMP threads (lanes)
2049/// in the same warp. It uses shuffle instructions to copy over data from
2050/// a remote lane's stack. The reduction algorithm performed is specified
2051/// by the fourth parameter.
2052///
2053/// Algorithm Versions.
2054/// Full Warp Reduce (argument value 0):
2055/// This algorithm assumes that all 32 lanes are active and gathers
2056/// data from these 32 lanes, producing a single resultant value.
2057/// Contiguous Partial Warp Reduce (argument value 1):
2058/// This algorithm assumes that only a *contiguous* subset of lanes
2059/// are active. This happens for the last warp in a parallel region
2060/// when the user specified num_threads is not an integer multiple of
2061/// 32. This contiguous subset always starts with the zeroth lane.
2062/// Partial Warp Reduce (argument value 2):
2063/// This algorithm gathers data from any number of lanes at any position.
2064/// All reduced values are stored in the lowest possible lane. The set
2065/// of problems every algorithm addresses is a super set of those
2066/// addressable by algorithms with a lower version number. Overhead
2067/// increases as algorithm version increases.
2068///
2069/// Terminology
2070/// Reduce element:
2071/// Reduce element refers to the individual data field with primitive
2072/// data types to be combined and reduced across threads.
2073/// Reduce list:
2074/// Reduce list refers to a collection of local, thread-private
2075/// reduce elements.
2076/// Remote Reduce list:
2077/// Remote Reduce list refers to a collection of remote (relative to
2078/// the current thread) reduce elements.
2079///
2080/// We distinguish between three states of threads that are important to
2081/// the implementation of this function.
2082/// Alive threads:
2083/// Threads in a warp executing the SIMT instruction, as distinguished from
2084/// threads that are inactive due to divergent control flow.
2085/// Active threads:
2086/// The minimal set of threads that has to be alive upon entry to this
2087/// function. The computation is correct iff active threads are alive.
2088/// Some threads are alive but they are not active because they do not
2089/// contribute to the computation in any useful manner. Turning them off
2090/// may introduce control flow overheads without any tangible benefits.
2091/// Effective threads:
2092/// In order to comply with the argument requirements of the shuffle
2093/// function, we must keep all lanes holding data alive. But at most
2094/// half of them perform value aggregation; we refer to this half of
2095/// threads as effective. The other half is simply handing off their
2096/// data.
2097///
2098/// Procedure
2099/// Value shuffle:
2100/// In this step active threads transfer data from higher lane positions
2101/// in the warp to lower lane positions, creating Remote Reduce list.
2102/// Value aggregation:
2103/// In this step, effective threads combine their thread local Reduce list
2104/// with Remote Reduce list and store the result in the thread local
2105/// Reduce list.
2106/// Value copy:
2107/// In this step, we deal with the assumption made by algorithm 2
2108/// (i.e. contiguity assumption). When we have an odd number of lanes
2109/// active, say 2k+1, only k threads will be effective and therefore k
2110/// new values will be produced. However, the Reduce list owned by the
2111/// (2k+1)th thread is ignored in the value aggregation. Therefore
2112/// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
2113/// that the contiguity assumption still holds.
2114static llvm::Function *emitShuffleAndReduceFunction(
2115 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2116 QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
2117 ASTContext &C = CGM.getContext();
2118
2119 // Thread local Reduce list used to host the values of data to be reduced.
2120 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2121 C.VoidPtrTy, ImplicitParamDecl::Other);
2122 // Current lane id; could be logical.
2123 ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
2124 ImplicitParamDecl::Other);
2125 // Offset of the remote source lane relative to the current lane.
2126 ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2127 C.ShortTy, ImplicitParamDecl::Other);
2128 // Algorithm version. This is expected to be known at compile time.
2129 ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2130 C.ShortTy, ImplicitParamDecl::Other);
2131 FunctionArgList Args;
2132 Args.push_back(&ReduceListArg);
2133 Args.push_back(&LaneIDArg);
2134 Args.push_back(&RemoteLaneOffsetArg);
2135 Args.push_back(&AlgoVerArg);
2136
2137 const CGFunctionInfo &CGFI =
2138 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2139 auto *Fn = llvm::Function::Create(
2140 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2141 "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
2142 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2143 Fn->setDoesNotRecurse();
2144
2145 CodeGenFunction CGF(CGM);
2146 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2147
2148 CGBuilderTy &Bld = CGF.Builder;
2149
2150 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2151 llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
2152 Address LocalReduceList(
2153 Bld.CreatePointerBitCastOrAddrSpaceCast(
2154 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2155 C.VoidPtrTy, SourceLocation()),
2156 ElemTy->getPointerTo()),
2157 ElemTy, CGF.getPointerAlign());
2158
2159 Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
2160 llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
2161 AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2162
2163 Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
2164 llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
2165 AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2166
2167 Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
2168 llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
2169 AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2170
2171 // Create a local thread-private variable to host the Reduce list
2172 // from a remote lane.
2173 Address RemoteReduceList =
2174 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
2175
2176 // This loop iterates through the list of reduce elements and copies,
2177 // element by element, from a remote lane in the warp to RemoteReduceList,
2178 // hosted on the thread's stack.
2179 emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
2180 LocalReduceList, RemoteReduceList,
2181 {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
2182 /*ScratchpadIndex=*/nullptr,
2183 /*ScratchpadWidth=*/nullptr});
2184
2185 // The actions to be performed on the Remote Reduce list are dependent
2186 // on the algorithm version.
2187 //
2188 // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
2189 // LaneId % 2 == 0 && Offset > 0):
2190 // do the reduction value aggregation
2191 //
2192 // The thread local variable Reduce list is mutated in place to host the
2193 // reduced data, which is the aggregated value produced from local and
2194 // remote lanes.
2195 //
2196 // Note that AlgoVer is expected to be a constant integer known at compile
2197 // time.
2198 // When AlgoVer==0, the first conjunction evaluates to true, making
2199 // the entire predicate true during compile time.
2200 // When AlgoVer==1, the second conjunction has only the second part to be
2201 // evaluated during runtime. Other conjunctions evaluate to false
2202 // during compile time.
2203 // When AlgoVer==2, the third conjunction has only the second part to be
2204 // evaluated during runtime. Other conjunctions evaluate to false
2205 // during compile time.
2206 llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
2207
2208 llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
2209 llvm::Value *CondAlgo1 = Bld.CreateAnd(
2210 Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
2211
2212 llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
2213 llvm::Value *CondAlgo2 = Bld.CreateAnd(
2214 Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
2215 CondAlgo2 = Bld.CreateAnd(
2216 CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
2217
2218 llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
2219 CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
2220
2221 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2222 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2223 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2224 Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
2225
2226 CGF.EmitBlock(ThenBB);
2227 // reduce_function(LocalReduceList, RemoteReduceList)
2228 llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2229 LocalReduceList.getPointer(), CGF.VoidPtrTy);
2230 llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2231 RemoteReduceList.getPointer(), CGF.VoidPtrTy);
2232 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2233 CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
2234 Bld.CreateBr(MergeBB);
2235
2236 CGF.EmitBlock(ElseBB);
2237 Bld.CreateBr(MergeBB);
2238
2239 CGF.EmitBlock(MergeBB);
2240
2241 // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
2242 // Reduce list.
2243 Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
2244 llvm::Value *CondCopy = Bld.CreateAnd(
2245 Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
2246
2247 llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
2248 llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
2249 llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
2250 Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
2251
2252 CGF.EmitBlock(CpyThenBB);
2253 emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
2254 RemoteReduceList, LocalReduceList);
2255 Bld.CreateBr(CpyMergeBB);
2256
2257 CGF.EmitBlock(CpyElseBB);
2258 Bld.CreateBr(CpyMergeBB);
2259
2260 CGF.EmitBlock(CpyMergeBB);
2261
2262 CGF.FinishFunction();
2263 return Fn;
2264}
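
// Illustrative sketch (editor-added, not part of CGOpenMPRuntimeGPU.cpp): the
// reduce and copy predicates built above, written out as plain C++ for one
// invocation of the generated helper (lane ids are non-negative, so the
// unsigned comparisons in the IR behave like the signed ones below).
static bool shouldReduce(short AlgoVer, short LaneId, short RemoteOffset) {
  bool CondAlgo0 = AlgoVer == 0;
  bool CondAlgo1 = AlgoVer == 1 && LaneId < RemoteOffset;
  bool CondAlgo2 = AlgoVer == 2 && (LaneId & 1) == 0 && RemoteOffset > 0;
  return CondAlgo0 || CondAlgo1 || CondAlgo2;
}

static bool shouldCopyRemoteToLocal(short AlgoVer, short LaneId,
                                    short RemoteOffset) {
  return AlgoVer == 1 && LaneId >= RemoteOffset;
}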
2265
2266/// This function emits a helper that copies all the reduction variables from
2267/// the team into the provided global buffer for the reduction variables.
2268///
2269/// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
2270/// For all data entries D in reduce_data:
2271/// Copy local D to buffer.D[Idx]
2272static llvm::Value *emitListToGlobalCopyFunction(
2273 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2274 QualType ReductionArrayTy, SourceLocation Loc,
2275 const RecordDecl *TeamReductionRec,
2276 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2277 &VarFieldMap) {
2278 ASTContext &C = CGM.getContext();
2279
2280 // Buffer: global reduction buffer.
2281 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2282 C.VoidPtrTy, ImplicitParamDecl::Other);
2283 // Idx: index of the buffer.
2284 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2285 ImplicitParamDecl::Other);
2286 // ReduceList: thread local Reduce list.
2287 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2288 C.VoidPtrTy, ImplicitParamDecl::Other);
2289 FunctionArgList Args;
2290 Args.push_back(&BufferArg);
2291 Args.push_back(&IdxArg);
2292 Args.push_back(&ReduceListArg);
2293
2294 const CGFunctionInfo &CGFI =
2295 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2296 auto *Fn = llvm::Function::Create(
2297 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2298 "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
2299 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2300 Fn->setDoesNotRecurse();
2301 CodeGenFunction CGF(CGM);
2302 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2303
2304 CGBuilderTy &Bld = CGF.Builder;
2305
2306 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2307 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2308 llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
2309 Address LocalReduceList(
2310 Bld.CreatePointerBitCastOrAddrSpaceCast(
2311 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2312 C.VoidPtrTy, Loc),
2313 ElemTy->getPointerTo()),
2314 ElemTy, CGF.getPointerAlign());
2315 QualType StaticTy = C.getRecordType(TeamReductionRec);
2316 llvm::Type *LLVMReductionsBufferTy =
2317 CGM.getTypes().ConvertTypeForMem(StaticTy);
2318 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2319 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2320 LLVMReductionsBufferTy->getPointerTo());
2321 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2322 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2323 /*Volatile=*/false, C.IntTy,
2324 Loc)};
2325 unsigned Idx = 0;
2326 for (const Expr *Private : Privates) {
2327 // Reduce element = LocalReduceList[i]
2328 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2329 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2330 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2331 // elemptr = ((CopyType*)(elemptrptr)) + I
2332 ElemTy = CGF.ConvertTypeForMem(Private->getType());
2333 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2334 ElemPtrPtr, ElemTy->getPointerTo());
2335 Address ElemPtr =
2336 Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType()));
2337 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
2338 // Global = Buffer.VD[Idx];
2339 const FieldDecl *FD = VarFieldMap.lookup(VD);
2340 LValue GlobLVal = CGF.EmitLValueForField(
2341 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2342 Address GlobAddr = GlobLVal.getAddress(CGF);
2343 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobAddr.getElementType(),
2344 GlobAddr.getPointer(), Idxs);
2345 GlobLVal.setAddress(Address(BufferPtr,
2346 CGF.ConvertTypeForMem(Private->getType()),
2347 GlobAddr.getAlignment()));
2348 switch (CGF.getEvaluationKind(Private->getType())) {
2349 case TEK_Scalar: {
2350 llvm::Value *V = CGF.EmitLoadOfScalar(
2351 ElemPtr, /*Volatile=*/false, Private->getType(), Loc,
2352 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2353 CGF.EmitStoreOfScalar(V, GlobLVal);
2354 break;
2355 }
2356 case TEK_Complex: {
2357 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
2358 CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
2359 CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
2360 break;
2361 }
2362 case TEK_Aggregate:
2363 CGF.EmitAggregateCopy(GlobLVal,
2364 CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2365 Private->getType(), AggValueSlot::DoesNotOverlap);
2366 break;
2367 }
2368 ++Idx;
2369 }
2370
2371 CGF.FinishFunction();
2372 return Fn;
2373}
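
// Illustrative sketch (editor-added, not part of CGOpenMPRuntimeGPU.cpp): what
// the generated _omp_reduction_list_to_global_copy_func does for a single int
// reduction variable, with the team reduction record modelled as a struct that
// holds one per-team array per variable (the array bound is a placeholder).
struct TeamReductionBufferSketch {
  int D0[1024]; // one slot per team for reduction variable D0
};

static void listToGlobalCopySketch(TeamReductionBufferSketch *Buffer, int Idx,
                                   void **ReduceList) {
  // Copy local D to buffer.D[Idx].
  Buffer->D0[Idx] = *static_cast<int *>(ReduceList[0]);
}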
2374
2375/// This function emits a helper that reduces all the reduction variables from
2376/// the team into the provided global buffer for the reduction variables.
2377///
2378/// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
2379/// void *GlobPtrs[];
2380/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
2381/// ...
2382/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
2383/// reduce_function(GlobPtrs, reduce_data);
2384static llvm::Value *emitListToGlobalReduceFunction(
2385 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2386 QualType ReductionArrayTy, SourceLocation Loc,
2387 const RecordDecl *TeamReductionRec,
2388 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2389 &VarFieldMap,
2390 llvm::Function *ReduceFn) {
2391 ASTContext &C = CGM.getContext();
2392
2393 // Buffer: global reduction buffer.
2394 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2395 C.VoidPtrTy, ImplicitParamDecl::Other);
2396 // Idx: index of the buffer.
2397 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2398 ImplicitParamDecl::Other);
2399 // ReduceList: thread local Reduce list.
2400 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2401 C.VoidPtrTy, ImplicitParamDecl::Other);
2402 FunctionArgList Args;
2403 Args.push_back(&BufferArg);
2404 Args.push_back(&IdxArg);
2405 Args.push_back(&ReduceListArg);
2406
2407 const CGFunctionInfo &CGFI =
2408 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2409 auto *Fn = llvm::Function::Create(
2410 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2411 "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
2412 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2413 Fn->setDoesNotRecurse();
2414 CodeGenFunction CGF(CGM);
2415 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2416
2417 CGBuilderTy &Bld = CGF.Builder;
2418
2419 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2420 QualType StaticTy = C.getRecordType(TeamReductionRec);
2421 llvm::Type *LLVMReductionsBufferTy =
2422 CGM.getTypes().ConvertTypeForMem(StaticTy);
2423 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2424 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2425 LLVMReductionsBufferTy->getPointerTo());
2426
2427 // 1. Build a list of reduction variables.
2428 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2429 Address ReductionList =
2430 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2431 auto IPriv = Privates.begin();
2432 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2433 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2434 /*Volatile=*/false, C.IntTy,
2435 Loc)};
2436 unsigned Idx = 0;
2437 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2438 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2439 // Global = Buffer.VD[Idx];
2440 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2441 const FieldDecl *FD = VarFieldMap.lookup(VD);
2442 LValue GlobLVal = CGF.EmitLValueForField(
2443 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2444 Address GlobAddr = GlobLVal.getAddress(CGF);
2445 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2446 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2447 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2448 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2449 if ((*IPriv)->getType()->isVariablyModifiedType()) {
2450 // Store array size.
2451 ++Idx;
2452 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2453 llvm::Value *Size = CGF.Builder.CreateIntCast(
2454 CGF.getVLASize(
2455 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2456 .NumElts,
2457 CGF.SizeTy, /*isSigned=*/false);
2458 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2459 Elem);
2460 }
2461 }
2462
2463 // Call reduce_function(GlobalReduceList, ReduceList)
2464 llvm::Value *GlobalReduceList =
2465 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2466 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2467 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2468 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2469 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2470 CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
2471 CGF.FinishFunction();
2472 return Fn;
2473}
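
// Illustrative sketch (editor-added, not part of CGOpenMPRuntimeGPU.cpp): the
// generated _omp_reduction_list_to_global_reduce_func builds a temporary list
// of pointers into the global buffer and hands it to the outlined
// reduce_function. ReduceFn and the single-field buffer are placeholders.
struct TeamReductionBufferReduceSketch {
  int D0[1024]; // one slot per team for reduction variable D0
};

static void listToGlobalReduceSketch(TeamReductionBufferReduceSketch *Buffer,
                                     int Idx, void *ReduceData,
                                     void (*ReduceFn)(void *, void *)) {
  void *GlobPtrs[1] = {&Buffer->D0[Idx]}; // GlobPtrs[0] = &buffer.D0[Idx]
  ReduceFn(GlobPtrs, ReduceData);         // reduce_function(GlobPtrs, reduce_data)
}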
2474
2475/// This function emits a helper that copies all the reduction variables from
2476/// the provided global buffer back into the team's reduce data.
2477///
2478/// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
2479/// For all data entries D in reduce_data:
2480/// Copy buffer.D[Idx] to local D;
2481static llvm::Value *emitGlobalToListCopyFunction(
2482 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2483 QualType ReductionArrayTy, SourceLocation Loc,
2484 const RecordDecl *TeamReductionRec,
2485 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2486 &VarFieldMap) {
2487 ASTContext &C = CGM.getContext();
2488
2489 // Buffer: global reduction buffer.
2490 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2491 C.VoidPtrTy, ImplicitParamDecl::Other);
2492 // Idx: index of the buffer.
2493 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2494 ImplicitParamDecl::Other);
2495 // ReduceList: thread local Reduce list.
2496 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2497 C.VoidPtrTy, ImplicitParamDecl::Other);
2498 FunctionArgList Args;
2499 Args.push_back(&BufferArg);
2500 Args.push_back(&IdxArg);
2501 Args.push_back(&ReduceListArg);
2502
2503 const CGFunctionInfo &CGFI =
2504 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2505 auto *Fn = llvm::Function::Create(
2506 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2507 "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
2508 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2509 Fn->setDoesNotRecurse();
2510 CodeGenFunction CGF(CGM);
2511 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2512
2513 CGBuilderTy &Bld = CGF.Builder;
2514
2515 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2516 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2517 llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
2518 Address LocalReduceList(
2519 Bld.CreatePointerBitCastOrAddrSpaceCast(
2520 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2521 C.VoidPtrTy, Loc),
2522 ElemTy->getPointerTo()),
2523 ElemTy, CGF.getPointerAlign());
2524 QualType StaticTy = C.getRecordType(TeamReductionRec);
2525 llvm::Type *LLVMReductionsBufferTy =
2526 CGM.getTypes().ConvertTypeForMem(StaticTy);
2527 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2528 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2529 LLVMReductionsBufferTy->getPointerTo());
2530
2531 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2532 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2533 /*Volatile=*/false, C.IntTy,
2534 Loc)};
2535 unsigned Idx = 0;
2536 for (const Expr *Private : Privates) {
2537 // Reduce element = LocalReduceList[i]
2538 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2539 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2540 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2541 // elemptr = ((CopyType*)(elemptrptr)) + I
2542 ElemTy = CGF.ConvertTypeForMem(Private->getType());
2543 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2544 ElemPtrPtr, ElemTy->getPointerTo());
2545 Address ElemPtr =
2546 Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType()));
2547 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
2548 // Global = Buffer.VD[Idx];
2549 const FieldDecl *FD = VarFieldMap.lookup(VD);
2550 LValue GlobLVal = CGF.EmitLValueForField(
2551 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2552 Address GlobAddr = GlobLVal.getAddress(CGF);
2553 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobAddr.getElementType(),
2554 GlobAddr.getPointer(), Idxs);
2555 GlobLVal.setAddress(Address(BufferPtr,
2556 CGF.ConvertTypeForMem(Private->getType()),
2557 GlobAddr.getAlignment()));
2558 switch (CGF.getEvaluationKind(Private->getType())) {
2559 case TEK_Scalar: {
2560 llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
2561 CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(),
2562 LValueBaseInfo(AlignmentSource::Type),
2563 TBAAAccessInfo());
2564 break;
2565 }
2566 case TEK_Complex: {
2567 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
2568 CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2569 /*isInit=*/false);
2570 break;
2571 }
2572 case TEK_Aggregate:
2573 CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2574 GlobLVal, Private->getType(),
2575 AggValueSlot::DoesNotOverlap);
2576 break;
2577 }
2578 ++Idx;
2579 }
2580
2581 CGF.FinishFunction();
2582 return Fn;
2583}
2584
2585/// This function emits a helper that reduces the reduction variables stored in
2586/// the provided global buffer into the thread-local reduce list.
2587///
2588/// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
2589/// void *GlobPtrs[];
2590/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
2591/// ...
2592/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
2593/// reduce_function(reduce_data, GlobPtrs);
2594static llvm::Value *emitGlobalToListReduceFunction(
2595 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2596 QualType ReductionArrayTy, SourceLocation Loc,
2597 const RecordDecl *TeamReductionRec,
2598 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2599 &VarFieldMap,
2600 llvm::Function *ReduceFn) {
2601 ASTContext &C = CGM.getContext();
2602
2603 // Buffer: global reduction buffer.
2604 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2605 C.VoidPtrTy, ImplicitParamDecl::Other);
2606 // Idx: index of the buffer.
2607 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2608 ImplicitParamDecl::Other);
2609 // ReduceList: thread local Reduce list.
2610 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2611 C.VoidPtrTy, ImplicitParamDecl::Other);
2612 FunctionArgList Args;
2613 Args.push_back(&BufferArg);
2614 Args.push_back(&IdxArg);
2615 Args.push_back(&ReduceListArg);
2616
2617 const CGFunctionInfo &CGFI =
2618 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2619 auto *Fn = llvm::Function::Create(
2620 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2621 "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
2622 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2623 Fn->setDoesNotRecurse();
2624 CodeGenFunction CGF(CGM);
2625 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2626
2627 CGBuilderTy &Bld = CGF.Builder;
2628
2629 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2630 QualType StaticTy = C.getRecordType(TeamReductionRec);
2631 llvm::Type *LLVMReductionsBufferTy =
2632 CGM.getTypes().ConvertTypeForMem(StaticTy);
2633 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2634 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2635 LLVMReductionsBufferTy->getPointerTo());
2636
2637 // 1. Build a list of reduction variables.
2638 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2639 Address ReductionList =
2640 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2641 auto IPriv = Privates.begin();
2642 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2643 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2644 /*Volatile=*/false, C.IntTy,
2645 Loc)};
2646 unsigned Idx = 0;
2647 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2648 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2649 // Global = Buffer.VD[Idx];
2650 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2651 const FieldDecl *FD = VarFieldMap.lookup(VD);
2652 LValue GlobLVal = CGF.EmitLValueForField(
2653 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2654 Address GlobAddr = GlobLVal.getAddress(CGF);
2655 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2656 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2657 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2658 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2659 if ((*IPriv)->getType()->isVariablyModifiedType()) {
2660 // Store array size.
2661 ++Idx;
2662 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2663 llvm::Value *Size = CGF.Builder.CreateIntCast(
2664 CGF.getVLASize(
2665 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2666 .NumElts,
2667 CGF.SizeTy, /*isSigned=*/false);
2668 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2669 Elem);
2670 }
2671 }
2672
2673 // Call reduce_function(ReduceList, GlobalReduceList)
2674 llvm::Value *GlobalReduceList =
2675 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2676 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2677 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2678 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2679 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2680 CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
2681 CGF.FinishFunction();
2682 return Fn;
2683}
2684
2685///
2686/// Design of OpenMP reductions on the GPU
2687///
2688/// Consider a typical OpenMP program with one or more reduction
2689/// clauses:
2690///
2691/// float foo;
2692/// double bar;
2693/// #pragma omp target teams distribute parallel for \
2694/// reduction(+:foo) reduction(*:bar)
2695/// for (int i = 0; i < N; i++) {
2696/// foo += A[i]; bar *= B[i];
2697/// }
2698///
2699/// where 'foo' and 'bar' are reduced across all OpenMP threads in
2700/// all teams. In our OpenMP implementation on the NVPTX device an
2701/// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
2702/// within a team are mapped to CUDA threads within a threadblock.
2703/// Our goal is to efficiently aggregate values across all OpenMP
2704/// threads such that:
2705///
2706/// - the compiler and runtime are logically concise, and
2707/// - the reduction is performed efficiently in a hierarchical
2708/// manner as follows: within OpenMP threads in the same warp,
2709/// across warps in a threadblock, and finally across teams on
2710/// the NVPTX device.
2711///
2712/// Introduction to Decoupling
2713///
2714/// We would like to decouple the compiler and the runtime so that the
2715/// latter is ignorant of the reduction variables (number, data types)
2716/// and the reduction operators. This allows a simpler interface
2717/// and implementation while still attaining good performance.
2718///
2719/// Pseudocode for the aforementioned OpenMP program generated by the
2720/// compiler is as follows:
2721///
2722/// 1. Create private copies of reduction variables on each OpenMP
2723/// thread: 'foo_private', 'bar_private'
2724/// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
2725/// to it and writes the result in 'foo_private' and 'bar_private'
2726/// respectively.
2727/// 3. Call the OpenMP runtime on the GPU to reduce within a team
2728/// and store the result on the team master:
2729///
2730/// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
2731/// reduceData, shuffleReduceFn, interWarpCpyFn)
2732///
2733/// where:
2734/// struct ReduceData {
2735/// double *foo;
2736/// double *bar;
2737/// } reduceData
2738/// reduceData.foo = &foo_private
2739/// reduceData.bar = &bar_private
2740///
2741/// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
2742/// auxiliary functions generated by the compiler that operate on
2743/// variables of type 'ReduceData'. They help the runtime perform
2744/// algorithmic steps in a data-agnostic manner.
2745///
2746/// 'shuffleReduceFn' is a pointer to a function that reduces data
2747/// of type 'ReduceData' across two OpenMP threads (lanes) in the
2748/// same warp. It takes the following arguments as input:
2749///
2750/// a. variable of type 'ReduceData' on the calling lane,
2751/// b. its lane_id,
2752/// c. an offset relative to the current lane_id to generate a
2753/// remote_lane_id. The remote lane contains the second
2754/// variable of type 'ReduceData' that is to be reduced.
2755/// d. an algorithm version parameter determining which reduction
2756/// algorithm to use.
2757///
2758/// 'shuffleReduceFn' retrieves data from the remote lane using
2759/// efficient GPU shuffle intrinsics and reduces, using the
2760/// algorithm specified by the 4th parameter, the two operands
2761/// element-wise. The result is written to the first operand.
2762///
2763/// Different reduction algorithms are implemented in different
2764/// runtime functions, all calling 'shuffleReduceFn' to perform
2765/// the essential reduction step. Therefore, based on the 4th
2766/// parameter, this function behaves slightly differently to
2767/// cooperate with the runtime to ensure correctness under
2768/// different circumstances.
2769///
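/// For the example above, a generated 'shuffleReduceFn' could look roughly
/// like the sketch below (illustrative only; 'reduce_condition' stands in
/// for the version-specific checks described in the following sections):
///
///   void shuffleReduceFn(ReduceData *local, int16_t lane_id,
///                        int16_t offset, int16_t algo_version) {
///     double remote_foo = shuffle_down(*local->foo, offset, WARPSIZE);
///     double remote_bar = shuffle_down(*local->bar, offset, WARPSIZE);
///     if (reduce_condition(algo_version, lane_id, offset)) {
///       *local->foo += remote_foo;   // element-wise REDUCE_OP
///       *local->bar *= remote_bar;
///     } else {
///       *local->foo = remote_foo;    // copy-over case of the partial
///       *local->bar = remote_bar;    // warp reduction algorithms
///     }
///   }
///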
2770/// 'InterWarpCpyFn' is a pointer to a function that transfers
2771/// reduced variables across warps. It tunnels, through CUDA
2772/// shared memory, the thread-private data of type 'ReduceData'
2773/// from lane 0 of each warp to a lane in the first warp.
2774/// 4. Call the OpenMP runtime on the GPU to reduce across teams.
2775/// The last team writes the global reduced value to memory.
2776///
2777/// ret = __kmpc_nvptx_teams_reduce_nowait(...,
2778/// reduceData, shuffleReduceFn, interWarpCpyFn,
2779/// scratchpadCopyFn, loadAndReduceFn)
2780///
2781/// 'scratchpadCopyFn' is a helper that stores reduced
2782/// data from the team master to a scratchpad array in
2783/// global memory.
2784///
2785/// 'loadAndReduceFn' is a helper that loads data from
2786/// the scratchpad array and reduces it with the input
2787/// operand.
2788///
2789/// These compiler generated functions hide address
2790/// calculation and alignment information from the runtime.
2791/// 5. if ret == 1:
2792/// The team master of the last team stores the reduced
2793/// result to the globals in memory.
2794/// foo += reduceData.foo; bar *= reduceData.bar
2795///
2796///
2797/// Warp Reduction Algorithms
2798///
2799/// On the warp level, we have three algorithms implemented in the
2800/// OpenMP runtime depending on the number of active lanes:
2801///
2802/// Full Warp Reduction
2803///
2804/// The reduce algorithm within a warp where all lanes are active
2805/// is implemented in the runtime as follows:
2806///
2807/// full_warp_reduce(void *reduce_data,
2808/// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
2809/// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
2810/// ShuffleReduceFn(reduce_data, 0, offset, 0);
2811/// }
2812///
2813/// The algorithm completes in log(2, WARPSIZE) steps.
2814///
2815/// 'ShuffleReduceFn' is used here with lane_id set to 0 because lane_id is
2816/// not used; this saves instructions by not retrieving lane_id
2817/// from the corresponding special registers. The 4th parameter, which
2818/// represents the version of the algorithm being used, is set to 0 to
2819/// signify full warp reduction.
2820///
2821/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
2822///
2823/// #reduce_elem refers to an element in the local lane's data structure
2824/// #remote_elem is retrieved from a remote lane
2825/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
2826/// reduce_elem = reduce_elem REDUCE_OP remote_elem;
2827///
2828/// Contiguous Partial Warp Reduction
2829///
2830/// This reduce algorithm is used within a warp where only the first
2831/// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
2832/// number of OpenMP threads in a parallel region is not a multiple of
2833/// WARPSIZE. The algorithm is implemented in the runtime as follows:
2834///
2835/// void
2836/// contiguous_partial_reduce(void *reduce_data,
2837/// kmp_ShuffleReductFctPtr ShuffleReduceFn,
2838/// int size, int lane_id) {
2839/// int curr_size;
2840/// int offset;
2841/// curr_size = size;
2842///     offset = curr_size/2;
2843/// while (offset>0) {
2844/// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
2845/// curr_size = (curr_size+1)/2;
2846/// offset = curr_size/2;
2847/// }
2848/// }
2849///
2850/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
2851///
2852/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
2853/// if (lane_id < offset)
2854/// reduce_elem = reduce_elem REDUCE_OP remote_elem
2855/// else
2856/// reduce_elem = remote_elem
2857///
2858/// This algorithm assumes that the data to be reduced are located in a
2859/// contiguous subset of lanes starting from the first. When there is
2860/// an odd number of active lanes, the data in the last lane is not
2861/// aggregated with any other lane's data but is instead copied over.
2862///
2863/// Dispersed Partial Warp Reduction
2864///
2865/// This algorithm is used within a warp when any discontiguous subset of
2866/// lanes is active. It is used to implement the reduction operation
2867/// across lanes in an OpenMP simd region or in a nested parallel region.
2868///
2869/// void
2870/// dispersed_partial_reduce(void *reduce_data,
2871/// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
2872/// int size, remote_id;
2873/// int logical_lane_id = number_of_active_lanes_before_me() * 2;
2874/// do {
2875/// remote_id = next_active_lane_id_right_after_me();
2876///       # the above function returns 0 if no active lane
2877/// # is present right after the current lane.
2878/// size = number_of_active_lanes_in_this_warp();
2879/// logical_lane_id /= 2;
2880/// ShuffleReduceFn(reduce_data, logical_lane_id,
2881/// remote_id-1-threadIdx.x, 2);
2882/// } while (logical_lane_id % 2 == 0 && size > 1);
2883/// }
2884///
2885/// There is no assumption made about the initial state of the reduction.
2886/// Any number of lanes (>=1) could be active at any position. The reduction
2887/// result is returned in the first active lane.
2888///
2889/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
2890///
2891/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
2892/// if (lane_id % 2 == 0 && offset > 0)
2893/// reduce_elem = reduce_elem REDUCE_OP remote_elem
2894/// else
2895/// reduce_elem = remote_elem
2896///
2897///
2898/// Intra-Team Reduction
2899///
2900/// This function, as implemented in the runtime call
2901/// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
2902/// threads in a team. It first reduces within a warp using the
2903/// aforementioned algorithms. We then proceed to gather all such
2904/// reduced values at the first warp.
2905///
2906/// The runtime makes use of the function 'InterWarpCpyFn', which copies
2907/// data from each "warp master" (zeroth lane of each warp, where
2908/// warp-reduced data is held) to the zeroth warp. This step reduces (in
2909/// a mathematical sense) the problem of reduction across warp masters in
2910/// a block to the problem of warp reduction.
2911///
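/// Put together, the intra-team step can be sketched as follows (pseudocode
/// only, not the exact runtime implementation):
///
///   warp_reduce(reduce_data, ShuffleReduceFn);   // one of the three warp
///                                                // algorithms above
///   InterWarpCpyFn(reduce_data, num_warps);      // gather warp results in
///                                                // the first warp
///   if (warp_id == 0)
///     warp_reduce(reduce_data, ShuffleReduceFn); // reduce the warp results
///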
2912///
2913/// Inter-Team Reduction
2914///
2915/// Once a team has reduced its data to a single value, it is stored in
2916/// a global scratchpad array. Since each team has a distinct slot, this
2917/// can be done without locking.
2918///
2919/// The last team to write to the scratchpad array proceeds to reduce the
2920/// scratchpad array. One or more workers in the last team use the helper
2921/// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
2922/// the k'th worker reduces every k'th element.
2923///
2924/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
2925/// reduce across workers and compute a globally reduced value.
2926///
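/// Put together, the inter-team step can be sketched as follows (pseudocode
/// only; the helper names are illustrative and correspond to the
/// list_to_global_* and global_to_list_* functions emitted by the compiler):
///
///   list_to_global_copy(buffer, my_team_slot, reduce_data);  // publish
///   if (I_am_the_last_team_to_arrive()) {
///     for (each written slot Idx)
///       global_to_list_reduce(buffer, Idx, reduce_data);     // combine
///     ret = 1;   // this team now holds the globally reduced value
///   }
///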
2927void CGOpenMPRuntimeGPU::emitReduction(
2928 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
2929 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
2930 ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
2931 if (!CGF.HaveInsertPoint())
2932 return;
2933
2934 bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
2935#ifndef NDEBUG
2936 bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
2937#endif
2938
2939 if (Options.SimpleReduction) {
2940    assert(!TeamsReduction && !ParallelReduction &&
2941           "Invalid reduction selection in emitReduction.");
2942 CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
2943 ReductionOps, Options);
2944 return;
2945 }
2946
2947  assert((TeamsReduction || ParallelReduction) &&
2948         "Invalid reduction selection in emitReduction.");
2949
2950 // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
2951 // RedList, shuffle_reduce_func, interwarp_copy_func);
2952 // or
2953 // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
2954 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2955 llvm::Value *ThreadId = getThreadID(CGF, Loc);
2956
2957 llvm::Value *Res;
2958 ASTContext &C = CGM.getContext();
2959 // 1. Build a list of reduction variables.
2960 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2961 auto Size = RHSExprs.size();
2962 for (const Expr *E : Privates) {
2963 if (E->getType()->isVariablyModifiedType())
2964 // Reserve place for array size.
2965 ++Size;
2966 }
2967 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
2968 QualType ReductionArrayTy =
2969 C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
2970 /*IndexTypeQuals=*/0);
2971 Address ReductionList =
2972 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2973 auto IPriv = Privates.begin();
2974 unsigned Idx = 0;
2975 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
2976 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2977 CGF.Builder.CreateStore(
2978 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2979 CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
2980 Elem);
2981 if ((*IPriv)->getType()->isVariablyModifiedType()) {
2982 // Store array size.
2983 ++Idx;
2984 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2985 llvm::Value *Size = CGF.Builder.CreateIntCast(
2986 CGF.getVLASize(
2987 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2988 .NumElts,
2989 CGF.SizeTy, /*isSigned=*/false);
2990 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2991 Elem);
2992 }
2993 }
2994
2995 llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2996 ReductionList.getPointer(), CGF.VoidPtrTy);
2997 llvm::Function *ReductionFn =
2998 emitReductionFunction(Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
2999 Privates, LHSExprs, RHSExprs, ReductionOps);
3000 llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
3001 llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
3002 CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
3003 llvm::Value *InterWarpCopyFn =
3004 emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
3005
3006 if (ParallelReduction) {
3007 llvm::Value *Args[] = {RTLoc,
3008 ThreadId,
3009 CGF.Builder.getInt32(RHSExprs.size()),
3010 ReductionArrayTySize,
3011 RL,
3012 ShuffleAndReduceFn,
3013 InterWarpCopyFn};
3014
3015 Res = CGF.EmitRuntimeCall(
3016 OMPBuilder.getOrCreateRuntimeFunction(
3017 CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2),
3018 Args);
3019 } else {
3020    assert(TeamsReduction && "expected teams reduction.");
3021 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
3022 llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
3023 int Cnt = 0;
3024 for (const Expr *DRE : Privates) {
3025 PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
3026 ++Cnt;
3027 }
3028 const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
3029 CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
3030 C.getLangOpts().OpenMPCUDAReductionBufNum);
3031 TeamsReductions.push_back(TeamReductionRec);
3032 if (!KernelTeamsReductionPtr) {
3033 KernelTeamsReductionPtr = new llvm::GlobalVariable(
3034 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
3035 llvm::GlobalValue::InternalLinkage, nullptr,
3036 "_openmp_teams_reductions_buffer_$_$ptr");
3037 }
3038 llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
3039 Address(KernelTeamsReductionPtr, CGF.VoidPtrTy, CGM.getPointerAlign()),
3040 /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
3041 llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
3042 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
3043 llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
3044 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
3045 ReductionFn);
3046 llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
3047 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
3048 llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
3049 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
3050 ReductionFn);
3051
3052 llvm::Value *Args[] = {
3053 RTLoc,
3054 ThreadId,
3055 GlobalBufferPtr,
3056 CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
3057 RL,
3058 ShuffleAndReduceFn,
3059 InterWarpCopyFn,
3060 GlobalToBufferCpyFn,
3061 GlobalToBufferRedFn,
3062 BufferToGlobalCpyFn,
3063 BufferToGlobalRedFn};
3064
3065 Res = CGF.EmitRuntimeCall(
3066 OMPBuilder.getOrCreateRuntimeFunction(
3067 CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2),
3068 Args);
3069 }
3070
3071 // 5. Build if (res == 1)
3072 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
3073 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
3074 llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
3075 Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
3076 CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
3077
3078 // 6. Build then branch: where we have reduced values in the master
3079 // thread in each team.
3080 // __kmpc_end_reduce{_nowait}(<gtid>);
3081 // break;
3082 CGF.EmitBlock(ThenBB);
3083
3084 // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
3085 auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
3086 this](CodeGenFunction &CGF, PrePostActionTy &Action) {
3087 auto IPriv = Privates.begin();
3088 auto ILHS = LHSExprs.begin();
3089 auto IRHS = RHSExprs.begin();
3090 for (const Expr *E : ReductionOps) {
3091 emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
3092 cast<DeclRefExpr>(*IRHS));
3093 ++IPriv;
3094 ++ILHS;
3095 ++IRHS;
3096 }
3097 };
3098 llvm::Value *EndArgs[] = {ThreadId};
3099 RegionCodeGenTy RCG(CodeGen);
3100 NVPTXActionTy Action(
3101 nullptr, llvm::None,
3102 OMPBuilder.getOrCreateRuntimeFunction(
3103 CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait),
3104 EndArgs);
3105 RCG.setAction(Action);
3106 RCG(CGF);
3107 // There is no need to emit line number for unconditional branch.
3108 (void)ApplyDebugLocation::CreateEmpty(CGF);
3109 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
3110}
3111
3112const VarDecl *
3113CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
3114 const VarDecl *NativeParam) const {
3115 if (!NativeParam->getType()->isReferenceType())
3116 return NativeParam;
3117 QualType ArgType = NativeParam->getType();
3118 QualifierCollector QC;
3119 const Type *NonQualTy = QC.strip(ArgType);
3120 QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
3121 if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
3122 if (Attr->getCaptureKind() == OMPC_map) {
3123 PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
3124 LangAS::opencl_global);
3125 }
3126 }
3127 ArgType = CGM.getContext().getPointerType(PointeeTy);
3128 QC.addRestrict();
3129 enum { NVPTX_local_addr = 5 };
3130 QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
3131 ArgType = QC.apply(CGM.getContext(), ArgType);
3132 if (isa<ImplicitParamDecl>(NativeParam))
3133 return ImplicitParamDecl::Create(
3134 CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
3135 NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
3136 return ParmVarDecl::Create(
3137 CGM.getContext(),
3138 const_cast<DeclContext *>(NativeParam->getDeclContext()),
3139 NativeParam->getBeginLoc(), NativeParam->getLocation(),
3140 NativeParam->getIdentifier(), ArgType,
3141 /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
3142}
3143
3144Address
3145CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
3146 const VarDecl *NativeParam,
3147 const VarDecl *TargetParam) const {
3148  assert(NativeParam != TargetParam &&
3149         NativeParam->getType()->isReferenceType() &&
3150         "Native arg must not be the same as target arg.");
3151 Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
3152 QualType NativeParamType = NativeParam->getType();
3153 QualifierCollector QC;
3154 const Type *NonQualTy = QC.strip(NativeParamType);
3155 QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
3156 unsigned NativePointeeAddrSpace =
3157 CGF.getContext().getTargetAddressSpace(NativePointeeTy);
3158 QualType TargetTy = TargetParam->getType();
3159 llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
3160 LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
3161 // First cast to generic.
3162 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3163 TargetAddr, llvm::PointerType::getWithSamePointeeType(
3164 cast<llvm::PointerType>(TargetAddr->getType()), /*AddrSpace=*/0));
3165 // Cast from generic to native address space.
3166 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3167 TargetAddr, llvm::PointerType::getWithSamePointeeType(
3168 cast<llvm::PointerType>(TargetAddr->getType()),
3169 NativePointeeAddrSpace));
3170 Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
3171 CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
3172 NativeParamType);
3173 return NativeParamAddr;
3174}
3175
3176void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
3177 CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
3178 ArrayRef<llvm::Value *> Args) const {
3179 SmallVector<llvm::Value *, 4> TargetArgs;
3180 TargetArgs.reserve(Args.size());
3181 auto *FnType = OutlinedFn.getFunctionType();
3182 for (unsigned I = 0, E = Args.size(); I < E; ++I) {
3183 if (FnType->isVarArg() && FnType->getNumParams() <= I) {
3184 TargetArgs.append(std::next(Args.begin(), I), Args.end());
3185 break;
3186 }
3187 llvm::Type *TargetType = FnType->getParamType(I);
3188 llvm::Value *NativeArg = Args[I];
3189 if (!TargetType->isPointerTy()) {
3190 TargetArgs.emplace_back(NativeArg);
3191 continue;
3192 }
3193 llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3194 NativeArg, llvm::PointerType::getWithSamePointeeType(
3195 cast<llvm::PointerType>(NativeArg->getType()), /*AddrSpace*/ 0));
3196 TargetArgs.emplace_back(
3197 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
3198 }
3199 CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
3200}
3201
3202/// Emit a function which wraps the outlined parallel region
3203/// and controls the arguments which are passed to this function.
3204/// The wrapper ensures that the outlined function is called
3205/// with the correct arguments when data is shared.
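/// A rough sketch of the emitted wrapper (names are illustrative; the extra
/// loop-bound arguments for loop-bound-sharing directives are omitted):
///
///   static void outlined_parallel_fn_wrapper(uint16_t ParallelLevel,
///                                            uint32_t ThreadId) {
///     int32_t Zero = 0;
///     void **SharedArgs;
///     __kmpc_get_shared_variables(&SharedArgs);
///     outlined_parallel_fn(&ThreadId, &Zero,
///                          SharedArgs[0], ..., SharedArgs[N-1]);
///   }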
3206llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
3207 llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
3208 ASTContext &Ctx = CGM.getContext();
3209 const auto &CS = *D.getCapturedStmt(OMPD_parallel);
3210
3211 // Create a function that takes as argument the source thread.
3212 FunctionArgList WrapperArgs;
3213 QualType Int16QTy =
3214 Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
3215 QualType Int32QTy =
3216 Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
3217 ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3218 /*Id=*/nullptr, Int16QTy,
3219 ImplicitParamDecl::Other);
3220 ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3221 /*Id=*/nullptr, Int32QTy,
3222 ImplicitParamDecl::Other);
3223 WrapperArgs.emplace_back(&ParallelLevelArg);
3224 WrapperArgs.emplace_back(&WrapperArg);
3225
3226 const CGFunctionInfo &CGFI =
3227 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
3228
3229 auto *Fn = llvm::Function::Create(
3230 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3231 Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
3232
3233 // Ensure we do not inline the function. This is trivially true for the ones
3234 // passed to __kmpc_fork_call, but the ones called in serialized regions
3235 // could be inlined. This is not perfect, but it is closer to the invariant
3236 // we want, namely, every data environment starts with a new function.
3237 // TODO: We should pass the if condition to the runtime function and do the
3238 // handling there. Much cleaner code.
3239 Fn->addFnAttr(llvm::Attribute::NoInline);
3240
3241 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3242 Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
3243 Fn->setDoesNotRecurse();
3244
3245 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
3246 CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
3247 D.getBeginLoc(), D.getBeginLoc());
3248
3249 const auto *RD = CS.getCapturedRecordDecl();
3250 auto CurField = RD->field_begin();
3251
3252 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
3253 /*Name=*/".zero.addr");
3254 CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
3255 // Get the array of arguments.
3256 SmallVector<llvm::Value *, 8> Args;
3257
3258 Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
3259 Args.emplace_back(ZeroAddr.getPointer());
3260
3261 CGBuilderTy &Bld = CGF.Builder;
3262 auto CI = CS.capture_begin();
3263
3264 // Use global memory for data sharing.
3265 // Handle passing of global args to workers.
3266 Address GlobalArgs =
3267 CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
3268 llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
3269 llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
3270 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
3271 CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
3272 DataSharingArgs);
3273
3274 // Retrieve the shared variables from the list of references returned
3275 // by the runtime. Pass the variables to the outlined function.
3276 Address SharedArgListAddress = Address::invalid();
3277 if (CS.capture_size() > 0 ||
3278 isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
3279 SharedArgListAddress = CGF.EmitLoadOfPointer(
3280 GlobalArgs, CGF.getContext()
3281 .getPointerType(CGF.getContext().VoidPtrTy)
3282 .castAs<PointerType>());
3283 }
3284 unsigned Idx = 0;
3285 if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
3286 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
3287 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3288 Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy);
3289 llvm::Value *LB = CGF.EmitLoadOfScalar(
3290 TypedAddress,
3291 /*Volatile=*/false,
3292 CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
3293 cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
3294 Args.emplace_back(LB);
3295 ++Idx;
3296 Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
3297 TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3298 Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy);
3299 llvm::Value *UB = CGF.EmitLoadOfScalar(
3300 TypedAddress,
3301 /*Volatile=*/false,
3302 CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
3303 cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
3304 Args.emplace_back(UB);
3305 ++Idx;
3306 }
3307 if (CS.capture_size() > 0) {
3308 ASTContext &CGFContext = CGF.getContext();
3309 for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
3310 QualType ElemTy = CurField->getType();
3311 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
3312 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3313 Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)),
3314 CGF.ConvertTypeForMem(ElemTy));
3315 llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
3316 /*Volatile=*/false,
3317 CGFContext.getPointerType(ElemTy),
3318 CI->getLocation());
3319 if (CI->capturesVariableByCopy() &&
3320 !CI->getCapturedVar()->getType()->isAnyPointerType()) {
3321 Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
3322 CI->getLocation());
3323 }
3324 Args.emplace_back(Arg);
3325 }
3326 }
3327
3328 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
3329 CGF.FinishFunction();
3330 return Fn;
3331}
3332
3333void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
3334 const Decl *D) {
3335 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
3336 return;
3337
3338  assert(D && "Expected function or captured|block decl.");
3339  assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
3340         "Function is registered already.");
3341  assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
3342         "Team is set but not processed.");
3343 const Stmt *Body = nullptr;
3344 bool NeedToDelayGlobalization = false;
3345 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
3346 Body = FD->getBody();
3347 } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
3348 Body = BD->getBody();
3349 } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
3350 Body = CD->getBody();
3351 NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
3352 if (NeedToDelayGlobalization &&
3353 getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
3354 return;
3355 }
3356 if (!Body)
3357 return;
3358 CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
3359 VarChecker.Visit(Body);
3360 const RecordDecl *GlobalizedVarsRecord =
3361 VarChecker.getGlobalizedRecord(IsInTTDRegion);
3362 TeamAndReductions.first = nullptr;
3363 TeamAndReductions.second.clear();
3364 ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
3365 VarChecker.getEscapedVariableLengthDecls();
3366 if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
3367 return;
3368 auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
3369 I->getSecond().MappedParams =
3370 std::make_unique<CodeGenFunction::OMPMapVars>();
3371 I->getSecond().EscapedParameters.insert(
3372 VarChecker.getEscapedParameters().begin(),
3373 VarChecker.getEscapedParameters().end());
3374 I->getSecond().EscapedVariableLengthDecls.append(
3375 EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
3376 DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
3377 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3378    assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3379 Data.insert(std::make_pair(VD, MappedVarData()));
3380 }
3381 if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
3382 CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
3383 VarChecker.Visit(Body);
3384 I->getSecond().SecondaryLocalVarData.emplace();
3385 DeclToAddrMapTy &Data = *I->getSecond().SecondaryLocalVarData;
3386 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3387      assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3388 Data.insert(std::make_pair(VD, MappedVarData()));
3389 }
3390 }
3391 if (!NeedToDelayGlobalization) {
3392 emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
3393 struct GlobalizationScope final : EHScopeStack::Cleanup {
3394 GlobalizationScope() = default;
3395
3396 void Emit(CodeGenFunction &CGF, Flags flags) override {
3397 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
3398 .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
3399 }
3400 };
3401 CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
3402 }
3403}
3404
3405Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
3406 const VarDecl *VD) {
3407 if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
1) Assuming 'VD' is null
3408 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3409 auto AS = LangAS::Default;
3410 switch (A->getAllocatorType()) {
3411 // Use the default allocator here as by default local vars are
3412 // threadlocal.
3413 case OMPAllocateDeclAttr::OMPNullMemAlloc:
3414 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3415 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3416 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3417 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3418 // Follow the user decision - use default allocation.
3419 return Address::invalid();
3420 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3421    // TODO: implement support for user-defined allocators.
3422 return Address::invalid();
3423 case OMPAllocateDeclAttr::OMPConstMemAlloc:
3424 AS = LangAS::cuda_constant;
3425 break;
3426 case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
3427 AS = LangAS::cuda_shared;
3428 break;
3429 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
3430 case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
3431 break;
3432 }
3433 llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
3434 auto *GV = new llvm::GlobalVariable(
3435 CGM.getModule(), VarTy, /*isConstant=*/false,
3436 llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
3437 VD->getName(),
3438 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
3439 CGM.getContext().getTargetAddressSpace(AS));
3440 CharUnits Align = CGM.getContext().getDeclAlign(VD);
3441 GV->setAlignment(Align.getAsAlign());
3442 return Address(
3443 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3444 GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
3445 VD->getType().getAddressSpace()))),
3446 VarTy, Align);
3447 }
3448
3449 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
2) Taking false branch
3450 return Address::invalid();
3451
3452 VD = VD->getCanonicalDecl();
3) Called C++ object pointer is null
3453 auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
3454 if (I == FunctionGlobalizedDecls.end())
3455 return Address::invalid();
3456 auto VDI = I->getSecond().LocalVarData.find(VD);
3457 if (VDI != I->getSecond().LocalVarData.end())
3458 return VDI->second.PrivateAddr;
3459 if (VD->hasAttrs()) {
3460 for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
3461 E(VD->attr_end());
3462 IT != E; ++IT) {
3463 auto VDI = I->getSecond().LocalVarData.find(
3464 cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
3465 ->getCanonicalDecl());
3466 if (VDI != I->getSecond().LocalVarData.end())
3467 return VDI->second.PrivateAddr;
3468 }
3469 }
3470
3471 return Address::invalid();
3472}
3473
3474void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) {
3475 FunctionGlobalizedDecls.erase(CGF.CurFn);
3476 CGOpenMPRuntime::functionFinished(CGF);
3477}
3478
3479void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk(
3480 CodeGenFunction &CGF, const OMPLoopDirective &S,
3481 OpenMPDistScheduleClauseKind &ScheduleKind,
3482 llvm::Value *&Chunk) const {
3483 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
3484 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
3485 ScheduleKind = OMPC_DIST_SCHEDULE_static;
3486 Chunk = CGF.EmitScalarConversion(
3487 RT.getGPUNumThreads(CGF),
3488 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3489 S.getIterationVariable()->getType(), S.getBeginLoc());
3490 return;
3491 }
3492 CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
3493 CGF, S, ScheduleKind, Chunk);
3494}
3495
3496void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk(
3497 CodeGenFunction &CGF, const OMPLoopDirective &S,
3498 OpenMPScheduleClauseKind &ScheduleKind,
3499 const Expr *&ChunkExpr) const {
3500 ScheduleKind = OMPC_SCHEDULE_static;
3501 // Chunk size is 1 in this case.
3502 llvm::APInt ChunkSize(32, 1);
3503 ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
3504 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3505 SourceLocation());
3506}
3507
3508void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
3509 CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
3510  assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
3511         " Expected target-based directive.");
3512 const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
3513 for (const CapturedStmt::Capture &C : CS->captures()) {
3514 // Capture variables captured by reference in lambdas for target-based
3515 // directives.
3516 if (!C.capturesVariable())
3517 continue;
3518 const VarDecl *VD = C.getCapturedVar();
3519 const auto *RD = VD->getType()
3520 .getCanonicalType()
3521 .getNonReferenceType()
3522 ->getAsCXXRecordDecl();
3523 if (!RD || !RD->isLambda())
3524 continue;
3525 Address VDAddr = CGF.GetAddrOfLocalVar(VD);
3526 LValue VDLVal;
3527 if (VD->getType().getCanonicalType()->isReferenceType())
3528 VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
3529 else
3530 VDLVal = CGF.MakeAddrLValue(
3531 VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
3532 llvm::DenseMap<const ValueDecl *, FieldDecl *> Captures;
3533 FieldDecl *ThisCapture = nullptr;
3534 RD->getCaptureFields(Captures, ThisCapture);
3535 if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
3536 LValue ThisLVal =
3537 CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
3538 llvm::Value *CXXThis = CGF.LoadCXXThis();
3539 CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
3540 }
3541 for (const LambdaCapture &LC : RD->captures()) {
3542 if (LC.getCaptureKind() != LCK_ByRef)
3543 continue;
3544 const ValueDecl *VD = LC.getCapturedVar();
3545 // FIXME: For now VD is always a VarDecl because OpenMP does not support
3546 // capturing structured bindings in lambdas yet.
3547 if (!CS->capturesVariable(cast<VarDecl>(VD)))
3548 continue;
3549 auto It = Captures.find(VD);
3550      assert(It != Captures.end() && "Found lambda capture without field.");
3551 LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
3552 Address VDAddr = CGF.GetAddrOfLocalVar(cast<VarDecl>(VD));
3553 if (VD->getType().getCanonicalType()->isReferenceType())
3554 VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
3555 VD->getType().getCanonicalType())
3556 .getAddress(CGF);
3557 CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
3558 }
3559 }
3560}
3561
3562bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
3563 LangAS &AS) {
3564 if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
3565 return false;
3566 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3567 switch(A->getAllocatorType()) {
3568 case OMPAllocateDeclAttr::OMPNullMemAlloc:
3569 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3570 // Not supported, fallback to the default mem space.
3571 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3572 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
3573 case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
3574 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3575 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3576 AS = LangAS::Default;
3577 return true;
3578 case OMPAllocateDeclAttr::OMPConstMemAlloc:
3579 AS = LangAS::cuda_constant;
3580 return true;
3581 case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
3582 AS = LangAS::cuda_shared;
3583 return true;
3584 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3585    llvm_unreachable("Expected predefined allocator for the variables with the "
3586                     "static storage.");
3587 }
3588 return false;
3589}
3590
3591// Get current CudaArch and ignore any unknown values
3592static CudaArch getCudaArch(CodeGenModule &CGM) {
3593 if (!CGM.getTarget().hasFeature("ptx"))
3594 return CudaArch::UNKNOWN;
3595 for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
3596 if (Feature.getValue()) {
3597 CudaArch Arch = StringToCudaArch(Feature.getKey());
3598 if (Arch != CudaArch::UNKNOWN)
3599 return Arch;
3600 }
3601 }
3602 return CudaArch::UNKNOWN;
3603}
3604
3605/// Check whether the target architecture supports unified addressing, which
3606/// is required by the OpenMP requires clause "unified_shared_memory".
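/// For example, a translation unit containing
///
///   #pragma omp requires unified_shared_memory
///
/// is diagnosed here when compiling for one of the older architectures listed
/// below (e.g. sm_35), and accepted for sm_60 and newer.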
3607void CGOpenMPRuntimeGPU::processRequiresDirective(
3608 const OMPRequiresDecl *D) {
3609 for (const OMPClause *Clause : D->clauselists()) {
3610 if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
3611 CudaArch Arch = getCudaArch(CGM);
3612 switch (Arch) {
3613 case CudaArch::SM_20:
3614 case CudaArch::SM_21:
3615 case CudaArch::SM_30:
3616 case CudaArch::SM_32:
3617 case CudaArch::SM_35:
3618 case CudaArch::SM_37:
3619 case CudaArch::SM_50:
3620 case CudaArch::SM_52:
3621 case CudaArch::SM_53: {
3622 SmallString<256> Buffer;
3623 llvm::raw_svector_ostream Out(Buffer);
3624 Out << "Target architecture " << CudaArchToString(Arch)
3625 << " does not support unified addressing";
3626 CGM.Error(Clause->getBeginLoc(), Out.str());
3627 return;
3628 }
3629 case CudaArch::SM_60:
3630 case CudaArch::SM_61:
3631 case CudaArch::SM_62:
3632 case CudaArch::SM_70:
3633 case CudaArch::SM_72:
3634 case CudaArch::SM_75:
3635 case CudaArch::SM_80:
3636 case CudaArch::SM_86:
3637 case CudaArch::GFX600:
3638 case CudaArch::GFX601:
3639 case CudaArch::GFX602:
3640 case CudaArch::GFX700:
3641 case CudaArch::GFX701:
3642 case CudaArch::GFX702:
3643 case CudaArch::GFX703:
3644 case CudaArch::GFX704:
3645 case CudaArch::GFX705:
3646 case CudaArch::GFX801:
3647 case CudaArch::GFX802:
3648 case CudaArch::GFX803:
3649 case CudaArch::GFX805:
3650 case CudaArch::GFX810:
3651 case CudaArch::GFX900:
3652 case CudaArch::GFX902:
3653 case CudaArch::GFX904:
3654 case CudaArch::GFX906:
3655 case CudaArch::GFX908:
3656 case CudaArch::GFX909:
3657 case CudaArch::GFX90a:
3658 case CudaArch::GFX90c:
3659 case CudaArch::GFX940:
3660 case CudaArch::GFX1010:
3661 case CudaArch::GFX1011:
3662 case CudaArch::GFX1012:
3663 case CudaArch::GFX1013:
3664 case CudaArch::GFX1030:
3665 case CudaArch::GFX1031:
3666 case CudaArch::GFX1032:
3667 case CudaArch::GFX1033:
3668 case CudaArch::GFX1034:
3669 case CudaArch::GFX1035:
3670 case CudaArch::GFX1036:
3671 case CudaArch::GFX1100:
3672 case CudaArch::GFX1101:
3673 case CudaArch::GFX1102:
3674 case CudaArch::GFX1103:
3675 case CudaArch::Generic:
3676 case CudaArch::UNUSED:
3677 case CudaArch::UNKNOWN:
3678 break;
3679 case CudaArch::LAST:
3680      llvm_unreachable("Unexpected Cuda arch.");
3681 }
3682 }
3683 }
3684 CGOpenMPRuntime::processRequiresDirective(D);
3685}
3686
3687void CGOpenMPRuntimeGPU::clear() {
3688
3689 if (!TeamsReductions.empty()) {
3690 ASTContext &C = CGM.getContext();
3691 RecordDecl *StaticRD = C.buildImplicitRecord(
3692 "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
3693 StaticRD->startDefinition();
3694 for (const RecordDecl *TeamReductionRec : TeamsReductions) {
3695 QualType RecTy = C.getRecordType(TeamReductionRec);
3696 auto *Field = FieldDecl::Create(
3697 C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
3698 C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
3699 /*BW=*/nullptr, /*Mutable=*/false,
3700 /*InitStyle=*/ICIS_NoInit);
3701 Field->setAccess(AS_public);
3702 StaticRD->addDecl(Field);
3703 }
3704 StaticRD->completeDefinition();
3705 QualType StaticTy = C.getRecordType(StaticRD);
3706 llvm::Type *LLVMReductionsBufferTy =
3707 CGM.getTypes().ConvertTypeForMem(StaticTy);
3708    // FIXME: nvlink does not handle weak linkage correctly (objects with
3709    // different sizes are reported as erroneous).
3710 // Restore CommonLinkage as soon as nvlink is fixed.
3711 auto *GV = new llvm::GlobalVariable(
3712 CGM.getModule(), LLVMReductionsBufferTy,
3713 /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
3714 llvm::Constant::getNullValue(LLVMReductionsBufferTy),
3715 "_openmp_teams_reductions_buffer_$_");
3716 KernelTeamsReductionPtr->setInitializer(
3717 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
3718 CGM.VoidPtrTy));
3719 }
3720 CGOpenMPRuntime::clear();
3721}
3722
3723llvm::Value *CGOpenMPRuntimeGPU::getGPUNumThreads(CodeGenFunction &CGF) {
3724 CGBuilderTy &Bld = CGF.Builder;
3725 llvm::Module *M = &CGF.CGM.getModule();
3726 const char *LocSize = "__kmpc_get_hardware_num_threads_in_block";
3727 llvm::Function *F = M->getFunction(LocSize);
3728 if (!F) {
3729 F = llvm::Function::Create(
3730 llvm::FunctionType::get(CGF.Int32Ty, llvm::None, false),
3731 llvm::GlobalVariable::ExternalLinkage, LocSize, &CGF.CGM.getModule());
3732 }
3733 return Bld.CreateCall(F, llvm::None, "nvptx_num_threads");
3734}
3735
3736llvm::Value *CGOpenMPRuntimeGPU::getGPUThreadID(CodeGenFunction &CGF) {
3737 ArrayRef<llvm::Value *> Args{};
3738 return CGF.EmitRuntimeCall(
3739 OMPBuilder.getOrCreateRuntimeFunction(
3740 CGM.getModule(), OMPRTL___kmpc_get_hardware_thread_id_in_block),
3741 Args);
3742}
3743
3744llvm::Value *CGOpenMPRuntimeGPU::getGPUWarpSize(CodeGenFunction &CGF) {
3745 ArrayRef<llvm::Value *> Args{};
3746 return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
3747 CGM.getModule(), OMPRTL___kmpc_get_warp_size),
3748 Args);
3749}