Bug Summary

File: clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
Warning: line 1437, column 5
Value stored to 'Size' is never read
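
This warning class comes from the analyzer's dead-store checks (the 'deadcode' checkers enabled in the invocation below): a value is stored to a variable but never read before the variable is overwritten or goes out of scope. A minimal sketch of the pattern, using hypothetical code rather than the flagged source:

  int example(int N) {
    int Size = N;          // fine: this value is read on the next line
    int Result = Size + 1;
    Size = 2 * N;          // flagged: "Value stored to 'Size' is never read"
    return Result;
  }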

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGOpenMPRuntimeGPU.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D CLANG_ROUND_TRIP_CC1_ARGS=ON -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/clang/include -I tools/clang/include -I include -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -ferror-limit 19 -fvisibility-inlines-hidden -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-26-234817-15343-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
1//===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides a generalized class for OpenMP runtime code generation
10// specialized by GPU targets NVPTX and AMDGCN.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGOpenMPRuntimeGPU.h"
15#include "CGOpenMPRuntimeNVPTX.h"
16#include "CodeGenFunction.h"
17#include "clang/AST/Attr.h"
18#include "clang/AST/DeclOpenMP.h"
19#include "clang/AST/StmtOpenMP.h"
20#include "clang/AST/StmtVisitor.h"
21#include "clang/Basic/Cuda.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/Frontend/OpenMP/OMPGridValues.h"
24#include "llvm/IR/IntrinsicsNVPTX.h"
25#include "llvm/Support/MathExtras.h"
26
27using namespace clang;
28using namespace CodeGen;
29using namespace llvm::omp;
30
31namespace {
32/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
33class NVPTXActionTy final : public PrePostActionTy {
34 llvm::FunctionCallee EnterCallee = nullptr;
35 ArrayRef<llvm::Value *> EnterArgs;
36 llvm::FunctionCallee ExitCallee = nullptr;
37 ArrayRef<llvm::Value *> ExitArgs;
38 bool Conditional = false;
39 llvm::BasicBlock *ContBlock = nullptr;
40
41public:
42 NVPTXActionTy(llvm::FunctionCallee EnterCallee,
43 ArrayRef<llvm::Value *> EnterArgs,
44 llvm::FunctionCallee ExitCallee,
45 ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
46 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
47 ExitArgs(ExitArgs), Conditional(Conditional) {}
48 void Enter(CodeGenFunction &CGF) override {
49 llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
50 if (Conditional) {
51 llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
52 auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
53 ContBlock = CGF.createBasicBlock("omp_if.end");
54 // Generate the branch (If-stmt)
55 CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
56 CGF.EmitBlock(ThenBlock);
57 }
58 }
59 void Done(CodeGenFunction &CGF) {
60 // Emit the rest of blocks/branches
61 CGF.EmitBranch(ContBlock);
62 CGF.EmitBlock(ContBlock, true);
63 }
64 void Exit(CodeGenFunction &CGF) override {
65 CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
66 }
67};
68
69/// A class to track the execution mode when codegening directives within
70/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
71/// to the target region and is used by the directives it contains, such as
72/// 'parallel', to emit optimized code.
73class ExecutionRuntimeModesRAII {
74private:
75 CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
76 CGOpenMPRuntimeGPU::EM_Unknown;
77 CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
78 bool SavedRuntimeMode = false;
79 bool *RuntimeMode = nullptr;
80
81public:
82 /// Constructor for Non-SPMD mode.
83 ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
84 : ExecMode(ExecMode) {
85 SavedExecMode = ExecMode;
86 ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
87 }
88 /// Constructor for SPMD mode.
89 ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
90 bool &RuntimeMode, bool FullRuntimeMode)
91 : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
92 SavedExecMode = ExecMode;
93 SavedRuntimeMode = RuntimeMode;
94 ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
95 RuntimeMode = FullRuntimeMode;
96 }
97 ~ExecutionRuntimeModesRAII() {
98 ExecMode = SavedExecMode;
99 if (RuntimeMode)
100 *RuntimeMode = SavedRuntimeMode;
101 }
102};
103
104/// GPU Configuration: This information can be derived from CUDA registers;
105/// however, providing compile-time constants helps generate more efficient
106/// code. For all practical purposes this is fine because the configuration
107/// is the same for all known NVPTX architectures.
108enum MachineConfiguration : unsigned {
109 /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
110 /// specific Grid Values like GV_Warp_Size, GV_Slot_Size
111
112 /// Global memory alignment for performance.
113 GlobalMemoryAlignment = 128,
114
115 /// Maximal size of the shared memory buffer.
116 SharedMemorySize = 128,
117};
118
119static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
120 RefExpr = RefExpr->IgnoreParens();
121 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
122 const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
123 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
124 Base = TempASE->getBase()->IgnoreParenImpCasts();
125 RefExpr = Base;
126 } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
127 const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
128 while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
129 Base = TempOASE->getBase()->IgnoreParenImpCasts();
130 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
131 Base = TempASE->getBase()->IgnoreParenImpCasts();
132 RefExpr = Base;
133 }
134 RefExpr = RefExpr->IgnoreParenImpCasts();
135 if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
136 return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
137 const auto *ME = cast<MemberExpr>(RefExpr);
138 return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
139}
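
As a sketch of what this stripping recovers (placeholder names):

  // a[0][i]      -> ArraySubscriptExprs peeled off, returns the decl of 'a'
  // a[0:n][0:m]  -> OMPArraySectionExprs peeled off, returns the decl of 'a'
  // otherwise    -> the DeclRefExpr's or MemberExpr's canonical decl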
140
141
142static RecordDecl *buildRecordForGlobalizedVars(
143 ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
144 ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
145 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
146 &MappedDeclsFields, int BufSize) {
147 using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
148 if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
149 return nullptr;
150 SmallVector<VarsDataTy, 4> GlobalizedVars;
151 for (const ValueDecl *D : EscapedDecls)
152 GlobalizedVars.emplace_back(
153 CharUnits::fromQuantity(std::max(
154 C.getDeclAlign(D).getQuantity(),
155 static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
156 D);
157 for (const ValueDecl *D : EscapedDeclsForTeams)
158 GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
159 llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
160 return L.first > R.first;
161 });
162
163 // Build struct _globalized_locals_ty {
164 // /* globalized vars */[WarpSize] align (max(decl_align,
165 // GlobalMemoryAlignment))
166 // /* globalized vars */ for EscapedDeclsForTeams
167 // };
168 RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
169 GlobalizedRD->startDefinition();
170 llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
171 EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
172 for (const auto &Pair : GlobalizedVars) {
173 const ValueDecl *VD = Pair.second;
174 QualType Type = VD->getType();
175 if (Type->isLValueReferenceType())
176 Type = C.getPointerType(Type.getNonReferenceType());
177 else
178 Type = Type.getNonReferenceType();
179 SourceLocation Loc = VD->getLocation();
180 FieldDecl *Field;
181 if (SingleEscaped.count(VD)) {
182 Field = FieldDecl::Create(
183 C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
184 C.getTrivialTypeSourceInfo(Type, SourceLocation()),
185 /*BW=*/nullptr, /*Mutable=*/false,
186 /*InitStyle=*/ICIS_NoInit);
187 Field->setAccess(AS_public);
188 if (VD->hasAttrs()) {
189 for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
190 E(VD->getAttrs().end());
191 I != E; ++I)
192 Field->addAttr(*I);
193 }
194 } else {
195 llvm::APInt ArraySize(32, BufSize);
196 Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
197 0);
198 Field = FieldDecl::Create(
199 C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
200 C.getTrivialTypeSourceInfo(Type, SourceLocation()),
201 /*BW=*/nullptr, /*Mutable=*/false,
202 /*InitStyle=*/ICIS_NoInit);
203 Field->setAccess(AS_public);
204 llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
205 static_cast<CharUnits::QuantityType>(
206 GlobalMemoryAlignment)));
207 Field->addAttr(AlignedAttr::CreateImplicit(
208 C, /*IsAlignmentExpr=*/true,
209 IntegerLiteral::Create(C, Align,
210 C.getIntTypeForBitwidth(32, /*Signed=*/0),
211 SourceLocation()),
212 {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
213 }
214 GlobalizedRD->addDecl(Field);
215 MappedDeclsFields.try_emplace(VD, Field);
216 }
217 GlobalizedRD->completeDefinition();
218 return GlobalizedRD;
219}
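
For illustration, assuming EscapedDecls = {double D; int I}, BufSize equal to the warp size (32), and GlobalMemoryAlignment = 128, the record built here is roughly equivalent to:

  struct _globalized_locals_ty {
    double D[32] __attribute__((aligned(128)));
    int I[32] __attribute__((aligned(128)));
  };

Entries from EscapedDeclsForTeams are instead emitted as scalar fields that keep the declaration's own alignment.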
220
221/// Get the list of variables that can escape their declaration context.
222class CheckVarsEscapingDeclContext final
223 : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
224 CodeGenFunction &CGF;
225 llvm::SetVector<const ValueDecl *> EscapedDecls;
226 llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
227 llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
228 RecordDecl *GlobalizedRD = nullptr;
229 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
230 bool AllEscaped = false;
231 bool IsForCombinedParallelRegion = false;
232
233 void markAsEscaped(const ValueDecl *VD) {
234 // Do not globalize declare target variables.
235 if (!isa<VarDecl>(VD) ||
236 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
237 return;
238 VD = cast<ValueDecl>(VD->getCanonicalDecl());
239 // Use user-specified allocation.
240 if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
241 return;
242 // Variables captured by value must be globalized.
243 if (auto *CSI = CGF.CapturedStmtInfo) {
244 if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
245 // Check if we need to capture the variable that was already captured
246 // by value in the outer region.
247 if (!IsForCombinedParallelRegion) {
248 if (!FD->hasAttrs())
249 return;
250 const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
251 if (!Attr)
252 return;
253 if (((Attr->getCaptureKind() != OMPC_map) &&
254 !isOpenMPPrivate(Attr->getCaptureKind())) ||
255 ((Attr->getCaptureKind() == OMPC_map) &&
256 !FD->getType()->isAnyPointerType()))
257 return;
258 }
259 if (!FD->getType()->isReferenceType()) {
260 assert(!VD->getType()->isVariablyModifiedType() &&
261 "Parameter captured by value with variably modified type");
262 EscapedParameters.insert(VD);
263 } else if (!IsForCombinedParallelRegion) {
264 return;
265 }
266 }
267 }
268 if ((!CGF.CapturedStmtInfo ||
269 (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
270 VD->getType()->isReferenceType())
271 // Do not globalize variables with reference type.
272 return;
273 if (VD->getType()->isVariablyModifiedType())
274 EscapedVariableLengthDecls.insert(VD);
275 else
276 EscapedDecls.insert(VD);
277 }
278
279 void VisitValueDecl(const ValueDecl *VD) {
280 if (VD->getType()->isLValueReferenceType())
281 markAsEscaped(VD);
282 if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
283 if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
284 const bool SavedAllEscaped = AllEscaped;
285 AllEscaped = VD->getType()->isLValueReferenceType();
286 Visit(VarD->getInit());
287 AllEscaped = SavedAllEscaped;
288 }
289 }
290 }
291 void VisitOpenMPCapturedStmt(const CapturedStmt *S,
292 ArrayRef<OMPClause *> Clauses,
293 bool IsCombinedParallelRegion) {
294 if (!S)
295 return;
296 for (const CapturedStmt::Capture &C : S->captures()) {
297 if (C.capturesVariable() && !C.capturesVariableByCopy()) {
298 const ValueDecl *VD = C.getCapturedVar();
299 bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
300 if (IsCombinedParallelRegion) {
301 // Check if the variable is privatized in the combined construct and
302 // whether its private copies must be shared in the inner parallel
303 // directive.
304 IsForCombinedParallelRegion = false;
305 for (const OMPClause *C : Clauses) {
306 if (!isOpenMPPrivate(C->getClauseKind()) ||
307 C->getClauseKind() == OMPC_reduction ||
308 C->getClauseKind() == OMPC_linear ||
309 C->getClauseKind() == OMPC_private)
310 continue;
311 ArrayRef<const Expr *> Vars;
312 if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
313 Vars = PC->getVarRefs();
314 else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
315 Vars = PC->getVarRefs();
316 else
317 llvm_unreachable("Unexpected clause.");
318 for (const auto *E : Vars) {
319 const Decl *D =
320 cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
321 if (D == VD->getCanonicalDecl()) {
322 IsForCombinedParallelRegion = true;
323 break;
324 }
325 }
326 if (IsForCombinedParallelRegion)
327 break;
328 }
329 }
330 markAsEscaped(VD);
331 if (isa<OMPCapturedExprDecl>(VD))
332 VisitValueDecl(VD);
333 IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
334 }
335 }
336 }
337
338 void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
339 assert(!GlobalizedRD &&
340 "Record for globalized variables is built already.");
341 ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
342 unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
343 if (IsInTTDRegion)
344 EscapedDeclsForTeams = EscapedDecls.getArrayRef();
345 else
346 EscapedDeclsForParallel = EscapedDecls.getArrayRef();
347 GlobalizedRD = ::buildRecordForGlobalizedVars(
348 CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
349 MappedDeclsFields, WarpSize);
350 }
351
352public:
353 CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
354 ArrayRef<const ValueDecl *> TeamsReductions)
355 : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
356 }
357 virtual ~CheckVarsEscapingDeclContext() = default;
358 void VisitDeclStmt(const DeclStmt *S) {
359 if (!S)
360 return;
361 for (const Decl *D : S->decls())
362 if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
363 VisitValueDecl(VD);
364 }
365 void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
366 if (!D)
367 return;
368 if (!D->hasAssociatedStmt())
369 return;
370 if (const auto *S =
371 dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
372 // Do not analyze directives that do not actually require capturing,
373 // like `omp for` or `omp simd` directives.
374 llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
375 getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
376 if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
377 VisitStmt(S->getCapturedStmt());
378 return;
379 }
380 VisitOpenMPCapturedStmt(
381 S, D->clauses(),
382 CaptureRegions.back() == OMPD_parallel &&
383 isOpenMPDistributeDirective(D->getDirectiveKind()));
384 }
385 }
386 void VisitCapturedStmt(const CapturedStmt *S) {
387 if (!S)
388 return;
389 for (const CapturedStmt::Capture &C : S->captures()) {
390 if (C.capturesVariable() && !C.capturesVariableByCopy()) {
391 const ValueDecl *VD = C.getCapturedVar();
392 markAsEscaped(VD);
393 if (isa<OMPCapturedExprDecl>(VD))
394 VisitValueDecl(VD);
395 }
396 }
397 }
398 void VisitLambdaExpr(const LambdaExpr *E) {
399 if (!E)
400 return;
401 for (const LambdaCapture &C : E->captures()) {
402 if (C.capturesVariable()) {
403 if (C.getCaptureKind() == LCK_ByRef) {
404 const ValueDecl *VD = C.getCapturedVar();
405 markAsEscaped(VD);
406 if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
407 VisitValueDecl(VD);
408 }
409 }
410 }
411 }
412 void VisitBlockExpr(const BlockExpr *E) {
413 if (!E)
414 return;
415 for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
416 if (C.isByRef()) {
417 const VarDecl *VD = C.getVariable();
418 markAsEscaped(VD);
419 if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
420 VisitValueDecl(VD);
421 }
422 }
423 }
424 void VisitCallExpr(const CallExpr *E) {
425 if (!E)
426 return;
427 for (const Expr *Arg : E->arguments()) {
428 if (!Arg)
429 continue;
430 if (Arg->isLValue()) {
431 const bool SavedAllEscaped = AllEscaped;
432 AllEscaped = true;
433 Visit(Arg);
434 AllEscaped = SavedAllEscaped;
435 } else {
436 Visit(Arg);
437 }
438 }
439 Visit(E->getCallee());
440 }
441 void VisitDeclRefExpr(const DeclRefExpr *E) {
442 if (!E)
443 return;
444 const ValueDecl *VD = E->getDecl();
445 if (AllEscaped)
446 markAsEscaped(VD);
447 if (isa<OMPCapturedExprDecl>(VD))
448 VisitValueDecl(VD);
449 else if (const auto *VarD = dyn_cast<VarDecl>(VD))
450 if (VarD->isInitCapture())
451 VisitValueDecl(VD);
452 }
453 void VisitUnaryOperator(const UnaryOperator *E) {
454 if (!E)
455 return;
456 if (E->getOpcode() == UO_AddrOf) {
457 const bool SavedAllEscaped = AllEscaped;
458 AllEscaped = true;
459 Visit(E->getSubExpr());
460 AllEscaped = SavedAllEscaped;
461 } else {
462 Visit(E->getSubExpr());
463 }
464 }
465 void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
466 if (!E)
467 return;
468 if (E->getCastKind() == CK_ArrayToPointerDecay) {
469 const bool SavedAllEscaped = AllEscaped;
470 AllEscaped = true;
471 Visit(E->getSubExpr());
472 AllEscaped = SavedAllEscaped;
473 } else {
474 Visit(E->getSubExpr());
475 }
476 }
477 void VisitExpr(const Expr *E) {
478 if (!E)
479 return;
480 bool SavedAllEscaped = AllEscaped;
481 if (!E->isLValue())
482 AllEscaped = false;
483 for (const Stmt *Child : E->children())
484 if (Child)
485 Visit(Child);
486 AllEscaped = SavedAllEscaped;
487 }
488 void VisitStmt(const Stmt *S) {
489 if (!S)
490 return;
491 for (const Stmt *Child : S->children())
492 if (Child)
493 Visit(Child);
494 }
495
496 /// Returns the record that handles all the escaped local variables and is
497 /// used instead of their original storage.
498 const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
499 if (!GlobalizedRD)
500 buildRecordForGlobalizedVars(IsInTTDRegion);
501 return GlobalizedRD;
502 }
503
504 /// Returns the field in the globalized record for the escaped variable.
505 const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
506 assert(GlobalizedRD &&
507 "Record for globalized variables must be generated already.");
508 auto I = MappedDeclsFields.find(VD);
509 if (I == MappedDeclsFields.end())
510 return nullptr;
511 return I->getSecond();
512 }
513
514 /// Returns the list of the escaped local variables/parameters.
515 ArrayRef<const ValueDecl *> getEscapedDecls() const {
516 return EscapedDecls.getArrayRef();
517 }
518
519 /// Checks if the escaped local variable is actually a parameter passed by
520 /// value.
521 const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
522 return EscapedParameters;
523 }
524
525 /// Returns the list of the escaped variables with the variably modified
526 /// types.
527 ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
528 return EscapedVariableLengthDecls.getArrayRef();
529 }
530};
531} // anonymous namespace
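
A sketch of what the visitor above flags, with placeholder code: a local variable referenced from a nested parallel region is captured by reference there, so it escapes its declaration context and must be globalized.

  #pragma omp target
  {
    int X = 0;
    #pragma omp parallel
    X += 1;        // 'X' escapes into the parallel region -> markAsEscaped
  }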
532
533/// Get the id of the warp in the block.
534/// We assume that the warp size is 32, which is always the case
535/// on the NVPTX device, to generate more efficient code.
536static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
537 CGBuilderTy &Bld = CGF.Builder;
538 unsigned LaneIDBits =
539 llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
540 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
541 return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
542}
543
544/// Get the id of the current lane in the Warp.
545/// We assume that the warp size is 32, which is always the case
546/// on the NVPTX device, to generate more efficient code.
547static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
548 CGBuilderTy &Bld = CGF.Builder;
549 unsigned LaneIDBits =
550 llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
551 unsigned LaneIDMask = ~0u >> (32u - LaneIDBits);
552 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
553 return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
554 "nvptx_lane_id");
555}
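
Worked example of the bit arithmetic above, assuming GV_Warp_Size == 32: LaneIDBits = log2(32) = 5 and LaneIDMask = ~0u >> 27 = 0x1f, so thread id 70 lands in warp 70 >> 5 = 2 at lane 70 & 0x1f = 6. The equivalent scalar computation:

  unsigned getWarpID(unsigned Tid) { return Tid >> 5; }   // Tid / 32
  unsigned getLaneID(unsigned Tid) { return Tid & 0x1f; } // Tid % 32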
556
557CGOpenMPRuntimeGPU::ExecutionMode
558CGOpenMPRuntimeGPU::getExecutionMode() const {
559 return CurrentExecutionMode;
560}
561
562static CGOpenMPRuntimeGPU::DataSharingMode
563getDataSharingMode(CodeGenModule &CGM) {
564 return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
565 : CGOpenMPRuntimeGPU::Generic;
566}
567
568/// Check for inner (nested) SPMD construct, if any
569static bool hasNestedSPMDDirective(ASTContext &Ctx,
570 const OMPExecutableDirective &D) {
571 const auto *CS = D.getInnermostCapturedStmt();
572 const auto *Body =
573 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
574 const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
575
576 if (const auto *NestedDir =
577 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
578 OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
579 switch (D.getDirectiveKind()) {
580 case OMPD_target:
581 if (isOpenMPParallelDirective(DKind))
582 return true;
583 if (DKind == OMPD_teams) {
584 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
585 /*IgnoreCaptured=*/true);
586 if (!Body)
587 return false;
588 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
589 if (const auto *NND =
590 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
591 DKind = NND->getDirectiveKind();
592 if (isOpenMPParallelDirective(DKind))
593 return true;
594 }
595 }
596 return false;
597 case OMPD_target_teams:
598 return isOpenMPParallelDirective(DKind);
599 case OMPD_target_simd:
600 case OMPD_target_parallel:
601 case OMPD_target_parallel_for:
602 case OMPD_target_parallel_for_simd:
603 case OMPD_target_teams_distribute:
604 case OMPD_target_teams_distribute_simd:
605 case OMPD_target_teams_distribute_parallel_for:
606 case OMPD_target_teams_distribute_parallel_for_simd:
607 case OMPD_parallel:
608 case OMPD_for:
609 case OMPD_parallel_for:
610 case OMPD_parallel_master:
611 case OMPD_parallel_sections:
612 case OMPD_for_simd:
613 case OMPD_parallel_for_simd:
614 case OMPD_cancel:
615 case OMPD_cancellation_point:
616 case OMPD_ordered:
617 case OMPD_threadprivate:
618 case OMPD_allocate:
619 case OMPD_task:
620 case OMPD_simd:
621 case OMPD_sections:
622 case OMPD_section:
623 case OMPD_single:
624 case OMPD_master:
625 case OMPD_critical:
626 case OMPD_taskyield:
627 case OMPD_barrier:
628 case OMPD_taskwait:
629 case OMPD_taskgroup:
630 case OMPD_atomic:
631 case OMPD_flush:
632 case OMPD_depobj:
633 case OMPD_scan:
634 case OMPD_teams:
635 case OMPD_target_data:
636 case OMPD_target_exit_data:
637 case OMPD_target_enter_data:
638 case OMPD_distribute:
639 case OMPD_distribute_simd:
640 case OMPD_distribute_parallel_for:
641 case OMPD_distribute_parallel_for_simd:
642 case OMPD_teams_distribute:
643 case OMPD_teams_distribute_simd:
644 case OMPD_teams_distribute_parallel_for:
645 case OMPD_teams_distribute_parallel_for_simd:
646 case OMPD_target_update:
647 case OMPD_declare_simd:
648 case OMPD_declare_variant:
649 case OMPD_begin_declare_variant:
650 case OMPD_end_declare_variant:
651 case OMPD_declare_target:
652 case OMPD_end_declare_target:
653 case OMPD_declare_reduction:
654 case OMPD_declare_mapper:
655 case OMPD_taskloop:
656 case OMPD_taskloop_simd:
657 case OMPD_master_taskloop:
658 case OMPD_master_taskloop_simd:
659 case OMPD_parallel_master_taskloop:
660 case OMPD_parallel_master_taskloop_simd:
661 case OMPD_requires:
662 case OMPD_unknown:
663 default:
664 llvm_unreachable("Unexpected directive.");
665 }
666 }
667
668 return false;
669}
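
For example, a target region whose body is a single nested parallel construct qualifies ('N' and 'body' are placeholders):

  #pragma omp target
  #pragma omp teams
  #pragma omp distribute parallel for
  for (int i = 0; i < N; ++i)
    body(i);

The OMPD_target case sees the nested 'teams' child, looks through it, finds the 'distribute parallel for', and returns true.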
670
671static bool supportsSPMDExecutionMode(ASTContext &Ctx,
672 const OMPExecutableDirective &D) {
673 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
674 switch (DirectiveKind) {
675 case OMPD_target:
676 case OMPD_target_teams:
677 return hasNestedSPMDDirective(Ctx, D);
678 case OMPD_target_parallel:
679 case OMPD_target_parallel_for:
680 case OMPD_target_parallel_for_simd:
681 case OMPD_target_teams_distribute_parallel_for:
682 case OMPD_target_teams_distribute_parallel_for_simd:
683 case OMPD_target_simd:
684 case OMPD_target_teams_distribute_simd:
685 return true;
686 case OMPD_target_teams_distribute:
687 return false;
688 case OMPD_parallel:
689 case OMPD_for:
690 case OMPD_parallel_for:
691 case OMPD_parallel_master:
692 case OMPD_parallel_sections:
693 case OMPD_for_simd:
694 case OMPD_parallel_for_simd:
695 case OMPD_cancel:
696 case OMPD_cancellation_point:
697 case OMPD_ordered:
698 case OMPD_threadprivate:
699 case OMPD_allocate:
700 case OMPD_task:
701 case OMPD_simd:
702 case OMPD_sections:
703 case OMPD_section:
704 case OMPD_single:
705 case OMPD_master:
706 case OMPD_critical:
707 case OMPD_taskyield:
708 case OMPD_barrier:
709 case OMPD_taskwait:
710 case OMPD_taskgroup:
711 case OMPD_atomic:
712 case OMPD_flush:
713 case OMPD_depobj:
714 case OMPD_scan:
715 case OMPD_teams:
716 case OMPD_target_data:
717 case OMPD_target_exit_data:
718 case OMPD_target_enter_data:
719 case OMPD_distribute:
720 case OMPD_distribute_simd:
721 case OMPD_distribute_parallel_for:
722 case OMPD_distribute_parallel_for_simd:
723 case OMPD_teams_distribute:
724 case OMPD_teams_distribute_simd:
725 case OMPD_teams_distribute_parallel_for:
726 case OMPD_teams_distribute_parallel_for_simd:
727 case OMPD_target_update:
728 case OMPD_declare_simd:
729 case OMPD_declare_variant:
730 case OMPD_begin_declare_variant:
731 case OMPD_end_declare_variant:
732 case OMPD_declare_target:
733 case OMPD_end_declare_target:
734 case OMPD_declare_reduction:
735 case OMPD_declare_mapper:
736 case OMPD_taskloop:
737 case OMPD_taskloop_simd:
738 case OMPD_master_taskloop:
739 case OMPD_master_taskloop_simd:
740 case OMPD_parallel_master_taskloop:
741 case OMPD_parallel_master_taskloop_simd:
742 case OMPD_requires:
743 case OMPD_unknown:
744 default:
745 break;
746 }
747 llvm_unreachable(
748 "Unknown programming model for OpenMP directive on NVPTX target.");
749}
750
751/// Check if the directive is loop-based and either has no schedule clause at
752/// all or has static scheduling.
753static bool hasStaticScheduling(const OMPExecutableDirective &D) {
754 assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
755 isOpenMPLoopDirective(D.getDirectiveKind()) &&
756 "Expected loop-based directive.");
757 return !D.hasClausesOfKind<OMPOrderedClause>() &&
758 (!D.hasClausesOfKind<OMPScheduleClause>() ||
759 llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
760 [](const OMPScheduleClause *C) {
761 return C->getScheduleKind() == OMPC_SCHEDULE_static;
762 }));
763}
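
For instance ('a' and 'N' are placeholders), the first loop below has static scheduling in this sense while the second does not:

  #pragma omp target parallel for            // no schedule clause: static
  for (int i = 0; i < N; ++i) a[i] = i;

  #pragma omp target parallel for schedule(dynamic)
  for (int i = 0; i < N; ++i) a[i] = i;      // dynamic: returns false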
764
765/// Check for inner (nested) lightweight runtime construct, if any
766static bool hasNestedLightweightDirective(ASTContext &Ctx,
767 const OMPExecutableDirective &D) {
768 assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
769 const auto *CS = D.getInnermostCapturedStmt();
770 const auto *Body =
771 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
772 const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
773
774 if (const auto *NestedDir =
775 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
776 OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
777 switch (D.getDirectiveKind()) {
778 case OMPD_target:
779 if (isOpenMPParallelDirective(DKind) &&
780 isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
781 hasStaticScheduling(*NestedDir))
782 return true;
783 if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
784 return true;
785 if (DKind == OMPD_parallel) {
786 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
787 /*IgnoreCaptured=*/true);
788 if (!Body)
789 return false;
790 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
791 if (const auto *NND =
792 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
793 DKind = NND->getDirectiveKind();
794 if (isOpenMPWorksharingDirective(DKind) &&
795 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
796 return true;
797 }
798 } else if (DKind == OMPD_teams) {
799 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
800 /*IgnoreCaptured=*/true);
801 if (!Body)
802 return false;
803 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
804 if (const auto *NND =
805 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
806 DKind = NND->getDirectiveKind();
807 if (isOpenMPParallelDirective(DKind) &&
808 isOpenMPWorksharingDirective(DKind) &&
809 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
810 return true;
811 if (DKind == OMPD_parallel) {
812 Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
813 /*IgnoreCaptured=*/true);
814 if (!Body)
815 return false;
816 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
817 if (const auto *NND =
818 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
819 DKind = NND->getDirectiveKind();
820 if (isOpenMPWorksharingDirective(DKind) &&
821 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
822 return true;
823 }
824 }
825 }
826 }
827 return false;
828 case OMPD_target_teams:
829 if (isOpenMPParallelDirective(DKind) &&
830 isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
831 hasStaticScheduling(*NestedDir))
832 return true;
833 if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
834 return true;
835 if (DKind == OMPD_parallel) {
836 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
837 /*IgnoreCaptured=*/true);
838 if (!Body)
839 return false;
840 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
841 if (const auto *NND =
842 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
843 DKind = NND->getDirectiveKind();
844 if (isOpenMPWorksharingDirective(DKind) &&
845 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
846 return true;
847 }
848 }
849 return false;
850 case OMPD_target_parallel:
851 if (DKind == OMPD_simd)
852 return true;
853 return isOpenMPWorksharingDirective(DKind) &&
854 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
855 case OMPD_target_teams_distribute:
856 case OMPD_target_simd:
857 case OMPD_target_parallel_for:
858 case OMPD_target_parallel_for_simd:
859 case OMPD_target_teams_distribute_simd:
860 case OMPD_target_teams_distribute_parallel_for:
861 case OMPD_target_teams_distribute_parallel_for_simd:
862 case OMPD_parallel:
863 case OMPD_for:
864 case OMPD_parallel_for:
865 case OMPD_parallel_master:
866 case OMPD_parallel_sections:
867 case OMPD_for_simd:
868 case OMPD_parallel_for_simd:
869 case OMPD_cancel:
870 case OMPD_cancellation_point:
871 case OMPD_ordered:
872 case OMPD_threadprivate:
873 case OMPD_allocate:
874 case OMPD_task:
875 case OMPD_simd:
876 case OMPD_sections:
877 case OMPD_section:
878 case OMPD_single:
879 case OMPD_master:
880 case OMPD_critical:
881 case OMPD_taskyield:
882 case OMPD_barrier:
883 case OMPD_taskwait:
884 case OMPD_taskgroup:
885 case OMPD_atomic:
886 case OMPD_flush:
887 case OMPD_depobj:
888 case OMPD_scan:
889 case OMPD_teams:
890 case OMPD_target_data:
891 case OMPD_target_exit_data:
892 case OMPD_target_enter_data:
893 case OMPD_distribute:
894 case OMPD_distribute_simd:
895 case OMPD_distribute_parallel_for:
896 case OMPD_distribute_parallel_for_simd:
897 case OMPD_teams_distribute:
898 case OMPD_teams_distribute_simd:
899 case OMPD_teams_distribute_parallel_for:
900 case OMPD_teams_distribute_parallel_for_simd:
901 case OMPD_target_update:
902 case OMPD_declare_simd:
903 case OMPD_declare_variant:
904 case OMPD_begin_declare_variant:
905 case OMPD_end_declare_variant:
906 case OMPD_declare_target:
907 case OMPD_end_declare_target:
908 case OMPD_declare_reduction:
909 case OMPD_declare_mapper:
910 case OMPD_taskloop:
911 case OMPD_taskloop_simd:
912 case OMPD_master_taskloop:
913 case OMPD_master_taskloop_simd:
914 case OMPD_parallel_master_taskloop:
915 case OMPD_parallel_master_taskloop_simd:
916 case OMPD_requires:
917 case OMPD_unknown:
918 default:
919 llvm_unreachable("Unexpected directive.");
920 }
921 }
922
923 return false;
924}
925
926/// Checks if the construct supports lightweight runtime. It must be SPMD
927/// construct + inner loop-based construct with static scheduling.
928static bool supportsLightweightRuntime(ASTContext &Ctx,
929 const OMPExecutableDirective &D) {
930 if (!supportsSPMDExecutionMode(Ctx, D))
931 return false;
932 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
933 switch (DirectiveKind) {
934 case OMPD_target:
935 case OMPD_target_teams:
936 case OMPD_target_parallel:
937 return hasNestedLightweightDirective(Ctx, D);
938 case OMPD_target_parallel_for:
939 case OMPD_target_parallel_for_simd:
940 case OMPD_target_teams_distribute_parallel_for:
941 case OMPD_target_teams_distribute_parallel_for_simd:
942 // (Last|First)-privates must be shared in parallel region.
943 return hasStaticScheduling(D);
944 case OMPD_target_simd:
945 case OMPD_target_teams_distribute_simd:
946 return true;
947 case OMPD_target_teams_distribute:
948 return false;
949 case OMPD_parallel:
950 case OMPD_for:
951 case OMPD_parallel_for:
952 case OMPD_parallel_master:
953 case OMPD_parallel_sections:
954 case OMPD_for_simd:
955 case OMPD_parallel_for_simd:
956 case OMPD_cancel:
957 case OMPD_cancellation_point:
958 case OMPD_ordered:
959 case OMPD_threadprivate:
960 case OMPD_allocate:
961 case OMPD_task:
962 case OMPD_simd:
963 case OMPD_sections:
964 case OMPD_section:
965 case OMPD_single:
966 case OMPD_master:
967 case OMPD_critical:
968 case OMPD_taskyield:
969 case OMPD_barrier:
970 case OMPD_taskwait:
971 case OMPD_taskgroup:
972 case OMPD_atomic:
973 case OMPD_flush:
974 case OMPD_depobj:
975 case OMPD_scan:
976 case OMPD_teams:
977 case OMPD_target_data:
978 case OMPD_target_exit_data:
979 case OMPD_target_enter_data:
980 case OMPD_distribute:
981 case OMPD_distribute_simd:
982 case OMPD_distribute_parallel_for:
983 case OMPD_distribute_parallel_for_simd:
984 case OMPD_teams_distribute:
985 case OMPD_teams_distribute_simd:
986 case OMPD_teams_distribute_parallel_for:
987 case OMPD_teams_distribute_parallel_for_simd:
988 case OMPD_target_update:
989 case OMPD_declare_simd:
990 case OMPD_declare_variant:
991 case OMPD_begin_declare_variant:
992 case OMPD_end_declare_variant:
993 case OMPD_declare_target:
994 case OMPD_end_declare_target:
995 case OMPD_declare_reduction:
996 case OMPD_declare_mapper:
997 case OMPD_taskloop:
998 case OMPD_taskloop_simd:
999 case OMPD_master_taskloop:
1000 case OMPD_master_taskloop_simd:
1001 case OMPD_parallel_master_taskloop:
1002 case OMPD_parallel_master_taskloop_simd:
1003 case OMPD_requires:
1004 case OMPD_unknown:
1005 default:
1006 break;
1007 }
1008 llvm_unreachable(
1009 "Unknown programming model for OpenMP directive on NVPTX target.");
1010}
1011
1012void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
1013 StringRef ParentName,
1014 llvm::Function *&OutlinedFn,
1015 llvm::Constant *&OutlinedFnID,
1016 bool IsOffloadEntry,
1017 const RegionCodeGenTy &CodeGen) {
1018 ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
1019 EntryFunctionState EST;
1020 WrapperFunctionsMap.clear();
1021
1022 // Emit target region as a standalone region.
1023 class NVPTXPrePostActionTy : public PrePostActionTy {
1024 CGOpenMPRuntimeGPU::EntryFunctionState &EST;
1025
1026 public:
1027 NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST)
1028 : EST(EST) {}
1029 void Enter(CodeGenFunction &CGF) override {
1030 auto &RT =
1031 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1032 RT.emitKernelInit(CGF, EST, /* IsSPMD */ false);
1033 // Skip target region initialization.
1034 RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1035 }
1036 void Exit(CodeGenFunction &CGF) override {
1037 auto &RT =
1038 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1039 RT.clearLocThreadIdInsertPt(CGF);
1040 RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ false);
1041 }
1042 } Action(EST);
1043 CodeGen.setAction(Action);
1044 IsInTTDRegion = true;
1045 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
1046 IsOffloadEntry, CodeGen);
1047 IsInTTDRegion = false;
1048}
1049
1050void CGOpenMPRuntimeGPU::emitKernelInit(CodeGenFunction &CGF,
1051 EntryFunctionState &EST, bool IsSPMD) {
1052 CGBuilderTy &Bld = CGF.Builder;
1053 Bld.restoreIP(OMPBuilder.createTargetInit(Bld, IsSPMD, requiresFullRuntime()));
1054 IsInTargetMasterThreadRegion = IsSPMD;
1055 if (!IsSPMD)
1056 emitGenericVarsProlog(CGF, EST.Loc);
1057}
1058
1059void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
1060 EntryFunctionState &EST,
1061 bool IsSPMD) {
1062 if (!IsSPMD)
1063 emitGenericVarsEpilog(CGF);
1064
1065 CGBuilderTy &Bld = CGF.Builder;
1066 OMPBuilder.createTargetDeinit(Bld, IsSPMD, requiresFullRuntime());
1067}
1068
1069void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
1070 StringRef ParentName,
1071 llvm::Function *&OutlinedFn,
1072 llvm::Constant *&OutlinedFnID,
1073 bool IsOffloadEntry,
1074 const RegionCodeGenTy &CodeGen) {
1075 ExecutionRuntimeModesRAII ModeRAII(
1076 CurrentExecutionMode, RequiresFullRuntime,
1077 CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
1078 !supportsLightweightRuntime(CGM.getContext(), D));
1079 EntryFunctionState EST;
1080
1081 // Emit target region as a standalone region.
1082 class NVPTXPrePostActionTy : public PrePostActionTy {
1083 CGOpenMPRuntimeGPU &RT;
1084 CGOpenMPRuntimeGPU::EntryFunctionState &EST;
1085
1086 public:
1087 NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
1088 CGOpenMPRuntimeGPU::EntryFunctionState &EST)
1089 : RT(RT), EST(EST) {}
1090 void Enter(CodeGenFunction &CGF) override {
1091 RT.emitKernelInit(CGF, EST, /* IsSPMD */ true);
1092 // Skip target region initialization.
1093 RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1094 }
1095 void Exit(CodeGenFunction &CGF) override {
1096 RT.clearLocThreadIdInsertPt(CGF);
1097 RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ true);
1098 }
1099 } Action(*this, EST);
1100 CodeGen.setAction(Action);
1101 IsInTTDRegion = true;
1102 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
1103 IsOffloadEntry, CodeGen);
1104 IsInTTDRegion = false;
1105}
1106
1107// Create a unique global variable to indicate the execution mode of this target
1108// region. The execution mode is either 'generic' or 'spmd', depending on the
1109// target directive. This variable is picked up by the offload library to set
1110// up the device appropriately before kernel launch. If the execution mode is
1111// 'generic', the runtime reserves one warp for the master; otherwise, all
1112// warps participate in parallel work.
1113static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
1114 bool Mode) {
1115 auto *GVMode = new llvm::GlobalVariable(
1116 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
1117 llvm::GlobalValue::WeakAnyLinkage,
1118 llvm::ConstantInt::get(CGM.Int8Ty, Mode ? OMP_TGT_EXEC_MODE_SPMD
1119 : OMP_TGT_EXEC_MODE_GENERIC),
1120 Twine(Name, "_exec_mode"));
1121 CGM.addCompilerUsedGlobal(GVMode);
1122}
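
For an outlined kernel named, say, __omp_offloading_xx_foo_l10 (name illustrative), the emitted global looks roughly like:

  @__omp_offloading_xx_foo_l10_exec_mode = weak constant i8 2 ; SPMD
  ; with i8 1 (OMP_TGT_EXEC_MODE_GENERIC) when Mode is false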
1123
1124void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
1125 llvm::Constant *Addr,
1126 uint64_t Size, int32_t,
1127 llvm::GlobalValue::LinkageTypes) {
1128 // TODO: Add support for global variables on the device after declare target
1129 // support.
1130 if (!isa<llvm::Function>(Addr))
1131 return;
1132 llvm::Module &M = CGM.getModule();
1133 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
1134
1135 // Get "nvvm.annotations" metadata node
1136 llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
1137
1138 llvm::Metadata *MDVals[] = {
1139 llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
1140 llvm::ConstantAsMetadata::get(
1141 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
1142 // Append metadata to nvvm.annotations
1143 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
1144}
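
The added metadata marks the outlined function as a kernel for the NVPTX backend; for a function @foo (name illustrative) the module ends up with roughly:

  !nvvm.annotations = !{!0}
  !0 = !{void ()* @foo, !"kernel", i32 1}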
1145
1146void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
1147 const OMPExecutableDirective &D, StringRef ParentName,
1148 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
1149 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
1150 if (!IsOffloadEntry) // Nothing to do.
1151 return;
1152
1153 assert(!ParentName.empty() && "Invalid target region parent name!");
1154
1155 bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
1156 if (Mode)
1157 emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1158 CodeGen);
1159 else
1160 emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1161 CodeGen);
1162
1163 setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
1164}
1165
1166namespace {
1167LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
1168/// Enum for accessing the reserved_2 field of the ident_t struct.
1169enum ModeFlagsTy : unsigned {
1170 /// Bit set to 1 when in SPMD mode.
1171 KMP_IDENT_SPMD_MODE = 0x01,
1172 /// Bit set to 1 when a simplified runtime is used.
1173 KMP_IDENT_SIMPLE_RT_MODE = 0x02,
1174 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
1175};
1176
1177/// Special mode Undefined: the combination of Non-SPMD mode + SimpleRuntime.
1178static const ModeFlagsTy UndefinedMode =
1179 (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
1180} // anonymous namespace
1181
1182unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
1183 switch (getExecutionMode()) {
1184 case EM_SPMD:
1185 if (requiresFullRuntime())
1186 return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
1187 return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
1188 case EM_NonSPMD:
1189 assert(requiresFullRuntime() && "Expected full runtime.");
1190 return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
1191 case EM_Unknown:
1192 return UndefinedMode;
1193 }
1194 llvm_unreachable("Unknown flags are requested.");
1195}
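
Working through the bit arithmetic, the possible reserved_2 values are:

  EM_SPMD    + full runtime   -> 0x01 (KMP_IDENT_SPMD_MODE only)
  EM_SPMD    + simple runtime -> 0x03 (SPMD_MODE | SIMPLE_RT_MODE)
  EM_NonSPMD (always full)    -> 0x00
  EM_Unknown                  -> 0x02 (UndefinedMode = ~SPMD & SIMPLE_RT)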
1196
1197CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
1198 : CGOpenMPRuntime(CGM, "_", "$") {
1199 if (!CGM.getLangOpts().OpenMPIsDevice)
1200 llvm_unreachable("OpenMP NVPTX can only handle device code.");
1201
1202 llvm::OpenMPIRBuilder &OMPBuilder = getOMPBuilder();
1203 if (CGM.getLangOpts().OpenMPTargetNewRuntime)
1204 OMPBuilder.createDebugKind(CGM.getLangOpts().OpenMPTargetDebug);
1205}
1206
1207void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
1208 ProcBindKind ProcBind,
1209 SourceLocation Loc) {
1210 // Do nothing in case of SPMD mode and L0 parallel.
1211 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
1212 return;
1213
1214 CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
1215}
1216
1217void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
1218 llvm::Value *NumThreads,
1219 SourceLocation Loc) {
1220 // Do nothing in case of SPMD mode and L0 parallel.
1221 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
1222 return;
1223
1224 CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
1225}
1226
1227void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
1228 const Expr *NumTeams,
1229 const Expr *ThreadLimit,
1230 SourceLocation Loc) {}
1231
1232llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
1233 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1234 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1235 // Emit target region as a standalone region.
1236 class NVPTXPrePostActionTy : public PrePostActionTy {
1237 bool &IsInParallelRegion;
1238 bool PrevIsInParallelRegion;
1239
1240 public:
1241 NVPTXPrePostActionTy(bool &IsInParallelRegion)
1242 : IsInParallelRegion(IsInParallelRegion) {}
1243 void Enter(CodeGenFunction &CGF) override {
1244 PrevIsInParallelRegion = IsInParallelRegion;
1245 IsInParallelRegion = true;
1246 }
1247 void Exit(CodeGenFunction &CGF) override {
1248 IsInParallelRegion = PrevIsInParallelRegion;
1249 }
1250 } Action(IsInParallelRegion);
1251 CodeGen.setAction(Action);
1252 bool PrevIsInTTDRegion = IsInTTDRegion;
1253 IsInTTDRegion = false;
1254 bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
1255 IsInTargetMasterThreadRegion = false;
1256 auto *OutlinedFun =
1257 cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
1258 D, ThreadIDVar, InnermostKind, CodeGen));
1259 IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
1260 IsInTTDRegion = PrevIsInTTDRegion;
1261 if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
1262 !IsInParallelRegion) {
1263 llvm::Function *WrapperFun =
1264 createParallelDataSharingWrapper(OutlinedFun, D);
1265 WrapperFunctionsMap[OutlinedFun] = WrapperFun;
1266 }
1267
1268 return OutlinedFun;
1269}
1270
1271/// Get list of lastprivate variables from the teams distribute ... or
1272/// teams {distribute ...} directives.
1273static void
1274getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1275 llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1276 assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1277 "expected teams directive.");
1278 const OMPExecutableDirective *Dir = &D;
1279 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
1280 if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
1281 Ctx,
1282 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
1283 /*IgnoreCaptured=*/true))) {
1284 Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
1285 if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
1286 Dir = nullptr;
1287 }
1288 }
1289 if (!Dir)
1290 return;
1291 for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
1292 for (const Expr *E : C->getVarRefs())
1293 Vars.push_back(getPrivateItem(E));
1294 }
1295}
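
For example (placeholder code), this collects 'X' both from the combined directive below and from a 'distribute' nested as the single child of a plain 'teams' directive:

  int X;
  #pragma omp target teams distribute lastprivate(X)
  for (int i = 0; i < 128; ++i)
    X = i;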
1296
1297/// Get list of reduction variables from the teams ... directives.
1298static void
1299getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1300 llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1301 assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1302 "expected teams directive.");
1303 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1304 for (const Expr *E : C->privates())
1305 Vars.push_back(getPrivateItem(E));
1306 }
1307}
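
Likewise, a hypothetical construct that produces a non-empty Vars list here is a reduction clause on the teams directive itself (sum and work are illustrative):

  #pragma omp target teams reduction(+ : sum)
  { sum += work(); }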
1308
1309llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
1310 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1311 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1312 SourceLocation Loc = D.getBeginLoc();
1313
1314 const RecordDecl *GlobalizedRD = nullptr;
1315 llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
1316 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
1317 unsigned WarpSize = CGM.getTarget().getGridValue().GV_Warp_Size;
1318 // Globalize team reductions variable unconditionally in all modes.
1319 if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1320 getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
1321 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
1322 getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
1323 if (!LastPrivatesReductions.empty()) {
1324 GlobalizedRD = ::buildRecordForGlobalizedVars(
1325 CGM.getContext(), llvm::None, LastPrivatesReductions,
1326 MappedDeclsFields, WarpSize);
1327 }
1328 } else if (!LastPrivatesReductions.empty()) {
1329     assert(!TeamAndReductions.first &&
1330            "Previous team declaration is not expected.");
1331 TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
1332 std::swap(TeamAndReductions.second, LastPrivatesReductions);
1333 }
1334
1335 // Emit target region as a standalone region.
1336 class NVPTXPrePostActionTy : public PrePostActionTy {
1337 SourceLocation &Loc;
1338 const RecordDecl *GlobalizedRD;
1339 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1340 &MappedDeclsFields;
1341
1342 public:
1343 NVPTXPrePostActionTy(
1344 SourceLocation &Loc, const RecordDecl *GlobalizedRD,
1345 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1346 &MappedDeclsFields)
1347 : Loc(Loc), GlobalizedRD(GlobalizedRD),
1348 MappedDeclsFields(MappedDeclsFields) {}
1349 void Enter(CodeGenFunction &CGF) override {
1350 auto &Rt =
1351 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1352 if (GlobalizedRD) {
1353 auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
1354 I->getSecond().MappedParams =
1355 std::make_unique<CodeGenFunction::OMPMapVars>();
1356 DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
1357 for (const auto &Pair : MappedDeclsFields) {
1358         assert(Pair.getFirst()->isCanonicalDecl() &&
1359                "Expected canonical declaration");
1360 Data.insert(std::make_pair(Pair.getFirst(), MappedVarData()));
1361 }
1362 }
1363 Rt.emitGenericVarsProlog(CGF, Loc);
1364 }
1365 void Exit(CodeGenFunction &CGF) override {
1366 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
1367 .emitGenericVarsEpilog(CGF);
1368 }
1369 } Action(Loc, GlobalizedRD, MappedDeclsFields);
1370 CodeGen.setAction(Action);
1371 llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
1372 D, ThreadIDVar, InnermostKind, CodeGen);
1373
1374 return OutlinedFun;
1375}
1376
1377void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
1378 SourceLocation Loc,
1379 bool WithSPMDCheck) {
1380 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1381 getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1382 return;
1383
1384 CGBuilderTy &Bld = CGF.Builder;
1385
1386 const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1387 if (I == FunctionGlobalizedDecls.end())
1388 return;
1389
1390 for (auto &Rec : I->getSecond().LocalVarData) {
1391 const auto *VD = cast<VarDecl>(Rec.first);
1392 bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
1393 QualType VarTy = VD->getType();
1394
1395 // Get the local allocation of a firstprivate variable before sharing
1396 llvm::Value *ParValue;
1397 if (EscapedParam) {
1398 LValue ParLVal =
1399 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
1400 ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
1401 }
1402
1403 // Allocate space for the variable to be globalized
1404 llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
1405 llvm::Instruction *VoidPtr =
1406 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1407 CGM.getModule(), OMPRTL___kmpc_alloc_shared),
1408 AllocArgs, VD->getName());
1409
1410 // Cast the void pointer and get the address of the globalized variable.
1411 llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
1412 llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1413 VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
1414 LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy);
1415 Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
1416 Rec.second.GlobalizedVal = VoidPtr;
1417
1418 // Assign the local allocation to the newly globalized location.
1419 if (EscapedParam) {
1420 CGF.EmitStoreOfScalar(ParValue, VarAddr);
1421 I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF));
1422 }
1423 if (auto *DI = CGF.getDebugInfo())
1424 VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
1425 }
1426 for (const auto *VD : I->getSecond().EscapedVariableLengthDecls) {
1427 // Use actual memory size of the VLA object including the padding
1428 // for alignment purposes.
1429 llvm::Value *Size = CGF.getTypeSize(VD->getType());
1430 CharUnits Align = CGM.getContext().getDeclAlign(VD);
1431 Size = Bld.CreateNUWAdd(
1432 Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
1433 llvm::Value *AlignVal =
1434 llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
1435
1436 Size = Bld.CreateUDiv(Size, AlignVal);
1437 Size = Bld.CreateNUWMul(Size, AlignVal);
Value stored to 'Size' is never read
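
The NUWAdd/UDiv/NUWMul triple above is the standard round-up-to-alignment computation; in plain integer arithmetic it is (a sketch, not code from this file):

  #include <cstddef>

  // Round Size up to the next multiple of Align; no overflow check,
  // matching the NUW flags on the emitted IR.
  size_t roundUpToAlign(size_t Size, size_t Align) {
    return (Size + Align - 1) / Align * Align;
  }

Note that AllocArgs below then passes a fresh CGF.getTypeSize(VD->getType()) rather than the rounded value, which is exactly why the analyzer reports the final store to 'Size' as never read.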
1438
1439 // Allocate space for this VLA object to be globalized.
1440 llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
1441 llvm::Instruction *VoidPtr =
1442 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1443 CGM.getModule(), OMPRTL___kmpc_alloc_shared),
1444 AllocArgs, VD->getName());
1445
1446 I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(
1447 std::pair<llvm::Value *, llvm::Value *>(
1448 {VoidPtr, CGF.getTypeSize(VD->getType())}));
1449 LValue Base = CGF.MakeAddrLValue(VoidPtr, VD->getType(),
1450 CGM.getContext().getDeclAlign(VD),
1451 AlignmentSource::Decl);
1452 I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
1453 Base.getAddress(CGF));
1454 }
1455 I->getSecond().MappedParams->apply(CGF);
1456}
1457
1458void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
1459 bool WithSPMDCheck) {
1460 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1461 getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1462 return;
1463
1464 const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1465 if (I != FunctionGlobalizedDecls.end()) {
1466 // Deallocate the memory for each globalized VLA object
1467 for (auto AddrSizePair :
1468 llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
1469 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1470 CGM.getModule(), OMPRTL___kmpc_free_shared),
1471 {AddrSizePair.first, AddrSizePair.second});
1472 }
1473 // Deallocate the memory for each globalized value
1474 for (auto &Rec : llvm::reverse(I->getSecond().LocalVarData)) {
1475 const auto *VD = cast<VarDecl>(Rec.first);
1476 I->getSecond().MappedParams->restore(CGF);
1477
1478 llvm::Value *FreeArgs[] = {Rec.second.GlobalizedVal,
1479 CGF.getTypeSize(VD->getType())};
1480 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1481 CGM.getModule(), OMPRTL___kmpc_free_shared),
1482 FreeArgs);
1483 }
1484 }
1485}
1486
1487void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
1488 const OMPExecutableDirective &D,
1489 SourceLocation Loc,
1490 llvm::Function *OutlinedFn,
1491 ArrayRef<llvm::Value *> CapturedVars) {
1492 if (!CGF.HaveInsertPoint())
1493 return;
1494
1495 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
1496 /*Name=*/".zero.addr");
1497 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
1498 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
1499 OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
1500 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
1501 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
1502 emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
1503}
1504
1505void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
1506 SourceLocation Loc,
1507 llvm::Function *OutlinedFn,
1508 ArrayRef<llvm::Value *> CapturedVars,
1509 const Expr *IfCond) {
1510 if (!CGF.HaveInsertPoint())
1511 return;
1512
1513 auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars,
1514 IfCond](CodeGenFunction &CGF, PrePostActionTy &Action) {
1515 CGBuilderTy &Bld = CGF.Builder;
1516 llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
1517 llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
1518 if (WFn)
1519 ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
1520 llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, CGM.Int8PtrTy);
1521
1522 // Create a private scope that will globalize the arguments
1523 // passed from the outside of the target region.
1524 // TODO: Is that needed?
1525 CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
1526
1527 Address CapturedVarsAddrs = CGF.CreateDefaultAlignTempAlloca(
1528 llvm::ArrayType::get(CGM.VoidPtrTy, CapturedVars.size()),
1529 "captured_vars_addrs");
1530 // There's something to share.
1531 if (!CapturedVars.empty()) {
1532 // Prepare for parallel region. Indicate the outlined function.
1533 ASTContext &Ctx = CGF.getContext();
1534 unsigned Idx = 0;
1535 for (llvm::Value *V : CapturedVars) {
1536 Address Dst = Bld.CreateConstArrayGEP(CapturedVarsAddrs, Idx);
1537 llvm::Value *PtrV;
1538 if (V->getType()->isIntegerTy())
1539 PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
1540 else
1541 PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
1542 CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
1543 Ctx.getPointerType(Ctx.VoidPtrTy));
1544 ++Idx;
1545 }
1546 }
1547
1548 llvm::Value *IfCondVal = nullptr;
1549 if (IfCond)
1550 IfCondVal = Bld.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.Int32Ty,
1551 /* isSigned */ false);
1552 else
1553 IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1);
1554
1555     assert(IfCondVal && "Expected a value");
1556 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
1557 llvm::Value *Args[] = {
1558 RTLoc,
1559 getThreadID(CGF, Loc),
1560 IfCondVal,
1561 llvm::ConstantInt::get(CGF.Int32Ty, -1),
1562 llvm::ConstantInt::get(CGF.Int32Ty, -1),
1563 FnPtr,
1564 ID,
1565 Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(),
1566 CGF.VoidPtrPtrTy),
1567 llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
1568 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1569 CGM.getModule(), OMPRTL___kmpc_parallel_51),
1570 Args);
1571 };
1572
1573 RegionCodeGenTy RCG(ParallelGen);
1574 RCG(CGF);
1575}
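
Read off the Args array above, the emitted runtime call has the following shape (parameter names are illustrative glosses, not taken from the runtime headers):

  // __kmpc_parallel_51(loc, thread_id, if_cond, /*num_threads=*/-1,
  //                    /*proc_bind=*/-1, fn_ptr, wrapper_id,
  //                    captured_vars_addrs, /*nargs=*/CapturedVars.size());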
1576
1577void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
1578 // Always emit simple barriers!
1579 if (!CGF.HaveInsertPoint())
1580 return;
1581 // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
1582 // This function does not use parameters, so we can emit just default values.
1583 llvm::Value *Args[] = {
1584 llvm::ConstantPointerNull::get(
1585 cast<llvm::PointerType>(getIdentTyPointerTy())),
1586 llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
1587 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1588 CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd),
1589 Args);
1590}
1591
1592void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
1593 SourceLocation Loc,
1594 OpenMPDirectiveKind Kind, bool,
1595 bool) {
1596 // Always emit simple barriers!
1597 if (!CGF.HaveInsertPoint())
1598 return;
1599 // Build call __kmpc_barrier(loc, thread_id);
1600 unsigned Flags = getDefaultFlagsForBarriers(Kind);
1601 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
1602 getThreadID(CGF, Loc)};
1603
1604 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1605 CGM.getModule(), OMPRTL___kmpc_barrier),
1606 Args);
1607}
1608
1609void CGOpenMPRuntimeGPU::emitCriticalRegion(
1610 CodeGenFunction &CGF, StringRef CriticalName,
1611 const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
1612 const Expr *Hint) {
1613 llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
1614 llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
1615 llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
1616 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
1617 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
1618
1619 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1620
1621 // Get the mask of active threads in the warp.
1622 llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1623 CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask));
1624 // Fetch team-local id of the thread.
1625 llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
1626
1627 // Get the width of the team.
1628 llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF);
1629
1630 // Initialize the counter variable for the loop.
1631 QualType Int32Ty =
1632 CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
1633 Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
1634 LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
1635 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
1636 /*isInit=*/true);
1637
1638 // Block checks if loop counter exceeds upper bound.
1639 CGF.EmitBlock(LoopBB);
1640 llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
1641 llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
1642 CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
1643
1644 // Block tests which single thread should execute the region, and which
1645 // threads should go straight to the synchronisation point.
1646 CGF.EmitBlock(TestBB);
1647 CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
1648 llvm::Value *CmpThreadToCounter =
1649 CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
1650 CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
1651
1652 // Block emits the body of the critical region.
1653 CGF.EmitBlock(BodyBB);
1654
1655 // Output the critical statement.
1656 CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
1657 Hint);
1658
1659 // After the body surrounded by the critical region, the single executing
1660 // thread will jump to the synchronisation point.
1661 // Block waits for all threads in current team to finish then increments the
1662 // counter variable and returns to the loop.
1663 CGF.EmitBlock(SyncBB);
1664 // Reconverge active threads in the warp.
1665 (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1666 CGM.getModule(), OMPRTL___kmpc_syncwarp),
1667 Mask);
1668
1669 llvm::Value *IncCounterVal =
1670 CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
1671 CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
1672 CGF.EmitBranch(LoopBB);
1673
1674 // Block that is reached when all threads in the team complete the region.
1675 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1676}
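
Taken together, the blocks above amount to a per-thread loop in which exactly one thread executes the critical body per iteration (a sketch of the generated control flow; team_width, thread_id, and critical_body are illustrative):

  for (uint32_t counter = 0; counter < team_width; ++counter) {
    if (thread_id == counter)
      critical_body();        // BodyBB: one thread per trip enters the region
    __kmpc_syncwarp(mask);    // SyncBB: reconverge, then bump the counter
  }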
1677
1678/// Cast value to the specified type.
1679static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
1680 QualType ValTy, QualType CastTy,
1681 SourceLocation Loc) {
1682   assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
1683          "Cast type must sized.");
1684   assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
1685          "Val type must sized.");
1686 llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
1687 if (ValTy == CastTy)
1688 return Val;
1689 if (CGF.getContext().getTypeSizeInChars(ValTy) ==
1690 CGF.getContext().getTypeSizeInChars(CastTy))
1691 return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
1692 if (CastTy->isIntegerType() && ValTy->isIntegerType())
1693 return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
1694 CastTy->hasSignedIntegerRepresentation());
1695 Address CastItem = CGF.CreateMemTemp(CastTy);
1696 Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1697 CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
1698 CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
1699 LValueBaseInfo(AlignmentSource::Type),
1700 TBAAAccessInfo());
1701 return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
1702 LValueBaseInfo(AlignmentSource::Type),
1703 TBAAAccessInfo());
1704}
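
The ladder above picks one of four strategies, summarized here (a reading of the code, not an excerpt):

  // ValTy == CastTy          -> return Val unchanged
  // equal size in chars      -> bitcast (e.g. float <-> i32)
  // both integer types       -> CreateIntCast per CastTy's signedness
  // anything else            -> spill to a CastTy temporary and reload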
1705
1706/// This function creates calls to one of two shuffle functions to copy
1707/// variables between lanes in a warp.
1708static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
1709 llvm::Value *Elem,
1710 QualType ElemType,
1711 llvm::Value *Offset,
1712 SourceLocation Loc) {
1713 CodeGenModule &CGM = CGF.CGM;
1714 CGBuilderTy &Bld = CGF.Builder;
1715 CGOpenMPRuntimeGPU &RT =
1716 *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
1717 llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder();
1718
1719 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1720   assert(Size.getQuantity() <= 8 &&
1721          "Unsupported bitwidth in shuffle instruction.");
1722
1723 RuntimeFunction ShuffleFn = Size.getQuantity() <= 4
1724 ? OMPRTL___kmpc_shuffle_int32
1725 : OMPRTL___kmpc_shuffle_int64;
1726
1727 // Cast all types to 32- or 64-bit values before calling shuffle routines.
1728 QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
1729 Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
1730 llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
1731 llvm::Value *WarpSize =
1732 Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
1733
1734 llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
1735 OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn),
1736 {ElemCast, Offset, WarpSize});
1737
1738 return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
1739}
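
The width selection above, summarized (a reading of the code, not an excerpt):

  // Size <= 4 bytes -> __kmpc_shuffle_int32 on an i32-widened value
  // 4 < Size <= 8   -> __kmpc_shuffle_int64 on an i64-widened value
  // Size > 8 bytes  -> rejected by the assert; larger elements are chunked
  //                    by shuffleAndStore below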
1740
1741static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
1742 Address DestAddr, QualType ElemType,
1743 llvm::Value *Offset, SourceLocation Loc) {
1744 CGBuilderTy &Bld = CGF.Builder;
1745
1746 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1747 // Create the loop over the big sized data.
1748 // ptr = (void*)Elem;
1749 // ptrEnd = (void*) Elem + 1;
1750 // Step = 8;
1751 // while (ptr + Step < ptrEnd)
1752 // shuffle((int64_t)*ptr);
1753 // Step = 4;
1754 // while (ptr + Step < ptrEnd)
1755 // shuffle((int32_t)*ptr);
1756 // ...
1757 Address ElemPtr = DestAddr;
1758 Address Ptr = SrcAddr;
1759 Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
1760 Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
1761 for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
1762 if (Size < CharUnits::fromQuantity(IntSize))
1763 continue;
1764 QualType IntType = CGF.getContext().getIntTypeForBitwidth(
1765 CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
1766 /*Signed=*/1);
1767 llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
1768 Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
1769 ElemPtr =
1770 Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
1771 if (Size.getQuantity() / IntSize > 1) {
1772 llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
1773 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
1774 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
1775 llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
1776 CGF.EmitBlock(PreCondBB);
1777 llvm::PHINode *PhiSrc =
1778 Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
1779 PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
1780 llvm::PHINode *PhiDest =
1781 Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
1782 PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
1783 Ptr = Address(PhiSrc, Ptr.getAlignment());
1784 ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
1785 llvm::Value *PtrDiff = Bld.CreatePtrDiff(
1786 PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
1787 Ptr.getPointer(), CGF.VoidPtrTy));
1788 Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
1789 ThenBB, ExitBB);
1790 CGF.EmitBlock(ThenBB);
1791 llvm::Value *Res = createRuntimeShuffleFunction(
1792 CGF,
1793 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
1794 LValueBaseInfo(AlignmentSource::Type),
1795 TBAAAccessInfo()),
1796 IntType, Offset, Loc);
1797 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
1798 LValueBaseInfo(AlignmentSource::Type),
1799 TBAAAccessInfo());
1800 Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
1801 Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
1802 PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
1803 PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
1804 CGF.EmitBranch(PreCondBB);
1805 CGF.EmitBlock(ExitBB);
1806 } else {
1807 llvm::Value *Res = createRuntimeShuffleFunction(
1808 CGF,
1809 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
1810 LValueBaseInfo(AlignmentSource::Type),
1811 TBAAAccessInfo()),
1812 IntType, Offset, Loc);
1813 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
1814 LValueBaseInfo(AlignmentSource::Type),
1815 TBAAAccessInfo());
1816 Ptr = Bld.CreateConstGEP(Ptr, 1);
1817 ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
1818 }
1819 Size = Size % IntSize;
1820 }
1821}
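
A worked example of the decomposition performed by the loop above (a reading of the code, not an excerpt):

  // 13-byte element: one i64 + one i32 + one i8 (13 = 8 + 4 + 1), each via
  //                  the single-trip else branch
  // 20-byte element: two i64 trips through the PHI loop (20 / 8 == 2), then
  //                  one i32 for the remaining 4 bytes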
1822
1823namespace {
1824enum CopyAction : unsigned {
1825 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
1826 // the warp using shuffle instructions.
1827 RemoteLaneToThread,
1828 // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
1829 ThreadCopy,
1830 // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
1831 ThreadToScratchpad,
1832 // ScratchpadToThread: Copy from a scratchpad array in global memory
1833 // containing team-reduced data to a thread's stack.
1834 ScratchpadToThread,
1835};
1836} // namespace
1837
1838struct CopyOptionsTy {
1839 llvm::Value *RemoteLaneOffset;
1840 llvm::Value *ScratchpadIndex;
1841 llvm::Value *ScratchpadWidth;
1842};
1843
1844/// Emit instructions to copy a Reduce list, which contains partially
1845/// aggregated values, in the specified direction.
1846static void emitReductionListCopy(
1847 CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
1848 ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
1849 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
1850
1851 CodeGenModule &CGM = CGF.CGM;
1852 ASTContext &C = CGM.getContext();
1853 CGBuilderTy &Bld = CGF.Builder;
1854
1855 llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
1856 llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
1857 llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
1858
1859 // Iterate, element-by-element, through the source Reduce list and
1860 // make a copy.
1861 unsigned Idx = 0;
1862 unsigned Size = Privates.size();
1863 for (const Expr *Private : Privates) {
1864 Address SrcElementAddr = Address::invalid();
1865 Address DestElementAddr = Address::invalid();
1866 Address DestElementPtrAddr = Address::invalid();
1867 // Should we shuffle in an element from a remote lane?
1868 bool ShuffleInElement = false;
1869 // Set to true to update the pointer in the dest Reduce list to a
1870 // newly created element.
1871 bool UpdateDestListPtr = false;
1872 // Increment the src or dest pointer to the scratchpad, for each
1873 // new element.
1874 bool IncrScratchpadSrc = false;
1875 bool IncrScratchpadDest = false;
1876
1877 switch (Action) {
1878 case RemoteLaneToThread: {
1879 // Step 1.1: Get the address for the src element in the Reduce list.
1880 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1881 SrcElementAddr = CGF.EmitLoadOfPointer(
1882 SrcElementPtrAddr,
1883 C.getPointerType(Private->getType())->castAs<PointerType>());
1884
1885 // Step 1.2: Create a temporary to store the element in the destination
1886 // Reduce list.
1887 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1888 DestElementAddr =
1889 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
1890 ShuffleInElement = true;
1891 UpdateDestListPtr = true;
1892 break;
1893 }
1894 case ThreadCopy: {
1895 // Step 1.1: Get the address for the src element in the Reduce list.
1896 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1897 SrcElementAddr = CGF.EmitLoadOfPointer(
1898 SrcElementPtrAddr,
1899 C.getPointerType(Private->getType())->castAs<PointerType>());
1900
1901 // Step 1.2: Get the address for dest element. The destination
1902 // element has already been created on the thread's stack.
1903 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1904 DestElementAddr = CGF.EmitLoadOfPointer(
1905 DestElementPtrAddr,
1906 C.getPointerType(Private->getType())->castAs<PointerType>());
1907 break;
1908 }
1909 case ThreadToScratchpad: {
1910 // Step 1.1: Get the address for the src element in the Reduce list.
1911 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1912 SrcElementAddr = CGF.EmitLoadOfPointer(
1913 SrcElementPtrAddr,
1914 C.getPointerType(Private->getType())->castAs<PointerType>());
1915
1916 // Step 1.2: Get the address for dest element:
1917 // address = base + index * ElementSizeInChars.
1918 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1919 llvm::Value *CurrentOffset =
1920 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
1921 llvm::Value *ScratchPadElemAbsolutePtrVal =
1922 Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
1923 ScratchPadElemAbsolutePtrVal =
1924 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
1925 DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
1926 C.getTypeAlignInChars(Private->getType()));
1927 IncrScratchpadDest = true;
1928 break;
1929 }
1930 case ScratchpadToThread: {
1931 // Step 1.1: Get the address for the src element in the scratchpad.
1932 // address = base + index * ElementSizeInChars.
1933 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1934 llvm::Value *CurrentOffset =
1935 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
1936 llvm::Value *ScratchPadElemAbsolutePtrVal =
1937 Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
1938 ScratchPadElemAbsolutePtrVal =
1939 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
1940 SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
1941 C.getTypeAlignInChars(Private->getType()));
1942 IncrScratchpadSrc = true;
1943
1944 // Step 1.2: Create a temporary to store the element in the destination
1945 // Reduce list.
1946 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1947 DestElementAddr =
1948 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
1949 UpdateDestListPtr = true;
1950 break;
1951 }
1952 }
1953
1954 // Regardless of the direction of the copy, we emit the load of the
1955 // src element, since it is required in all cases.
1956 SrcElementAddr = Bld.CreateElementBitCast(
1957 SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
1958 DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
1959 SrcElementAddr.getElementType());
1960
1961 // Now that all active lanes have read the element in the
1962 // Reduce list, shuffle over the value from the remote lane.
1963 if (ShuffleInElement) {
1964 shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
1965 RemoteLaneOffset, Private->getExprLoc());
1966 } else {
1967 switch (CGF.getEvaluationKind(Private->getType())) {
1968 case TEK_Scalar: {
1969 llvm::Value *Elem = CGF.EmitLoadOfScalar(
1970 SrcElementAddr, /*Volatile=*/false, Private->getType(),
1971 Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
1972 TBAAAccessInfo());
1973 // Store the source element value to the dest element address.
1974 CGF.EmitStoreOfScalar(
1975 Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
1976 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
1977 break;
1978 }
1979 case TEK_Complex: {
1980 CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
1981 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
1982 Private->getExprLoc());
1983 CGF.EmitStoreOfComplex(
1984 Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
1985 /*isInit=*/false);
1986 break;
1987 }
1988 case TEK_Aggregate:
1989 CGF.EmitAggregateCopy(
1990 CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
1991 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
1992 Private->getType(), AggValueSlot::DoesNotOverlap);
1993 break;
1994 }
1995 }
1996
1997 // Step 3.1: Modify reference in dest Reduce list as needed.
1998 // Modifying the reference in Reduce list to point to the newly
1999 // created element. The element is live in the current function
2000 // scope and that of functions it invokes (i.e., reduce_function).
2001 // RemoteReduceData[i] = (void*)&RemoteElem
2002 if (UpdateDestListPtr) {
2003 CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
2004 DestElementAddr.getPointer(), CGF.VoidPtrTy),
2005 DestElementPtrAddr, /*Volatile=*/false,
2006 C.VoidPtrTy);
2007 }
2008
2009 // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
2010 // address of the next element in scratchpad memory, unless we're currently
2011 // processing the last one. Memory alignment is also taken care of here.
2012 if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
2013 llvm::Value *ScratchpadBasePtr =
2014 IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
2015 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2016 ScratchpadBasePtr = Bld.CreateNUWAdd(
2017 ScratchpadBasePtr,
2018 Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
2019
2020 // Take care of global memory alignment for performance
2021 ScratchpadBasePtr = Bld.CreateNUWSub(
2022 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2023 ScratchpadBasePtr = Bld.CreateUDiv(
2024 ScratchpadBasePtr,
2025 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2026 ScratchpadBasePtr = Bld.CreateNUWAdd(
2027 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2028 ScratchpadBasePtr = Bld.CreateNUWMul(
2029 ScratchpadBasePtr,
2030 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2031
2032 if (IncrScratchpadDest)
2033 DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2034 else /* IncrScratchpadSrc = true */
2035 SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2036 }
2037
2038 ++Idx;
2039 }
2040}
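
The Sub/UDiv/Add/Mul sequence in step 4.1 rounds the scratchpad base up to the next multiple of GlobalMemoryAlignment; in plain integer arithmetic (a sketch, assuming a non-zero base, not code from this file):

  #include <cstddef>

  // Round Base up to the next multiple of Alignment; assumes Base > 0,
  // mirroring the NUWSub in the emitted IR.
  size_t alignScratchpadBase(size_t Base, size_t Alignment) {
    return ((Base - 1) / Alignment + 1) * Alignment;
  }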
2041
2042/// This function emits a helper that gathers Reduce lists from the first
2043/// lane of every active warp to lanes in the first warp.
2044///
2045/// void inter_warp_copy_func(void* reduce_data, num_warps)
2046/// shared smem[warp_size];
2047/// For all data entries D in reduce_data:
2048/// sync
2049/// If (I am the first lane in each warp)
2050/// Copy my local D to smem[warp_id]
2051/// sync
2052/// if (I am the first warp)
2053/// Copy smem[thread_id] to my local D
2054static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
2055 ArrayRef<const Expr *> Privates,
2056 QualType ReductionArrayTy,
2057 SourceLocation Loc) {
2058 ASTContext &C = CGM.getContext();
2059 llvm::Module &M = CGM.getModule();
2060
2061 // ReduceList: thread local Reduce list.
2062 // At the stage of the computation when this function is called, partially
2063 // aggregated values reside in the first lane of every active warp.
2064 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2065 C.VoidPtrTy, ImplicitParamDecl::Other);
2066 // NumWarps: number of warps active in the parallel region. This could
2067 // be smaller than 32 (max warps in a CTA) for partial block reduction.
2068 ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2069 C.getIntTypeForBitwidth(32, /* Signed */ true),
2070 ImplicitParamDecl::Other);
2071 FunctionArgList Args;
2072 Args.push_back(&ReduceListArg);
2073 Args.push_back(&NumWarpsArg);
2074
2075 const CGFunctionInfo &CGFI =
2076 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2077 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
2078 llvm::GlobalValue::InternalLinkage,
2079 "_omp_reduction_inter_warp_copy_func", &M);
2080 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2081 Fn->setDoesNotRecurse();
2082 CodeGenFunction CGF(CGM);
2083 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2084
2085 CGBuilderTy &Bld = CGF.Builder;
2086
2087 // This array is used as a medium to transfer, one reduce element at a time,
2088 // the data from the first lane of every warp to lanes in the first warp
2089 // in order to perform the final step of a reduction in a parallel region
2090 // (reduction across warps). The array is placed in NVPTX __shared__ memory
2091 // for reduced latency, as well as to have a distinct copy for concurrently
2092 // executing target regions. The array is declared with common linkage so
2093 // as to be shared across compilation units.
2094 StringRef TransferMediumName =
2095 "__openmp_nvptx_data_transfer_temporary_storage";
2096 llvm::GlobalVariable *TransferMedium =
2097 M.getGlobalVariable(TransferMediumName);
2098 unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
2099 if (!TransferMedium) {
2100 auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
2101 unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
2102 TransferMedium = new llvm::GlobalVariable(
2103 M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage,
2104 llvm::UndefValue::get(Ty), TransferMediumName,
2105 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
2106 SharedAddressSpace);
2107 CGM.addCompilerUsedGlobal(TransferMedium);
2108 }
2109
2110 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2111 // Get the CUDA thread id of the current OpenMP thread on the GPU.
2112 llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
2113 // nvptx_lane_id = nvptx_id % warpsize
2114 llvm::Value *LaneID = getNVPTXLaneID(CGF);
2115 // nvptx_warp_id = nvptx_id / warpsize
2116 llvm::Value *WarpID = getNVPTXWarpID(CGF);
2117
2118 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2119 Address LocalReduceList(
2120 Bld.CreatePointerBitCastOrAddrSpaceCast(
2121 CGF.EmitLoadOfScalar(
2122 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
2123 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
2124 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2125 CGF.getPointerAlign());
2126
2127 unsigned Idx = 0;
2128 for (const Expr *Private : Privates) {
2129 //
2130 // Warp master copies reduce element to transfer medium in __shared__
2131 // memory.
2132 //
2133 unsigned RealTySize =
2134 C.getTypeSizeInChars(Private->getType())
2135 .alignTo(C.getTypeAlignInChars(Private->getType()))
2136 .getQuantity();
2137 for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /=2) {
2138 unsigned NumIters = RealTySize / TySize;
2139 if (NumIters == 0)
2140 continue;
2141 QualType CType = C.getIntTypeForBitwidth(
2142 C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
2143 llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
2144 CharUnits Align = CharUnits::fromQuantity(TySize);
2145 llvm::Value *Cnt = nullptr;
2146 Address CntAddr = Address::invalid();
2147 llvm::BasicBlock *PrecondBB = nullptr;
2148 llvm::BasicBlock *ExitBB = nullptr;
2149 if (NumIters > 1) {
2150 CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
2151 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
2152 /*Volatile=*/false, C.IntTy);
2153 PrecondBB = CGF.createBasicBlock("precond");
2154 ExitBB = CGF.createBasicBlock("exit");
2155 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
2156 // There is no need to emit line number for unconditional branch.
2157 (void)ApplyDebugLocation::CreateEmpty(CGF);
2158 CGF.EmitBlock(PrecondBB);
2159 Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
2160 llvm::Value *Cmp =
2161 Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
2162 Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
2163 CGF.EmitBlock(BodyBB);
2164 }
2165 // kmpc_barrier.
2166 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2167 /*EmitChecks=*/false,
2168 /*ForceSimpleCall=*/true);
2169 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2170 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2171 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2172
2173 // if (lane_id == 0)
2174 llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
2175 Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
2176 CGF.EmitBlock(ThenBB);
2177
2178 // Reduce element = LocalReduceList[i]
2179 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2180 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2181 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2182 // elemptr = ((CopyType*)(elemptrptr)) + I
2183 Address ElemPtr = Address(ElemPtrPtr, Align);
2184 ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
2185 if (NumIters > 1) {
2186 ElemPtr = Address(Bld.CreateGEP(ElemPtr.getElementType(),
2187 ElemPtr.getPointer(), Cnt),
2188 ElemPtr.getAlignment());
2189 }
2190
2191 // Get pointer to location in transfer medium.
2192 // MediumPtr = &medium[warp_id]
2193 llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
2194 TransferMedium->getValueType(), TransferMedium,
2195 {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
2196 Address MediumPtr(MediumPtrVal, Align);
2197 // Casting to actual data type.
2198 // MediumPtr = (CopyType*)MediumPtrAddr;
2199 MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
2200
2201 // elem = *elemptr
2202 //*MediumPtr = elem
2203 llvm::Value *Elem = CGF.EmitLoadOfScalar(
2204 ElemPtr, /*Volatile=*/false, CType, Loc,
2205 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2206 // Store the source element value to the dest element address.
2207 CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType,
2208 LValueBaseInfo(AlignmentSource::Type),
2209 TBAAAccessInfo());
2210
2211 Bld.CreateBr(MergeBB);
2212
2213 CGF.EmitBlock(ElseBB);
2214 Bld.CreateBr(MergeBB);
2215
2216 CGF.EmitBlock(MergeBB);
2217
2218 // kmpc_barrier.
2219 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2220 /*EmitChecks=*/false,
2221 /*ForceSimpleCall=*/true);
2222
2223 //
2224 // Warp 0 copies reduce element from transfer medium.
2225 //
2226 llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
2227 llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
2228 llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
2229
2230 Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
2231 llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
2232 AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
2233
2234 // Up to 32 threads in warp 0 are active.
2235 llvm::Value *IsActiveThread =
2236 Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
2237 Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
2238
2239 CGF.EmitBlock(W0ThenBB);
2240
2241 // SrcMediumPtr = &medium[tid]
2242 llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
2243 TransferMedium->getValueType(), TransferMedium,
2244 {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
2245 Address SrcMediumPtr(SrcMediumPtrVal, Align);
2246 // SrcMediumVal = *SrcMediumPtr;
2247 SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
2248
2249 // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
2250 Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2251 llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
2252 TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
2253 Address TargetElemPtr = Address(TargetElemPtrVal, Align);
2254 TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
2255 if (NumIters > 1) {
2256 TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getElementType(),
2257 TargetElemPtr.getPointer(), Cnt),
2258 TargetElemPtr.getAlignment());
2259 }
2260
2261 // *TargetElemPtr = SrcMediumVal;
2262 llvm::Value *SrcMediumValue =
2263 CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
2264 CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
2265 CType);
2266 Bld.CreateBr(W0MergeBB);
2267
2268 CGF.EmitBlock(W0ElseBB);
2269 Bld.CreateBr(W0MergeBB);
2270
2271 CGF.EmitBlock(W0MergeBB);
2272
2273 if (NumIters > 1) {
2274 Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
2275 CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
2276 CGF.EmitBranch(PrecondBB);
2277 (void)ApplyDebugLocation::CreateEmpty(CGF);
2278 CGF.EmitBlock(ExitBB);
2279 }
2280 RealTySize %= TySize;
2281 }
2282 ++Idx;
2283 }
2284
2285 CGF.FinishFunction();
2286 return Fn;
2287}
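
A rough device-side picture of the transfer medium created above (a sketch only; the actual global is emitted directly as IR, in the cuda_shared address space):

  // __shared__ int32_t
  //     __openmp_nvptx_data_transfer_temporary_storage[WARP_SIZE];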
2288
2289/// Emit a helper that reduces data across two OpenMP threads (lanes)
2290/// in the same warp. It uses shuffle instructions to copy over data from
2291/// a remote lane's stack. The reduction algorithm performed is specified
2292/// by the fourth parameter.
2293///
2294/// Algorithm Versions.
2295/// Full Warp Reduce (argument value 0):
2296/// This algorithm assumes that all 32 lanes are active and gathers
2297/// data from these 32 lanes, producing a single resultant value.
2298/// Contiguous Partial Warp Reduce (argument value 1):
2299/// This algorithm assumes that only a *contiguous* subset of lanes
2300/// are active. This happens for the last warp in a parallel region
2301/// when the user specified num_threads is not an integer multiple of
2302/// 32. This contiguous subset always starts with the zeroth lane.
2303/// Partial Warp Reduce (argument value 2):
2304/// This algorithm gathers data from any number of lanes at any position.
2305/// All reduced values are stored in the lowest possible lane. The set
2306 /// of problems every algorithm addresses is a superset of those
2307/// addressable by algorithms with a lower version number. Overhead
2308/// increases as algorithm version increases.
2309///
2310/// Terminology
2311/// Reduce element:
2312/// Reduce element refers to the individual data field with primitive
2313/// data types to be combined and reduced across threads.
2314/// Reduce list:
2315/// Reduce list refers to a collection of local, thread-private
2316/// reduce elements.
2317/// Remote Reduce list:
2318/// Remote Reduce list refers to a collection of remote (relative to
2319/// the current thread) reduce elements.
2320///
2321/// We distinguish between three states of threads that are important to
2322/// the implementation of this function.
2323/// Alive threads:
2324/// Threads in a warp executing the SIMT instruction, as distinguished from
2325/// threads that are inactive due to divergent control flow.
2326/// Active threads:
2327/// The minimal set of threads that has to be alive upon entry to this
2328/// function. The computation is correct iff active threads are alive.
2329/// Some threads are alive but they are not active because they do not
2330/// contribute to the computation in any useful manner. Turning them off
2331/// may introduce control flow overheads without any tangible benefits.
2332/// Effective threads:
2333/// In order to comply with the argument requirements of the shuffle
2334/// function, we must keep all lanes holding data alive. But at most
2335/// half of them perform value aggregation; we refer to this half of
2336/// threads as effective. The other half is simply handing off their
2337/// data.
2338///
2339/// Procedure
2340/// Value shuffle:
2341/// In this step active threads transfer data from higher lane positions
2342/// in the warp to lower lane positions, creating Remote Reduce list.
2343/// Value aggregation:
2344/// In this step, effective threads combine their thread local Reduce list
2345/// with Remote Reduce list and store the result in the thread local
2346/// Reduce list.
2347/// Value copy:
2348/// In this step, we deal with the assumption made by algorithm 2
2349/// (i.e. contiguity assumption). When we have an odd number of lanes
2350/// active, say 2k+1, only k threads will be effective and therefore k
2351/// new values will be produced. However, the Reduce list owned by the
2352/// (2k+1)th thread is ignored in the value aggregation. Therefore
2353/// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
2354/// that the contiguity assumption still holds.
2355static llvm::Function *emitShuffleAndReduceFunction(
2356 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2357 QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
2358 ASTContext &C = CGM.getContext();
2359
2360 // Thread local Reduce list used to host the values of data to be reduced.
2361 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2362 C.VoidPtrTy, ImplicitParamDecl::Other);
2363 // Current lane id; could be logical.
2364 ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
2365 ImplicitParamDecl::Other);
2366 // Offset of the remote source lane relative to the current lane.
2367 ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2368 C.ShortTy, ImplicitParamDecl::Other);
2369 // Algorithm version. This is expected to be known at compile time.
2370 ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2371 C.ShortTy, ImplicitParamDecl::Other);
2372 FunctionArgList Args;
2373 Args.push_back(&ReduceListArg);
2374 Args.push_back(&LaneIDArg);
2375 Args.push_back(&RemoteLaneOffsetArg);
2376 Args.push_back(&AlgoVerArg);
2377
2378 const CGFunctionInfo &CGFI =
2379 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2380 auto *Fn = llvm::Function::Create(
2381 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2382 "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
2383 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2384 Fn->setDoesNotRecurse();
2385
2386 CodeGenFunction CGF(CGM);
2387 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2388
2389 CGBuilderTy &Bld = CGF.Builder;
2390
2391 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2392 Address LocalReduceList(
2393 Bld.CreatePointerBitCastOrAddrSpaceCast(
2394 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2395 C.VoidPtrTy, SourceLocation()),
2396 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2397 CGF.getPointerAlign());
2398
2399 Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
2400 llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
2401 AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2402
2403 Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
2404 llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
2405 AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2406
2407 Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
2408 llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
2409 AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2410
2411 // Create a local thread-private variable to host the Reduce list
2412 // from a remote lane.
2413 Address RemoteReduceList =
2414 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
2415
2416 // This loop iterates through the list of reduce elements and copies,
2417 // element by element, from a remote lane in the warp to RemoteReduceList,
2418 // hosted on the thread's stack.
2419 emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
2420 LocalReduceList, RemoteReduceList,
2421 {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
2422 /*ScratchpadIndex=*/nullptr,
2423 /*ScratchpadWidth=*/nullptr});
2424
2425 // The actions to be performed on the Remote Reduce list depend on
2426 // the algorithm version.
2427 //
2428 // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
2429 // LaneId % 2 == 0 && Offset > 0):
2430 // do the reduction value aggregation
2431 //
2432 // The thread local variable Reduce list is mutated in place to host the
2433 // reduced data, which is the aggregated value produced from local and
2434 // remote lanes.
2435 //
2436 // Note that AlgoVer is expected to be a constant integer known at compile
2437 // time.
2438 // When AlgoVer==0, the first conjunction evaluates to true, making
2439 // the entire predicate true at compile time.
2440 // When AlgoVer==1, the second conjunction has only its second part to
2441 // be evaluated at runtime. The other conjunctions evaluate to false
2442 // at compile time.
2443 // When AlgoVer==2, the third conjunction has only its second part to
2444 // be evaluated at runtime. The other conjunctions evaluate to false
2445 // at compile time.
2446 llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
2447
2448 llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
2449 llvm::Value *CondAlgo1 = Bld.CreateAnd(
2450 Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
2451
2452 llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
2453 llvm::Value *CondAlgo2 = Bld.CreateAnd(
2454 Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
2455 CondAlgo2 = Bld.CreateAnd(
2456 CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
2457
2458 llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
2459 CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
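
Collapsing the three guards computed above into a single expression (AlgoVer is a compile-time constant, so the unused arms fold away):

  // reduce = (AlgoVer == 0)
  //       || (AlgoVer == 1 && LaneId < RemoteLaneOffset)
  //       || (AlgoVer == 2 && (LaneId & 1) == 0 && RemoteLaneOffset > 0);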
2460
2461 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2462 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2463 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2464 Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
2465
2466 CGF.EmitBlock(ThenBB);
2467 // reduce_function(LocalReduceList, RemoteReduceList)
2468 llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2469 LocalReduceList.getPointer(), CGF.VoidPtrTy);
2470 llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2471 RemoteReduceList.getPointer(), CGF.VoidPtrTy);
2472 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2473 CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
2474 Bld.CreateBr(MergeBB);
2475
2476 CGF.EmitBlock(ElseBB);
2477 Bld.CreateBr(MergeBB);
2478
2479 CGF.EmitBlock(MergeBB);
2480
2481 // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
2482 // Reduce list.
2483 Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
2484 llvm::Value *CondCopy = Bld.CreateAnd(
2485 Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
2486
2487 llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
2488 llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
2489 llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
2490 Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
2491
2492 CGF.EmitBlock(CpyThenBB);
2493 emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
2494 RemoteReduceList, LocalReduceList);
2495 Bld.CreateBr(CpyMergeBB);
2496
2497 CGF.EmitBlock(CpyElseBB);
2498 Bld.CreateBr(CpyMergeBB);
2499
2500 CGF.EmitBlock(CpyMergeBB);
2501
2502 CGF.FinishFunction();
2503 return Fn;
2504}
2505
2506/// This function emits a helper that copies all the reduction variables from
2507/// the team into the provided global buffer for the reduction variables.
2508///
2509/// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
2510/// For all data entries D in reduce_data:
2511/// Copy local D to buffer.D[Idx]
2512static llvm::Value *emitListToGlobalCopyFunction(
2513 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2514 QualType ReductionArrayTy, SourceLocation Loc,
2515 const RecordDecl *TeamReductionRec,
2516 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2517 &VarFieldMap) {
2518 ASTContext &C = CGM.getContext();
2519
2520 // Buffer: global reduction buffer.
2521 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2522 C.VoidPtrTy, ImplicitParamDecl::Other);
2523 // Idx: index of the buffer.
2524 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2525 ImplicitParamDecl::Other);
2526 // ReduceList: thread local Reduce list.
2527 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2528 C.VoidPtrTy, ImplicitParamDecl::Other);
2529 FunctionArgList Args;
2530 Args.push_back(&BufferArg);
2531 Args.push_back(&IdxArg);
2532 Args.push_back(&ReduceListArg);
2533
2534 const CGFunctionInfo &CGFI =
2535 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2536 auto *Fn = llvm::Function::Create(
2537 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2538 "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
2539 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2540 Fn->setDoesNotRecurse();
2541 CodeGenFunction CGF(CGM);
2542 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2543
2544 CGBuilderTy &Bld = CGF.Builder;
2545
2546 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2547 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2548 Address LocalReduceList(
2549 Bld.CreatePointerBitCastOrAddrSpaceCast(
2550 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2551 C.VoidPtrTy, Loc),
2552 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2553 CGF.getPointerAlign());
2554 QualType StaticTy = C.getRecordType(TeamReductionRec);
2555 llvm::Type *LLVMReductionsBufferTy =
2556 CGM.getTypes().ConvertTypeForMem(StaticTy);
2557 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2558 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2559 LLVMReductionsBufferTy->getPointerTo());
2560 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2561 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2562 /*Volatile=*/false, C.IntTy,
2563 Loc)};
2564 unsigned Idx = 0;
2565 for (const Expr *Private : Privates) {
2566 // Reduce element = LocalReduceList[i]
2567 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2568 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2569 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2570 // elemptr = ((CopyType*)(elemptrptr)) + I
2571 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2572 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
2573 Address ElemPtr =
2574 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
2575 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
2576 // Global = Buffer.VD[Idx];
2577 const FieldDecl *FD = VarFieldMap.lookup(VD);
2578 LValue GlobLVal = CGF.EmitLValueForField(
2579 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2580 Address GlobAddr = GlobLVal.getAddress(CGF);
2581 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2582 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2583 GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment()));
2584 switch (CGF.getEvaluationKind(Private->getType())) {
2585 case TEK_Scalar: {
2586 llvm::Value *V = CGF.EmitLoadOfScalar(
2587 ElemPtr, /*Volatile=*/false, Private->getType(), Loc,
2588 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2589 CGF.EmitStoreOfScalar(V, GlobLVal);
2590 break;
2591 }
2592 case TEK_Complex: {
2593 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
2594 CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
2595 CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
2596 break;
2597 }
2598 case TEK_Aggregate:
2599 CGF.EmitAggregateCopy(GlobLVal,
2600 CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2601 Private->getType(), AggValueSlot::DoesNotOverlap);
2602 break;
2603 }
2604 ++Idx;
2605 }
2606
2607 CGF.FinishFunction();
2608 return Fn;
2609}
2610
2611/// This function emits a helper that reduces all the reduction variables from
2612/// the team into the provided global buffer for the reduction variables.
2613///
2614/// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
2615/// void *GlobPtrs[];
2616/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
2617/// ...
2618/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
2619/// reduce_function(GlobPtrs, reduce_data);
2620static llvm::Value *emitListToGlobalReduceFunction(
2621 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2622 QualType ReductionArrayTy, SourceLocation Loc,
2623 const RecordDecl *TeamReductionRec,
2624 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2625 &VarFieldMap,
2626 llvm::Function *ReduceFn) {
2627 ASTContext &C = CGM.getContext();
2628
2629 // Buffer: global reduction buffer.
2630 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2631 C.VoidPtrTy, ImplicitParamDecl::Other);
2632 // Idx: index of the buffer.
2633 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2634 ImplicitParamDecl::Other);
2635 // ReduceList: thread local Reduce list.
2636 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2637 C.VoidPtrTy, ImplicitParamDecl::Other);
2638 FunctionArgList Args;
2639 Args.push_back(&BufferArg);
2640 Args.push_back(&IdxArg);
2641 Args.push_back(&ReduceListArg);
2642
2643 const CGFunctionInfo &CGFI =
2644 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2645 auto *Fn = llvm::Function::Create(
2646 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2647 "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
2648 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2649 Fn->setDoesNotRecurse();
2650 CodeGenFunction CGF(CGM);
2651 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2652
2653 CGBuilderTy &Bld = CGF.Builder;
2654
2655 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2656 QualType StaticTy = C.getRecordType(TeamReductionRec);
2657 llvm::Type *LLVMReductionsBufferTy =
2658 CGM.getTypes().ConvertTypeForMem(StaticTy);
2659 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2660 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2661 LLVMReductionsBufferTy->getPointerTo());
2662
2663 // 1. Build a list of reduction variables.
2664 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2665 Address ReductionList =
2666 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2667 auto IPriv = Privates.begin();
2668 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2669 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2670 /*Volatile=*/false, C.IntTy,
2671 Loc)};
2672 unsigned Idx = 0;
2673 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2674 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2675 // Global = Buffer.VD[Idx];
2676 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2677 const FieldDecl *FD = VarFieldMap.lookup(VD);
2678 LValue GlobLVal = CGF.EmitLValueForField(
2679 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2680 Address GlobAddr = GlobLVal.getAddress(CGF);
2681 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2682 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2683 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2684 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2685 if ((*IPriv)->getType()->isVariablyModifiedType()) {
2686 // Store array size.
2687 ++Idx;
2688 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2689 llvm::Value *Size = CGF.Builder.CreateIntCast(
2690 CGF.getVLASize(
2691 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2692 .NumElts,
2693 CGF.SizeTy, /*isSigned=*/false);
2694 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2695 Elem);
2696 }
2697 }
2698
2699 // Call reduce_function(GlobalReduceList, ReduceList)
2700 llvm::Value *GlobalReduceList =
2701 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2702 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2703 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2704 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2705 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2706 CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
2707 CGF.FinishFunction();
2708 return Fn;
2709}
2710
2711/// This function emits a helper that copies all the reduction variables from
2712/// the provided global buffer back into the thread-local reduce list.
2713///
2714/// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
2715/// For all data entries D in reduce_data:
2716/// Copy buffer.D[Idx] to local D;
2717static llvm::Value *emitGlobalToListCopyFunction(
2718 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2719 QualType ReductionArrayTy, SourceLocation Loc,
2720 const RecordDecl *TeamReductionRec,
2721 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2722 &VarFieldMap) {
2723 ASTContext &C = CGM.getContext();
2724
2725 // Buffer: global reduction buffer.
2726 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2727 C.VoidPtrTy, ImplicitParamDecl::Other);
2728 // Idx: index of the buffer.
2729 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2730 ImplicitParamDecl::Other);
2731 // ReduceList: thread local Reduce list.
2732 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2733 C.VoidPtrTy, ImplicitParamDecl::Other);
2734 FunctionArgList Args;
2735 Args.push_back(&BufferArg);
2736 Args.push_back(&IdxArg);
2737 Args.push_back(&ReduceListArg);
2738
2739 const CGFunctionInfo &CGFI =
2740 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2741 auto *Fn = llvm::Function::Create(
2742 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2743 "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
2744 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2745 Fn->setDoesNotRecurse();
2746 CodeGenFunction CGF(CGM);
2747 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2748
2749 CGBuilderTy &Bld = CGF.Builder;
2750
2751 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2752 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2753 Address LocalReduceList(
2754 Bld.CreatePointerBitCastOrAddrSpaceCast(
2755 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2756 C.VoidPtrTy, Loc),
2757 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2758 CGF.getPointerAlign());
2759 QualType StaticTy = C.getRecordType(TeamReductionRec);
2760 llvm::Type *LLVMReductionsBufferTy =
2761 CGM.getTypes().ConvertTypeForMem(StaticTy);
2762 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2763 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2764 LLVMReductionsBufferTy->getPointerTo());
2765
2766 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2767 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2768 /*Volatile=*/false, C.IntTy,
2769 Loc)};
2770 unsigned Idx = 0;
2771 for (const Expr *Private : Privates) {
2772 // Reduce element = LocalReduceList[i]
2773 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2774 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2775 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2776 // elemptr = ((CopyType*)(elemptrptr)) + I
2777 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2778 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
2779 Address ElemPtr =
2780 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
2781 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
2782 // Global = Buffer.VD[Idx];
2783 const FieldDecl *FD = VarFieldMap.lookup(VD);
2784 LValue GlobLVal = CGF.EmitLValueForField(
2785 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2786 Address GlobAddr = GlobLVal.getAddress(CGF);
2787 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2788 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2789 GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment()));
2790 switch (CGF.getEvaluationKind(Private->getType())) {
2791 case TEK_Scalar: {
2792 llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
2793 CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(),
2794 LValueBaseInfo(AlignmentSource::Type),
2795 TBAAAccessInfo());
2796 break;
2797 }
2798 case TEK_Complex: {
2799 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
2800 CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2801 /*isInit=*/false);
2802 break;
2803 }
2804 case TEK_Aggregate:
2805 CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2806 GlobLVal, Private->getType(),
2807 AggValueSlot::DoesNotOverlap);
2808 break;
2809 }
2810 ++Idx;
2811 }
2812
2813 CGF.FinishFunction();
2814 return Fn;
2815}
2816
2817/// This function emits a helper that reduces the per-team entries in the
2818/// provided global buffer into the thread-local reduce list.
2819///
2820/// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
2821/// void *GlobPtrs[];
2822/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
2823/// ...
2824/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
2825/// reduce_function(reduce_data, GlobPtrs);
2826static llvm::Value *emitGlobalToListReduceFunction(
2827 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2828 QualType ReductionArrayTy, SourceLocation Loc,
2829 const RecordDecl *TeamReductionRec,
2830 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2831 &VarFieldMap,
2832 llvm::Function *ReduceFn) {
2833 ASTContext &C = CGM.getContext();
2834
2835 // Buffer: global reduction buffer.
2836 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2837 C.VoidPtrTy, ImplicitParamDecl::Other);
2838 // Idx: index of the buffer.
2839 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2840 ImplicitParamDecl::Other);
2841 // ReduceList: thread local Reduce list.
2842 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2843 C.VoidPtrTy, ImplicitParamDecl::Other);
2844 FunctionArgList Args;
2845 Args.push_back(&BufferArg);
2846 Args.push_back(&IdxArg);
2847 Args.push_back(&ReduceListArg);
2848
2849 const CGFunctionInfo &CGFI =
2850 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2851 auto *Fn = llvm::Function::Create(
2852 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2853 "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
2854 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2855 Fn->setDoesNotRecurse();
2856 CodeGenFunction CGF(CGM);
2857 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2858
2859 CGBuilderTy &Bld = CGF.Builder;
2860
2861 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2862 QualType StaticTy = C.getRecordType(TeamReductionRec);
2863 llvm::Type *LLVMReductionsBufferTy =
2864 CGM.getTypes().ConvertTypeForMem(StaticTy);
2865 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2866 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2867 LLVMReductionsBufferTy->getPointerTo());
2868
2869 // 1. Build a list of reduction variables.
2870 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2871 Address ReductionList =
2872 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2873 auto IPriv = Privates.begin();
2874 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2875 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2876 /*Volatile=*/false, C.IntTy,
2877 Loc)};
2878 unsigned Idx = 0;
2879 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2880 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2881 // Global = Buffer.VD[Idx];
2882 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2883 const FieldDecl *FD = VarFieldMap.lookup(VD);
2884 LValue GlobLVal = CGF.EmitLValueForField(
2885 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2886 Address GlobAddr = GlobLVal.getAddress(CGF);
2887 llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2888 GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2889 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2890 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2891 if ((*IPriv)->getType()->isVariablyModifiedType()) {
2892 // Store array size.
2893 ++Idx;
2894 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2895 llvm::Value *Size = CGF.Builder.CreateIntCast(
2896 CGF.getVLASize(
2897 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2898 .NumElts,
2899 CGF.SizeTy, /*isSigned=*/false);
2900 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2901 Elem);
2902 }
2903 }
2904
2905 // Call reduce_function(ReduceList, GlobalReduceList)
2906 llvm::Value *GlobalReduceList =
2907 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2908 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2909 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2910 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2911 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2912 CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
2913 CGF.FinishFunction();
2914 return Fn;
2915}
2916
2917///
2918/// Design of OpenMP reductions on the GPU
2919///
2920/// Consider a typical OpenMP program with one or more reduction
2921/// clauses:
2922///
2923/// float foo;
2924/// double bar;
2925/// #pragma omp target teams distribute parallel for \
2926/// reduction(+:foo) reduction(*:bar)
2927/// for (int i = 0; i < N; i++) {
2928/// foo += A[i]; bar *= B[i];
2929/// }
2930///
2931/// where 'foo' and 'bar' are reduced across all OpenMP threads in
2932/// all teams. In our OpenMP implementation on the NVPTX device an
2933/// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
2934/// within a team are mapped to CUDA threads within a threadblock.
2935/// Our goal is to efficiently aggregate values across all OpenMP
2936/// threads such that:
2937///
2938/// - the compiler and runtime are logically concise, and
2939/// - the reduction is performed efficiently in a hierarchical
2940/// manner as follows: within OpenMP threads in the same warp,
2941/// across warps in a threadblock, and finally across teams on
2942/// the NVPTX device.
2943///
2944/// Introduction to Decoupling
2945///
2946/// We would like to decouple the compiler and the runtime so that the
2947/// latter is ignorant of the reduction variables (number, data types)
2948/// and the reduction operators. This allows a simpler interface
2949/// and implementation while still attaining good performance.
2950///
2951/// Pseudocode for the aforementioned OpenMP program generated by the
2952/// compiler is as follows:
2953///
2954/// 1. Create private copies of reduction variables on each OpenMP
2955/// thread: 'foo_private', 'bar_private'
2956/// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
2957/// to it and writes the result in 'foo_private' and 'bar_private'
2958/// respectively.
2959/// 3. Call the OpenMP runtime on the GPU to reduce within a team
2960/// and store the result on the team master:
2961///
2962/// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
2963/// reduceData, shuffleReduceFn, interWarpCpyFn)
2964///
2965/// where:
2966/// struct ReduceData {
2967/// double *foo;
2968/// double *bar;
2969/// } reduceData
2970/// reduceData.foo = &foo_private
2971/// reduceData.bar = &bar_private
2972///
2973/// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
2974/// auxiliary functions generated by the compiler that operate on
2975/// variables of type 'ReduceData'. They help the runtime perform
2976/// algorithmic steps in a data-agnostic manner.
2977///
2978/// 'shuffleReduceFn' is a pointer to a function that reduces data
2979/// of type 'ReduceData' across two OpenMP threads (lanes) in the
2980/// same warp. It takes the following arguments as input:
2981///
2982/// a. variable of type 'ReduceData' on the calling lane,
2983/// b. its lane_id,
2984/// c. an offset relative to the current lane_id to generate a
2985/// remote_lane_id. The remote lane contains the second
2986/// variable of type 'ReduceData' that is to be reduced.
2987/// d. an algorithm version parameter determining which reduction
2988/// algorithm to use.
2989///
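/// Put together, the helper built by 'emitShuffleAndReduceFunction' has
/// roughly this signature (a sketch; the parameter names are illustrative,
/// but the 16-bit widths match the emitted code):
///
///   void shuffleReduceFn(void *reduce_data, int16_t lane_id,
///                        int16_t remote_lane_offset, int16_t algo_version);
///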
2990/// 'shuffleReduceFn' retrieves data from the remote lane using
2991/// efficient GPU shuffle intrinsics and reduces the two operands
2992/// element-wise using the algorithm specified by the 4th parameter.
2993/// The result is written to the first operand.
2994///
2995/// Different reduction algorithms are implemented in different
2996/// runtime functions, all calling 'shuffleReduceFn' to perform
2997/// the essential reduction step. Therefore, based on the 4th
2998/// parameter, this function behaves slightly differently to
2999/// cooperate with the runtime to ensure correctness under
3000/// different circumstances.
3001///
3002/// 'InterWarpCpyFn' is a pointer to a function that transfers
3003/// reduced variables across warps. It tunnels, through CUDA
3004/// shared memory, the thread-private data of type 'ReduceData'
3005/// from lane 0 of each warp to a lane in the first warp.
3006/// 4. Call the OpenMP runtime on the GPU to reduce across teams.
3007/// The last team writes the global reduced value to memory.
3008///
3009/// ret = __kmpc_nvptx_teams_reduce_nowait(...,
3010/// reduceData, shuffleReduceFn, interWarpCpyFn,
3011/// scratchpadCopyFn, loadAndReduceFn)
3012///
3013/// 'scratchpadCopyFn' is a helper that stores reduced
3014/// data from the team master to a scratchpad array in
3015/// global memory.
3016///
3017/// 'loadAndReduceFn' is a helper that loads data from
3018/// the scratchpad array and reduces it with the input
3019/// operand.
3020///
3021/// These compiler generated functions hide address
3022/// calculation and alignment information from the runtime.
3023/// 5. if ret == 1:
3024/// The team master of the last team stores the reduced
3025/// result to the globals in memory.
3026/// foo += reduceData.foo; bar *= reduceData.bar
3027///
3028///
3029/// Warp Reduction Algorithms
3030///
3031/// On the warp level, we have three algorithms implemented in the
3032/// OpenMP runtime depending on the number of active lanes:
3033///
3034/// Full Warp Reduction
3035///
3036/// The reduce algorithm within a warp where all lanes are active
3037/// is implemented in the runtime as follows:
3038///
3039/// full_warp_reduce(void *reduce_data,
3040/// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3041/// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
3042/// ShuffleReduceFn(reduce_data, 0, offset, 0);
3043/// }
3044///
3045/// The algorithm completes in log(2, WARPSIZE) steps.
3046///
3047/// 'ShuffleReduceFn' is called here with lane_id set to 0 because the
3048/// lane_id is not used in this version; we therefore save instructions
3049/// by not retrieving it from the corresponding special registers. The
3050/// 4th parameter, which represents the version of the algorithm being
3051/// used, is set to 0 to signify full warp reduction.
3052///
3053/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3054///
3055/// #reduce_elem refers to an element in the local lane's data structure
3056/// #remote_elem is retrieved from a remote lane
3057/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3058/// reduce_elem = reduce_elem REDUCE_OP remote_elem;
3059///
3060/// Contiguous Partial Warp Reduction
3061///
3062/// This reduce algorithm is used within a warp where only the first
3063/// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
3064/// number of OpenMP threads in a parallel region is not a multiple of
3065/// WARPSIZE. The algorithm is implemented in the runtime as follows:
3066///
3067/// void
3068/// contiguous_partial_reduce(void *reduce_data,
3069/// kmp_ShuffleReductFctPtr ShuffleReduceFn,
3070/// int size, int lane_id) {
3071/// int curr_size;
3072/// int offset;
3073/// curr_size = size;
3074/// offset = curr_size/2;
3075/// while (offset>0) {
3076/// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
3077/// curr_size = (curr_size+1)/2;
3078/// offset = curr_size/2;
3079/// }
3080/// }
3081///
3082/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3083///
3084/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3085/// if (lane_id < offset)
3086/// reduce_elem = reduce_elem REDUCE_OP remote_elem
3087/// else
3088/// reduce_elem = remote_elem
3089///
3090/// This algorithm assumes that the data to be reduced are located in a
3091/// contiguous subset of lanes starting from the first. When there is
3092/// an odd number of active lanes, the data in the last lane is not
3093/// aggregated with any other lane's data but is instead copied over.
3094///
3095/// Dispersed Partial Warp Reduction
3096///
3097/// This algorithm is used within a warp when any discontiguous subset of
3098/// lanes are active. It is used to implement the reduction operation
3099/// across lanes in an OpenMP simd region or in a nested parallel region.
3100///
3101/// void
3102/// dispersed_partial_reduce(void *reduce_data,
3103/// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3104/// int size, remote_id;
3105/// int logical_lane_id = number_of_active_lanes_before_me() * 2;
3106/// do {
3107/// remote_id = next_active_lane_id_right_after_me();
3108/// # the above function returns 0 if no active lane
3109/// # is present right after the current lane.
3110/// size = number_of_active_lanes_in_this_warp();
3111/// logical_lane_id /= 2;
3112/// ShuffleReduceFn(reduce_data, logical_lane_id,
3113/// remote_id-1-threadIdx.x, 2);
3114/// } while (logical_lane_id % 2 == 0 && size > 1);
3115/// }
3116///
3117/// There is no assumption made about the initial state of the reduction.
3118/// Any number of lanes (>=1) could be active at any position. The reduction
3119/// result is returned in the first active lane.
3120///
3121/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3122///
3123/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3124/// if (lane_id % 2 == 0 && offset > 0)
3125/// reduce_elem = reduce_elem REDUCE_OP remote_elem
3126/// else
3127/// reduce_elem = remote_elem
3128///
3129///
3130/// Intra-Team Reduction
3131///
3132/// This function, as implemented in the runtime call
3133/// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
3134/// threads in a team. It first reduces within a warp using the
3135/// aforementioned algorithms. We then proceed to gather all such
3136/// reduced values at the first warp.
3137///
3138/// The runtime makes use of the function 'InterWarpCpyFn', which copies
3139/// data from each of the "warp master" (zeroth lane of each warp, where
3140/// warp-reduced data is held) to the zeroth warp. This step reduces (in
3141/// a mathematical sense) the problem of reduction across warp masters in
3142/// a block to the problem of warp reduction.
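///
/// A sketch of this gather step (illustrative only, not the runtime's
/// exact code):
///
///   if (lane_id == 0)        # warp master holds the warp-reduced data
///     shared_slot[warp_id] = reduce_data   # via InterWarpCpyFn
///   barrier()
///   if (warp_id == 0)
///     reduce_data = shared_slot[lane_id]   # gathered into the first warp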
3143///
3144///
3145/// Inter-Team Reduction
3146///
3147/// Once a team has reduced its data to a single value, it is stored in
3148/// a global scratchpad array. Since each team has a distinct slot, this
3149/// can be done without locking.
3150///
3151/// The last team to write to the scratchpad array proceeds to reduce the
3152/// scratchpad array. One or more workers in the last team use the helper
3153/// 'loadAndReduceFn' to load and reduce values from the array, i.e.,
3154/// the k'th worker reduces every k'th element.
3155///
3156/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
3157/// reduce across workers and compute a globally reduced value.
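///
/// A sketch of the inter-team step (illustrative only):
///
///   scratchpad[team_id] = reduce_data      # via scratchpadCopyFn, no lock
///   if this team is the last to write:
///     for (k = worker_id; k < num_teams; k += num_workers)
///       loadAndReduceFn(reduce_data, scratchpad, k)
///     # then reduce across this team's workers as described above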
3158///
3159void CGOpenMPRuntimeGPU::emitReduction(
3160 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
3161 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
3162 ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
3163 if (!CGF.HaveInsertPoint())
3164 return;
3165
3166 bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
3167#ifndef NDEBUG
3168 bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
3169#endif
3170
3171 if (Options.SimpleReduction) {
3172     assert(!TeamsReduction && !ParallelReduction &&
3173            "Invalid reduction selection in emitReduction.");
3174 CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
3175 ReductionOps, Options);
3176 return;
3177 }
3178
3179   assert((TeamsReduction || ParallelReduction) &&
3180          "Invalid reduction selection in emitReduction.");
3181
3182 // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
3183 // RedList, shuffle_reduce_func, interwarp_copy_func);
3184 // or
3185 // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
3186 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
3187 llvm::Value *ThreadId = getThreadID(CGF, Loc);
3188
3189 llvm::Value *Res;
3190 ASTContext &C = CGM.getContext();
3191 // 1. Build a list of reduction variables.
3192 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3193 auto Size = RHSExprs.size();
3194 for (const Expr *E : Privates) {
3195 if (E->getType()->isVariablyModifiedType())
3196 // Reserve place for array size.
3197 ++Size;
3198 }
3199 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
3200 QualType ReductionArrayTy =
3201 C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
3202 /*IndexTypeQuals=*/0);
3203 Address ReductionList =
3204 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3205 auto IPriv = Privates.begin();
3206 unsigned Idx = 0;
3207 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
3208 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3209 CGF.Builder.CreateStore(
3210 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3211 CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
3212 Elem);
3213 if ((*IPriv)->getType()->isVariablyModifiedType()) {
3214 // Store array size.
3215 ++Idx;
3216 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3217 llvm::Value *Size = CGF.Builder.CreateIntCast(
3218 CGF.getVLASize(
3219 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3220 .NumElts,
3221 CGF.SizeTy, /*isSigned=*/false);
3222 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3223 Elem);
3224 }
3225 }
3226
3227 llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3228 ReductionList.getPointer(), CGF.VoidPtrTy);
3229 llvm::Function *ReductionFn = emitReductionFunction(
3230 Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
3231 LHSExprs, RHSExprs, ReductionOps);
3232 llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
3233 llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
3234 CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
3235 llvm::Value *InterWarpCopyFn =
3236 emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
3237
3238 if (ParallelReduction) {
3239 llvm::Value *Args[] = {RTLoc,
3240 ThreadId,
3241 CGF.Builder.getInt32(RHSExprs.size()),
3242 ReductionArrayTySize,
3243 RL,
3244 ShuffleAndReduceFn,
3245 InterWarpCopyFn};
3246
3247 Res = CGF.EmitRuntimeCall(
3248 OMPBuilder.getOrCreateRuntimeFunction(
3249 CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2),
3250 Args);
3251 } else {
3252     assert(TeamsReduction && "expected teams reduction.");
3253 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
3254 llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
3255 int Cnt = 0;
3256 for (const Expr *DRE : Privates) {
3257 PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
3258 ++Cnt;
3259 }
3260 const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
3261 CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
3262 C.getLangOpts().OpenMPCUDAReductionBufNum);
3263 TeamsReductions.push_back(TeamReductionRec);
3264 if (!KernelTeamsReductionPtr) {
3265 KernelTeamsReductionPtr = new llvm::GlobalVariable(
3266 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
3267 llvm::GlobalValue::InternalLinkage, nullptr,
3268 "_openmp_teams_reductions_buffer_$_$ptr");
3269 }
3270 llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
3271 Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
3272 /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
3273 llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
3274 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
3275 llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
3276 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
3277 ReductionFn);
3278 llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
3279 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
3280 llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
3281 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
3282 ReductionFn);
3283
3284 llvm::Value *Args[] = {
3285 RTLoc,
3286 ThreadId,
3287 GlobalBufferPtr,
3288 CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
3289 RL,
3290 ShuffleAndReduceFn,
3291 InterWarpCopyFn,
3292 GlobalToBufferCpyFn,
3293 GlobalToBufferRedFn,
3294 BufferToGlobalCpyFn,
3295 BufferToGlobalRedFn};
3296
3297 Res = CGF.EmitRuntimeCall(
3298 OMPBuilder.getOrCreateRuntimeFunction(
3299 CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2),
3300 Args);
3301 }
3302
3303 // 5. Build if (res == 1)
3304 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
3305 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
3306 llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
3307 Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
3308 CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
3309
3310 // 6. Build then branch: where we have reduced values in the master
3311 // thread in each team.
3312 // __kmpc_end_reduce{_nowait}(<gtid>);
3313 // break;
3314 CGF.EmitBlock(ThenBB);
3315
3316 // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
3317 auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
3318 this](CodeGenFunction &CGF, PrePostActionTy &Action) {
3319 auto IPriv = Privates.begin();
3320 auto ILHS = LHSExprs.begin();
3321 auto IRHS = RHSExprs.begin();
3322 for (const Expr *E : ReductionOps) {
3323 emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
3324 cast<DeclRefExpr>(*IRHS));
3325 ++IPriv;
3326 ++ILHS;
3327 ++IRHS;
3328 }
3329 };
3330 llvm::Value *EndArgs[] = {ThreadId};
3331 RegionCodeGenTy RCG(CodeGen);
3332 NVPTXActionTy Action(
3333 nullptr, llvm::None,
3334 OMPBuilder.getOrCreateRuntimeFunction(
3335 CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait),
3336 EndArgs);
3337 RCG.setAction(Action);
3338 RCG(CGF);
3339 // There is no need to emit line number for unconditional branch.
3340 (void)ApplyDebugLocation::CreateEmpty(CGF);
3341 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
3342}
3343
3344const VarDecl *
3345CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
3346 const VarDecl *NativeParam) const {
3347 if (!NativeParam->getType()->isReferenceType())
3348 return NativeParam;
3349 QualType ArgType = NativeParam->getType();
3350 QualifierCollector QC;
3351 const Type *NonQualTy = QC.strip(ArgType);
3352 QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
3353 if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
3354 if (Attr->getCaptureKind() == OMPC_map) {
3355 PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
3356 LangAS::opencl_global);
3357 }
3358 }
3359 ArgType = CGM.getContext().getPointerType(PointeeTy);
3360 QC.addRestrict();
3361 enum { NVPTX_local_addr = 5 };
3362 QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
3363 ArgType = QC.apply(CGM.getContext(), ArgType);
3364 if (isa<ImplicitParamDecl>(NativeParam))
3365 return ImplicitParamDecl::Create(
3366 CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
3367 NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
3368 return ParmVarDecl::Create(
3369 CGM.getContext(),
3370 const_cast<DeclContext *>(NativeParam->getDeclContext()),
3371 NativeParam->getBeginLoc(), NativeParam->getLocation(),
3372 NativeParam->getIdentifier(), ArgType,
3373 /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
3374}
3375
3376Address
3377CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
3378 const VarDecl *NativeParam,
3379 const VarDecl *TargetParam) const {
3380   assert(NativeParam != TargetParam &&
3381          NativeParam->getType()->isReferenceType() &&
3382          "Native arg must not be the same as target arg.");
3383 Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
3384 QualType NativeParamType = NativeParam->getType();
3385 QualifierCollector QC;
3386 const Type *NonQualTy = QC.strip(NativeParamType);
3387 QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
3388 unsigned NativePointeeAddrSpace =
3389 CGF.getContext().getTargetAddressSpace(NativePointeeTy);
3390 QualType TargetTy = TargetParam->getType();
3391 llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
3392 LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
3393 // First cast to generic.
3394 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3395 TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
3396 /*AddrSpace=*/0));
3397 // Cast from generic to native address space.
3398 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3399 TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
3400 NativePointeeAddrSpace));
3401 Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
3402 CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
3403 NativeParamType);
3404 return NativeParamAddr;
3405}
3406
3407void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
3408 CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
3409 ArrayRef<llvm::Value *> Args) const {
3410 SmallVector<llvm::Value *, 4> TargetArgs;
3411 TargetArgs.reserve(Args.size());
3412 auto *FnType = OutlinedFn.getFunctionType();
3413 for (unsigned I = 0, E = Args.size(); I < E; ++I) {
3414 if (FnType->isVarArg() && FnType->getNumParams() <= I) {
3415 TargetArgs.append(std::next(Args.begin(), I), Args.end());
3416 break;
3417 }
3418 llvm::Type *TargetType = FnType->getParamType(I);
3419 llvm::Value *NativeArg = Args[I];
3420 if (!TargetType->isPointerTy()) {
3421 TargetArgs.emplace_back(NativeArg);
3422 continue;
3423 }
3424 llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3425 NativeArg,
3426 NativeArg->getType()->getPointerElementType()->getPointerTo());
3427 TargetArgs.emplace_back(
3428 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
3429 }
3430 CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
3431}
3432
3433/// Emit function which wraps the outlined parallel region
3434/// and controls the arguments which are passed to this function.
3435/// The wrapper ensures that the outlined function is called
3436/// with the correct arguments when data is shared.
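/// The generated wrapper has roughly this shape (a sketch assuming N
/// captured variables; the names are illustrative):
///
///   void outlined_parallel_fn_wrapper(uint16_t parallel_level,
///                                     uint32_t tid) {
///     uint32_t zero = 0;
///     void **shared_args;
///     __kmpc_get_shared_variables(&shared_args);
///     outlined_parallel_fn(&tid, &zero, shared_args[0], ...,
///                          shared_args[N-1]);
///   }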
3437llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
3438 llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
3439 ASTContext &Ctx = CGM.getContext();
3440 const auto &CS = *D.getCapturedStmt(OMPD_parallel);
3441
3442 // Create a function that takes as argument the source thread.
3443 FunctionArgList WrapperArgs;
3444 QualType Int16QTy =
3445 Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
3446 QualType Int32QTy =
3447 Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
3448 ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3449 /*Id=*/nullptr, Int16QTy,
3450 ImplicitParamDecl::Other);
3451 ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3452 /*Id=*/nullptr, Int32QTy,
3453 ImplicitParamDecl::Other);
3454 WrapperArgs.emplace_back(&ParallelLevelArg);
3455 WrapperArgs.emplace_back(&WrapperArg);
3456
3457 const CGFunctionInfo &CGFI =
3458 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
3459
3460 auto *Fn = llvm::Function::Create(
3461 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3462 Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
3463
3464 // Ensure we do not inline the function. This is trivially true for the ones
3465  // passed to __kmpc_fork_call but the ones called in serialized regions
3466  // could be inlined. This is not perfect but it is closer to the invariant
3467 // we want, namely, every data environment starts with a new function.
3468 // TODO: We should pass the if condition to the runtime function and do the
3469 // handling there. Much cleaner code.
3470 Fn->addFnAttr(llvm::Attribute::NoInline);
3471
3472 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3473 Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
3474 Fn->setDoesNotRecurse();
3475
3476 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
3477 CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
3478 D.getBeginLoc(), D.getBeginLoc());
3479
3480 const auto *RD = CS.getCapturedRecordDecl();
3481 auto CurField = RD->field_begin();
3482
3483 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
3484 /*Name=*/".zero.addr");
3485 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
3486 // Get the array of arguments.
3487 SmallVector<llvm::Value *, 8> Args;
3488
3489 Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
3490 Args.emplace_back(ZeroAddr.getPointer());
3491
3492 CGBuilderTy &Bld = CGF.Builder;
3493 auto CI = CS.capture_begin();
3494
3495 // Use global memory for data sharing.
3496 // Handle passing of global args to workers.
3497 Address GlobalArgs =
3498 CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
3499 llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
3500 llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
3501 CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
3502 CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
3503 DataSharingArgs);
3504
3505 // Retrieve the shared variables from the list of references returned
3506 // by the runtime. Pass the variables to the outlined function.
3507 Address SharedArgListAddress = Address::invalid();
3508 if (CS.capture_size() > 0 ||
3509 isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
3510 SharedArgListAddress = CGF.EmitLoadOfPointer(
3511 GlobalArgs, CGF.getContext()
3512 .getPointerType(CGF.getContext().getPointerType(
3513 CGF.getContext().VoidPtrTy))
3514 .castAs<PointerType>());
3515 }
3516 unsigned Idx = 0;
3517 if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
3518 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
3519 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3520 Src, CGF.SizeTy->getPointerTo());
3521 llvm::Value *LB = CGF.EmitLoadOfScalar(
3522 TypedAddress,
3523 /*Volatile=*/false,
3524 CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
3525 cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
3526 Args.emplace_back(LB);
3527 ++Idx;
3528 Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
3529 TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3530 Src, CGF.SizeTy->getPointerTo());
3531 llvm::Value *UB = CGF.EmitLoadOfScalar(
3532 TypedAddress,
3533 /*Volatile=*/false,
3534 CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
3535 cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
3536 Args.emplace_back(UB);
3537 ++Idx;
3538 }
3539 if (CS.capture_size() > 0) {
3540 ASTContext &CGFContext = CGF.getContext();
3541 for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
3542 QualType ElemTy = CurField->getType();
3543 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
3544 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3545 Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
3546 llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
3547 /*Volatile=*/false,
3548 CGFContext.getPointerType(ElemTy),
3549 CI->getLocation());
3550 if (CI->capturesVariableByCopy() &&
3551 !CI->getCapturedVar()->getType()->isAnyPointerType()) {
3552 Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
3553 CI->getLocation());
3554 }
3555 Args.emplace_back(Arg);
3556 }
3557 }
3558
3559 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
3560 CGF.FinishFunction();
3561 return Fn;
3562}
3563
3564void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
3565 const Decl *D) {
3566 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
3567 return;
3568
3569 assert(D && "Expected function or captured|block decl.")(static_cast <bool> (D && "Expected function or captured|block decl."
) ? void (0) : __assert_fail ("D && \"Expected function or captured|block decl.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp"
, 3569, __extension__ __PRETTY_FUNCTION__))
;
3570 assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&(static_cast <bool> (FunctionGlobalizedDecls.count(CGF.
CurFn) == 0 && "Function is registered already.") ? void
(0) : __assert_fail ("FunctionGlobalizedDecls.count(CGF.CurFn) == 0 && \"Function is registered already.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp"
, 3571, __extension__ __PRETTY_FUNCTION__))
3571 "Function is registered already.")(static_cast <bool> (FunctionGlobalizedDecls.count(CGF.
CurFn) == 0 && "Function is registered already.") ? void
(0) : __assert_fail ("FunctionGlobalizedDecls.count(CGF.CurFn) == 0 && \"Function is registered already.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp"
, 3571, __extension__ __PRETTY_FUNCTION__))
;
3572 assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&(static_cast <bool> ((!TeamAndReductions.first || TeamAndReductions
.first == D) && "Team is set but not processed.") ? void
(0) : __assert_fail ("(!TeamAndReductions.first || TeamAndReductions.first == D) && \"Team is set but not processed.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp"
, 3573, __extension__ __PRETTY_FUNCTION__))
3573 "Team is set but not processed.")(static_cast <bool> ((!TeamAndReductions.first || TeamAndReductions
.first == D) && "Team is set but not processed.") ? void
(0) : __assert_fail ("(!TeamAndReductions.first || TeamAndReductions.first == D) && \"Team is set but not processed.\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp"
, 3573, __extension__ __PRETTY_FUNCTION__))
;
3574 const Stmt *Body = nullptr;
3575 bool NeedToDelayGlobalization = false;
3576 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
3577 Body = FD->getBody();
3578 } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
3579 Body = BD->getBody();
3580 } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
3581 Body = CD->getBody();
3582 NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
3583 if (NeedToDelayGlobalization &&
3584 getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
3585 return;
3586 }
3587 if (!Body)
3588 return;
3589 CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
3590 VarChecker.Visit(Body);
3591 const RecordDecl *GlobalizedVarsRecord =
3592 VarChecker.getGlobalizedRecord(IsInTTDRegion);
3593 TeamAndReductions.first = nullptr;
3594 TeamAndReductions.second.clear();
3595 ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
3596 VarChecker.getEscapedVariableLengthDecls();
3597 if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
3598 return;
3599 auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
3600 I->getSecond().MappedParams =
3601 std::make_unique<CodeGenFunction::OMPMapVars>();
3602 I->getSecond().EscapedParameters.insert(
3603 VarChecker.getEscapedParameters().begin(),
3604 VarChecker.getEscapedParameters().end());
3605 I->getSecond().EscapedVariableLengthDecls.append(
3606 EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
3607 DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
3608 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3609     assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3610 Data.insert(std::make_pair(VD, MappedVarData()));
3611 }
3612 if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
3613 CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
3614 VarChecker.Visit(Body);
3615 I->getSecond().SecondaryLocalVarData.emplace();
3616 DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
3617 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3618       assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3619 Data.insert(std::make_pair(VD, MappedVarData()));
3620 }
3621 }
3622 if (!NeedToDelayGlobalization) {
3623 emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
3624 struct GlobalizationScope final : EHScopeStack::Cleanup {
3625 GlobalizationScope() = default;
3626
3627 void Emit(CodeGenFunction &CGF, Flags flags) override {
3628 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
3629 .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
3630 }
3631 };
3632 CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
3633 }
3634}
3635
3636Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
3637 const VarDecl *VD) {
3638 if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
3639 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3640 auto AS = LangAS::Default;
3641 switch (A->getAllocatorType()) {
3642 // Use the default allocator here as by default local vars are
3643 // threadlocal.
3644 case OMPAllocateDeclAttr::OMPNullMemAlloc:
3645 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3646 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3647 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3648 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3649 // Follow the user decision - use default allocation.
3650 return Address::invalid();
3651 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3652      // TODO: implement support for user-defined allocators.
3653 return Address::invalid();
3654 case OMPAllocateDeclAttr::OMPConstMemAlloc:
3655 AS = LangAS::cuda_constant;
3656 break;
3657 case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
3658 AS = LangAS::cuda_shared;
3659 break;
3660 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
3661 case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
3662 break;
3663 }
3664 llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
3665 auto *GV = new llvm::GlobalVariable(
3666 CGM.getModule(), VarTy, /*isConstant=*/false,
3667 llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
3668 VD->getName(),
3669 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
3670 CGM.getContext().getTargetAddressSpace(AS));
3671 CharUnits Align = CGM.getContext().getDeclAlign(VD);
3672 GV->setAlignment(Align.getAsAlign());
3673 return Address(
3674 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3675 GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
3676 VD->getType().getAddressSpace()))),
3677 Align);
3678 }
3679
3680 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
3681 return Address::invalid();
3682
3683 VD = VD->getCanonicalDecl();
3684 auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
3685 if (I == FunctionGlobalizedDecls.end())
3686 return Address::invalid();
3687 auto VDI = I->getSecond().LocalVarData.find(VD);
3688 if (VDI != I->getSecond().LocalVarData.end())
3689 return VDI->second.PrivateAddr;
3690 if (VD->hasAttrs()) {
3691 for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
3692 E(VD->attr_end());
3693 IT != E; ++IT) {
3694 auto VDI = I->getSecond().LocalVarData.find(
3695 cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
3696 ->getCanonicalDecl());
3697 if (VDI != I->getSecond().LocalVarData.end())
3698 return VDI->second.PrivateAddr;
3699 }
3700 }
3701
3702 return Address::invalid();
3703}
3704
3705void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) {
3706 FunctionGlobalizedDecls.erase(CGF.CurFn);
3707 CGOpenMPRuntime::functionFinished(CGF);
3708}
3709
3710void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk(
3711 CodeGenFunction &CGF, const OMPLoopDirective &S,
3712 OpenMPDistScheduleClauseKind &ScheduleKind,
3713 llvm::Value *&Chunk) const {
3714 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
3715 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
3716 ScheduleKind = OMPC_DIST_SCHEDULE_static;
3717 Chunk = CGF.EmitScalarConversion(
3718 RT.getGPUNumThreads(CGF),
3719 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3720 S.getIterationVariable()->getType(), S.getBeginLoc());
3721 return;
3722 }
3723 CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
3724 CGF, S, ScheduleKind, Chunk);
3725}
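// Editor's note (illustration added in editing, not part of the original
// source): in SPMD mode the defaults above behave as if the user had written
//
//   #pragma omp distribute dist_schedule(static, <GPU threads per team>)
//
// so each team takes one contiguous chunk sized to its thread count; in
// generic mode the base CGOpenMPRuntime default is used instead.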
3726
3727void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk(
3728 CodeGenFunction &CGF, const OMPLoopDirective &S,
3729 OpenMPScheduleClauseKind &ScheduleKind,
3730 const Expr *&ChunkExpr) const {
3731 ScheduleKind = OMPC_SCHEDULE_static;
3732 // Chunk size is 1 in this case.
3733 llvm::APInt ChunkSize(32, 1);
3734 ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
3735 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3736 SourceLocation());
3737}
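// Editor's note (illustration added in editing, not part of the original
// source): the default constructed above is equivalent to
//
//   #pragma omp for schedule(static, 1)
//
// i.e. a cyclic mapping of iterations to threads, which keeps adjacent GPU
// threads on adjacent elements and memory accesses coalesced.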
3738
3739void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
3740 CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
3741 assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
3742        " Expected target-based directive.");
3743 const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
3744 for (const CapturedStmt::Capture &C : CS->captures()) {
3745 // Capture variables captured by reference in lambdas for target-based
3746 // directives.
3747 if (!C.capturesVariable())
3748 continue;
3749 const VarDecl *VD = C.getCapturedVar();
3750 const auto *RD = VD->getType()
3751 .getCanonicalType()
3752 .getNonReferenceType()
3753 ->getAsCXXRecordDecl();
3754 if (!RD || !RD->isLambda())
3755 continue;
3756 Address VDAddr = CGF.GetAddrOfLocalVar(VD);
3757 LValue VDLVal;
3758 if (VD->getType().getCanonicalType()->isReferenceType())
3759 VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
3760 else
3761 VDLVal = CGF.MakeAddrLValue(
3762 VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
3763 llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
3764 FieldDecl *ThisCapture = nullptr;
3765 RD->getCaptureFields(Captures, ThisCapture);
3766 if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
3767 LValue ThisLVal =
3768 CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
3769 llvm::Value *CXXThis = CGF.LoadCXXThis();
3770 CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
3771 }
3772 for (const LambdaCapture &LC : RD->captures()) {
3773 if (LC.getCaptureKind() != LCK_ByRef)
3774 continue;
3775 const VarDecl *VD = LC.getCapturedVar();
3776 if (!CS->capturesVariable(VD))
3777 continue;
3778 auto It = Captures.find(VD);
3779 assert(It != Captures.end() && "Found lambda capture without field.");
3780 LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
3781 Address VDAddr = CGF.GetAddrOfLocalVar(VD);
3782 if (VD->getType().getCanonicalType()->isReferenceType())
3783 VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
3784 VD->getType().getCanonicalType())
3785 .getAddress(CGF);
3786 CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
3787 }
3788 }
3789}
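// Editor's note: a hypothetical example of the case handled above (added in
// editing, not part of the original source):
//
//   int X = 0;
//   auto L = [&X]() { return X + 1; }; // by-reference capture
//   #pragma omp target map(X) map(L)
//   X = L();                           // must read the device copy of X
//
// The loop rewrites the lambda's reference field for X (and a captured
// 'this', if present) to point at the device-side copies, so calling the
// lambda inside the target region dereferences device memory.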
3790
3791bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
3792 LangAS &AS) {
3793 if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
3794 return false;
3795 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3796 switch (A->getAllocatorType()) {
3797 case OMPAllocateDeclAttr::OMPNullMemAlloc:
3798 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3799 // Not supported; fall back to the default mem space.
3800 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3801 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
3802 case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
3803 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3804 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3805 AS = LangAS::Default;
3806 return true;
3807 case OMPAllocateDeclAttr::OMPConstMemAlloc:
3808 AS = LangAS::cuda_constant;
3809 return true;
3810 case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
3811 AS = LangAS::cuda_shared;
3812 return true;
3813 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3814 llvm_unreachable("Expected predefined allocator for the variables with the "
3815                  "static storage.");
3816 }
3817 return false;
3818}
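// Editor's note (illustration added in editing, not part of the original
// source): the same mapping for a variable with static storage duration,
//
//   static int G;
//   #pragma omp allocate(G) allocator(omp_pteam_mem_alloc)
//
// reports AS = cuda_shared; omp_const_mem_alloc reports cuda_constant, the
// other predefined allocators fall back to the default address space, and a
// user-defined allocator cannot occur here, hence the llvm_unreachable.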
3819
3820// Get the current CudaArch and ignore any unknown values.
3821static CudaArch getCudaArch(CodeGenModule &CGM) {
3822 if (!CGM.getTarget().hasFeature("ptx"))
3823 return CudaArch::UNKNOWN;
3824 for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
3825 if (Feature.getValue()) {
3826 CudaArch Arch = StringToCudaArch(Feature.getKey());
3827 if (Arch != CudaArch::UNKNOWN)
3828 return Arch;
3829 }
3830 }
3831 return CudaArch::UNKNOWN;
3832}
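// Editor's note (added in editing, not part of the original source): for an
// NVPTX offload target built with, e.g., --offload-arch=sm_70, the feature
// map contains an enabled "sm_70" entry that StringToCudaArch recognizes, so
// this returns CudaArch::SM_70; targets without the "ptx" feature (including
// AMD GPUs) report CudaArch::UNKNOWN.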
3833
3834/// Check whether the target architecture supports unified addressing, which
3835/// is required by the OpenMP requires clause "unified_shared_memory".
3836void CGOpenMPRuntimeGPU::processRequiresDirective(
3837 const OMPRequiresDecl *D) {
3838 for (const OMPClause *Clause : D->clauselists()) {
3839 if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
3840 CudaArch Arch = getCudaArch(CGM);
3841 switch (Arch) {
3842 case CudaArch::SM_20:
3843 case CudaArch::SM_21:
3844 case CudaArch::SM_30:
3845 case CudaArch::SM_32:
3846 case CudaArch::SM_35:
3847 case CudaArch::SM_37:
3848 case CudaArch::SM_50:
3849 case CudaArch::SM_52:
3850 case CudaArch::SM_53: {
3851 SmallString<256> Buffer;
3852 llvm::raw_svector_ostream Out(Buffer);
3853 Out << "Target architecture " << CudaArchToString(Arch)
3854 << " does not support unified addressing";
3855 CGM.Error(Clause->getBeginLoc(), Out.str());
3856 return;
3857 }
3858 case CudaArch::SM_60:
3859 case CudaArch::SM_61:
3860 case CudaArch::SM_62:
3861 case CudaArch::SM_70:
3862 case CudaArch::SM_72:
3863 case CudaArch::SM_75:
3864 case CudaArch::SM_80:
3865 case CudaArch::SM_86:
3866 case CudaArch::GFX600:
3867 case CudaArch::GFX601:
3868 case CudaArch::GFX602:
3869 case CudaArch::GFX700:
3870 case CudaArch::GFX701:
3871 case CudaArch::GFX702:
3872 case CudaArch::GFX703:
3873 case CudaArch::GFX704:
3874 case CudaArch::GFX705:
3875 case CudaArch::GFX801:
3876 case CudaArch::GFX802:
3877 case CudaArch::GFX803:
3878 case CudaArch::GFX805:
3879 case CudaArch::GFX810:
3880 case CudaArch::GFX900:
3881 case CudaArch::GFX902:
3882 case CudaArch::GFX904:
3883 case CudaArch::GFX906:
3884 case CudaArch::GFX908:
3885 case CudaArch::GFX909:
3886 case CudaArch::GFX90a:
3887 case CudaArch::GFX90c:
3888 case CudaArch::GFX1010:
3889 case CudaArch::GFX1011:
3890 case CudaArch::GFX1012:
3891 case CudaArch::GFX1013:
3892 case CudaArch::GFX1030:
3893 case CudaArch::GFX1031:
3894 case CudaArch::GFX1032:
3895 case CudaArch::GFX1033:
3896 case CudaArch::GFX1034:
3897 case CudaArch::GFX1035:
3898 case CudaArch::UNUSED:
3899 case CudaArch::UNKNOWN:
3900 break;
3901 case CudaArch::LAST:
3902 llvm_unreachable("Unexpected Cuda arch.");
3903 }
3904 }
3905 }
3906 CGOpenMPRuntime::processRequiresDirective(D);
3907}
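// Editor's note (added in editing, not part of the original source): a
// translation unit containing
//
//   #pragma omp requires unified_shared_memory
//
// reaches the check above; NVIDIA architectures older than sm_60 (Pascal)
// are diagnosed because they lack unified addressing, while sm_60+ and the
// listed AMD GFX architectures are accepted silently.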
3908
3909void CGOpenMPRuntimeGPU::clear() {
3910
3911 if (!TeamsReductions.empty()) {
3912 ASTContext &C = CGM.getContext();
3913 RecordDecl *StaticRD = C.buildImplicitRecord(
3914 "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
3915 StaticRD->startDefinition();
3916 for (const RecordDecl *TeamReductionRec : TeamsReductions) {
3917 QualType RecTy = C.getRecordType(TeamReductionRec);
3918 auto *Field = FieldDecl::Create(
3919 C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
3920 C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
3921 /*BW=*/nullptr, /*Mutable=*/false,
3922 /*InitStyle=*/ICIS_NoInit);
3923 Field->setAccess(AS_public);
3924 StaticRD->addDecl(Field);
3925 }
3926 StaticRD->completeDefinition();
3927 QualType StaticTy = C.getRecordType(StaticRD);
3928 llvm::Type *LLVMReductionsBufferTy =
3929 CGM.getTypes().ConvertTypeForMem(StaticTy);
3930 // FIXME: nvlink does not handle weak linkage correctly (objects with
3931 // different sizes are reported as erroneous).
3932 // Restore CommonLinkage as soon as nvlink is fixed.
3933 auto *GV = new llvm::GlobalVariable(
3934 CGM.getModule(), LLVMReductionsBufferTy,
3935 /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
3936 llvm::Constant::getNullValue(LLVMReductionsBufferTy),
3937 "_openmp_teams_reductions_buffer_$_");
3938 KernelTeamsReductionPtr->setInitializer(
3939 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
3940 CGM.VoidPtrTy));
3941 }
3942 CGOpenMPRuntime::clear();
3943}
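// Editor's note: a rough picture of the buffer built above (added in
// editing, not part of the original source). It behaves like
//
//   union _openmp_teams_reduction_type_$_ {
//     Red1 R1; // one member per teams-reduction record in the module
//     Red2 R2;
//   } _openmp_teams_reductions_buffer_$_;
//
// a single internal global sized for the largest teams reduction, which the
// runtime reaches through KernelTeamsReductionPtr.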