Bug Summary

File: clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
Warning: line 4829, column 8
Called C++ object pointer is null
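The checker reports this when a member function is called through a pointer that is provably null on at least one feasible path. The flagged call at line 4829 lies beyond the excerpt below; as a minimal sketch of the defect class only (the types and names here are invented for illustration and are not the actual code at that line):

    struct Stmt {
      bool isEmpty() const { return true; }
    };

    bool check(const Stmt *Body) {
      const Stmt *Child = nullptr;
      if (Body)
        Child = Body;            // Child remains null when Body is null...
      return Child->isEmpty();   // ...so this call is flagged on the Body == nullptr path
    }

The usual remedies are an explicit guard (return Child && Child->isEmpty();) or an assert documenting the invariant that makes the pointer non-null.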

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGOpenMPRuntimeGPU.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D CLANG_VENDOR="Debian " -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/include -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-09-17-195756-12974-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
1//===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides a generalized class for OpenMP runtime code generation
10// specialized by GPU targets NVPTX and AMDGCN.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGOpenMPRuntimeGPU.h"
15#include "CGOpenMPRuntimeNVPTX.h"
16#include "CodeGenFunction.h"
17#include "clang/AST/Attr.h"
18#include "clang/AST/DeclOpenMP.h"
19#include "clang/AST/StmtOpenMP.h"
20#include "clang/AST/StmtVisitor.h"
21#include "clang/Basic/Cuda.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/Frontend/OpenMP/OMPGridValues.h"
24#include "llvm/IR/IntrinsicsNVPTX.h"
25
26using namespace clang;
27using namespace CodeGen;
28using namespace llvm::omp;
29
30namespace {
31enum OpenMPRTLFunctionNVPTX {
32 /// Call to void __kmpc_kernel_init(kmp_int32 thread_limit,
33 /// int16_t RequiresOMPRuntime);
34 OMPRTL_NVPTX__kmpc_kernel_init,
35 /// Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
36 OMPRTL_NVPTX__kmpc_kernel_deinit,
37 /// Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
38 /// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
39 OMPRTL_NVPTX__kmpc_spmd_kernel_init,
40 /// Call to void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
41 OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2,
42 /// Call to void __kmpc_kernel_prepare_parallel(void
43 /// *outlined_function);
44 OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
45 /// Call to bool __kmpc_kernel_parallel(void **outlined_function);
46 OMPRTL_NVPTX__kmpc_kernel_parallel,
47 /// Call to void __kmpc_kernel_end_parallel();
48 OMPRTL_NVPTX__kmpc_kernel_end_parallel,
49 /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
50 /// global_tid);
51 OMPRTL_NVPTX__kmpc_serialized_parallel,
52 /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
53 /// global_tid);
54 OMPRTL_NVPTX__kmpc_end_serialized_parallel,
55 /// Call to int32_t __kmpc_shuffle_int32(int32_t element,
56 /// int16_t lane_offset, int16_t warp_size);
57 OMPRTL_NVPTX__kmpc_shuffle_int32,
58 /// Call to int64_t __kmpc_shuffle_int64(int64_t element,
59 /// int16_t lane_offset, int16_t warp_size);
60 OMPRTL_NVPTX__kmpc_shuffle_int64,
61 /// Call to __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc, kmp_int32
62 /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
63 /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
64 /// lane_offset, int16_t shortCircuit),
65 /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
66 OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2,
67 /// Call to __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
68 /// global_tid, void *global_buffer, int32_t num_of_records, void*
69 /// reduce_data,
70 /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
71 /// lane_offset, int16_t shortCircuit),
72 /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
73 /// (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
74 /// void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
75 /// void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
76 /// int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
77 /// *buffer, int idx, void *reduce_data));
78 OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2,
79 /// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
80 OMPRTL_NVPTX__kmpc_end_reduce_nowait,
81 /// Call to void __kmpc_data_sharing_init_stack();
82 OMPRTL_NVPTX__kmpc_data_sharing_init_stack,
83 /// Call to void __kmpc_data_sharing_init_stack_spmd();
84 OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd,
85 /// Call to void* __kmpc_data_sharing_coalesced_push_stack(size_t size,
86 /// int16_t UseSharedMemory);
87 OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack,
88 /// Call to void* __kmpc_data_sharing_push_stack(size_t size, int16_t
89 /// UseSharedMemory);
90 OMPRTL_NVPTX__kmpc_data_sharing_push_stack,
91 /// Call to void __kmpc_data_sharing_pop_stack(void *a);
92 OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
93 /// Call to void __kmpc_begin_sharing_variables(void ***args,
94 /// size_t n_args);
95 OMPRTL_NVPTX__kmpc_begin_sharing_variables,
96 /// Call to void __kmpc_end_sharing_variables();
97 OMPRTL_NVPTX__kmpc_end_sharing_variables,
98 /// Call to void __kmpc_get_shared_variables(void ***GlobalArgs)
99 OMPRTL_NVPTX__kmpc_get_shared_variables,
100 /// Call to uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32
101 /// global_tid);
102 OMPRTL_NVPTX__kmpc_parallel_level,
103 /// Call to int8_t __kmpc_is_spmd_exec_mode();
104 OMPRTL_NVPTX__kmpc_is_spmd_exec_mode,
105 /// Call to void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
106 /// const void *buf, size_t size, int16_t is_shared, const void **res);
107 OMPRTL_NVPTX__kmpc_get_team_static_memory,
108 /// Call to void __kmpc_restore_team_static_memory(int16_t
109 /// isSPMDExecutionMode, int16_t is_shared);
110 OMPRTL_NVPTX__kmpc_restore_team_static_memory,
111 /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
112 OMPRTL__kmpc_barrier,
113 /// Call to void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
114 /// global_tid);
115 OMPRTL__kmpc_barrier_simple_spmd,
116 /// Call to int32_t __kmpc_warp_active_thread_mask(void);
117 OMPRTL_NVPTX__kmpc_warp_active_thread_mask,
118 /// Call to void __kmpc_syncwarp(int32_t Mask);
119 OMPRTL_NVPTX__kmpc_syncwarp,
120};
121
122/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
123class NVPTXActionTy final : public PrePostActionTy {
124 llvm::FunctionCallee EnterCallee = nullptr;
125 ArrayRef<llvm::Value *> EnterArgs;
126 llvm::FunctionCallee ExitCallee = nullptr;
127 ArrayRef<llvm::Value *> ExitArgs;
128 bool Conditional = false;
129 llvm::BasicBlock *ContBlock = nullptr;
130
131public:
132 NVPTXActionTy(llvm::FunctionCallee EnterCallee,
133 ArrayRef<llvm::Value *> EnterArgs,
134 llvm::FunctionCallee ExitCallee,
135 ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
136 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
137 ExitArgs(ExitArgs), Conditional(Conditional) {}
138 void Enter(CodeGenFunction &CGF) override {
139 llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
140 if (Conditional) {
141 llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
142 auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
143 ContBlock = CGF.createBasicBlock("omp_if.end");
144 // Generate the branch (If-stmt)
145 CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
146 CGF.EmitBlock(ThenBlock);
147 }
148 }
149 void Done(CodeGenFunction &CGF) {
150 // Emit the rest of blocks/branches
151 CGF.EmitBranch(ContBlock);
152 CGF.EmitBlock(ContBlock, true);
153 }
154 void Exit(CodeGenFunction &CGF) override {
155 CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
156 }
157};
158
159/// A class to track the execution mode when codegening directives within
160/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
161/// to the target region and used by containing directives such as 'parallel'
162/// to emit optimized code.
163class ExecutionRuntimeModesRAII {
164private:
165 CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
166 CGOpenMPRuntimeGPU::EM_Unknown;
167 CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
168 bool SavedRuntimeMode = false;
169 bool *RuntimeMode = nullptr;
170
171public:
172 /// Constructor for Non-SPMD mode.
173 ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
174 : ExecMode(ExecMode) {
175 SavedExecMode = ExecMode;
176 ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
177 }
178 /// Constructor for SPMD mode.
179 ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
180 bool &RuntimeMode, bool FullRuntimeMode)
181 : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
182 SavedExecMode = ExecMode;
183 SavedRuntimeMode = RuntimeMode;
184 ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
185 RuntimeMode = FullRuntimeMode;
186 }
187 ~ExecutionRuntimeModesRAII() {
188 ExecMode = SavedExecMode;
189 if (RuntimeMode)
190 *RuntimeMode = SavedRuntimeMode;
191 }
192};
193
194/// GPU Configuration: This information can be derived from CUDA registers;
195/// however, providing compile-time constants helps generate more efficient
196/// code. For all practical purposes this is fine because the configuration
197/// is the same for all known NVPTX architectures.
198enum MachineConfiguration : unsigned {
199 /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
200 /// specific Grid Values like GV_Warp_Size, GV_Warp_Size_Log2,
201 /// and GV_Warp_Size_Log2_Mask.
202
203 /// Global memory alignment for performance.
204 GlobalMemoryAlignment = 128,
205
206 /// Maximal size of the shared memory buffer.
207 SharedMemorySize = 128,
208};
209
210static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
211 RefExpr = RefExpr->IgnoreParens();
212 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
213 const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
214 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
215 Base = TempASE->getBase()->IgnoreParenImpCasts();
216 RefExpr = Base;
217 } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
218 const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
219 while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
220 Base = TempOASE->getBase()->IgnoreParenImpCasts();
221 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
222 Base = TempASE->getBase()->IgnoreParenImpCasts();
223 RefExpr = Base;
224 }
225 RefExpr = RefExpr->IgnoreParenImpCasts();
226 if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
227 return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
228 const auto *ME = cast<MemberExpr>(RefExpr);
229 return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
230}
231
232
233static RecordDecl *buildRecordForGlobalizedVars(
234 ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
235 ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
236 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
237 &MappedDeclsFields, int BufSize) {
238 using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
239 if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
240 return nullptr;
241 SmallVector<VarsDataTy, 4> GlobalizedVars;
242 for (const ValueDecl *D : EscapedDecls)
243 GlobalizedVars.emplace_back(
244 CharUnits::fromQuantity(std::max(
245 C.getDeclAlign(D).getQuantity(),
246 static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
247 D);
248 for (const ValueDecl *D : EscapedDeclsForTeams)
249 GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
250 llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
251 return L.first > R.first;
252 });
253
254 // Build struct _globalized_locals_ty {
255 // /* globalized vars */[WarpSize] align (max(decl_align,
256 // GlobalMemoryAlignment))
257 // /* globalized vars */ for EscapedDeclsForTeams
258 // };
259 RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
260 GlobalizedRD->startDefinition();
261 llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
262 EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
263 for (const auto &Pair : GlobalizedVars) {
264 const ValueDecl *VD = Pair.second;
265 QualType Type = VD->getType();
266 if (Type->isLValueReferenceType())
267 Type = C.getPointerType(Type.getNonReferenceType());
268 else
269 Type = Type.getNonReferenceType();
270 SourceLocation Loc = VD->getLocation();
271 FieldDecl *Field;
272 if (SingleEscaped.count(VD)) {
273 Field = FieldDecl::Create(
274 C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
275 C.getTrivialTypeSourceInfo(Type, SourceLocation()),
276 /*BW=*/nullptr, /*Mutable=*/false,
277 /*InitStyle=*/ICIS_NoInit);
278 Field->setAccess(AS_public);
279 if (VD->hasAttrs()) {
280 for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
281 E(VD->getAttrs().end());
282 I != E; ++I)
283 Field->addAttr(*I);
284 }
285 } else {
286 llvm::APInt ArraySize(32, BufSize);
287 Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
288 0);
289 Field = FieldDecl::Create(
290 C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
291 C.getTrivialTypeSourceInfo(Type, SourceLocation()),
292 /*BW=*/nullptr, /*Mutable=*/false,
293 /*InitStyle=*/ICIS_NoInit);
294 Field->setAccess(AS_public);
295 llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
296 static_cast<CharUnits::QuantityType>(
297 GlobalMemoryAlignment)));
298 Field->addAttr(AlignedAttr::CreateImplicit(
299 C, /*IsAlignmentExpr=*/true,
300 IntegerLiteral::Create(C, Align,
301 C.getIntTypeForBitwidth(32, /*Signed=*/0),
302 SourceLocation()),
303 {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
304 }
305 GlobalizedRD->addDecl(Field);
306 MappedDeclsFields.try_emplace(VD, Field);
307 }
308 GlobalizedRD->completeDefinition();
309 return GlobalizedRD;
310}
311
312/// Get the list of variables that can escape their declaration context.
313class CheckVarsEscapingDeclContext final
314 : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
315 CodeGenFunction &CGF;
316 llvm::SetVector<const ValueDecl *> EscapedDecls;
317 llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
318 llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
319 RecordDecl *GlobalizedRD = nullptr;
320 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
321 bool AllEscaped = false;
322 bool IsForCombinedParallelRegion = false;
323
324 void markAsEscaped(const ValueDecl *VD) {
325 // Do not globalize declare target variables.
326 if (!isa<VarDecl>(VD) ||
327 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
328 return;
329 VD = cast<ValueDecl>(VD->getCanonicalDecl());
330 // Use user-specified allocation.
331 if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
332 return;
333 // Variables captured by value must be globalized.
334 if (auto *CSI = CGF.CapturedStmtInfo) {
335 if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
336 // Check if we need to capture the variable that was already captured by
337 // value in the outer region.
338 if (!IsForCombinedParallelRegion) {
339 if (!FD->hasAttrs())
340 return;
341 const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
342 if (!Attr)
343 return;
344 if (((Attr->getCaptureKind() != OMPC_map) &&
345 !isOpenMPPrivate(Attr->getCaptureKind())) ||
346 ((Attr->getCaptureKind() == OMPC_map) &&
347 !FD->getType()->isAnyPointerType()))
348 return;
349 }
350 if (!FD->getType()->isReferenceType()) {
351 assert(!VD->getType()->isVariablyModifiedType() &&
352 "Parameter captured by value with variably modified type");
353 EscapedParameters.insert(VD);
354 } else if (!IsForCombinedParallelRegion) {
355 return;
356 }
357 }
358 }
359 if ((!CGF.CapturedStmtInfo ||
360 (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
361 VD->getType()->isReferenceType())
362 // Do not globalize variables with reference type.
363 return;
364 if (VD->getType()->isVariablyModifiedType())
365 EscapedVariableLengthDecls.insert(VD);
366 else
367 EscapedDecls.insert(VD);
368 }
369
370 void VisitValueDecl(const ValueDecl *VD) {
371 if (VD->getType()->isLValueReferenceType())
372 markAsEscaped(VD);
373 if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
374 if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
375 const bool SavedAllEscaped = AllEscaped;
376 AllEscaped = VD->getType()->isLValueReferenceType();
377 Visit(VarD->getInit());
378 AllEscaped = SavedAllEscaped;
379 }
380 }
381 }
382 void VisitOpenMPCapturedStmt(const CapturedStmt *S,
383 ArrayRef<OMPClause *> Clauses,
384 bool IsCombinedParallelRegion) {
385 if (!S)
386 return;
387 for (const CapturedStmt::Capture &C : S->captures()) {
388 if (C.capturesVariable() && !C.capturesVariableByCopy()) {
389 const ValueDecl *VD = C.getCapturedVar();
390 bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
391 if (IsCombinedParallelRegion) {
392 // Check if the variable is privatized in the combined construct and
393 // those private copies must be shared in the inner parallel
394 // directive.
395 IsForCombinedParallelRegion = false;
396 for (const OMPClause *C : Clauses) {
397 if (!isOpenMPPrivate(C->getClauseKind()) ||
398 C->getClauseKind() == OMPC_reduction ||
399 C->getClauseKind() == OMPC_linear ||
400 C->getClauseKind() == OMPC_private)
401 continue;
402 ArrayRef<const Expr *> Vars;
403 if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
404 Vars = PC->getVarRefs();
405 else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
406 Vars = PC->getVarRefs();
407 else
408 llvm_unreachable("Unexpected clause.");
409 for (const auto *E : Vars) {
410 const Decl *D =
411 cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
412 if (D == VD->getCanonicalDecl()) {
413 IsForCombinedParallelRegion = true;
414 break;
415 }
416 }
417 if (IsForCombinedParallelRegion)
418 break;
419 }
420 }
421 markAsEscaped(VD);
422 if (isa<OMPCapturedExprDecl>(VD))
423 VisitValueDecl(VD);
424 IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
425 }
426 }
427 }
428
429 void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
430 assert(!GlobalizedRD &&
431 "Record for globalized variables is built already.");
432 ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
433 unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
434 if (IsInTTDRegion)
435 EscapedDeclsForTeams = EscapedDecls.getArrayRef();
436 else
437 EscapedDeclsForParallel = EscapedDecls.getArrayRef();
438 GlobalizedRD = ::buildRecordForGlobalizedVars(
439 CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
440 MappedDeclsFields, WarpSize);
441 }
442
443public:
444 CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
445 ArrayRef<const ValueDecl *> TeamsReductions)
446 : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
447 }
448 virtual ~CheckVarsEscapingDeclContext() = default;
449 void VisitDeclStmt(const DeclStmt *S) {
450 if (!S)
451 return;
452 for (const Decl *D : S->decls())
453 if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
454 VisitValueDecl(VD);
455 }
456 void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
457 if (!D)
458 return;
459 if (!D->hasAssociatedStmt())
460 return;
461 if (const auto *S =
462 dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
463 // Do not analyze directives that do not actually require capturing,
464 // like `omp for` or `omp simd` directives.
465 llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
466 getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
467 if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
468 VisitStmt(S->getCapturedStmt());
469 return;
470 }
471 VisitOpenMPCapturedStmt(
472 S, D->clauses(),
473 CaptureRegions.back() == OMPD_parallel &&
474 isOpenMPDistributeDirective(D->getDirectiveKind()));
475 }
476 }
477 void VisitCapturedStmt(const CapturedStmt *S) {
478 if (!S)
479 return;
480 for (const CapturedStmt::Capture &C : S->captures()) {
481 if (C.capturesVariable() && !C.capturesVariableByCopy()) {
482 const ValueDecl *VD = C.getCapturedVar();
483 markAsEscaped(VD);
484 if (isa<OMPCapturedExprDecl>(VD))
485 VisitValueDecl(VD);
486 }
487 }
488 }
489 void VisitLambdaExpr(const LambdaExpr *E) {
490 if (!E)
491 return;
492 for (const LambdaCapture &C : E->captures()) {
493 if (C.capturesVariable()) {
494 if (C.getCaptureKind() == LCK_ByRef) {
495 const ValueDecl *VD = C.getCapturedVar();
496 markAsEscaped(VD);
497 if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
498 VisitValueDecl(VD);
499 }
500 }
501 }
502 }
503 void VisitBlockExpr(const BlockExpr *E) {
504 if (!E)
505 return;
506 for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
507 if (C.isByRef()) {
508 const VarDecl *VD = C.getVariable();
509 markAsEscaped(VD);
510 if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
511 VisitValueDecl(VD);
512 }
513 }
514 }
515 void VisitCallExpr(const CallExpr *E) {
516 if (!E)
517 return;
518 for (const Expr *Arg : E->arguments()) {
519 if (!Arg)
520 continue;
521 if (Arg->isLValue()) {
522 const bool SavedAllEscaped = AllEscaped;
523 AllEscaped = true;
524 Visit(Arg);
525 AllEscaped = SavedAllEscaped;
526 } else {
527 Visit(Arg);
528 }
529 }
530 Visit(E->getCallee());
531 }
532 void VisitDeclRefExpr(const DeclRefExpr *E) {
533 if (!E)
534 return;
535 const ValueDecl *VD = E->getDecl();
536 if (AllEscaped)
537 markAsEscaped(VD);
538 if (isa<OMPCapturedExprDecl>(VD))
539 VisitValueDecl(VD);
540 else if (const auto *VarD = dyn_cast<VarDecl>(VD))
541 if (VarD->isInitCapture())
542 VisitValueDecl(VD);
543 }
544 void VisitUnaryOperator(const UnaryOperator *E) {
545 if (!E)
546 return;
547 if (E->getOpcode() == UO_AddrOf) {
548 const bool SavedAllEscaped = AllEscaped;
549 AllEscaped = true;
550 Visit(E->getSubExpr());
551 AllEscaped = SavedAllEscaped;
552 } else {
553 Visit(E->getSubExpr());
554 }
555 }
556 void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
557 if (!E)
558 return;
559 if (E->getCastKind() == CK_ArrayToPointerDecay) {
560 const bool SavedAllEscaped = AllEscaped;
561 AllEscaped = true;
562 Visit(E->getSubExpr());
563 AllEscaped = SavedAllEscaped;
564 } else {
565 Visit(E->getSubExpr());
566 }
567 }
568 void VisitExpr(const Expr *E) {
569 if (!E)
570 return;
571 bool SavedAllEscaped = AllEscaped;
572 if (!E->isLValue())
573 AllEscaped = false;
574 for (const Stmt *Child : E->children())
575 if (Child)
576 Visit(Child);
577 AllEscaped = SavedAllEscaped;
578 }
579 void VisitStmt(const Stmt *S) {
580 if (!S)
581 return;
582 for (const Stmt *Child : S->children())
583 if (Child)
584 Visit(Child);
585 }
586
587 /// Returns the record that holds all the escaped local variables and is used
588 /// instead of their original storage.
589 const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
590 if (!GlobalizedRD)
591 buildRecordForGlobalizedVars(IsInTTDRegion);
592 return GlobalizedRD;
593 }
594
595 /// Returns the field in the globalized record for the escaped variable.
596 const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
597 assert(GlobalizedRD &&
598 "Record for globalized variables must be generated already.");
599 auto I = MappedDeclsFields.find(VD);
600 if (I == MappedDeclsFields.end())
601 return nullptr;
602 return I->getSecond();
603 }
604
605 /// Returns the list of the escaped local variables/parameters.
606 ArrayRef<const ValueDecl *> getEscapedDecls() const {
607 return EscapedDecls.getArrayRef();
608 }
609
610 /// Checks if the escaped local variable is actually a parameter passed by
611 /// value.
612 const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
613 return EscapedParameters;
614 }
615
616 /// Returns the list of the escaped variables with the variably modified
617 /// types.
618 ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
619 return EscapedVariableLengthDecls.getArrayRef();
620 }
621};
622} // anonymous namespace
623
624/// Get the id of the warp in the block.
625/// We assume that the warp size is 32, which is always the case
626/// on the NVPTX device, to generate more efficient code.
627static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
628 CGBuilderTy &Bld = CGF.Builder;
629 unsigned LaneIDBits =
630 CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size_Log2);
631 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
632 return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
633}
634
635/// Get the id of the current lane in the Warp.
636/// We assume that the warp size is 32, which is always the case
637/// on the NVPTX device, to generate more efficient code.
638static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
639 CGBuilderTy &Bld = CGF.Builder;
640 unsigned LaneIDMask = CGF.getContext().getTargetInfo().getGridValue(
641 llvm::omp::GV_Warp_Size_Log2_Mask);
642 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
643 return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
644 "nvptx_lane_id");
645}
646
647/// Get the value of the thread_limit clause in the teams directive.
648/// For the 'generic' execution mode, the runtime encodes thread_limit in
649/// the launch parameters, always starting thread_limit+warpSize threads per
650/// CTA. The threads in the last warp are reserved for master execution.
651/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
652static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
653 bool IsInSPMDExecutionMode = false) {
654 CGBuilderTy &Bld = CGF.Builder;
655 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
656 return IsInSPMDExecutionMode
657 ? RT.getGPUNumThreads(CGF)
658 : Bld.CreateNUWSub(RT.getGPUNumThreads(CGF),
659 RT.getGPUWarpSize(CGF), "thread_limit");
660}
661
662/// Get the thread id of the OMP master thread.
663/// The master thread id is the first thread (lane) of the last warp in the
664/// GPU block. Warp size is assumed to be some power of 2.
665/// Thread id is 0 indexed.
666/// E.g: If NumThreads is 33, master id is 32.
667/// If NumThreads is 64, master id is 32.
668/// If NumThreads is 1024, master id is 992.
669static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
670 CGBuilderTy &Bld = CGF.Builder;
671 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
672 llvm::Value *NumThreads = RT.getGPUNumThreads(CGF);
673 // We assume that the warp size is a power of 2.
674 llvm::Value *Mask = Bld.CreateNUWSub(RT.getGPUWarpSize(CGF), Bld.getInt32(1));
675
676 return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
677 Bld.CreateNot(Mask), "master_tid");
678}
679
680CGOpenMPRuntimeGPU::WorkerFunctionState::WorkerFunctionState(
681 CodeGenModule &CGM, SourceLocation Loc)
682 : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
683 Loc(Loc) {
684 createWorkerFunction(CGM);
685}
686
687void CGOpenMPRuntimeGPU::WorkerFunctionState::createWorkerFunction(
688 CodeGenModule &CGM) {
689 // Create a worker function with no arguments.
690
691 WorkerFn = llvm::Function::Create(
692 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
693 /*placeholder=*/"_worker", &CGM.getModule());
694 CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
695 WorkerFn->setDoesNotRecurse();
696}
697
698CGOpenMPRuntimeGPU::ExecutionMode
699CGOpenMPRuntimeGPU::getExecutionMode() const {
700 return CurrentExecutionMode;
701}
702
703static CGOpenMPRuntimeGPU::DataSharingMode
704getDataSharingMode(CodeGenModule &CGM) {
705 return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
706 : CGOpenMPRuntimeGPU::Generic;
707}
708
709/// Check for inner (nested) SPMD construct, if any
710static bool hasNestedSPMDDirective(ASTContext &Ctx,
711 const OMPExecutableDirective &D) {
712 const auto *CS = D.getInnermostCapturedStmt();
713 const auto *Body =
714 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
715 const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
716
717 if (const auto *NestedDir =
718 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
719 OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
720 switch (D.getDirectiveKind()) {
721 case OMPD_target:
722 if (isOpenMPParallelDirective(DKind))
723 return true;
724 if (DKind == OMPD_teams) {
725 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
726 /*IgnoreCaptured=*/true);
727 if (!Body)
728 return false;
729 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
730 if (const auto *NND =
731 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
732 DKind = NND->getDirectiveKind();
733 if (isOpenMPParallelDirective(DKind))
734 return true;
735 }
736 }
737 return false;
738 case OMPD_target_teams:
739 return isOpenMPParallelDirective(DKind);
740 case OMPD_target_simd:
741 case OMPD_target_parallel:
742 case OMPD_target_parallel_for:
743 case OMPD_target_parallel_for_simd:
744 case OMPD_target_teams_distribute:
745 case OMPD_target_teams_distribute_simd:
746 case OMPD_target_teams_distribute_parallel_for:
747 case OMPD_target_teams_distribute_parallel_for_simd:
748 case OMPD_parallel:
749 case OMPD_for:
750 case OMPD_parallel_for:
751 case OMPD_parallel_master:
752 case OMPD_parallel_sections:
753 case OMPD_for_simd:
754 case OMPD_parallel_for_simd:
755 case OMPD_cancel:
756 case OMPD_cancellation_point:
757 case OMPD_ordered:
758 case OMPD_threadprivate:
759 case OMPD_allocate:
760 case OMPD_task:
761 case OMPD_simd:
762 case OMPD_sections:
763 case OMPD_section:
764 case OMPD_single:
765 case OMPD_master:
766 case OMPD_critical:
767 case OMPD_taskyield:
768 case OMPD_barrier:
769 case OMPD_taskwait:
770 case OMPD_taskgroup:
771 case OMPD_atomic:
772 case OMPD_flush:
773 case OMPD_depobj:
774 case OMPD_scan:
775 case OMPD_teams:
776 case OMPD_target_data:
777 case OMPD_target_exit_data:
778 case OMPD_target_enter_data:
779 case OMPD_distribute:
780 case OMPD_distribute_simd:
781 case OMPD_distribute_parallel_for:
782 case OMPD_distribute_parallel_for_simd:
783 case OMPD_teams_distribute:
784 case OMPD_teams_distribute_simd:
785 case OMPD_teams_distribute_parallel_for:
786 case OMPD_teams_distribute_parallel_for_simd:
787 case OMPD_target_update:
788 case OMPD_declare_simd:
789 case OMPD_declare_variant:
790 case OMPD_begin_declare_variant:
791 case OMPD_end_declare_variant:
792 case OMPD_declare_target:
793 case OMPD_end_declare_target:
794 case OMPD_declare_reduction:
795 case OMPD_declare_mapper:
796 case OMPD_taskloop:
797 case OMPD_taskloop_simd:
798 case OMPD_master_taskloop:
799 case OMPD_master_taskloop_simd:
800 case OMPD_parallel_master_taskloop:
801 case OMPD_parallel_master_taskloop_simd:
802 case OMPD_requires:
803 case OMPD_unknown:
804 default:
805 llvm_unreachable("Unexpected directive.");
806 }
807 }
808
809 return false;
810}
811
812static bool supportsSPMDExecutionMode(ASTContext &Ctx,
813 const OMPExecutableDirective &D) {
814 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
815 switch (DirectiveKind) {
816 case OMPD_target:
817 case OMPD_target_teams:
818 return hasNestedSPMDDirective(Ctx, D);
819 case OMPD_target_parallel:
820 case OMPD_target_parallel_for:
821 case OMPD_target_parallel_for_simd:
822 case OMPD_target_teams_distribute_parallel_for:
823 case OMPD_target_teams_distribute_parallel_for_simd:
824 case OMPD_target_simd:
825 case OMPD_target_teams_distribute_simd:
826 return true;
827 case OMPD_target_teams_distribute:
828 return false;
829 case OMPD_parallel:
830 case OMPD_for:
831 case OMPD_parallel_for:
832 case OMPD_parallel_master:
833 case OMPD_parallel_sections:
834 case OMPD_for_simd:
835 case OMPD_parallel_for_simd:
836 case OMPD_cancel:
837 case OMPD_cancellation_point:
838 case OMPD_ordered:
839 case OMPD_threadprivate:
840 case OMPD_allocate:
841 case OMPD_task:
842 case OMPD_simd:
843 case OMPD_sections:
844 case OMPD_section:
845 case OMPD_single:
846 case OMPD_master:
847 case OMPD_critical:
848 case OMPD_taskyield:
849 case OMPD_barrier:
850 case OMPD_taskwait:
851 case OMPD_taskgroup:
852 case OMPD_atomic:
853 case OMPD_flush:
854 case OMPD_depobj:
855 case OMPD_scan:
856 case OMPD_teams:
857 case OMPD_target_data:
858 case OMPD_target_exit_data:
859 case OMPD_target_enter_data:
860 case OMPD_distribute:
861 case OMPD_distribute_simd:
862 case OMPD_distribute_parallel_for:
863 case OMPD_distribute_parallel_for_simd:
864 case OMPD_teams_distribute:
865 case OMPD_teams_distribute_simd:
866 case OMPD_teams_distribute_parallel_for:
867 case OMPD_teams_distribute_parallel_for_simd:
868 case OMPD_target_update:
869 case OMPD_declare_simd:
870 case OMPD_declare_variant:
871 case OMPD_begin_declare_variant:
872 case OMPD_end_declare_variant:
873 case OMPD_declare_target:
874 case OMPD_end_declare_target:
875 case OMPD_declare_reduction:
876 case OMPD_declare_mapper:
877 case OMPD_taskloop:
878 case OMPD_taskloop_simd:
879 case OMPD_master_taskloop:
880 case OMPD_master_taskloop_simd:
881 case OMPD_parallel_master_taskloop:
882 case OMPD_parallel_master_taskloop_simd:
883 case OMPD_requires:
884 case OMPD_unknown:
885 default:
886 break;
887 }
888 llvm_unreachable(
889 "Unknown programming model for OpenMP directive on NVPTX target.");
890}
891
892/// Check if the directive is loop-based and either has no schedule clause at
893/// all or has static scheduling.
894static bool hasStaticScheduling(const OMPExecutableDirective &D) {
895 assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
896 isOpenMPLoopDirective(D.getDirectiveKind()) &&
897 "Expected loop-based directive.");
898 return !D.hasClausesOfKind<OMPOrderedClause>() &&
899 (!D.hasClausesOfKind<OMPScheduleClause>() ||
900 llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
901 [](const OMPScheduleClause *C) {
902 return C->getScheduleKind() == OMPC_SCHEDULE_static;
903 }));
904}
905
906/// Check for inner (nested) lightweight runtime construct, if any
907static bool hasNestedLightweightDirective(ASTContext &Ctx,
908 const OMPExecutableDirective &D) {
909 assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
910 const auto *CS = D.getInnermostCapturedStmt();
911 const auto *Body =
912 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
913 const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
914
915 if (const auto *NestedDir =
916 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
917 OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
918 switch (D.getDirectiveKind()) {
919 case OMPD_target:
920 if (isOpenMPParallelDirective(DKind) &&
921 isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
922 hasStaticScheduling(*NestedDir))
923 return true;
924 if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
925 return true;
926 if (DKind == OMPD_parallel) {
927 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
928 /*IgnoreCaptured=*/true);
929 if (!Body)
930 return false;
931 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
932 if (const auto *NND =
933 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
934 DKind = NND->getDirectiveKind();
935 if (isOpenMPWorksharingDirective(DKind) &&
936 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
937 return true;
938 }
939 } else if (DKind == OMPD_teams) {
940 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
941 /*IgnoreCaptured=*/true);
942 if (!Body)
943 return false;
944 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
945 if (const auto *NND =
946 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
947 DKind = NND->getDirectiveKind();
948 if (isOpenMPParallelDirective(DKind) &&
949 isOpenMPWorksharingDirective(DKind) &&
950 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
951 return true;
952 if (DKind == OMPD_parallel) {
953 Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
954 /*IgnoreCaptured=*/true);
955 if (!Body)
956 return false;
957 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
958 if (const auto *NND =
959 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
960 DKind = NND->getDirectiveKind();
961 if (isOpenMPWorksharingDirective(DKind) &&
962 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
963 return true;
964 }
965 }
966 }
967 }
968 return false;
969 case OMPD_target_teams:
970 if (isOpenMPParallelDirective(DKind) &&
971 isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
972 hasStaticScheduling(*NestedDir))
973 return true;
974 if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
975 return true;
976 if (DKind == OMPD_parallel) {
977 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
978 /*IgnoreCaptured=*/true);
979 if (!Body)
980 return false;
981 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
982 if (const auto *NND =
983 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
984 DKind = NND->getDirectiveKind();
985 if (isOpenMPWorksharingDirective(DKind) &&
986 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
987 return true;
988 }
989 }
990 return false;
991 case OMPD_target_parallel:
992 if (DKind == OMPD_simd)
993 return true;
994 return isOpenMPWorksharingDirective(DKind) &&
995 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
996 case OMPD_target_teams_distribute:
997 case OMPD_target_simd:
998 case OMPD_target_parallel_for:
999 case OMPD_target_parallel_for_simd:
1000 case OMPD_target_teams_distribute_simd:
1001 case OMPD_target_teams_distribute_parallel_for:
1002 case OMPD_target_teams_distribute_parallel_for_simd:
1003 case OMPD_parallel:
1004 case OMPD_for:
1005 case OMPD_parallel_for:
1006 case OMPD_parallel_master:
1007 case OMPD_parallel_sections:
1008 case OMPD_for_simd:
1009 case OMPD_parallel_for_simd:
1010 case OMPD_cancel:
1011 case OMPD_cancellation_point:
1012 case OMPD_ordered:
1013 case OMPD_threadprivate:
1014 case OMPD_allocate:
1015 case OMPD_task:
1016 case OMPD_simd:
1017 case OMPD_sections:
1018 case OMPD_section:
1019 case OMPD_single:
1020 case OMPD_master:
1021 case OMPD_critical:
1022 case OMPD_taskyield:
1023 case OMPD_barrier:
1024 case OMPD_taskwait:
1025 case OMPD_taskgroup:
1026 case OMPD_atomic:
1027 case OMPD_flush:
1028 case OMPD_depobj:
1029 case OMPD_scan:
1030 case OMPD_teams:
1031 case OMPD_target_data:
1032 case OMPD_target_exit_data:
1033 case OMPD_target_enter_data:
1034 case OMPD_distribute:
1035 case OMPD_distribute_simd:
1036 case OMPD_distribute_parallel_for:
1037 case OMPD_distribute_parallel_for_simd:
1038 case OMPD_teams_distribute:
1039 case OMPD_teams_distribute_simd:
1040 case OMPD_teams_distribute_parallel_for:
1041 case OMPD_teams_distribute_parallel_for_simd:
1042 case OMPD_target_update:
1043 case OMPD_declare_simd:
1044 case OMPD_declare_variant:
1045 case OMPD_begin_declare_variant:
1046 case OMPD_end_declare_variant:
1047 case OMPD_declare_target:
1048 case OMPD_end_declare_target:
1049 case OMPD_declare_reduction:
1050 case OMPD_declare_mapper:
1051 case OMPD_taskloop:
1052 case OMPD_taskloop_simd:
1053 case OMPD_master_taskloop:
1054 case OMPD_master_taskloop_simd:
1055 case OMPD_parallel_master_taskloop:
1056 case OMPD_parallel_master_taskloop_simd:
1057 case OMPD_requires:
1058 case OMPD_unknown:
1059 default:
1060 llvm_unreachable("Unexpected directive.");
1061 }
1062 }
1063
1064 return false;
1065}
1066
1067/// Checks if the construct supports the lightweight runtime. It must be an
1068/// SPMD construct with an inner loop-based construct that uses static scheduling.
1069static bool supportsLightweightRuntime(ASTContext &Ctx,
1070 const OMPExecutableDirective &D) {
1071 if (!supportsSPMDExecutionMode(Ctx, D))
1072 return false;
1073 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
1074 switch (DirectiveKind) {
1075 case OMPD_target:
1076 case OMPD_target_teams:
1077 case OMPD_target_parallel:
1078 return hasNestedLightweightDirective(Ctx, D);
1079 case OMPD_target_parallel_for:
1080 case OMPD_target_parallel_for_simd:
1081 case OMPD_target_teams_distribute_parallel_for:
1082 case OMPD_target_teams_distribute_parallel_for_simd:
1083 // (Last|First)-privates must be shared in parallel region.
1084 return hasStaticScheduling(D);
1085 case OMPD_target_simd:
1086 case OMPD_target_teams_distribute_simd:
1087 return true;
1088 case OMPD_target_teams_distribute:
1089 return false;
1090 case OMPD_parallel:
1091 case OMPD_for:
1092 case OMPD_parallel_for:
1093 case OMPD_parallel_master:
1094 case OMPD_parallel_sections:
1095 case OMPD_for_simd:
1096 case OMPD_parallel_for_simd:
1097 case OMPD_cancel:
1098 case OMPD_cancellation_point:
1099 case OMPD_ordered:
1100 case OMPD_threadprivate:
1101 case OMPD_allocate:
1102 case OMPD_task:
1103 case OMPD_simd:
1104 case OMPD_sections:
1105 case OMPD_section:
1106 case OMPD_single:
1107 case OMPD_master:
1108 case OMPD_critical:
1109 case OMPD_taskyield:
1110 case OMPD_barrier:
1111 case OMPD_taskwait:
1112 case OMPD_taskgroup:
1113 case OMPD_atomic:
1114 case OMPD_flush:
1115 case OMPD_depobj:
1116 case OMPD_scan:
1117 case OMPD_teams:
1118 case OMPD_target_data:
1119 case OMPD_target_exit_data:
1120 case OMPD_target_enter_data:
1121 case OMPD_distribute:
1122 case OMPD_distribute_simd:
1123 case OMPD_distribute_parallel_for:
1124 case OMPD_distribute_parallel_for_simd:
1125 case OMPD_teams_distribute:
1126 case OMPD_teams_distribute_simd:
1127 case OMPD_teams_distribute_parallel_for:
1128 case OMPD_teams_distribute_parallel_for_simd:
1129 case OMPD_target_update:
1130 case OMPD_declare_simd:
1131 case OMPD_declare_variant:
1132 case OMPD_begin_declare_variant:
1133 case OMPD_end_declare_variant:
1134 case OMPD_declare_target:
1135 case OMPD_end_declare_target:
1136 case OMPD_declare_reduction:
1137 case OMPD_declare_mapper:
1138 case OMPD_taskloop:
1139 case OMPD_taskloop_simd:
1140 case OMPD_master_taskloop:
1141 case OMPD_master_taskloop_simd:
1142 case OMPD_parallel_master_taskloop:
1143 case OMPD_parallel_master_taskloop_simd:
1144 case OMPD_requires:
1145 case OMPD_unknown:
1146 default:
1147 break;
1148 }
1149 llvm_unreachable(
1150 "Unknown programming model for OpenMP directive on NVPTX target.");
1151}
1152
1153void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
1154 StringRef ParentName,
1155 llvm::Function *&OutlinedFn,
1156 llvm::Constant *&OutlinedFnID,
1157 bool IsOffloadEntry,
1158 const RegionCodeGenTy &CodeGen) {
1159 ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
1160 EntryFunctionState EST;
1161 WorkerFunctionState WST(CGM, D.getBeginLoc());
1162 Work.clear();
1163 WrapperFunctionsMap.clear();
1164
1165 // Emit target region as a standalone region.
1166 class NVPTXPrePostActionTy : public PrePostActionTy {
1167 CGOpenMPRuntimeGPU::EntryFunctionState &EST;
1168 CGOpenMPRuntimeGPU::WorkerFunctionState &WST;
1169
1170 public:
1171 NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST,
1172 CGOpenMPRuntimeGPU::WorkerFunctionState &WST)
1173 : EST(EST), WST(WST) {}
1174 void Enter(CodeGenFunction &CGF) override {
1175 auto &RT =
1176 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1177 RT.emitNonSPMDEntryHeader(CGF, EST, WST);
1178 // Skip target region initialization.
1179 RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1180 }
1181 void Exit(CodeGenFunction &CGF) override {
1182 auto &RT =
1183 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1184 RT.clearLocThreadIdInsertPt(CGF);
1185 RT.emitNonSPMDEntryFooter(CGF, EST);
1186 }
1187 } Action(EST, WST);
1188 CodeGen.setAction(Action);
1189 IsInTTDRegion = true;
1190 // Reserve place for the globalized memory.
1191 GlobalizedRecords.emplace_back();
1192 if (!KernelStaticGlobalized) {
1193 KernelStaticGlobalized = new llvm::GlobalVariable(
1194 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
1195 llvm::GlobalValue::InternalLinkage,
1196 llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
1197 "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
1198 llvm::GlobalValue::NotThreadLocal,
1199 CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
1200 }
1201 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
1202 IsOffloadEntry, CodeGen);
1203 IsInTTDRegion = false;
1204
1205 // Now change the name of the worker function to correspond to this target
1206 // region's entry function.
1207 WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));
1208
1209 // Create the worker function
1210 emitWorkerFunction(WST);
1211}
1212
1213// Setup NVPTX threads for master-worker OpenMP scheme.
1214void CGOpenMPRuntimeGPU::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
1215 EntryFunctionState &EST,
1216 WorkerFunctionState &WST) {
1217 CGBuilderTy &Bld = CGF.Builder;
1218
1219 llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
1220 llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
1221 llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
1222 EST.ExitBB = CGF.createBasicBlock(".exit");
1223
1224 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1225 llvm::Value *IsWorker =
1226 Bld.CreateICmpULT(RT.getGPUThreadID(CGF), getThreadLimit(CGF));
1227 Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);
1228
1229 CGF.EmitBlock(WorkerBB);
1230 emitCall(CGF, WST.Loc, WST.WorkerFn);
1231 CGF.EmitBranch(EST.ExitBB);
1232
1233 CGF.EmitBlock(MasterCheckBB);
1234 llvm::Value *IsMaster =
1235 Bld.CreateICmpEQ(RT.getGPUThreadID(CGF), getMasterThreadID(CGF));
1236 Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);
1237
1238 CGF.EmitBlock(MasterBB);
1239 IsInTargetMasterThreadRegion = true;
1240 // SEQUENTIAL (MASTER) REGION START
1241 // First action in sequential region:
1242 // Initialize the state of the OpenMP runtime library on the GPU.
1243 // TODO: Optimize runtime initialization and pass in correct value.
1244 llvm::Value *Args[] = {getThreadLimit(CGF),
1245 Bld.getInt16(/*RequiresOMPRuntime=*/1)};
1246 CGF.EmitRuntimeCall(
1247 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);
1248
1249 // For data sharing, we need to initialize the stack.
1250 CGF.EmitRuntimeCall(
1251 createNVPTXRuntimeFunction(
1252 OMPRTL_NVPTX__kmpc_data_sharing_init_stack));
1253
1254 emitGenericVarsProlog(CGF, WST.Loc);
1255}
1256
1257void CGOpenMPRuntimeGPU::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
1258 EntryFunctionState &EST) {
1259 IsInTargetMasterThreadRegion = false;
1260 if (!CGF.HaveInsertPoint())
1261 return;
1262
1263 emitGenericVarsEpilog(CGF);
1264
1265 if (!EST.ExitBB)
1266 EST.ExitBB = CGF.createBasicBlock(".exit");
1267
1268 llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
1269 CGF.EmitBranch(TerminateBB);
1270
1271 CGF.EmitBlock(TerminateBB);
1272 // Signal termination condition.
1273 // TODO: Optimize runtime initialization and pass in correct value.
1274 llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
1275 CGF.EmitRuntimeCall(
1276 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), Args);
1277 // Barrier to terminate worker threads.
1278 syncCTAThreads(CGF);
1279 // Master thread jumps to exit point.
1280 CGF.EmitBranch(EST.ExitBB);
1281
1282 CGF.EmitBlock(EST.ExitBB);
1283 EST.ExitBB = nullptr;
1284}
1285
1286void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
1287 StringRef ParentName,
1288 llvm::Function *&OutlinedFn,
1289 llvm::Constant *&OutlinedFnID,
1290 bool IsOffloadEntry,
1291 const RegionCodeGenTy &CodeGen) {
1292 ExecutionRuntimeModesRAII ModeRAII(
1293 CurrentExecutionMode, RequiresFullRuntime,
1294 CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
1295 !supportsLightweightRuntime(CGM.getContext(), D));
1296 EntryFunctionState EST;
1297
1298 // Emit target region as a standalone region.
1299 class NVPTXPrePostActionTy : public PrePostActionTy {
1300 CGOpenMPRuntimeGPU &RT;
1301 CGOpenMPRuntimeGPU::EntryFunctionState &EST;
1302 const OMPExecutableDirective &D;
1303
1304 public:
1305 NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
1306 CGOpenMPRuntimeGPU::EntryFunctionState &EST,
1307 const OMPExecutableDirective &D)
1308 : RT(RT), EST(EST), D(D) {}
1309 void Enter(CodeGenFunction &CGF) override {
1310 RT.emitSPMDEntryHeader(CGF, EST, D);
1311 // Skip target region initialization.
1312 RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1313 }
1314 void Exit(CodeGenFunction &CGF) override {
1315 RT.clearLocThreadIdInsertPt(CGF);
1316 RT.emitSPMDEntryFooter(CGF, EST);
1317 }
1318 } Action(*this, EST, D);
1319 CodeGen.setAction(Action);
1320 IsInTTDRegion = true;
1321 // Reserve place for the globalized memory.
1322 GlobalizedRecords.emplace_back();
1323 if (!KernelStaticGlobalized) {
1324 KernelStaticGlobalized = new llvm::GlobalVariable(
1325 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
1326 llvm::GlobalValue::InternalLinkage,
1327 llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
1328 "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
1329 llvm::GlobalValue::NotThreadLocal,
1330 CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
1331 }
1332 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
1333 IsOffloadEntry, CodeGen);
1334 IsInTTDRegion = false;
1335}
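// Annotation (not part of the original source): the KernelStaticGlobalized
// guard above creates, once per module, a slot in CUDA shared memory along
// these lines (a sketch of the expected IR, not a verified dump):
//   @"_openmp_kernel_static_glob_rd$ptr" = internal addrspace(3) global i8* null
// It later receives the base address of the statically reserved globalization
// buffer via __kmpc_get_team_static_memory.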
1336
1337void CGOpenMPRuntimeGPU::emitSPMDEntryHeader(
1338 CodeGenFunction &CGF, EntryFunctionState &EST,
1339 const OMPExecutableDirective &D) {
1340 CGBuilderTy &Bld = CGF.Builder;
1341
1342 // Setup BBs in entry function.
1343 llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
1344 EST.ExitBB = CGF.createBasicBlock(".exit");
1345
1346 llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
1347 /*RequiresOMPRuntime=*/
1348 Bld.getInt16(RequiresFullRuntime ? 1 : 0),
1349 /*RequiresDataSharing=*/Bld.getInt16(0)};
1350 CGF.EmitRuntimeCall(
1351 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);
1352
1353 if (RequiresFullRuntime) {
1354 // For data sharing, we need to initialize the stack.
1355 CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
1356 OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd));
1357 }
1358
1359 CGF.EmitBranch(ExecuteBB);
1360
1361 CGF.EmitBlock(ExecuteBB);
1362
1363 IsInTargetMasterThreadRegion = true;
1364}
1365
1366void CGOpenMPRuntimeGPU::emitSPMDEntryFooter(CodeGenFunction &CGF,
1367 EntryFunctionState &EST) {
1368 IsInTargetMasterThreadRegion = false;
1369 if (!CGF.HaveInsertPoint())
1370 return;
1371
1372 if (!EST.ExitBB)
1373 EST.ExitBB = CGF.createBasicBlock(".exit");
1374
1375 llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
1376 CGF.EmitBranch(OMPDeInitBB);
1377
1378 CGF.EmitBlock(OMPDeInitBB);
1379 // Deinitialize the OMP state in the runtime; called by all active threads.
1380 llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
1381 CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
1382 CGF.EmitRuntimeCall(
1383 createNVPTXRuntimeFunction(
1384 OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2), Args);
1385 CGF.EmitBranch(EST.ExitBB);
1386
1387 CGF.EmitBlock(EST.ExitBB);
1388 EST.ExitBB = nullptr;
1389}
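// Annotation (not part of the original source): a minimal sketch of the code
// the SPMD header/footer pair emits, assuming the lightweight runtime
// (RequiresFullRuntime == false):
//   __kmpc_spmd_kernel_init(thread_limit, /*RequiresOMPRuntime=*/0,
//                           /*RequiresDataSharing=*/0);
//   .execute:    <target region body>
//   .omp.deinit: __kmpc_spmd_kernel_deinit_v2(/*RequiresOMPRuntime=*/0);
//   .exit:       ret void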
1390
1391// Create a unique global variable to indicate the execution mode of this target
1392// region. The execution mode is either 'generic', or 'spmd' depending on the
1393// target directive. This variable is picked up by the offload library to setup
1394// the device appropriately before kernel launch. If the execution mode is
1395// 'generic', the runtime reserves one warp for the master, otherwise, all
1396// warps participate in parallel work.
1397static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
1398 bool Mode) {
1399 auto *GVMode =
1400 new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
1401 llvm::GlobalValue::WeakAnyLinkage,
1402 llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
1403 Twine(Name, "_exec_mode"));
1404 CGM.addCompilerUsedGlobal(GVMode);
1405}
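// Annotation (not part of the original source): for a hypothetical outlined
// kernel named "__omp_offloading_<id>_foo_l1", SPMD mode (Mode == true)
// produces, roughly:
//   @__omp_offloading_<id>_foo_l1_exec_mode = weak constant i8 0
// while generic mode initializes the same global to 1; the offload plugin
// reads it before launch to decide whether to reserve the master warp.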
1406
1407void CGOpenMPRuntimeGPU::emitWorkerFunction(WorkerFunctionState &WST) {
1408 ASTContext &Ctx = CGM.getContext();
1409
1410 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
1411 CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
1412 WST.Loc, WST.Loc);
1413 emitWorkerLoop(CGF, WST);
1414 CGF.FinishFunction();
1415}
1416
1417void CGOpenMPRuntimeGPU::emitWorkerLoop(CodeGenFunction &CGF,
1418 WorkerFunctionState &WST) {
1419 //
1420 // The workers enter this loop and wait for parallel work from the master.
1421 // When the master encounters a parallel region it sets up the work + variable
1422 // arguments, and wakes up the workers. The workers first check to see if
1423 // they are required for the parallel region, i.e., within the # of requested
1424 // parallel threads. The activated workers load the variable arguments and
1425 // execute the parallel work.
1426 //
1427
1428 CGBuilderTy &Bld = CGF.Builder;
1429
1430 llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
1431 llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
1432 llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
1433 llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
1434 llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
1435 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
1436
1437 CGF.EmitBranch(AwaitBB);
1438
1439 // Workers wait for work from master.
1440 CGF.EmitBlock(AwaitBB);
1441 // Wait for parallel work
1442 syncCTAThreads(CGF);
1443
1444 Address WorkFn =
1445 CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
1446 Address ExecStatus =
1447 CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
1448 CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
1449 CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
1450
1451 // TODO: Optimize runtime initialization and pass in correct value.
1452 llvm::Value *Args[] = {WorkFn.getPointer()};
1453 llvm::Value *Ret = CGF.EmitRuntimeCall(
1454 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
1455 Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);
1456
1457 // On termination condition (workid == 0), exit loop.
1458 llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
1459 llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
1460 Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
1461
1462 // Activate requested workers.
1463 CGF.EmitBlock(SelectWorkersBB);
1464 llvm::Value *IsActive =
1465 Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
1466 Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);
1467
1468 // Signal start of parallel region.
1469 CGF.EmitBlock(ExecuteBB);
1470 // Skip initialization.
1471 setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1472
1473 // Process work items: outlined parallel functions.
1474 for (llvm::Function *W : Work) {
1475 // Try to match this outlined function.
1476 llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);
1477
1478 llvm::Value *WorkFnMatch =
1479 Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");
1480
1481 llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
1482 llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
1483 Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);
1484
1485 // Execute this outlined function.
1486 CGF.EmitBlock(ExecuteFNBB);
1487
1488 // Insert call to work function via shared wrapper. The shared
1489 // wrapper takes two arguments:
1490 // - the parallelism level;
1491 // - the thread ID;
1492 emitCall(CGF, WST.Loc, W,
1493 {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
1494
1495 // Go to end of parallel region.
1496 CGF.EmitBranch(TerminateBB);
1497
1498 CGF.EmitBlock(CheckNextBB);
1499 }
1500 // Default case: call to outlined function through pointer if the target
1501 // region makes a declare target call that may contain an orphaned parallel
1502 // directive.
1503 auto *ParallelFnTy =
1504 llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
1505 /*isVarArg=*/false);
1506 llvm::Value *WorkFnCast =
1507 Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
1508 // Insert call to work function via shared wrapper. The shared
1509 // wrapper takes two arguments:
1510 // - the parallelism level;
1511 // - the thread ID;
1512 emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
1513 {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
1514 // Go to end of parallel region.
1515 CGF.EmitBranch(TerminateBB);
1516
1517 // Signal end of parallel region.
1518 CGF.EmitBlock(TerminateBB);
1519 CGF.EmitRuntimeCall(
1520 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
1521 llvm::None);
1522 CGF.EmitBranch(BarrierBB);
1523
1524 // All active and inactive workers wait at a barrier after parallel region.
1525 CGF.EmitBlock(BarrierBB);
1526 // Barrier after parallel region.
1527 syncCTAThreads(CGF);
1528 CGF.EmitBranch(AwaitBB);
1529
1530 // Exit target region.
1531 CGF.EmitBlock(ExitBB);
1532 // Skip initialization.
1533 clearLocThreadIdInsertPt(CGF);
1534}
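// Annotation (not part of the original source): the worker loop emitted above
// behaves roughly like this pseudo-C sketch, phrased in terms of the runtime
// entry points used in this file:
//   for (;;) {
//     syncCTAThreads();                          // wait for work from master
//     bool active = __kmpc_kernel_parallel(&work_fn);
//     if (!work_fn) break;                       // null work_fn: terminate
//     if (active) {
//       work_fn(/*ParallelLevel=*/0, thread_id); // matched or indirect call
//       __kmpc_kernel_end_parallel();
//     }
//     syncCTAThreads();                          // barrier after the region
//   }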
1535
1536/// Returns specified OpenMP runtime function for the current OpenMP
1537/// implementation. Specialized for the NVPTX device.
1538/// \param Function OpenMP runtime function.
1539/// \return Specified function.
1540llvm::FunctionCallee
1541CGOpenMPRuntimeGPU::createNVPTXRuntimeFunction(unsigned Function) {
1542 llvm::FunctionCallee RTLFn = nullptr;
1543 switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
1544 case OMPRTL_NVPTX__kmpc_kernel_init: {
1545 // Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
1546 // RequiresOMPRuntime);
1547 llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
1548 auto *FnTy =
1549 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1550 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
1551 break;
1552 }
1553 case OMPRTL_NVPTX__kmpc_kernel_deinit: {
1554 // Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
1555 llvm::Type *TypeParams[] = {CGM.Int16Ty};
1556 auto *FnTy =
1557 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1558 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
1559 break;
1560 }
1561 case OMPRTL_NVPTX__kmpc_spmd_kernel_init: {
1562 // Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
1563 // int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
1564 llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
1565 auto *FnTy =
1566 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1567 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
1568 break;
1569 }
1570 case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2: {
1571 // Build void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
1572 llvm::Type *TypeParams[] = {CGM.Int16Ty};
1573 auto *FnTy =
1574 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1575 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit_v2");
1576 break;
1577 }
1578 case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
1579 /// Build void __kmpc_kernel_prepare_parallel(
1580 /// void *outlined_function);
1581 llvm::Type *TypeParams[] = {CGM.Int8PtrTy};
1582 auto *FnTy =
1583 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1584 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
1585 break;
1586 }
1587 case OMPRTL_NVPTX__kmpc_kernel_parallel: {
1588 /// Build bool __kmpc_kernel_parallel(void **outlined_function);
1589 llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy};
1590 llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
1591 auto *FnTy =
1592 llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
1593 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_parallel");
1594 break;
1595 }
1596 case OMPRTL_NVPTX__kmpc_kernel_end_parallel: {
1597 /// Build void __kmpc_kernel_end_parallel();
1598 auto *FnTy =
1599 llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
1600 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_end_parallel");
1601 break;
1602 }
1603 case OMPRTL_NVPTX__kmpc_serialized_parallel: {
1604 // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1605 // global_tid);
1606 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1607 auto *FnTy =
1608 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1609 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1610 break;
1611 }
1612 case OMPRTL_NVPTX__kmpc_end_serialized_parallel: {
1613 // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1614 // global_tid);
1615 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1616 auto *FnTy =
1617 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1618 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1619 break;
1620 }
1621 case OMPRTL_NVPTX__kmpc_shuffle_int32: {
1622 // Build int32_t __kmpc_shuffle_int32(int32_t element,
1623 // int16_t lane_offset, int16_t warp_size);
1624 llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
1625 auto *FnTy =
1626 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1627 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int32");
1628 break;
1629 }
1630 case OMPRTL_NVPTX__kmpc_shuffle_int64: {
1631 // Build int64_t __kmpc_shuffle_int64(int64_t element,
1632 // int16_t lane_offset, int16_t warp_size);
1633 llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int16Ty, CGM.Int16Ty};
1634 auto *FnTy =
1635 llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
1636 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
1637 break;
1638 }
1639 case OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2: {
1640 // Build int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc,
1641 // kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void*
1642 // reduce_data, void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t
1643 // lane_id, int16_t lane_offset, int16_t AlgorithmVersion), void
1644 // (*kmp_InterWarpCopyFctPtr)(void* src, int warp_num));
1645 llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
1646 CGM.Int16Ty, CGM.Int16Ty};
1647 auto *ShuffleReduceFnTy =
1648 llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
1649 /*isVarArg=*/false);
1650 llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
1651 auto *InterWarpCopyFnTy =
1652 llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
1653 /*isVarArg=*/false);
1654 llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
1655 CGM.Int32Ty,
1656 CGM.Int32Ty,
1657 CGM.SizeTy,
1658 CGM.VoidPtrTy,
1659 ShuffleReduceFnTy->getPointerTo(),
1660 InterWarpCopyFnTy->getPointerTo()};
1661 auto *FnTy =
1662 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1663 RTLFn = CGM.CreateRuntimeFunction(
1664 FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait_v2");
1665 break;
1666 }
1667 case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
1668 // Build void __kmpc_nvptx_end_reduce_nowait(kmp_int32 global_tid);
1669 llvm::Type *TypeParams[] = {CGM.Int32Ty};
1670 auto *FnTy =
1671 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1672 RTLFn = CGM.CreateRuntimeFunction(
1673 FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
1674 break;
1675 }
1676 case OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2: {
1677 // Build int32_t __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
1678 // global_tid, void *global_buffer, int32_t num_of_records, void*
1679 // reduce_data,
1680 // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
1681 // lane_offset, int16_t shortCircuit),
1682 // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
1683 // (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
1684 // void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
1685 // void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
1686 // int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
1687 // *buffer, int idx, void *reduce_data));
1688 llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
1689 CGM.Int16Ty, CGM.Int16Ty};
1690 auto *ShuffleReduceFnTy =
1691 llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
1692 /*isVarArg=*/false);
1693 llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
1694 auto *InterWarpCopyFnTy =
1695 llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
1696 /*isVarArg=*/false);
1697 llvm::Type *GlobalListTypeParams[] = {CGM.VoidPtrTy, CGM.IntTy,
1698 CGM.VoidPtrTy};
1699 auto *GlobalListFnTy =
1700 llvm::FunctionType::get(CGM.VoidTy, GlobalListTypeParams,
1701 /*isVarArg=*/false);
1702 llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
1703 CGM.Int32Ty,
1704 CGM.VoidPtrTy,
1705 CGM.Int32Ty,
1706 CGM.VoidPtrTy,
1707 ShuffleReduceFnTy->getPointerTo(),
1708 InterWarpCopyFnTy->getPointerTo(),
1709 GlobalListFnTy->getPointerTo(),
1710 GlobalListFnTy->getPointerTo(),
1711 GlobalListFnTy->getPointerTo(),
1712 GlobalListFnTy->getPointerTo()};
1713 auto *FnTy =
1714 llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1715 RTLFn = CGM.CreateRuntimeFunction(
1716 FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait_v2");
1717 break;
1718 }
1719 case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
1720 /// Build void __kmpc_data_sharing_init_stack();
1721 auto *FnTy =
1722 llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
1723 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack");
1724 break;
1725 }
1726 case OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd: {
1727 /// Build void __kmpc_data_sharing_init_stack_spmd();
1728 auto *FnTy =
1729 llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
1730 RTLFn =
1731 CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
1732 break;
1733 }
1734 case OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack: {
1735 // Build void *__kmpc_data_sharing_coalesced_push_stack(size_t size,
1736 // int16_t UseSharedMemory);
1737 llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
1738 auto *FnTy =
1739 llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1740 RTLFn = CGM.CreateRuntimeFunction(
1741 FnTy, /*Name=*/"__kmpc_data_sharing_coalesced_push_stack");
1742 break;
1743 }
1744 case OMPRTL_NVPTX__kmpc_data_sharing_push_stack: {
1745 // Build void *__kmpc_data_sharing_push_stack(size_t size, int16_t
1746 // UseSharedMemory);
1747 llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
1748 auto *FnTy =
1749 llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1750 RTLFn = CGM.CreateRuntimeFunction(
1751 FnTy, /*Name=*/"__kmpc_data_sharing_push_stack");
1752 break;
1753 }
1754 case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
1755 // Build void __kmpc_data_sharing_pop_stack(void *a);
1756 llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
1757 auto *FnTy =
1758 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1759 RTLFn = CGM.CreateRuntimeFunction(FnTy,
1760 /*Name=*/"__kmpc_data_sharing_pop_stack");
1761 break;
1762 }
1763 case OMPRTL_NVPTX__kmpc_begin_sharing_variables: {
1764 /// Build void __kmpc_begin_sharing_variables(void ***args,
1765 /// size_t n_args);
1766 llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo(), CGM.SizeTy};
1767 auto *FnTy =
1768 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1769 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_begin_sharing_variables");
1770 break;
1771 }
1772 case OMPRTL_NVPTX__kmpc_end_sharing_variables: {
1773 /// Build void __kmpc_end_sharing_variables();
1774 auto *FnTy =
1775 llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
1776 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_sharing_variables");
1777 break;
1778 }
1779 case OMPRTL_NVPTX__kmpc_get_shared_variables: {
1780 /// Build void __kmpc_get_shared_variables(void ***GlobalArgs);
1781 llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo()};
1782 auto *FnTy =
1783 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1784 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_shared_variables");
1785 break;
1786 }
1787 case OMPRTL_NVPTX__kmpc_parallel_level: {
1788 // Build uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32 global_tid);
1789 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1790 auto *FnTy =
1791 llvm::FunctionType::get(CGM.Int16Ty, TypeParams, /*isVarArg*/ false);
1792 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_parallel_level");
1793 break;
1794 }
1795 case OMPRTL_NVPTX__kmpc_is_spmd_exec_mode: {
1796 // Build int8_t __kmpc_is_spmd_exec_mode();
1797 auto *FnTy = llvm::FunctionType::get(CGM.Int8Ty, /*isVarArg=*/false);
1798 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_is_spmd_exec_mode");
1799 break;
1800 }
1801 case OMPRTL_NVPTX__kmpc_get_team_static_memory: {
1802 // Build void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
1803 // const void *buf, size_t size, int16_t is_shared, const void **res);
1804 llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.VoidPtrTy, CGM.SizeTy,
1805 CGM.Int16Ty, CGM.VoidPtrPtrTy};
1806 auto *FnTy =
1807 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1808 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_team_static_memory");
1809 break;
1810 }
1811 case OMPRTL_NVPTX__kmpc_restore_team_static_memory: {
1812 // Build void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
1813 // int16_t is_shared);
1814 llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.Int16Ty};
1815 auto *FnTy =
1816 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1817 RTLFn =
1818 CGM.CreateRuntimeFunction(FnTy, "__kmpc_restore_team_static_memory");
1819 break;
1820 }
1821 case OMPRTL__kmpc_barrier: {
1822 // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1823 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1824 auto *FnTy =
1825 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1826 RTLFn =
1827 CGM.CreateConvergentRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1828 break;
1829 }
1830 case OMPRTL__kmpc_barrier_simple_spmd: {
1831 // Build void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
1832 // global_tid);
1833 llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1834 auto *FnTy =
1835 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1836 RTLFn = CGM.CreateConvergentRuntimeFunction(
1837 FnTy, /*Name*/ "__kmpc_barrier_simple_spmd");
1838 break;
1839 }
1840 case OMPRTL_NVPTX__kmpc_warp_active_thread_mask: {
1841 // Build int32_t __kmpc_warp_active_thread_mask(void);
1842 auto *FnTy =
1843 llvm::FunctionType::get(CGM.Int32Ty, llvm::None, /*isVarArg=*/false);
1844 RTLFn = CGM.CreateConvergentRuntimeFunction(FnTy, "__kmpc_warp_active_thread_mask");
1845 break;
1846 }
1847 case OMPRTL_NVPTX__kmpc_syncwarp: {
1848 // Build void __kmpc_syncwarp(kmp_int32 Mask);
1849 auto *FnTy =
1850 llvm::FunctionType::get(CGM.VoidTy, CGM.Int32Ty, /*isVarArg=*/false);
1851 RTLFn = CGM.CreateConvergentRuntimeFunction(FnTy, "__kmpc_syncwarp");
1852 break;
1853 }
1854 }
1855 return RTLFn;
1856}
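// Annotation (not part of the original source): a typical call site, as seen
// throughout this file:
//   CGF.EmitRuntimeCall(
//       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
//       llvm::None);
// CGM.CreateRuntimeFunction returns the already-declared function on repeated
// requests, so calling this helper several times for one enumerator is cheap.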
1857
1858void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
1859 llvm::Constant *Addr,
1860 uint64_t Size, int32_t,
1861 llvm::GlobalValue::LinkageTypes) {
1862 // TODO: Add support for global variables on the device after declare target
1863 // support.
1864 if (!isa<llvm::Function>(Addr))
1865 return;
1866 llvm::Module &M = CGM.getModule();
1867 llvm::LLVMContext &Ctx = CGM.getLLVMContext();
1868
1869 // Get "nvvm.annotations" metadata node
1870 llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
1871
1872 llvm::Metadata *MDVals[] = {
1873 llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
1874 llvm::ConstantAsMetadata::get(
1875 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
1876 // Append metadata to nvvm.annotations
1877 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
1878}
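// Annotation (not part of the original source): for a hypothetical entry
// function @foo, the operand appended above renders as:
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @foo, !"kernel", i32 1}
// which is how the NVPTX backend recognizes @foo as a kernel entry point.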
1879
1880void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
1881 const OMPExecutableDirective &D, StringRef ParentName,
1882 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
1883 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
1884 if (!IsOffloadEntry) // Nothing to do.
1885 return;
1886
1887 assert(!ParentName.empty() && "Invalid target region parent name!");
1888
1889 bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
1890 if (Mode)
1891 emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1892 CodeGen);
1893 else
1894 emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1895 CodeGen);
1896
1897 setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
1898}
1899
1900namespace {
1901LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
1902/// Enum for accessing the reserved_2 field of the ident_t struct.
1903enum ModeFlagsTy : unsigned {
1904 /// Bit set to 1 when in SPMD mode.
1905 KMP_IDENT_SPMD_MODE = 0x01,
1906 /// Bit set to 1 when a simplified runtime is used.
1907 KMP_IDENT_SIMPLE_RT_MODE = 0x02,
1908 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
1909};
1910
1911/// Special mode Undefined. Is the combination of Non-SPMD mode + SimpleRuntime.
1912static const ModeFlagsTy UndefinedMode =
1913 (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
1914} // anonymous namespace
1915
1916unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
1917 switch (getExecutionMode()) {
1918 case EM_SPMD:
1919 if (requiresFullRuntime())
1920 return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
1921 return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
1922 case EM_NonSPMD:
1923 assert(requiresFullRuntime() && "Expected full runtime.");
1924 return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
1925 case EM_Unknown:
1926 return UndefinedMode;
1927 }
1928 llvm_unreachable("Unknown flags are requested.");
1929}
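// Annotation (not part of the original source): spelled out, the switch above
// yields these reserved_2 values:
//   EM_SPMD    + full runtime    -> 0x01 (KMP_IDENT_SPMD_MODE only)
//   EM_SPMD    + simple runtime  -> 0x03 (SPMD_MODE | SIMPLE_RT_MODE)
//   EM_NonSPMD (always full)     -> 0x00
//   EM_Unknown                   -> 0x02 (UndefinedMode)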
1930
1931CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
1932 : CGOpenMPRuntime(CGM, "_", "$") {
1933 if (!CGM.getLangOpts().OpenMPIsDevice)
1934 llvm_unreachable("OpenMP NVPTX can only handle device code.");
1935}
1936
1937void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
1938 ProcBindKind ProcBind,
1939 SourceLocation Loc) {
1940 // Do nothing in case of SPMD mode and L0 parallel.
1941 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
1942 return;
1943
1944 CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
1945}
1946
1947void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
1948 llvm::Value *NumThreads,
1949 SourceLocation Loc) {
1950 // Do nothing in case of SPMD mode and L0 parallel.
1951 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
1952 return;
1953
1954 CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
1955}
1956
1957void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
1958 const Expr *NumTeams,
1959 const Expr *ThreadLimit,
1960 SourceLocation Loc) {}
1961
1962llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
1963 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1964 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1965 // Emit target region as a standalone region.
1966 class NVPTXPrePostActionTy : public PrePostActionTy {
1967 bool &IsInParallelRegion;
1968 bool PrevIsInParallelRegion;
1969
1970 public:
1971 NVPTXPrePostActionTy(bool &IsInParallelRegion)
1972 : IsInParallelRegion(IsInParallelRegion) {}
1973 void Enter(CodeGenFunction &CGF) override {
1974 PrevIsInParallelRegion = IsInParallelRegion;
1975 IsInParallelRegion = true;
1976 }
1977 void Exit(CodeGenFunction &CGF) override {
1978 IsInParallelRegion = PrevIsInParallelRegion;
1979 }
1980 } Action(IsInParallelRegion);
1981 CodeGen.setAction(Action);
1982 bool PrevIsInTTDRegion = IsInTTDRegion;
1983 IsInTTDRegion = false;
1984 bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
1985 IsInTargetMasterThreadRegion = false;
1986 auto *OutlinedFun =
1987 cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
1988 D, ThreadIDVar, InnermostKind, CodeGen));
1989 if (CGM.getLangOpts().Optimize) {
1990 OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
1991 OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
1992 OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
1993 }
1994 IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
1995 IsInTTDRegion = PrevIsInTTDRegion;
1996 if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
1997 !IsInParallelRegion) {
1998 llvm::Function *WrapperFun =
1999 createParallelDataSharingWrapper(OutlinedFun, D);
2000 WrapperFunctionsMap[OutlinedFun] = WrapperFun;
2001 }
2002
2003 return OutlinedFun;
2004}
2005
2006/// Get list of lastprivate variables from the teams distribute ... or
2007/// teams {distribute ...} directives.
2008static void
2009getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
2010 llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
2011 assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
2012 "expected teams directive.");
2013 const OMPExecutableDirective *Dir = &D;
2014 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
2015 if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
2016 Ctx,
2017 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
2018 /*IgnoreCaptured=*/true))) {
2019 Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
2020 if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
2021 Dir = nullptr;
2022 }
2023 }
2024 if (!Dir)
2025 return;
2026 for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
2027 for (const Expr *E : C->getVarRefs())
2028 Vars.push_back(getPrivateItem(E));
2029 }
2030}
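// Annotation (not part of the original source): a hypothetical input that
// exercises the helper above:
//   #pragma omp target teams distribute lastprivate(x)
//   for (int i = 0; i < n; ++i) x = i;
// Here 'x' is pushed into Vars so that it can be globalized for the teams
// region in SPMD mode.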
2031
2032/// Get list of reduction variables from the teams ... directives.
2033static void
2034getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
2035 llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
2036 assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
2037 "expected teams directive.");
2038 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
2039 for (const Expr *E : C->privates())
2040 Vars.push_back(getPrivateItem(E));
2041 }
2042}
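// Annotation (not part of the original source): e.g., for a hypothetical
//   #pragma omp target teams reduction(+ : sum)
// the private copy of 'sum' from the reduction clause is collected into Vars
// so it can be globalized in non-SPMD mode (see the caller below).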
2043
2044llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
2045 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
2046 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
2047 SourceLocation Loc = D.getBeginLoc();
2048
2049 const RecordDecl *GlobalizedRD = nullptr;
2050 llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
2051 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
2052 unsigned WarpSize = CGM.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
2053 // Globalize team reductions variable unconditionally in all modes.
2054 if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
2055 getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
2056 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
2057 getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
2058 if (!LastPrivatesReductions.empty()) {
2059 GlobalizedRD = ::buildRecordForGlobalizedVars(
2060 CGM.getContext(), llvm::None, LastPrivatesReductions,
2061 MappedDeclsFields, WarpSize);
2062 }
2063 } else if (!LastPrivatesReductions.empty()) {
2064 assert(!TeamAndReductions.first &&
2065 "Previous team declaration is not expected.");
2066 TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
2067 std::swap(TeamAndReductions.second, LastPrivatesReductions);
2068 }
2069
2070 // Emit target region as a standalone region.
2071 class NVPTXPrePostActionTy : public PrePostActionTy {
2072 SourceLocation &Loc;
2073 const RecordDecl *GlobalizedRD;
2074 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2075 &MappedDeclsFields;
2076
2077 public:
2078 NVPTXPrePostActionTy(
2079 SourceLocation &Loc, const RecordDecl *GlobalizedRD,
2080 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2081 &MappedDeclsFields)
2082 : Loc(Loc), GlobalizedRD(GlobalizedRD),
2083 MappedDeclsFields(MappedDeclsFields) {}
2084 void Enter(CodeGenFunction &CGF) override {
2085 auto &Rt =
2086 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2087 if (GlobalizedRD) {
2088 auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
2089 I->getSecond().GlobalRecord = GlobalizedRD;
2090 I->getSecond().MappedParams =
2091 std::make_unique<CodeGenFunction::OMPMapVars>();
2092 DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
2093 for (const auto &Pair : MappedDeclsFields) {
2094 assert(Pair.getFirst()->isCanonicalDecl() &&
2095 "Expected canonical declaration");
2096 Data.insert(std::make_pair(Pair.getFirst(),
2097 MappedVarData(Pair.getSecond(),
2098 /*IsOnePerTeam=*/true)));
2099 }
2100 }
2101 Rt.emitGenericVarsProlog(CGF, Loc);
2102 }
2103 void Exit(CodeGenFunction &CGF) override {
2104 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
2105 .emitGenericVarsEpilog(CGF);
2106 }
2107 } Action(Loc, GlobalizedRD, MappedDeclsFields);
2108 CodeGen.setAction(Action);
2109 llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
2110 D, ThreadIDVar, InnermostKind, CodeGen);
2111 if (CGM.getLangOpts().Optimize) {
2112 OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
2113 OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
2114 OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
2115 }
2116
2117 return OutlinedFun;
2118}
2119
2120void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
2121 SourceLocation Loc,
2122 bool WithSPMDCheck) {
2123 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
2124 getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
2125 return;
2126
2127 CGBuilderTy &Bld = CGF.Builder;
2128
2129 const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
2130 if (I == FunctionGlobalizedDecls.end())
2131 return;
2132 if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
2133 QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
2134 QualType SecGlobalRecTy;
2135
2136 // Recover pointer to this function's global record. The runtime will
2137 // handle the specifics of the allocation of the memory.
2138 // Use actual memory size of the record including the padding
2139 // for alignment purposes.
2140 unsigned Alignment =
2141 CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
2142 unsigned GlobalRecordSize =
2143 CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
2144 GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
2145
2146 llvm::PointerType *GlobalRecPtrTy =
2147 CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
2148 llvm::Value *GlobalRecCastAddr;
2149 llvm::Value *IsTTD = nullptr;
2150 if (!IsInTTDRegion &&
2151 (WithSPMDCheck ||
2152 getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
2153 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2154 llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
2155 llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
2156 if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
2157 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2158 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2159 llvm::Value *PL = CGF.EmitRuntimeCall(
2160 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
2161 {RTLoc, ThreadID});
2162 IsTTD = Bld.CreateIsNull(PL);
2163 }
2164 llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
2165 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
2166 Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
2167 // There is no need to emit line number for unconditional branch.
2168 (void)ApplyDebugLocation::CreateEmpty(CGF);
2169 CGF.EmitBlock(SPMDBB);
2170 Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
2171 CharUnits::fromQuantity(Alignment));
2172 CGF.EmitBranch(ExitBB);
2173 // There is no need to emit line number for unconditional branch.
2174 (void)ApplyDebugLocation::CreateEmpty(CGF);
2175 CGF.EmitBlock(NonSPMDBB);
2176 llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
2177 if (const RecordDecl *SecGlobalizedVarsRecord =
2178 I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
2179 SecGlobalRecTy =
2180 CGM.getContext().getRecordType(SecGlobalizedVarsRecord);
2181
2182 // Recover pointer to this function's global record. The runtime will
2183 // handle the specifics of the allocation of the memory.
2184 // Use actual memory size of the record including the padding
2185 // for alignment purposes.
2186 unsigned Alignment =
2187 CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
2188 unsigned GlobalRecordSize =
2189 CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
2190 GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
2191 Size = Bld.CreateSelect(
2192 IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
2193 }
2194 // TODO: allow the usage of shared memory to be controlled by
2195 // the user, for now, default to global.
2196 llvm::Value *GlobalRecordSizeArg[] = {
2197 Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
2198 llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
2199 createNVPTXRuntimeFunction(
2200 OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
2201 GlobalRecordSizeArg);
2202 GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2203 GlobalRecValue, GlobalRecPtrTy);
2204 CGF.EmitBlock(ExitBB);
2205 auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
2206 /*NumReservedValues=*/2, "_select_stack");
2207 Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
2208 Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
2209 GlobalRecCastAddr = Phi;
2210 I->getSecond().GlobalRecordAddr = Phi;
2211 I->getSecond().IsInSPMDModeFlag = IsSPMD;
2212 } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
2213 assert(GlobalizedRecords.back().Records.size() < 2 &&
2214 "Expected less than 2 globalized records: one for target and one "
2215 "for teams.");
2216 unsigned Offset = 0;
2217 for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
2218 QualType RDTy = CGM.getContext().getRecordType(RD);
2219 unsigned Alignment =
2220 CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
2221 unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
2222 Offset =
2223 llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
2224 }
2225 unsigned Alignment =
2226 CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
2227 Offset = llvm::alignTo(Offset, Alignment);
2228 GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
2229 ++GlobalizedRecords.back().RegionCounter;
2230 if (GlobalizedRecords.back().Records.size() == 1) {
2231 assert(KernelStaticGlobalized &&
2232 "Kernel static pointer must be initialized already.");
2233 auto *UseSharedMemory = new llvm::GlobalVariable(
2234 CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
2235 llvm::GlobalValue::InternalLinkage, nullptr,
2236 "_openmp_static_kernel$is_shared");
2237 UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2238 QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
2239 /*DestWidth=*/16, /*Signed=*/0);
2240 llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
2241 Address(UseSharedMemory,
2242 CGM.getContext().getTypeAlignInChars(Int16Ty)),
2243 /*Volatile=*/false, Int16Ty, Loc);
2244 auto *StaticGlobalized = new llvm::GlobalVariable(
2245 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2246 llvm::GlobalValue::CommonLinkage, nullptr);
2247 auto *RecSize = new llvm::GlobalVariable(
2248 CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
2249 llvm::GlobalValue::InternalLinkage, nullptr,
2250 "_openmp_static_kernel$size");
2251 RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2252 llvm::Value *Ld = CGF.EmitLoadOfScalar(
2253 Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
2254 CGM.getContext().getSizeType(), Loc);
2255 llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2256 KernelStaticGlobalized, CGM.VoidPtrPtrTy);
2257 llvm::Value *GlobalRecordSizeArg[] = {
2258 llvm::ConstantInt::get(
2259 CGM.Int16Ty,
2260 getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
2261 StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
2262 CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
2263 OMPRTL_NVPTX__kmpc_get_team_static_memory),
2264 GlobalRecordSizeArg);
2265 GlobalizedRecords.back().Buffer = StaticGlobalized;
2266 GlobalizedRecords.back().RecSize = RecSize;
2267 GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
2268 GlobalizedRecords.back().Loc = Loc;
2269 }
2270 assert(KernelStaticGlobalized && "Global address must be set already.");
2271 Address FrameAddr = CGF.EmitLoadOfPointer(
2272 Address(KernelStaticGlobalized, CGM.getPointerAlign()),
2273 CGM.getContext()
2274 .getPointerType(CGM.getContext().VoidPtrTy)
2275 .castAs<PointerType>());
2276 llvm::Value *GlobalRecValue =
2277 Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
2278 I->getSecond().GlobalRecordAddr = GlobalRecValue;
2279 I->getSecond().IsInSPMDModeFlag = nullptr;
2280 GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2281 GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
2282 } else {
2283 // TODO: allow the usage of shared memory to be controlled by
2284 // the user, for now, default to global.
2285 bool UseSharedMemory =
2286 IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
2287 llvm::Value *GlobalRecordSizeArg[] = {
2288 llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
2289 CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
2290 llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
2291 createNVPTXRuntimeFunction(
2292 IsInTTDRegion
2293 ? OMPRTL_NVPTX__kmpc_data_sharing_push_stack
2294 : OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
2295 GlobalRecordSizeArg);
2296 GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2297 GlobalRecValue, GlobalRecPtrTy);
2298 I->getSecond().GlobalRecordAddr = GlobalRecValue;
2299 I->getSecond().IsInSPMDModeFlag = nullptr;
2300 }
2301 LValue Base =
2302 CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);
2303
2304 // Emit the "global alloca" which is a GEP from the global declaration
2305 // record using the pointer returned by the runtime.
2306 LValue SecBase;
2307 decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
2308 if (IsTTD) {
2309 SecIt = I->getSecond().SecondaryLocalVarData->begin();
2310 llvm::PointerType *SecGlobalRecPtrTy =
2311 CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
2312 SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
2313 Bld.CreatePointerBitCastOrAddrSpaceCast(
2314 I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
2315 SecGlobalRecTy);
2316 }
2317 for (auto &Rec : I->getSecond().LocalVarData) {
2318 bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
2319 llvm::Value *ParValue;
2320 if (EscapedParam) {
2321 const auto *VD = cast<VarDecl>(Rec.first);
2322 LValue ParLVal =
2323 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
2324 ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
2325 }
2326 LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
2327 // Emit VarAddr basing on lane-id if required.
2328 QualType VarTy;
2329 if (Rec.second.IsOnePerTeam) {
2330 VarTy = Rec.second.FD->getType();
2331 } else {
2332 llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
2333 VarAddr.getAddress(CGF).getPointer(),
2334 {Bld.getInt32(0), getNVPTXLaneID(CGF)});
2335 VarTy =
2336 Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
2337 VarAddr = CGF.MakeAddrLValue(
2338 Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
2339 AlignmentSource::Decl);
2340 }
2341 Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
2342 if (!IsInTTDRegion &&
2343 (WithSPMDCheck ||
2344 getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
2345 assert(I->getSecond().IsInSPMDModeFlag &&
2346 "Expected unknown execution mode or required SPMD check.");
2347 if (IsTTD) {
2348 assert(SecIt->second.IsOnePerTeam &&
2349 "Secondary glob data must be one per team.");
2350 LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
2351 VarAddr.setAddress(
2352 Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(CGF),
2353 VarAddr.getPointer(CGF)),
2354 VarAddr.getAlignment()));
2355 Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
2356 }
2357 Address GlobalPtr = Rec.second.PrivateAddr;
2358 Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
2359 Rec.second.PrivateAddr = Address(
2360 Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
2361 LocalAddr.getPointer(), GlobalPtr.getPointer()),
2362 LocalAddr.getAlignment());
2363 }
2364 if (EscapedParam) {
2365 const auto *VD = cast<VarDecl>(Rec.first);
2366 CGF.EmitStoreOfScalar(ParValue, VarAddr);
2367 I->getSecond().MappedParams->setVarAddr(CGF, VD,
2368 VarAddr.getAddress(CGF));
2369 }
2370 if (IsTTD)
2371 ++SecIt;
2372 }
2373 }
2374 for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
2375 // Recover pointer to this function's global record. The runtime will
2376 // handle the specifics of the allocation of the memory.
2377 // Use actual memory size of the record including the padding
2378 // for alignment purposes.
2379 CGBuilderTy &Bld = CGF.Builder;
2380 llvm::Value *Size = CGF.getTypeSize(VD->getType());
2381 CharUnits Align = CGM.getContext().getDeclAlign(VD);
2382 Size = Bld.CreateNUWAdd(
2383 Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
2384 llvm::Value *AlignVal =
2385 llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
2386 Size = Bld.CreateUDiv(Size, AlignVal);
2387 Size = Bld.CreateNUWMul(Size, AlignVal);
2388 // TODO: allow the usage of shared memory to be controlled by
2389 // the user, for now, default to global.
2390 llvm::Value *GlobalRecordSizeArg[] = {
2391 Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
2392 llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
2393 createNVPTXRuntimeFunction(
2394 OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
2395 GlobalRecordSizeArg);
2396 llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2397 GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
2398 LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
2399 CGM.getContext().getDeclAlign(VD),
2400 AlignmentSource::Decl);
2401 I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
2402 Base.getAddress(CGF));
2403 I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
2404 }
2405 I->getSecond().MappedParams->apply(CGF);
2406}
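// Annotation (not part of the original source): a minimal sketch of what this
// prolog materializes. Assuming a single escaped 'int x' that is not
// one-per-team and a warp size of 32, the globalized record looks roughly like
//   struct _globalized_locals_ty { int x[32]; }; // one slot per lane
// The record is carved out of the pointer returned by
// __kmpc_data_sharing_push_stack (or out of the static team buffer), and the
// per-lane address &rec->x[lane_id] replaces the local alloca.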
2407
2408void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
2409 bool WithSPMDCheck) {
2410 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
2411 getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
2412 return;
2413
2414 const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
2415 if (I != FunctionGlobalizedDecls.end()) {
2416 I->getSecond().MappedParams->restore(CGF);
2417 if (!CGF.HaveInsertPoint())
2418 return;
2419 for (llvm::Value *Addr :
2420 llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
2421 CGF.EmitRuntimeCall(
2422 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
2423 Addr);
2424 }
2425 if (I->getSecond().GlobalRecordAddr) {
2426 if (!IsInTTDRegion &&
2427 (WithSPMDCheck ||
2428 getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
2429 CGBuilderTy &Bld = CGF.Builder;
2430 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2431 llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
2432 Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
2433 // There is no need to emit line number for unconditional branch.
2434 (void)ApplyDebugLocation::CreateEmpty(CGF);
2435 CGF.EmitBlock(NonSPMDBB);
2436 CGF.EmitRuntimeCall(
2437 createNVPTXRuntimeFunction(
2438 OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
2439 CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
2440 CGF.EmitBlock(ExitBB);
2441 } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
2442 assert(GlobalizedRecords.back().RegionCounter > 0 &&
2443 "region counter must be > 0.");
2444 --GlobalizedRecords.back().RegionCounter;
2445 // Emit the restore function only in the target region.
2446 if (GlobalizedRecords.back().RegionCounter == 0) {
2447 QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
2448 /*DestWidth=*/16, /*Signed=*/0);
2449 llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
2450 Address(GlobalizedRecords.back().UseSharedMemory,
2451 CGM.getContext().getTypeAlignInChars(Int16Ty)),
2452 /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
2453 llvm::Value *Args[] = {
2454 llvm::ConstantInt::get(
2455 CGM.Int16Ty,
2456 getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
2457 IsInSharedMemory};
2458 CGF.EmitRuntimeCall(
2459 createNVPTXRuntimeFunction(
2460 OMPRTL_NVPTX__kmpc_restore_team_static_memory),
2461 Args);
2462 }
2463 } else {
2464 CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
2465 OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
2466 I->getSecond().GlobalRecordAddr);
2467 }
2468 }
2469 }
2470}
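The prolog pushes each globalized record with __kmpc_data_sharing_coalesced_push_stack and this epilog pops them back in llvm::reverse order. As a minimal sketch, assuming nothing about the real device runtime, the LIFO discipline the two functions must agree on looks like this in plain C++ (the push/pop methods are stand-ins for the __kmpc_data_sharing_* entry points):

#include <cassert>
#include <cstddef>
#include <vector>

struct DataSharingStack {
  std::vector<std::vector<std::byte>> Frames;
  void *push(std::size_t Size) {     // stands in for ..._push_stack
    Frames.emplace_back(Size);
    return Frames.back().data();
  }
  void pop(void *Addr) {             // stands in for ..._pop_stack
    assert(!Frames.empty() && Frames.back().data() == Addr &&
           "pops must mirror pushes in reverse order");
    Frames.pop_back();
  }
};

int main() {
  DataSharingStack S;
  void *A = S.push(64);   // prolog: globalize first escaped VLA
  void *B = S.push(128);  // prolog: globalize second escaped VLA
  S.pop(B);               // epilog walks EscapedVariableLengthDeclsAddrs
  S.pop(A);               // in reverse, so pops mirror the pushes
  return 0;
}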
2471
2472void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
2473 const OMPExecutableDirective &D,
2474 SourceLocation Loc,
2475 llvm::Function *OutlinedFn,
2476 ArrayRef<llvm::Value *> CapturedVars) {
2477 if (!CGF.HaveInsertPoint())
2478 return;
2479
2480 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2481 /*Name=*/".zero.addr");
2482 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2483 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2484 OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
2485 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2486 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2487 emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2488}
2489
2490void CGOpenMPRuntimeGPU::emitParallelCall(
2491 CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
2492 ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2493 if (!CGF.HaveInsertPoint())
2494 return;
2495
2496 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
2497 emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
2498 else
2499 emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
2500}
2501
2502void CGOpenMPRuntimeGPU::emitNonSPMDParallelCall(
2503 CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
2504 ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2505 llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
2506
2507 // Force inline this outlined function at its call site.
2508 Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
2509
2510 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2511 /*Name=*/".zero.addr");
2512 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2513 // ThreadId for serialized parallels is 0.
2514 Address ThreadIDAddr = ZeroAddr;
2515 auto &&CodeGen = [this, Fn, CapturedVars, Loc, &ThreadIDAddr](
2516 CodeGenFunction &CGF, PrePostActionTy &Action) {
2517 Action.Enter(CGF);
2518
2519 Address ZeroAddr =
2520 CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2521 /*Name=*/".bound.zero.addr");
2522 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2523 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2524 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2525 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2526 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2527 emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
2528 };
2529 auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
2530 PrePostActionTy &) {
2531
2532 RegionCodeGenTy RCG(CodeGen);
2533 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2534 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2535 llvm::Value *Args[] = {RTLoc, ThreadID};
2536
2537 NVPTXActionTy Action(
2538 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
2539 Args,
2540 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
2541 Args);
2542 RCG.setAction(Action);
2543 RCG(CGF);
2544 };
2545
2546 auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
2547 PrePostActionTy &Action) {
2548 CGBuilderTy &Bld = CGF.Builder;
2549 llvm::Function *WFn = WrapperFunctionsMap[Fn];
2550 assert(WFn && "Wrapper function does not exist!");
2551 llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
2552
2553 // Prepare for parallel region. Indicate the outlined function.
2554 llvm::Value *Args[] = {ID};
2555 CGF.EmitRuntimeCall(
2556 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
2557 Args);
2558
2559 // Create a private scope that will globalize the arguments
2560 // passed from the outside of the target region.
2561 CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
2562
2563 // There's something to share.
2564 if (!CapturedVars.empty()) {
2565 // Prepare the buffer of references to the variables being shared.
2566 Address SharedArgs =
2567 CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
2568 llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
2569
2570 llvm::Value *DataSharingArgs[] = {
2571 SharedArgsPtr,
2572 llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
2573 CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
2574 OMPRTL_NVPTX__kmpc_begin_sharing_variables),
2575 DataSharingArgs);
2576
2577 // Store variable address in a list of references to pass to workers.
2578 unsigned Idx = 0;
2579 ASTContext &Ctx = CGF.getContext();
2580 Address SharedArgListAddress = CGF.EmitLoadOfPointer(
2581 SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
2582 .castAs<PointerType>());
2583 for (llvm::Value *V : CapturedVars) {
2584 Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
2585 llvm::Value *PtrV;
2586 if (V->getType()->isIntegerTy())
2587 PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
2588 else
2589 PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
2590 CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
2591 Ctx.getPointerType(Ctx.VoidPtrTy));
2592 ++Idx;
2593 }
2594 }
2595
2596 // Activate workers. This barrier is used by the master to signal
2597 // work for the workers.
2598 syncCTAThreads(CGF);
2599
2600 // OpenMP [2.5, Parallel Construct, p.49]
2601 // There is an implied barrier at the end of a parallel region. After the
2602 // end of a parallel region, only the master thread of the team resumes
2603 // execution of the enclosing task region.
2604 //
2605 // The master waits at this barrier until all workers are done.
2606 syncCTAThreads(CGF);
2607
2608 if (!CapturedVars.empty())
2609 CGF.EmitRuntimeCall(
2610 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_sharing_variables));
2611
2612 // Remember for post-processing in worker loop.
2613 Work.emplace_back(WFn);
2614 };
2615
2616 auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
2617 CodeGenFunction &CGF, PrePostActionTy &Action) {
2618 if (IsInParallelRegion) {
2619 SeqGen(CGF, Action);
2620 } else if (IsInTargetMasterThreadRegion) {
2621 L0ParallelGen(CGF, Action);
2622 } else {
2623 // Check for master and then parallelism:
2624 // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
2625 // Serialized execution.
2626 // } else {
2627 // Worker call.
2628 // }
2629 CGBuilderTy &Bld = CGF.Builder;
2630 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2631 llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
2632 llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
2633 llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
2634 llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
2635 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
2636 Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
2637 // There is no need to emit line number for unconditional branch.
2638 (void)ApplyDebugLocation::CreateEmpty(CGF);
2639 CGF.EmitBlock(ParallelCheckBB);
2640 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2641 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2642 llvm::Value *PL = CGF.EmitRuntimeCall(
2643 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
2644 {RTLoc, ThreadID});
2645 llvm::Value *Res = Bld.CreateIsNotNull(PL);
2646 Bld.CreateCondBr(Res, SeqBB, MasterBB);
2647 CGF.EmitBlock(SeqBB);
2648 SeqGen(CGF, Action);
2649 CGF.EmitBranch(ExitBB);
2650 // There is no need to emit line number for unconditional branch.
2651 (void)ApplyDebugLocation::CreateEmpty(CGF);
2652 CGF.EmitBlock(MasterBB);
2653 L0ParallelGen(CGF, Action);
2654 CGF.EmitBranch(ExitBB);
2655 // There is no need to emit line number for unconditional branch.
2656 (void)ApplyDebugLocation::CreateEmpty(CGF);
2657 // Emit the continuation block for code after the if.
2658 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2659 }
2660 };
2661
2662 if (IfCond) {
2663 emitIfClause(CGF, IfCond, LNParallelGen, SeqGen);
2664 } else {
2665 CodeGenFunction::RunCleanupsScope Scope(CGF);
2666 RegionCodeGenTy ThenRCG(LNParallelGen);
2667 ThenRCG(CGF);
2668 }
2669}
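The branch structure LNParallelGen emits for the general case follows the pseudocode in its comment. A condensed host-side model, with the __kmpc_* device queries replaced by stubs (illustrative only; the stub names and return values are assumptions):

#include <cstdio>

static bool is_spmd_exec_mode() { return false; } // stub for __kmpc_is_spmd_exec_mode
static int parallel_level() { return 0; }         // stub for __kmpc_parallel_level

static void run_parallel(void (*SeqGen)(), void (*L0ParallelGen)()) {
  // Master check first, then parallelism: serialize if we are already in
  // SPMD mode or inside a parallel region, otherwise hand off to workers.
  if (is_spmd_exec_mode() || parallel_level() > 0)
    SeqGen();        // serialized execution
  else
    L0ParallelGen(); // worker call
}

int main() {
  run_parallel([] { std::puts("sequential"); },
               [] { std::puts("worker call"); });
  return 0;
}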
2670
2671void CGOpenMPRuntimeGPU::emitSPMDParallelCall(
2672 CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
2673 ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2674 // Just call the outlined function to execute the parallel region.
2675 // OutlinedFn(&GTid, &zero, CapturedStruct);
2676 //
2677 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2678
2679 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2680 /*Name=*/".zero.addr");
2681 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2682 // ThreadId for serialized parallels is 0.
2683 Address ThreadIDAddr = ZeroAddr;
2684 auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, &ThreadIDAddr](
2685 CodeGenFunction &CGF, PrePostActionTy &Action) {
2686 Action.Enter(CGF);
2687
2688 Address ZeroAddr =
2689 CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2690 /*Name=*/".bound.zero.addr");
2691 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2692 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2693 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2694 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2695 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2696 emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2697 };
2698 auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
2699 PrePostActionTy &) {
2700
2701 RegionCodeGenTy RCG(CodeGen);
2702 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2703 llvm::Value *ThreadID = getThreadID(CGF, Loc);
2704 llvm::Value *Args[] = {RTLoc, ThreadID};
2705
2706 NVPTXActionTy Action(
2707 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
2708 Args,
2709 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
2710 Args);
2711 RCG.setAction(Action);
2712 RCG(CGF);
2713 };
2714
2715 if (IsInTargetMasterThreadRegion) {
2716 // In the worker we need to use the real thread id.
2717 ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
2718 RegionCodeGenTy RCG(CodeGen);
2719 RCG(CGF);
2720 } else {
2721 // If we are not in the target region, it is definitely L2 parallelism or
2722 // more, because in SPMD mode we always have an L1 parallel level, so we don't
2723 // need to check for orphaned directives.
2724 RegionCodeGenTy RCG(SeqGen);
2725 RCG(CGF);
2726 }
2727}
2728
2729void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
2730 // Always emit simple barriers!
2731 if (!CGF.HaveInsertPoint())
2732 return;
2733 // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
2734 // This function does not use parameters, so we can emit just default values.
2735 llvm::Value *Args[] = {
2736 llvm::ConstantPointerNull::get(
2737 cast<llvm::PointerType>(getIdentTyPointerTy())),
2738 llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
2739 llvm::CallInst *Call = CGF.EmitRuntimeCall(
2740 createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier_simple_spmd), Args);
2741 Call->setConvergent();
2742}
2743
2744void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
2745 SourceLocation Loc,
2746 OpenMPDirectiveKind Kind, bool,
2747 bool) {
2748 // Always emit simple barriers!
2749 if (!CGF.HaveInsertPoint())
2750 return;
2751 // Build call __kmpc_cancel_barrier(loc, thread_id);
2752 unsigned Flags = getDefaultFlagsForBarriers(Kind);
2753 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2754 getThreadID(CGF, Loc)};
2755 llvm::CallInst *Call = CGF.EmitRuntimeCall(
2756 createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier), Args);
2757 Call->setConvergent();
2758}
2759
2760void CGOpenMPRuntimeGPU::emitCriticalRegion(
2761 CodeGenFunction &CGF, StringRef CriticalName,
2762 const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
2763 const Expr *Hint) {
2764 llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
2765 llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
2766 llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
2767 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
2768 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
2769
2770 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2771
2772 // Get the mask of active threads in the warp.
2773 llvm::Value *Mask = CGF.EmitRuntimeCall(
2774 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_warp_active_thread_mask));
2775 // Fetch team-local id of the thread.
2776 llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
2777
2778 // Get the width of the team.
2779 llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF);
2780
2781 // Initialize the counter variable for the loop.
2782 QualType Int32Ty =
2783 CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
2784 Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
2785 LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
2786 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
2787 /*isInit=*/true);
2788
2789 // Block checks whether the loop counter exceeds the upper bound.
2790 CGF.EmitBlock(LoopBB);
2791 llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2792 llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
2793 CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
2794
2795 // Block tests which single thread should execute the region, and which
2796 // threads should go straight to the synchronisation point.
2797 CGF.EmitBlock(TestBB);
2798 CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2799 llvm::Value *CmpThreadToCounter =
2800 CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
2801 CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
2802
2803 // Block emits the body of the critical region.
2804 CGF.EmitBlock(BodyBB);
2805
2806 // Output the critical statement.
2807 CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
2808 Hint);
2809
2810 // After the body surrounded by the critical region, the single executing
2811 // thread will jump to the synchronisation point.
2812 // Block waits for all threads in the current team to finish, then
2813 // increments the counter variable and returns to the loop.
2814 CGF.EmitBlock(SyncBB);
2815 // Reconverge active threads in the warp.
2816 (void)CGF.EmitRuntimeCall(
2817 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_syncwarp), Mask);
2818
2819 llvm::Value *IncCounterVal =
2820 CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
2821 CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
2822 CGF.EmitBranch(LoopBB);
2823
2824 // Block that is reached when all threads in the team complete the region.
2825 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2826}
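The emitted critical region is a turnstile: each thread loops over counter values 0..TeamWidth-1, only the thread whose id equals the counter executes the body, and all threads re-synchronize before the counter advances. A standalone C++20 model of the same block structure, using std::barrier where the emitted code calls __kmpc_syncwarp (a sketch, not the generated IR):

#include <barrier>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  const int TeamWidth = 4;
  std::barrier<> Sync(TeamWidth); // plays the role of omp.critical.sync
  std::vector<std::thread> Team;
  for (int ThreadID = 0; ThreadID < TeamWidth; ++ThreadID)
    Team.emplace_back([&Sync, ThreadID, TeamWidth] {
      for (int Counter = 0; Counter < TeamWidth; ++Counter) { // omp.critical.loop
        if (ThreadID == Counter)                              // omp.critical.test
          std::printf("critical body on thread %d\n", ThreadID); // omp.critical.body
        Sync.arrive_and_wait();
      }
    });
  for (std::thread &T : Team)
    T.join();
  return 0;
}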
2827
2828/// Cast value to the specified type.
2829static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
2830 QualType ValTy, QualType CastTy,
2831 SourceLocation Loc) {
2832 assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
2833 "Cast type must sized.");
2834 assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
2835 "Val type must sized.");
2836 llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
2837 if (ValTy == CastTy)
2838 return Val;
2839 if (CGF.getContext().getTypeSizeInChars(ValTy) ==
2840 CGF.getContext().getTypeSizeInChars(CastTy))
2841 return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
2842 if (CastTy->isIntegerType() && ValTy->isIntegerType())
2843 return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
2844 CastTy->hasSignedIntegerRepresentation());
2845 Address CastItem = CGF.CreateMemTemp(CastTy);
2846 Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2847 CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
2848 CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
2849 LValueBaseInfo(AlignmentSource::Type),
2850 TBAAAccessInfo());
2851 return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
2852 LValueBaseInfo(AlignmentSource::Type),
2853 TBAAAccessInfo());
2854}
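When neither a plain bitcast nor an integer cast applies, castValueToType stores the value with its source type and reloads it with the destination type. A hedged sketch of both paths, assuming 4-byte float and unsigned; std::memcpy plays the role of the store/load through the retyped temporary:

#include <cstring>

template <typename CastTy, typename ValTy>
CastTy cast_value_to_type(ValTy Val) {
  if constexpr (sizeof(ValTy) == sizeof(CastTy)) {
    // Same size: reinterpret the bits, like the CreateBitCast path.
    CastTy Out;
    std::memcpy(&Out, &Val, sizeof(Out));
    return Out;
  } else {
    // Different sizes: widen/narrow, like the CreateIntCast path.
    return static_cast<CastTy>(Val);
  }
}

int main() {
  float F = 1.0f;
  unsigned Bits = cast_value_to_type<unsigned>(F);
  return Bits == 0x3f800000u ? 0 : 1; // bit pattern of 1.0f
}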
2855
2856/// This function creates calls to one of two shuffle functions to copy
2857/// variables between lanes in a warp.
2858static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
2859 llvm::Value *Elem,
2860 QualType ElemType,
2861 llvm::Value *Offset,
2862 SourceLocation Loc) {
2863 CodeGenModule &CGM = CGF.CGM;
2864 CGBuilderTy &Bld = CGF.Builder;
2865 CGOpenMPRuntimeGPU &RT =
2866 *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
2867
2868 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2869 assert(Size.getQuantity() <= 8 &&
2870 "Unsupported bitwidth in shuffle instruction.");
2871
2872 OpenMPRTLFunctionNVPTX ShuffleFn = Size.getQuantity() <= 4
2873 ? OMPRTL_NVPTX__kmpc_shuffle_int32
2874 : OMPRTL_NVPTX__kmpc_shuffle_int64;
2875
2876 // Cast all types to 32- or 64-bit values before calling shuffle routines.
2877 QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
2878 Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
2879 llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
2880 llvm::Value *WarpSize =
2881 Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
2882
2883 llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
2884 RT.createNVPTXRuntimeFunction(ShuffleFn), {ElemCast, Offset, WarpSize});
2885
2886 return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
2887}
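The helper widens the element to a 32- or 64-bit integer and then picks __kmpc_shuffle_int32 or __kmpc_shuffle_int64 by element size. A sketch of that selection for integer element types, with the runtime entry points stubbed out (the stubs echo their input; a real shuffle would return the remote lane's value):

#include <cstdint>

static int32_t shuffle_int32(int32_t V, int16_t, int16_t) { return V; } // stub
static int64_t shuffle_int64(int64_t V, int16_t, int16_t) { return V; } // stub

template <typename T>
static int64_t runtime_shuffle(T Elem, int16_t Offset, int16_t WarpSize) {
  // Mirror the castValueToType calls: widen first, then pick the entry
  // point whose width matches.
  if constexpr (sizeof(T) <= 4)
    return shuffle_int32(static_cast<int32_t>(Elem), Offset, WarpSize);
  else
    return shuffle_int64(static_cast<int64_t>(Elem), Offset, WarpSize);
}

int main() {
  short S = 7; // 2 bytes -> the 32-bit entry point
  return runtime_shuffle(S, /*Offset=*/1, /*WarpSize=*/32) == 7 ? 0 : 1;
}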
2888
2889static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
2890 Address DestAddr, QualType ElemType,
2891 llvm::Value *Offset, SourceLocation Loc) {
2892 CGBuilderTy &Bld = CGF.Builder;
2893
2894 CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2895 // Create the loop over the big sized data.
2896 // ptr = (void*)Elem;
2897 // ptrEnd = (void*) Elem + 1;
2898 // Step = 8;
2899 // while (ptr + Step < ptrEnd)
2900 // shuffle((int64_t)*ptr);
2901 // Step = 4;
2902 // while (ptr + Step < ptrEnd)
2903 // shuffle((int32_t)*ptr);
2904 // ...
2905 Address ElemPtr = DestAddr;
2906 Address Ptr = SrcAddr;
2907 Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
2908 Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
2909 for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
2910 if (Size < CharUnits::fromQuantity(IntSize))
2911 continue;
2912 QualType IntType = CGF.getContext().getIntTypeForBitwidth(
2913 CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
2914 /*Signed=*/1);
2915 llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
2916 Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
2917 ElemPtr =
2918 Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
2919 if (Size.getQuantity() / IntSize > 1) {
2920 llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
2921 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
2922 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
2923 llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
2924 CGF.EmitBlock(PreCondBB);
2925 llvm::PHINode *PhiSrc =
2926 Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
2927 PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
2928 llvm::PHINode *PhiDest =
2929 Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
2930 PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
2931 Ptr = Address(PhiSrc, Ptr.getAlignment());
2932 ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
2933 llvm::Value *PtrDiff = Bld.CreatePtrDiff(
2934 PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
2935 Ptr.getPointer(), CGF.VoidPtrTy));
2936 Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
2937 ThenBB, ExitBB);
2938 CGF.EmitBlock(ThenBB);
2939 llvm::Value *Res = createRuntimeShuffleFunction(
2940 CGF,
2941 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
2942 LValueBaseInfo(AlignmentSource::Type),
2943 TBAAAccessInfo()),
2944 IntType, Offset, Loc);
2945 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
2946 LValueBaseInfo(AlignmentSource::Type),
2947 TBAAAccessInfo());
2948 Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
2949 Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
2950 PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
2951 PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
2952 CGF.EmitBranch(PreCondBB);
2953 CGF.EmitBlock(ExitBB);
2954 } else {
2955 llvm::Value *Res = createRuntimeShuffleFunction(
2956 CGF,
2957 CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
2958 LValueBaseInfo(AlignmentSource::Type),
2959 TBAAAccessInfo()),
2960 IntType, Offset, Loc);
2961 CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
2962 LValueBaseInfo(AlignmentSource::Type),
2963 TBAAAccessInfo());
2964 Ptr = Bld.CreateConstGEP(Ptr, 1);
2965 ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
2966 }
2967 Size = Size % IntSize;
2968 }
2969}
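The loop in the comment above decomposes an element of arbitrary byte size into 8-, 4-, 2- and 1-byte chunks, each chunk moved by one shuffle. A host-side model of the chunking, where std::memcpy stands in for a single warp shuffle (a sketch of the decomposition only, not of the PHI-based IR loop):

#include <cstring>

static void shuffle_and_store(const unsigned char *Src, unsigned char *Dst,
                              unsigned Size) {
  for (unsigned IntSize = 8; IntSize >= 1; IntSize /= 2) {
    while (Size >= IntSize) {         // matches the emitted loop guard
      std::memcpy(Dst, Src, IntSize); // one warp shuffle per chunk
      Src += IntSize;
      Dst += IntSize;
      Size -= IntSize;
    }
  }
}

int main() {
  unsigned char In[11], Out[11] = {};
  for (unsigned I = 0; I < sizeof In; ++I)
    In[I] = static_cast<unsigned char>(I);
  shuffle_and_store(In, Out, sizeof In); // 11 bytes -> 8 + 2 + 1
  return std::memcmp(In, Out, sizeof In) == 0 ? 0 : 1;
}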
2970
2971namespace {
2972enum CopyAction : unsigned {
2973 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
2974 // the warp using shuffle instructions.
2975 RemoteLaneToThread,
2976 // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
2977 ThreadCopy,
2978 // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
2979 ThreadToScratchpad,
2980 // ScratchpadToThread: Copy from a scratchpad array in global memory
2981 // containing team-reduced data to a thread's stack.
2982 ScratchpadToThread,
2983};
2984} // namespace
2985
2986struct CopyOptionsTy {
2987 llvm::Value *RemoteLaneOffset;
2988 llvm::Value *ScratchpadIndex;
2989 llvm::Value *ScratchpadWidth;
2990};
2991
2992/// Emit instructions to copy a Reduce list, which contains partially
2993/// aggregated values, in the specified direction.
2994static void emitReductionListCopy(
2995 CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
2996 ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
2997 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
2998
2999 CodeGenModule &CGM = CGF.CGM;
3000 ASTContext &C = CGM.getContext();
3001 CGBuilderTy &Bld = CGF.Builder;
3002
3003 llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
3004 llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
3005 llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
3006
3007 // Iterate, element by element, through the source Reduce list and
3008 // make a copy.
3009 unsigned Idx = 0;
3010 unsigned Size = Privates.size();
3011 for (const Expr *Private : Privates) {
3012 Address SrcElementAddr = Address::invalid();
3013 Address DestElementAddr = Address::invalid();
3014 Address DestElementPtrAddr = Address::invalid();
3015 // Should we shuffle in an element from a remote lane?
3016 bool ShuffleInElement = false;
3017 // Set to true to update the pointer in the dest Reduce list to a
3018 // newly created element.
3019 bool UpdateDestListPtr = false;
3020 // Increment the src or dest pointer to the scratchpad, for each
3021 // new element.
3022 bool IncrScratchpadSrc = false;
3023 bool IncrScratchpadDest = false;
3024
3025 switch (Action) {
3026 case RemoteLaneToThread: {
3027 // Step 1.1: Get the address for the src element in the Reduce list.
3028 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3029 SrcElementAddr = CGF.EmitLoadOfPointer(
3030 SrcElementPtrAddr,
3031 C.getPointerType(Private->getType())->castAs<PointerType>());
3032
3033 // Step 1.2: Create a temporary to store the element in the destination
3034 // Reduce list.
3035 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3036 DestElementAddr =
3037 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
3038 ShuffleInElement = true;
3039 UpdateDestListPtr = true;
3040 break;
3041 }
3042 case ThreadCopy: {
3043 // Step 1.1: Get the address for the src element in the Reduce list.
3044 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3045 SrcElementAddr = CGF.EmitLoadOfPointer(
3046 SrcElementPtrAddr,
3047 C.getPointerType(Private->getType())->castAs<PointerType>());
3048
3049 // Step 1.2: Get the address for dest element. The destination
3050 // element has already been created on the thread's stack.
3051 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3052 DestElementAddr = CGF.EmitLoadOfPointer(
3053 DestElementPtrAddr,
3054 C.getPointerType(Private->getType())->castAs<PointerType>());
3055 break;
3056 }
3057 case ThreadToScratchpad: {
3058 // Step 1.1: Get the address for the src element in the Reduce list.
3059 Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3060 SrcElementAddr = CGF.EmitLoadOfPointer(
3061 SrcElementPtrAddr,
3062 C.getPointerType(Private->getType())->castAs<PointerType>());
3063
3064 // Step 1.2: Get the address for dest element:
3065 // address = base + index * ElementSizeInChars.
3066 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
3067 llvm::Value *CurrentOffset =
3068 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
3069 llvm::Value *ScratchPadElemAbsolutePtrVal =
3070 Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
3071 ScratchPadElemAbsolutePtrVal =
3072 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
3073 DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
3074 C.getTypeAlignInChars(Private->getType()));
3075 IncrScratchpadDest = true;
3076 break;
3077 }
3078 case ScratchpadToThread: {
3079 // Step 1.1: Get the address for the src element in the scratchpad.
3080 // address = base + index * ElementSizeInChars.
3081 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
3082 llvm::Value *CurrentOffset =
3083 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
3084 llvm::Value *ScratchPadElemAbsolutePtrVal =
3085 Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
3086 ScratchPadElemAbsolutePtrVal =
3087 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
3088 SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
3089 C.getTypeAlignInChars(Private->getType()));
3090 IncrScratchpadSrc = true;
3091
3092 // Step 1.2: Create a temporary to store the element in the destination
3093 // Reduce list.
3094 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3095 DestElementAddr =
3096 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
3097 UpdateDestListPtr = true;
3098 break;
3099 }
3100 }
3101
3102 // Regardless of the src and dest of the copy, we emit the load of the src
3103 // element, as this is required in all directions.
3104 SrcElementAddr = Bld.CreateElementBitCast(
3105 SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
3106 DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
3107 SrcElementAddr.getElementType());
3108
3109 // Now that all active lanes have read the element in the
3110 // Reduce list, shuffle over the value from the remote lane.
3111 if (ShuffleInElement) {
3112 shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
3113 RemoteLaneOffset, Private->getExprLoc());
3114 } else {
3115 switch (CGF.getEvaluationKind(Private->getType())) {
3116 case TEK_Scalar: {
3117 llvm::Value *Elem = CGF.EmitLoadOfScalar(
3118 SrcElementAddr, /*Volatile=*/false, Private->getType(),
3119 Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
3120 TBAAAccessInfo());
3121 // Store the source element value to the dest element address.
3122 CGF.EmitStoreOfScalar(
3123 Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
3124 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
3125 break;
3126 }
3127 case TEK_Complex: {
3128 CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
3129 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
3130 Private->getExprLoc());
3131 CGF.EmitStoreOfComplex(
3132 Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
3133 /*isInit=*/false);
3134 break;
3135 }
3136 case TEK_Aggregate:
3137 CGF.EmitAggregateCopy(
3138 CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
3139 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
3140 Private->getType(), AggValueSlot::DoesNotOverlap);
3141 break;
3142 }
3143 }
3144
3145 // Step 3.1: Modify reference in dest Reduce list as needed.
3146 // Modifying the reference in Reduce list to point to the newly
3147 // created element. The element is live in the current function
3148 // scope and that of functions it invokes (i.e., reduce_function).
3149 // RemoteReduceData[i] = (void*)&RemoteElem
3150 if (UpdateDestListPtr) {
3151 CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
3152 DestElementAddr.getPointer(), CGF.VoidPtrTy),
3153 DestElementPtrAddr, /*Volatile=*/false,
3154 C.VoidPtrTy);
3155 }
3156
3157 // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
3158 // address of the next element in scratchpad memory, unless we're currently
3159 // processing the last one. Memory alignment is also taken care of here.
3160 if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
3161 llvm::Value *ScratchpadBasePtr =
3162 IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
3163 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
3164 ScratchpadBasePtr = Bld.CreateNUWAdd(
3165 ScratchpadBasePtr,
3166 Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
3167
3168 // Take care of global memory alignment for performance
3169 ScratchpadBasePtr = Bld.CreateNUWSub(
3170 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
3171 ScratchpadBasePtr = Bld.CreateUDiv(
3172 ScratchpadBasePtr,
3173 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
3174 ScratchpadBasePtr = Bld.CreateNUWAdd(
3175 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
3176 ScratchpadBasePtr = Bld.CreateNUWMul(
3177 ScratchpadBasePtr,
3178 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
3179
3180 if (IncrScratchpadDest)
3181 DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
3182 else /* IncrScratchpadSrc = true */
3183 SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
3184 }
3185
3186 ++Idx;
3187 }
3188}
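The global-memory-alignment block above advances the scratchpad base by ScratchpadWidth * ElementSizeInChars and then rounds it up to the next multiple of GlobalMemoryAlignment via the subtract-1/divide/add-1/multiply sequence. A small arithmetic sketch of that computation (256 is only an assumed alignment value for the example):

#include <cstdint>

constexpr uint64_t GlobalMemoryAlignment = 256; // assumption for the example

static uint64_t next_scratchpad_base(uint64_t Base, uint64_t Width,
                                     uint64_t ElementSizeInChars) {
  Base += Width * ElementSizeInChars;
  // ((Base - 1) / Align + 1) * Align: round up to a multiple of Align,
  // exactly what the emitted NUWSub/UDiv/NUWAdd/NUWMul sequence computes.
  return ((Base - 1) / GlobalMemoryAlignment + 1) * GlobalMemoryAlignment;
}

int main() {
  // 1000 + 32 * 8 = 1256, rounded up to 1280 (the next multiple of 256).
  return next_scratchpad_base(1000, 32, 8) == 1280 ? 0 : 1;
}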
3189
3190/// This function emits a helper that gathers Reduce lists from the first
3191/// lane of every active warp to lanes in the first warp.
3192///
3193/// void inter_warp_copy_func(void* reduce_data, num_warps)
3194/// shared smem[warp_size];
3195/// For all data entries D in reduce_data:
3196/// sync
3197/// If (I am the first lane in each warp)
3198/// Copy my local D to smem[warp_id]
3199/// sync
3200/// if (I am the first warp)
3201/// Copy smem[thread_id] to my local D
3202static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
3203 ArrayRef<const Expr *> Privates,
3204 QualType ReductionArrayTy,
3205 SourceLocation Loc) {
3206 ASTContext &C = CGM.getContext();
3207 llvm::Module &M = CGM.getModule();
3208
3209 // ReduceList: thread local Reduce list.
3210 // At the stage of the computation when this function is called, partially
3211 // aggregated values reside in the first lane of every active warp.
3212 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3213 C.VoidPtrTy, ImplicitParamDecl::Other);
3214 // NumWarps: number of warps active in the parallel region. This could
3215 // be smaller than 32 (max warps in a CTA) for partial block reduction.
3216 ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3217 C.getIntTypeForBitwidth(32, /* Signed */ true),
3218 ImplicitParamDecl::Other);
3219 FunctionArgList Args;
3220 Args.push_back(&ReduceListArg);
3221 Args.push_back(&NumWarpsArg);
3222
3223 const CGFunctionInfo &CGFI =
3224 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3225 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
3226 llvm::GlobalValue::InternalLinkage,
3227 "_omp_reduction_inter_warp_copy_func", &M);
3228 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3229 Fn->setDoesNotRecurse();
3230 CodeGenFunction CGF(CGM);
3231 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3232
3233 CGBuilderTy &Bld = CGF.Builder;
3234
3235 // This array is used as a medium to transfer, one reduce element at a time,
3236 // the data from the first lane of every warp to lanes in the first warp
3237 // in order to perform the final step of a reduction in a parallel region
3238 // (reduction across warps). The array is placed in NVPTX __shared__ memory
3239 // for reduced latency, as well as to have a distinct copy for concurrently
3240 // executing target regions. The array is declared with common linkage so
3241 // as to be shared across compilation units.
3242 StringRef TransferMediumName =
3243 "__openmp_nvptx_data_transfer_temporary_storage";
3244 llvm::GlobalVariable *TransferMedium =
3245 M.getGlobalVariable(TransferMediumName);
3246 unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
3247 if (!TransferMedium) {
3248 auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
3249 unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
3250 TransferMedium = new llvm::GlobalVariable(
3251 M, Ty, /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
3252 llvm::Constant::getNullValue(Ty), TransferMediumName,
3253 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
3254 SharedAddressSpace);
3255 CGM.addCompilerUsedGlobal(TransferMedium);
3256 }
3257
3258 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
3259 // Get the CUDA thread id of the current OpenMP thread on the GPU.
3260 llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
3261 // nvptx_lane_id = nvptx_id % warpsize
3262 llvm::Value *LaneID = getNVPTXLaneID(CGF);
3263 // nvptx_warp_id = nvptx_id / warpsize
3264 llvm::Value *WarpID = getNVPTXWarpID(CGF);
3265
3266 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3267 Address LocalReduceList(
3268 Bld.CreatePointerBitCastOrAddrSpaceCast(
3269 CGF.EmitLoadOfScalar(
3270 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
3271 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
3272 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3273 CGF.getPointerAlign());
3274
3275 unsigned Idx = 0;
3276 for (const Expr *Private : Privates) {
3277 //
3278 // Warp master copies reduce element to transfer medium in __shared__
3279 // memory.
3280 //
3281 unsigned RealTySize =
3282 C.getTypeSizeInChars(Private->getType())
3283 .alignTo(C.getTypeAlignInChars(Private->getType()))
3284 .getQuantity();
3285 for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /=2) {
3286 unsigned NumIters = RealTySize / TySize;
3287 if (NumIters == 0)
3288 continue;
3289 QualType CType = C.getIntTypeForBitwidth(
3290 C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
3291 llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
3292 CharUnits Align = CharUnits::fromQuantity(TySize);
3293 llvm::Value *Cnt = nullptr;
3294 Address CntAddr = Address::invalid();
3295 llvm::BasicBlock *PrecondBB = nullptr;
3296 llvm::BasicBlock *ExitBB = nullptr;
3297 if (NumIters > 1) {
3298 CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
3299 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
3300 /*Volatile=*/false, C.IntTy);
3301 PrecondBB = CGF.createBasicBlock("precond");
3302 ExitBB = CGF.createBasicBlock("exit");
3303 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
3304 // There is no need to emit line number for unconditional branch.
3305 (void)ApplyDebugLocation::CreateEmpty(CGF);
3306 CGF.EmitBlock(PrecondBB);
3307 Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
3308 llvm::Value *Cmp =
3309 Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
3310 Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
3311 CGF.EmitBlock(BodyBB);
3312 }
3313 // kmpc_barrier.
3314 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
3315 /*EmitChecks=*/false,
3316 /*ForceSimpleCall=*/true);
3317 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3318 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3319 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3320
3321 // if (lane_id == 0)
3322 llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
3323 Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
3324 CGF.EmitBlock(ThenBB);
3325
3326 // Reduce element = LocalReduceList[i]
3327 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3328 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3329 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3330 // elemptr = ((CopyType*)(elemptrptr)) + I
3331 Address ElemPtr = Address(ElemPtrPtr, Align);
3332 ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
3333 if (NumIters > 1) {
3334 ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
3335 ElemPtr.getAlignment());
3336 }
3337
3338 // Get pointer to location in transfer medium.
3339 // MediumPtr = &medium[warp_id]
3340 llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
3341 TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
3342 Address MediumPtr(MediumPtrVal, Align);
3343 // Casting to actual data type.
3344 // MediumPtr = (CopyType*)MediumPtrAddr;
3345 MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
3346
3347 // elem = *elemptr
3348 // *MediumPtr = elem
3349 llvm::Value *Elem = CGF.EmitLoadOfScalar(
3350 ElemPtr, /*Volatile=*/false, CType, Loc,
3351 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
3352 // Store the source element value to the dest element address.
3353 CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType,
3354 LValueBaseInfo(AlignmentSource::Type),
3355 TBAAAccessInfo());
3356
3357 Bld.CreateBr(MergeBB);
3358
3359 CGF.EmitBlock(ElseBB);
3360 Bld.CreateBr(MergeBB);
3361
3362 CGF.EmitBlock(MergeBB);
3363
3364 // kmpc_barrier.
3365 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
3366 /*EmitChecks=*/false,
3367 /*ForceSimpleCall=*/true);
3368
3369 //
3370 // Warp 0 copies reduce element from transfer medium.
3371 //
3372 llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
3373 llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
3374 llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
3375
3376 Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
3377 llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
3378 AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
3379
3380 // Up to 32 threads in warp 0 are active.
3381 llvm::Value *IsActiveThread =
3382 Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
3383 Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
3384
3385 CGF.EmitBlock(W0ThenBB);
3386
3387 // SrcMediumPtr = &medium[tid]
3388 llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
3389 TransferMedium,
3390 {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
3391 Address SrcMediumPtr(SrcMediumPtrVal, Align);
3392 // SrcMediumVal = *SrcMediumPtr;
3393 SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
3394
3395 // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
3396 Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3397 llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
3398 TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
3399 Address TargetElemPtr = Address(TargetElemPtrVal, Align);
3400 TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
3401 if (NumIters > 1) {
3402 TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
3403 TargetElemPtr.getAlignment());
3404 }
3405
3406 // *TargetElemPtr = SrcMediumVal;
3407 llvm::Value *SrcMediumValue =
3408 CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
3409 CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
3410 CType);
3411 Bld.CreateBr(W0MergeBB);
3412
3413 CGF.EmitBlock(W0ElseBB);
3414 Bld.CreateBr(W0MergeBB);
3415
3416 CGF.EmitBlock(W0MergeBB);
3417
3418 if (NumIters > 1) {
3419 Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
3420 CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
3421 CGF.EmitBranch(PrecondBB);
3422 (void)ApplyDebugLocation::CreateEmpty(CGF);
3423 CGF.EmitBlock(ExitBB);
3424 }
3425 RealTySize %= TySize;
3426 }
3427 ++Idx;
3428 }
3429
3430 CGF.FinishFunction();
3431 return Fn;
3432}
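The helper's two phases can be modeled sequentially: lane 0 of every warp publishes its partial value into a shared-memory slot indexed by warp id, and after the barrier the first NumWarps lanes of warp 0 each read one slot back. A sketch where the barriers are implicit because the phases run one after another (warp count and values are made up for the demonstration):

#include <cstdio>
#include <vector>

int main() {
  const unsigned WarpSize = 32, NumWarps = 4;
  std::vector<int> PartialPerWarp = {10, 20, 30, 40}; // each warp's lane-0 value
  std::vector<int> Smem(WarpSize);                    // the transfer medium

  // Phase 1: each warp master copies its element to smem[warp_id].
  for (unsigned WarpID = 0; WarpID < NumWarps; ++WarpID)
    Smem[WarpID] = PartialPerWarp[WarpID];
  // ... kmpc_barrier ...
  // Phase 2: threads 0..NumWarps-1 of warp 0 read smem[thread_id].
  std::vector<int> Warp0Locals(NumWarps);
  for (unsigned ThreadID = 0; ThreadID < NumWarps; ++ThreadID)
    Warp0Locals[ThreadID] = Smem[ThreadID];

  for (int V : Warp0Locals)
    std::printf("%d ", V); // 10 20 30 40
  std::printf("\n");
  return 0;
}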
3433
3434/// Emit a helper that reduces data across two OpenMP threads (lanes)
3435/// in the same warp. It uses shuffle instructions to copy over data from
3436/// a remote lane's stack. The reduction algorithm performed is specified
3437/// by the fourth parameter.
3438///
3439/// Algorithm Versions.
3440/// Full Warp Reduce (argument value 0):
3441/// This algorithm assumes that all 32 lanes are active and gathers
3442/// data from these 32 lanes, producing a single resultant value.
3443/// Contiguous Partial Warp Reduce (argument value 1):
3444/// This algorithm assumes that only a *contiguous* subset of lanes
3445/// are active. This happens for the last warp in a parallel region
3446/// when the user specified num_threads is not an integer multiple of
3447/// 32. This contiguous subset always starts with the zeroth lane.
3448/// Partial Warp Reduce (argument value 2):
3449/// This algorithm gathers data from any number of lanes at any position.
3450/// All reduced values are stored in the lowest possible lane. The set
3451 // of problems every algorithm addresses is a superset of those
3452/// addressable by algorithms with a lower version number. Overhead
3453/// increases as algorithm version increases.
3454///
3455/// Terminology
3456/// Reduce element:
3457/// Reduce element refers to the individual data field with primitive
3458/// data types to be combined and reduced across threads.
3459/// Reduce list:
3460/// Reduce list refers to a collection of local, thread-private
3461/// reduce elements.
3462/// Remote Reduce list:
3463/// Remote Reduce list refers to a collection of remote (relative to
3464/// the current thread) reduce elements.
3465///
3466/// We distinguish between three states of threads that are important to
3467/// the implementation of this function.
3468/// Alive threads:
3469/// Threads in a warp executing the SIMT instruction, as distinguished from
3470/// threads that are inactive due to divergent control flow.
3471/// Active threads:
3472/// The minimal set of threads that has to be alive upon entry to this
3473/// function. The computation is correct iff active threads are alive.
3474/// Some threads are alive but they are not active because they do not
3475/// contribute to the computation in any useful manner. Turning them off
3476/// may introduce control flow overheads without any tangible benefits.
3477/// Effective threads:
3478/// In order to comply with the argument requirements of the shuffle
3479/// function, we must keep all lanes holding data alive. But at most
3480/// half of them perform value aggregation; we refer to this half of
3481/// threads as effective. The other half is simply handing off their
3482/// data.
3483///
3484/// Procedure
3485/// Value shuffle:
3486/// In this step active threads transfer data from higher lane positions
3487/// in the warp to lower lane positions, creating Remote Reduce list.
3488/// Value aggregation:
3489/// In this step, effective threads combine their thread local Reduce list
3490/// with Remote Reduce list and store the result in the thread local
3491/// Reduce list.
3492/// Value copy:
3493/// In this step, we deal with the assumption made by algorithm 2
3494/// (i.e. contiguity assumption). When we have an odd number of lanes
3495/// active, say 2k+1, only k threads will be effective and therefore k
3496/// new values will be produced. However, the Reduce list owned by the
3497/// (2k+1)th thread is ignored in the value aggregation. Therefore
3498/// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
3499/// that the contiguity assumption still holds.
3500static llvm::Function *emitShuffleAndReduceFunction(
3501 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3502 QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
3503 ASTContext &C = CGM.getContext();
3504
3505 // Thread local Reduce list used to host the values of data to be reduced.
3506 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3507 C.VoidPtrTy, ImplicitParamDecl::Other);
3508 // Current lane id; could be logical.
3509 ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
3510 ImplicitParamDecl::Other);
3511 // Offset of the remote source lane relative to the current lane.
3512 ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3513 C.ShortTy, ImplicitParamDecl::Other);
3514 // Algorithm version. This is expected to be known at compile time.
3515 ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3516 C.ShortTy, ImplicitParamDecl::Other);
3517 FunctionArgList Args;
3518 Args.push_back(&ReduceListArg);
3519 Args.push_back(&LaneIDArg);
3520 Args.push_back(&RemoteLaneOffsetArg);
3521 Args.push_back(&AlgoVerArg);
3522
3523 const CGFunctionInfo &CGFI =
3524 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3525 auto *Fn = llvm::Function::Create(
3526 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3527 "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
3528 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3529 Fn->setDoesNotRecurse();
3530 if (CGM.getLangOpts().Optimize) {
3531 Fn->removeFnAttr(llvm::Attribute::NoInline);
3532 Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
3533 Fn->addFnAttr(llvm::Attribute::AlwaysInline);
3534 }
3535
3536 CodeGenFunction CGF(CGM);
3537 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3538
3539 CGBuilderTy &Bld = CGF.Builder;
3540
3541 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3542 Address LocalReduceList(
3543 Bld.CreatePointerBitCastOrAddrSpaceCast(
3544 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3545 C.VoidPtrTy, SourceLocation()),
3546 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3547 CGF.getPointerAlign());
3548
3549 Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
3550 llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
3551 AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3552
3553 Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
3554 llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
3555 AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3556
3557 Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
3558 llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
3559 AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3560
3561 // Create a local thread-private variable to host the Reduce list
3562 // from a remote lane.
3563 Address RemoteReduceList =
3564 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
3565
3566 // This loop iterates through the list of reduce elements and copies,
3567 // element by element, from a remote lane in the warp to RemoteReduceList,
3568 // hosted on the thread's stack.
3569 emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
3570 LocalReduceList, RemoteReduceList,
3571 {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
3572 /*ScratchpadIndex=*/nullptr,
3573 /*ScratchpadWidth=*/nullptr});
3574
3575 // The actions to be performed on the Remote Reduce list depend on the
3576 // algorithm version.
3577 //
3578 // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
3579 // LaneId % 2 == 0 && Offset > 0):
3580 // do the reduction value aggregation
3581 //
3582 // The thread local variable Reduce list is mutated in place to host the
3583 // reduced data, which is the aggregated value produced from local and
3584 // remote lanes.
3585 //
3586 // Note that AlgoVer is expected to be a constant integer known at compile
3587 // time.
3588 // When AlgoVer==0, the first conjunction evaluates to true, making
3589 // the entire predicate true during compile time.
3590 // When AlgoVer==1, the second conjunction has only the second part to be
3591 // evaluated during runtime. Other conjunctions evaluate to false
3592 // during compile time.
3593 // When AlgoVer==2, the third conjunction has only the second part to be
3594 // evaluated during runtime. Other conjunctions evaluate to false
3595 // during compile time.
3596 llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
3597
3598 llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3599 llvm::Value *CondAlgo1 = Bld.CreateAnd(
3600 Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
3601
3602 llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
3603 llvm::Value *CondAlgo2 = Bld.CreateAnd(
3604 Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
3605 CondAlgo2 = Bld.CreateAnd(
3606 CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
3607
3608 llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
3609 CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
3610
3611 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3612 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3613 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3614 Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
3615
3616 CGF.EmitBlock(ThenBB);
3617 // reduce_function(LocalReduceList, RemoteReduceList)
3618 llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3619 LocalReduceList.getPointer(), CGF.VoidPtrTy);
3620 llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3621 RemoteReduceList.getPointer(), CGF.VoidPtrTy);
3622 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3623 CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
3624 Bld.CreateBr(MergeBB);
3625
3626 CGF.EmitBlock(ElseBB);
3627 Bld.CreateBr(MergeBB);
3628
3629 CGF.EmitBlock(MergeBB);
3630
3631 // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
3632 // Reduce list.
3633 Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3634 llvm::Value *CondCopy = Bld.CreateAnd(
3635 Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
3636
3637 llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
3638 llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
3639 llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
3640 Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
3641
3642 CGF.EmitBlock(CpyThenBB);
3643 emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
3644 RemoteReduceList, LocalReduceList);
3645 Bld.CreateBr(CpyMergeBB);
3646
3647 CGF.EmitBlock(CpyElseBB);
3648 Bld.CreateBr(CpyMergeBB);
3649
3650 CGF.EmitBlock(CpyMergeBB);
3651
3652 CGF.FinishFunction();
3653 return Fn;
3654}
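The reduce and copy predicates built above are pure functions of (AlgoVer, LaneId, Offset). A standalone sketch that spells them out and checks a few cases; AlgoVer is a runtime parameter here only so all three algorithm versions fit in one program (in the emitted code it is a compile-time constant):

#include <cassert>
#include <cstdint>

static bool should_reduce(uint16_t AlgoVer, uint16_t LaneId, uint16_t Offset) {
  return AlgoVer == 0 ||                                  // full warp
         (AlgoVer == 1 && LaneId < Offset) ||             // contiguous partial
         (AlgoVer == 2 && LaneId % 2 == 0 && Offset > 0); // partial
}

static bool should_copy(uint16_t AlgoVer, uint16_t LaneId, uint16_t Offset) {
  // Algorithm 1 copies the remote list back so the contiguity assumption
  // holds for the next round.
  return AlgoVer == 1 && LaneId >= Offset;
}

int main() {
  assert(should_reduce(0, 31, 0));                           // always reduces
  assert(should_reduce(1, 3, 8));                            // lane below offset
  assert(!should_reduce(1, 9, 8) && should_copy(1, 9, 8));   // hands data off
  assert(should_reduce(2, 4, 1) && !should_reduce(2, 5, 1)); // even lanes only
  return 0;
}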
3655
3656/// This function emits a helper that copies all the reduction variables from
3657/// the team into the provided global buffer for the reduction variables.
3658///
3659/// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
3660/// For all data entries D in reduce_data:
3661/// Copy local D to buffer.D[Idx]
3662static llvm::Value *emitListToGlobalCopyFunction(
3663 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3664 QualType ReductionArrayTy, SourceLocation Loc,
3665 const RecordDecl *TeamReductionRec,
3666 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3667 &VarFieldMap) {
3668 ASTContext &C = CGM.getContext();
3669
3670 // Buffer: global reduction buffer.
3671 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3672 C.VoidPtrTy, ImplicitParamDecl::Other);
3673 // Idx: index of the buffer.
3674 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3675 ImplicitParamDecl::Other);
3676 // ReduceList: thread local Reduce list.
3677 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3678 C.VoidPtrTy, ImplicitParamDecl::Other);
3679 FunctionArgList Args;
3680 Args.push_back(&BufferArg);
3681 Args.push_back(&IdxArg);
3682 Args.push_back(&ReduceListArg);
3683
3684 const CGFunctionInfo &CGFI =
3685 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3686 auto *Fn = llvm::Function::Create(
3687 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3688 "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
3689 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3690 Fn->setDoesNotRecurse();
3691 CodeGenFunction CGF(CGM);
3692 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3693
3694 CGBuilderTy &Bld = CGF.Builder;
3695
3696 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3697 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3698 Address LocalReduceList(
3699 Bld.CreatePointerBitCastOrAddrSpaceCast(
3700 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3701 C.VoidPtrTy, Loc),
3702 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3703 CGF.getPointerAlign());
3704 QualType StaticTy = C.getRecordType(TeamReductionRec);
3705 llvm::Type *LLVMReductionsBufferTy =
3706 CGM.getTypes().ConvertTypeForMem(StaticTy);
3707 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3708 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3709 LLVMReductionsBufferTy->getPointerTo());
3710 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3711 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3712 /*Volatile=*/false, C.IntTy,
3713 Loc)};
3714 unsigned Idx = 0;
3715 for (const Expr *Private : Privates) {
3716 // Reduce element = LocalReduceList[i]
3717 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3718 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3719 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3720 // elemptr = ((CopyType*)(elemptrptr)) + I
3721 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3722 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3723 Address ElemPtr =
3724 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3725 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3726 // Global = Buffer.VD[Idx];
3727 const FieldDecl *FD = VarFieldMap.lookup(VD);
3728 LValue GlobLVal = CGF.EmitLValueForField(
3729 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3730 llvm::Value *BufferPtr =
3731 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3732 GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3733 switch (CGF.getEvaluationKind(Private->getType())) {
3734 case TEK_Scalar: {
3735 llvm::Value *V = CGF.EmitLoadOfScalar(
3736 ElemPtr, /*Volatile=*/false, Private->getType(), Loc,
3737 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
3738 CGF.EmitStoreOfScalar(V, GlobLVal);
3739 break;
3740 }
3741 case TEK_Complex: {
3742 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
3743 CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
3744 CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
3745 break;
3746 }
3747 case TEK_Aggregate:
3748 CGF.EmitAggregateCopy(GlobLVal,
3749 CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3750 Private->getType(), AggValueSlot::DoesNotOverlap);
3751 break;
3752 }
3753 ++Idx;
3754 }
3755
3756 CGF.FinishFunction();
3757 return Fn;
3758}
3759
3760/// This function emits a helper that reduces all the reduction variables from
3761/// the team into the provided global buffer for the reduction variables.
3762///
3763/// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
3764/// void *GlobPtrs[];
3765/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
3766/// ...
3767/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
3768/// reduce_function(GlobPtrs, reduce_data);
3769static llvm::Value *emitListToGlobalReduceFunction(
3770 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3771 QualType ReductionArrayTy, SourceLocation Loc,
3772 const RecordDecl *TeamReductionRec,
3773 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3774 &VarFieldMap,
3775 llvm::Function *ReduceFn) {
3776 ASTContext &C = CGM.getContext();
3777
3778 // Buffer: global reduction buffer.
3779 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3780 C.VoidPtrTy, ImplicitParamDecl::Other);
3781 // Idx: index of the buffer.
3782 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3783 ImplicitParamDecl::Other);
3784 // ReduceList: thread local Reduce list.
3785 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3786 C.VoidPtrTy, ImplicitParamDecl::Other);
3787 FunctionArgList Args;
3788 Args.push_back(&BufferArg);
3789 Args.push_back(&IdxArg);
3790 Args.push_back(&ReduceListArg);
3791
3792 const CGFunctionInfo &CGFI =
3793 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3794 auto *Fn = llvm::Function::Create(
3795 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3796 "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
3797 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3798 Fn->setDoesNotRecurse();
3799 CodeGenFunction CGF(CGM);
3800 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3801
3802 CGBuilderTy &Bld = CGF.Builder;
3803
3804 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3805 QualType StaticTy = C.getRecordType(TeamReductionRec);
3806 llvm::Type *LLVMReductionsBufferTy =
3807 CGM.getTypes().ConvertTypeForMem(StaticTy);
3808 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3809 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3810 LLVMReductionsBufferTy->getPointerTo());
3811
3812 // 1. Build a list of reduction variables.
3813 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3814 Address ReductionList =
3815 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3816 auto IPriv = Privates.begin();
3817 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3818 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3819 /*Volatile=*/false, C.IntTy,
3820 Loc)};
3821 unsigned Idx = 0;
3822 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3823 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3824 // Global = Buffer.VD[Idx];
3825 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3826 const FieldDecl *FD = VarFieldMap.lookup(VD);
3827 LValue GlobLVal = CGF.EmitLValueForField(
3828 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3829 llvm::Value *BufferPtr =
3830 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3831 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3832 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3833 if ((*IPriv)->getType()->isVariablyModifiedType()) {
3834 // Store array size.
3835 ++Idx;
3836 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3837 llvm::Value *Size = CGF.Builder.CreateIntCast(
3838 CGF.getVLASize(
3839 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3840 .NumElts,
3841 CGF.SizeTy, /*isSigned=*/false);
3842 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3843 Elem);
3844 }
3845 }
3846
3847 // Call reduce_function(GlobalReduceList, ReduceList)
3848 llvm::Value *GlobalReduceList =
3849 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
3850 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3851 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
3852 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
3853 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3854 CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
3855 CGF.FinishFunction();
3856 return Fn;
3857}
3858
3859/// This function emits a helper that copies all the reduction variables from
3860/// the provided global buffer back into the team's local reduce list.
3861///
3862/// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
3863/// For all data entries D in reduce_data:
3864/// Copy buffer.D[Idx] to local D;
3865static llvm::Value *emitGlobalToListCopyFunction(
3866 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3867 QualType ReductionArrayTy, SourceLocation Loc,
3868 const RecordDecl *TeamReductionRec,
3869 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3870 &VarFieldMap) {
3871 ASTContext &C = CGM.getContext();
3872
3873 // Buffer: global reduction buffer.
3874 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3875 C.VoidPtrTy, ImplicitParamDecl::Other);
3876 // Idx: index of the buffer.
3877 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3878 ImplicitParamDecl::Other);
3879 // ReduceList: thread local Reduce list.
3880 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3881 C.VoidPtrTy, ImplicitParamDecl::Other);
3882 FunctionArgList Args;
3883 Args.push_back(&BufferArg);
3884 Args.push_back(&IdxArg);
3885 Args.push_back(&ReduceListArg);
3886
3887 const CGFunctionInfo &CGFI =
3888 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3889 auto *Fn = llvm::Function::Create(
3890 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3891 "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
3892 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3893 Fn->setDoesNotRecurse();
3894 CodeGenFunction CGF(CGM);
3895 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3896
3897 CGBuilderTy &Bld = CGF.Builder;
3898
3899 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3900 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3901 Address LocalReduceList(
3902 Bld.CreatePointerBitCastOrAddrSpaceCast(
3903 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3904 C.VoidPtrTy, Loc),
3905 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3906 CGF.getPointerAlign());
3907 QualType StaticTy = C.getRecordType(TeamReductionRec);
3908 llvm::Type *LLVMReductionsBufferTy =
3909 CGM.getTypes().ConvertTypeForMem(StaticTy);
3910 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3911 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3912 LLVMReductionsBufferTy->getPointerTo());
3913
3914 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3915 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3916 /*Volatile=*/false, C.IntTy,
3917 Loc)};
3918 unsigned Idx = 0;
3919 for (const Expr *Private : Privates) {
3920 // Reduce element = LocalReduceList[i]
3921 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3922 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3923 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3924 // elemptr = ((CopyType*)(elemptrptr)) + I
3925 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3926 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3927 Address ElemPtr =
3928 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3929 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3930 // Global = Buffer.VD[Idx];
3931 const FieldDecl *FD = VarFieldMap.lookup(VD);
3932 LValue GlobLVal = CGF.EmitLValueForField(
3933 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3934 llvm::Value *BufferPtr =
3935 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3936 GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3937 switch (CGF.getEvaluationKind(Private->getType())) {
3938 case TEK_Scalar: {
3939 llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
3940 CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(),
3941 LValueBaseInfo(AlignmentSource::Type),
3942 TBAAAccessInfo());
3943 break;
3944 }
3945 case TEK_Complex: {
3946 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
3947 CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3948 /*isInit=*/false);
3949 break;
3950 }
3951 case TEK_Aggregate:
3952 CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3953 GlobLVal, Private->getType(),
3954 AggValueSlot::DoesNotOverlap);
3955 break;
3956 }
3957 ++Idx;
3958 }
3959
3960 CGF.FinishFunction();
3961 return Fn;
3962}
3963
3964/// This function emits a helper that reduces the reduction variables in the
3965/// provided global buffer into the team's local reduce list.
3966///
3967/// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
3968/// void *GlobPtrs[];
3969/// GlobPtrs[0] = (void*)&buffer.D0[Idx];
3970/// ...
3971/// GlobPtrs[N] = (void*)&buffer.DN[Idx];
3972/// reduce_function(reduce_data, GlobPtrs);
3973static llvm::Value *emitGlobalToListReduceFunction(
3974 CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3975 QualType ReductionArrayTy, SourceLocation Loc,
3976 const RecordDecl *TeamReductionRec,
3977 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3978 &VarFieldMap,
3979 llvm::Function *ReduceFn) {
3980 ASTContext &C = CGM.getContext();
3981
3982 // Buffer: global reduction buffer.
3983 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3984 C.VoidPtrTy, ImplicitParamDecl::Other);
3985 // Idx: index of the buffer.
3986 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3987 ImplicitParamDecl::Other);
3988 // ReduceList: thread local Reduce list.
3989 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3990 C.VoidPtrTy, ImplicitParamDecl::Other);
3991 FunctionArgList Args;
3992 Args.push_back(&BufferArg);
3993 Args.push_back(&IdxArg);
3994 Args.push_back(&ReduceListArg);
3995
3996 const CGFunctionInfo &CGFI =
3997 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3998 auto *Fn = llvm::Function::Create(
3999 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
4000 "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
4001 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
4002 Fn->setDoesNotRecurse();
4003 CodeGenFunction CGF(CGM);
4004 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
4005
4006 CGBuilderTy &Bld = CGF.Builder;
4007
4008 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
4009 QualType StaticTy = C.getRecordType(TeamReductionRec);
4010 llvm::Type *LLVMReductionsBufferTy =
4011 CGM.getTypes().ConvertTypeForMem(StaticTy);
4012 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
4013 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
4014 LLVMReductionsBufferTy->getPointerTo());
4015
4016 // 1. Build a list of reduction variables.
4017 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
4018 Address ReductionList =
4019 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
4020 auto IPriv = Privates.begin();
4021 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
4022 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
4023 /*Volatile=*/false, C.IntTy,
4024 Loc)};
4025 unsigned Idx = 0;
4026 for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
4027 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4028 // Global = Buffer.VD[Idx];
4029 const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
4030 const FieldDecl *FD = VarFieldMap.lookup(VD);
4031 LValue GlobLVal = CGF.EmitLValueForField(
4032 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
4033 llvm::Value *BufferPtr =
4034 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
4035 llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
4036 CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
4037 if ((*IPriv)->getType()->isVariablyModifiedType()) {
4038 // Store array size.
4039 ++Idx;
4040 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4041 llvm::Value *Size = CGF.Builder.CreateIntCast(
4042 CGF.getVLASize(
4043 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
4044 .NumElts,
4045 CGF.SizeTy, /*isSigned=*/false);
4046 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
4047 Elem);
4048 }
4049 }
4050
4051 // Call reduce_function(ReduceList, GlobalReduceList)
4052 llvm::Value *GlobalReduceList =
4053 CGF.EmitCastToVoidPtr(ReductionList.getPointer());
4054 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
4055 llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
4056 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
4057 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
4058 CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
4059 CGF.FinishFunction();
4060 return Fn;
4061}
4062
4063///
4064/// Design of OpenMP reductions on the GPU
4065///
4066/// Consider a typical OpenMP program with one or more reduction
4067/// clauses:
4068///
4069/// float foo;
4070/// double bar;
4071/// #pragma omp target teams distribute parallel for \
4072/// reduction(+:foo) reduction(*:bar)
4073/// for (int i = 0; i < N; i++) {
4074/// foo += A[i]; bar *= B[i];
4075/// }
4076///
4077/// where 'foo' and 'bar' are reduced across all OpenMP threads in
4078/// all teams. In our OpenMP implementation on the NVPTX device an
4079/// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
4080/// within a team are mapped to CUDA threads within a threadblock.
4081/// Our goal is to efficiently aggregate values across all OpenMP
4082/// threads such that:
4083///
4084/// - the compiler and runtime are logically concise, and
4085/// - the reduction is performed efficiently in a hierarchical
4086/// manner as follows: within OpenMP threads in the same warp,
4087/// across warps in a threadblock, and finally across teams on
4088/// the NVPTX device.
4089///
4090/// Introduction to Decoupling
4091///
4092/// We would like to decouple the compiler and the runtime so that the
4093/// latter is ignorant of the reduction variables (number, data types)
4094/// and the reduction operators. This allows a simpler interface
4095/// and implementation while still attaining good performance.
4096///
4097/// Pseudocode for the aforementioned OpenMP program generated by the
4098/// compiler is as follows:
4099///
4100/// 1. Create private copies of reduction variables on each OpenMP
4101/// thread: 'foo_private', 'bar_private'
4102/// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
4103/// to it and writes the result in 'foo_private' and 'bar_private'
4104/// respectively.
4105/// 3. Call the OpenMP runtime on the GPU to reduce within a team
4106/// and store the result on the team master:
4107///
4108/// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
4109/// reduceData, shuffleReduceFn, interWarpCpyFn)
4110///
4111/// where:
4112/// struct ReduceData {
4113/// double *foo;
4114/// double *bar;
4115/// } reduceData
4116/// reduceData.foo = &foo_private
4117/// reduceData.bar = &bar_private
4118///
4119/// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
4120/// auxiliary functions generated by the compiler that operate on
4121/// variables of type 'ReduceData'. They help the runtime perform
4122/// algorithmic steps in a data-agnostic manner.
4123///
4124/// 'shuffleReduceFn' is a pointer to a function that reduces data
4125/// of type 'ReduceData' across two OpenMP threads (lanes) in the
4126/// same warp. It takes the following arguments as input:
4127///
4128/// a. variable of type 'ReduceData' on the calling lane,
4129/// b. its lane_id,
4130/// c. an offset relative to the current lane_id to generate a
4131/// remote_lane_id. The remote lane contains the second
4132/// variable of type 'ReduceData' that is to be reduced.
4133/// d. an algorithm version parameter determining which reduction
4134/// algorithm to use.
4135///
4136/// 'shuffleReduceFn' retrieves data from the remote lane using
4137/// efficient GPU shuffle intrinsics and reduces, using the
4138/// algorithm specified by the 4th parameter, the two operands
4139/// element-wise. The result is written to the first operand.
4140///
4141/// Different reduction algorithms are implemented in different
4142/// runtime functions, all calling 'shuffleReduceFn' to perform
4143/// the essential reduction step. Therefore, based on the 4th
4144/// parameter, this function behaves slightly differently to
4145/// cooperate with the runtime to ensure correctness under
4146/// different circumstances.
4147///
4148/// 'InterWarpCpyFn' is a pointer to a function that transfers
4149/// reduced variables across warps. It tunnels, through CUDA
4150/// shared memory, the thread-private data of type 'ReduceData'
4151/// from lane 0 of each warp to a lane in the first warp.
4152/// 4. Call the OpenMP runtime on the GPU to reduce across teams.
4153/// The last team writes the global reduced value to memory.
4154///
4155/// ret = __kmpc_nvptx_teams_reduce_nowait(...,
4156/// reduceData, shuffleReduceFn, interWarpCpyFn,
4157/// scratchpadCopyFn, loadAndReduceFn)
4158///
4159/// 'scratchpadCopyFn' is a helper that stores reduced
4160/// data from the team master to a scratchpad array in
4161/// global memory.
4162///
4163/// 'loadAndReduceFn' is a helper that loads data from
4164/// the scratchpad array and reduces it with the input
4165/// operand.
4166///
4167/// These compiler generated functions hide address
4168/// calculation and alignment information from the runtime.
4169/// 5. if ret == 1:
4170/// The team master of the last team stores the reduced
4171/// result to the globals in memory.
4172/// foo += reduceData.foo; bar *= reduceData.bar
4173///
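/// As a concrete sketch (hypothetical CUDA-style pseudocode, not the exact
/// code the compiler emits), the 'shuffleReduceFn' generated for the foo/bar
/// example would behave roughly like:
///
///   void shuffleReduceFn(ReduceData *rd, short lane_id, short offset,
///                        short algo_version) {
///     double remote_foo = __shfl_down_sync(0xffffffff, *rd->foo, offset);
///     double remote_bar = __shfl_down_sync(0xffffffff, *rd->bar, offset);
///     // algo_version picks the combining rule; version 0 (full warp)
///     // always reduces into the local operand:
///     *rd->foo += remote_foo;
///     *rd->bar *= remote_bar;
///   }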
4174///
4175/// Warp Reduction Algorithms
4176///
4177/// On the warp level, we have three algorithms implemented in the
4178/// OpenMP runtime depending on the number of active lanes:
4179///
4180/// Full Warp Reduction
4181///
4182/// The reduce algorithm within a warp where all lanes are active
4183/// is implemented in the runtime as follows:
4184///
4185/// full_warp_reduce(void *reduce_data,
4186/// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
4187/// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
4188/// ShuffleReduceFn(reduce_data, 0, offset, 0);
4189/// }
4190///
4191/// The algorithm completes in log(2, WARPSIZE) steps.
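/// For example (a worked instance, not from the source), with WARPSIZE = 32
/// the offsets are 16, 8, 4, 2 and 1, i.e. five shuffle-and-reduce steps.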
4192///
4193/// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
4194/// not used; we therefore save instructions by not retrieving lane_id
4195/// from the corresponding special registers. The 4th parameter, which
4196/// represents the version of the algorithm being used, is set to 0 to
4197/// signify full warp reduction.
4198///
4199/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4200///
4201/// #reduce_elem refers to an element in the local lane's data structure
4202/// #remote_elem is retrieved from a remote lane
4203/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4204/// reduce_elem = reduce_elem REDUCE_OP remote_elem;
4205///
4206/// Contiguous Partial Warp Reduction
4207///
4208/// This reduce algorithm is used within a warp where only the first
4209/// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
4210/// number of OpenMP threads in a parallel region is not a multiple of
4211/// WARPSIZE. The algorithm is implemented in the runtime as follows:
4212///
4213/// void
4214/// contiguous_partial_reduce(void *reduce_data,
4215/// kmp_ShuffleReductFctPtr ShuffleReduceFn,
4216/// int size, int lane_id) {
4217/// int curr_size;
4218/// int offset;
4219/// curr_size = size;
4220///     offset = curr_size/2;
4221/// while (offset>0) {
4222/// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
4223/// curr_size = (curr_size+1)/2;
4224/// offset = curr_size/2;
4225/// }
4226/// }
4227///
4228/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4229///
4230/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4231/// if (lane_id < offset)
4232/// reduce_elem = reduce_elem REDUCE_OP remote_elem
4233/// else
4234/// reduce_elem = remote_elem
4235///
4236/// This algorithm assumes that the data to be reduced are located in a
4237/// contiguous subset of lanes starting from the first. When there is
4238/// an odd number of active lanes, the data in the last lane is not
4239/// aggregated with any other lane's data but is instead copied over.
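///
/// As a worked trace (not from the source), for size = 5 the loop runs with
/// offset = 2, 1, 1: lanes 0-1 first reduce with lanes 2-3 while lane 4's
/// data is shuffled down unchanged, and the two offset-1 steps then fold the
/// remaining values into lane 0.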
4240///
4241/// Dispersed Partial Warp Reduction
4242///
4243/// This algorithm is used within a warp when any discontiguous subset of
4244/// lanes are active. It is used to implement the reduction operation
4245/// across lanes in an OpenMP simd region or in a nested parallel region.
4246///
4247/// void
4248/// dispersed_partial_reduce(void *reduce_data,
4249/// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
4250/// int size, remote_id;
4251/// int logical_lane_id = number_of_active_lanes_before_me() * 2;
4252/// do {
4253/// remote_id = next_active_lane_id_right_after_me();
4254///         # the above function returns 0 if no active lane
4255/// # is present right after the current lane.
4256/// size = number_of_active_lanes_in_this_warp();
4257/// logical_lane_id /= 2;
4258/// ShuffleReduceFn(reduce_data, logical_lane_id,
4259/// remote_id-1-threadIdx.x, 2);
4260/// } while (logical_lane_id % 2 == 0 && size > 1);
4261/// }
4262///
4263/// There is no assumption made about the initial state of the reduction.
4264/// Any number of lanes (>=1) could be active at any position. The reduction
4265/// result is returned in the first active lane.
4266///
4267/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4268///
4269/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4270/// if (lane_id % 2 == 0 && offset > 0)
4271/// reduce_elem = reduce_elem REDUCE_OP remote_elem
4272/// else
4273/// reduce_elem = remote_elem
4274///
4275///
4276/// Intra-Team Reduction
4277///
4278/// This function, as implemented in the runtime call
4279/// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
4280/// threads in a team. It first reduces within a warp using the
4281/// aforementioned algorithms. We then proceed to gather all such
4282/// reduced values at the first warp.
4283///
4284/// The runtime makes use of the function 'InterWarpCpyFn', which copies
4285/// data from each "warp master" (the zeroth lane of each warp, where
4286/// warp-reduced data is held) to the zeroth warp. This step reduces (in
4287/// a mathematical sense) the problem of reduction across warp masters in
4288/// a block to the problem of warp reduction.
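///
/// A sketch of that transfer (hypothetical names, per ReduceData element):
///
///   __shared__ elem_t transfer[MAX_WARPS];
///   if (lane_id == 0)
///     transfer[warp_id] = reduce_elem;   // each warp master publishes
///   __syncthreads();
///   if (warp_id == 0 && lane_id < num_warps)
///     reduce_elem = transfer[lane_id];   // lanes of warp 0 gather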
4289///
4290///
4291/// Inter-Team Reduction
4292///
4293/// Once a team has reduced its data to a single value, it is stored in
4294/// a global scratchpad array. Since each team has a distinct slot, this
4295/// can be done without locking.
4296///
4297/// The last team to write to the scratchpad array proceeds to reduce the
4298/// scratchpad array. One or more workers in the last team use the helper
4299/// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
4300/// the k'th worker reduces every k'th element.
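///
/// A sketch of that strided traversal (hypothetical, per reduction element):
///
///   for (int i = worker_id; i < num_teams; i += num_workers)
///     my_elem = my_elem REDUCE_OP scratchpad[i];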
4301///
4302/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
4303/// reduce across workers and compute a globally reduced value.
4304///
4305void CGOpenMPRuntimeGPU::emitReduction(
4306 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
4307 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
4308 ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
4309 if (!CGF.HaveInsertPoint())
4310 return;
4311
4312 bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
4313#ifndef NDEBUG
4314 bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
4315#endif
4316
4317 if (Options.SimpleReduction) {
4318    assert(!TeamsReduction && !ParallelReduction &&
4319           "Invalid reduction selection in emitReduction.");
4320 CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
4321 ReductionOps, Options);
4322 return;
4323 }
4324
4325  assert((TeamsReduction || ParallelReduction) &&
4326         "Invalid reduction selection in emitReduction.");
4327
4328 // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
4329 // RedList, shuffle_reduce_func, interwarp_copy_func);
4330 // or
4331 // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
4332 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
4333 llvm::Value *ThreadId = getThreadID(CGF, Loc);
4334
4335 llvm::Value *Res;
4336 ASTContext &C = CGM.getContext();
4337 // 1. Build a list of reduction variables.
4338 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
4339 auto Size = RHSExprs.size();
4340 for (const Expr *E : Privates) {
4341 if (E->getType()->isVariablyModifiedType())
4342 // Reserve place for array size.
4343 ++Size;
4344 }
4345 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
4346 QualType ReductionArrayTy =
4347 C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
4348 /*IndexTypeQuals=*/0);
4349 Address ReductionList =
4350 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
4351 auto IPriv = Privates.begin();
4352 unsigned Idx = 0;
4353 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
4354 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4355 CGF.Builder.CreateStore(
4356 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4357 CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
4358 Elem);
4359 if ((*IPriv)->getType()->isVariablyModifiedType()) {
4360 // Store array size.
4361 ++Idx;
4362 Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4363 llvm::Value *Size = CGF.Builder.CreateIntCast(
4364 CGF.getVLASize(
4365 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
4366 .NumElts,
4367 CGF.SizeTy, /*isSigned=*/false);
4368 CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
4369 Elem);
4370 }
4371 }
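  // For the running foo/bar example this conceptually materializes (a sketch,
  // not the exact IR the builder produces):
  //   void *RedList[2] = {(void *)&foo_private, (void *)&bar_private};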
4372
4373 llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4374 ReductionList.getPointer(), CGF.VoidPtrTy);
4375 llvm::Function *ReductionFn = emitReductionFunction(
4376 Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
4377 LHSExprs, RHSExprs, ReductionOps);
4378 llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
4379 llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
4380 CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
4381 llvm::Value *InterWarpCopyFn =
4382 emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
4383
4384 if (ParallelReduction) {
4385 llvm::Value *Args[] = {RTLoc,
4386 ThreadId,
4387 CGF.Builder.getInt32(RHSExprs.size()),
4388 ReductionArrayTySize,
4389 RL,
4390 ShuffleAndReduceFn,
4391 InterWarpCopyFn};
4392
4393 Res = CGF.EmitRuntimeCall(
4394 createNVPTXRuntimeFunction(
4395 OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2),
4396 Args);
4397 } else {
4398    assert(TeamsReduction && "expected teams reduction.");
4399 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
4400 llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
4401 int Cnt = 0;
4402 for (const Expr *DRE : Privates) {
4403 PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
4404 ++Cnt;
4405 }
4406 const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
4407 CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
4408 C.getLangOpts().OpenMPCUDAReductionBufNum);
4409 TeamsReductions.push_back(TeamReductionRec);
4410 if (!KernelTeamsReductionPtr) {
4411 KernelTeamsReductionPtr = new llvm::GlobalVariable(
4412 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
4413 llvm::GlobalValue::InternalLinkage, nullptr,
4414 "_openmp_teams_reductions_buffer_$_$ptr");
4415 }
4416 llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
4417 Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
4418 /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
4419 llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
4420 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4421 llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
4422 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4423 ReductionFn);
4424 llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
4425 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4426 llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
4427 CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4428 ReductionFn);
4429
4430 llvm::Value *Args[] = {
4431 RTLoc,
4432 ThreadId,
4433 GlobalBufferPtr,
4434 CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
4435 RL,
4436 ShuffleAndReduceFn,
4437 InterWarpCopyFn,
4438 GlobalToBufferCpyFn,
4439 GlobalToBufferRedFn,
4440 BufferToGlobalCpyFn,
4441 BufferToGlobalRedFn};
4442
4443 Res = CGF.EmitRuntimeCall(
4444 createNVPTXRuntimeFunction(
4445 OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2),
4446 Args);
4447 }
4448
4449 // 5. Build if (res == 1)
4450 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
4451 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
4452 llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
4453 Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
4454 CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
4455
4456 // 6. Build then branch: where we have reduced values in the master
4457 // thread in each team.
4458 // __kmpc_end_reduce{_nowait}(<gtid>);
4459 // break;
4460 CGF.EmitBlock(ThenBB);
4461
4462 // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
4463 auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
4464 this](CodeGenFunction &CGF, PrePostActionTy &Action) {
4465 auto IPriv = Privates.begin();
4466 auto ILHS = LHSExprs.begin();
4467 auto IRHS = RHSExprs.begin();
4468 for (const Expr *E : ReductionOps) {
4469 emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
4470 cast<DeclRefExpr>(*IRHS));
4471 ++IPriv;
4472 ++ILHS;
4473 ++IRHS;
4474 }
4475 };
4476 llvm::Value *EndArgs[] = {ThreadId};
4477 RegionCodeGenTy RCG(CodeGen);
4478 NVPTXActionTy Action(
4479 nullptr, llvm::None,
4480 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
4481 EndArgs);
4482 RCG.setAction(Action);
4483 RCG(CGF);
4484 // There is no need to emit line number for unconditional branch.
4485 (void)ApplyDebugLocation::CreateEmpty(CGF);
4486 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
4487}
4488
4489const VarDecl *
4490CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
4491 const VarDecl *NativeParam) const {
4492 if (!NativeParam->getType()->isReferenceType())
4493 return NativeParam;
4494 QualType ArgType = NativeParam->getType();
4495 QualifierCollector QC;
4496 const Type *NonQualTy = QC.strip(ArgType);
4497 QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4498 if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
4499 if (Attr->getCaptureKind() == OMPC_map) {
4500 PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4501 LangAS::opencl_global);
4502 } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
4503 PointeeTy.isConstant(CGM.getContext())) {
4504 PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4505 LangAS::opencl_generic);
4506 }
4507 }
4508 ArgType = CGM.getContext().getPointerType(PointeeTy);
4509 QC.addRestrict();
4510 enum { NVPTX_local_addr = 5 };
4511 QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
4512 ArgType = QC.apply(CGM.getContext(), ArgType);
4513 if (isa<ImplicitParamDecl>(NativeParam))
4514 return ImplicitParamDecl::Create(
4515 CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
4516 NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
4517 return ParmVarDecl::Create(
4518 CGM.getContext(),
4519 const_cast<DeclContext *>(NativeParam->getDeclContext()),
4520 NativeParam->getBeginLoc(), NativeParam->getLocation(),
4521 NativeParam->getIdentifier(), ArgType,
4522 /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
4523}
4524
4525Address
4526CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
4527 const VarDecl *NativeParam,
4528 const VarDecl *TargetParam) const {
4529  assert(NativeParam != TargetParam &&
4530         NativeParam->getType()->isReferenceType() &&
4531         "Native arg must not be the same as target arg.");
4532 Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
4533 QualType NativeParamType = NativeParam->getType();
4534 QualifierCollector QC;
4535 const Type *NonQualTy = QC.strip(NativeParamType);
4536 QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4537 unsigned NativePointeeAddrSpace =
4538 CGF.getContext().getTargetAddressSpace(NativePointeeTy);
4539 QualType TargetTy = TargetParam->getType();
4540 llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
4541 LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
4542 // First cast to generic.
4543 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4544 TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4545 /*AddrSpace=*/0));
4546 // Cast from generic to native address space.
4547 TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4548 TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4549 NativePointeeAddrSpace));
4550 Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
4551 CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
4552 NativeParamType);
4553 return NativeParamAddr;
4554}
4555
4556void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
4557 CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
4558 ArrayRef<llvm::Value *> Args) const {
4559 SmallVector<llvm::Value *, 4> TargetArgs;
4560 TargetArgs.reserve(Args.size());
4561 auto *FnType = OutlinedFn.getFunctionType();
4562 for (unsigned I = 0, E = Args.size(); I < E; ++I) {
4563 if (FnType->isVarArg() && FnType->getNumParams() <= I) {
4564 TargetArgs.append(std::next(Args.begin(), I), Args.end());
4565 break;
4566 }
4567 llvm::Type *TargetType = FnType->getParamType(I);
4568 llvm::Value *NativeArg = Args[I];
4569 if (!TargetType->isPointerTy()) {
4570 TargetArgs.emplace_back(NativeArg);
4571 continue;
4572 }
4573 llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4574 NativeArg,
4575 NativeArg->getType()->getPointerElementType()->getPointerTo());
4576 TargetArgs.emplace_back(
4577 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
4578 }
4579 CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
4580}
4581
4582/// Emit a function that wraps the outlined parallel region
4583/// and controls the arguments which are passed to this function.
4584/// The wrapper ensures that the outlined function is called
4585/// with the correct arguments when data is shared.
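/// A rough sketch of the wrapper this produces (hypothetical names; the real
/// captures are loaded from the shared-variables list in the body below):
///
///   void outlined_parallel_fn_wrapper(uint16_t parallel_level,
///                                     uint32_t thread_id) {
///     int32_t zero = 0;
///     void **shared_args;
///     __kmpc_get_shared_variables(&shared_args);
///     outlined_parallel_fn(&thread_id, &zero,
///                          /* captures loaded from shared_args */);
///   }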
4586llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
4587 llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
4588 ASTContext &Ctx = CGM.getContext();
4589 const auto &CS = *D.getCapturedStmt(OMPD_parallel);
4590
4591 // Create a function that takes as argument the source thread.
4592 FunctionArgList WrapperArgs;
4593 QualType Int16QTy =
4594 Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
4595 QualType Int32QTy =
4596 Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
4597 ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
4598 /*Id=*/nullptr, Int16QTy,
4599 ImplicitParamDecl::Other);
4600 ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
4601 /*Id=*/nullptr, Int32QTy,
4602 ImplicitParamDecl::Other);
4603 WrapperArgs.emplace_back(&ParallelLevelArg);
4604 WrapperArgs.emplace_back(&WrapperArg);
4605
4606 const CGFunctionInfo &CGFI =
4607 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
4608
4609 auto *Fn = llvm::Function::Create(
4610 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
4611 Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
4612 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
4613 Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
4614 Fn->setDoesNotRecurse();
4615
4616 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
4617 CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
4618 D.getBeginLoc(), D.getBeginLoc());
4619
4620 const auto *RD = CS.getCapturedRecordDecl();
4621 auto CurField = RD->field_begin();
4622
4623 Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
4624 /*Name=*/".zero.addr");
4625 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
4626 // Get the array of arguments.
4627 SmallVector<llvm::Value *, 8> Args;
4628
4629 Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
4630 Args.emplace_back(ZeroAddr.getPointer());
4631
4632 CGBuilderTy &Bld = CGF.Builder;
4633 auto CI = CS.capture_begin();
4634
4635 // Use global memory for data sharing.
4636 // Handle passing of global args to workers.
4637 Address GlobalArgs =
4638 CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
4639 llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
4640 llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
4641 CGF.EmitRuntimeCall(
4642 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_get_shared_variables),
4643 DataSharingArgs);
4644
4645 // Retrieve the shared variables from the list of references returned
4646 // by the runtime. Pass the variables to the outlined function.
4647 Address SharedArgListAddress = Address::invalid();
4648 if (CS.capture_size() > 0 ||
4649 isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
4650 SharedArgListAddress = CGF.EmitLoadOfPointer(
4651 GlobalArgs, CGF.getContext()
4652 .getPointerType(CGF.getContext().getPointerType(
4653 CGF.getContext().VoidPtrTy))
4654 .castAs<PointerType>());
4655 }
4656 unsigned Idx = 0;
4657 if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
4658 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
4659 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4660 Src, CGF.SizeTy->getPointerTo());
4661 llvm::Value *LB = CGF.EmitLoadOfScalar(
4662 TypedAddress,
4663 /*Volatile=*/false,
4664 CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
4665 cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
4666 Args.emplace_back(LB);
4667 ++Idx;
4668 Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
4669 TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4670 Src, CGF.SizeTy->getPointerTo());
4671 llvm::Value *UB = CGF.EmitLoadOfScalar(
4672 TypedAddress,
4673 /*Volatile=*/false,
4674 CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
4675 cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
4676 Args.emplace_back(UB);
4677 ++Idx;
4678 }
4679 if (CS.capture_size() > 0) {
4680 ASTContext &CGFContext = CGF.getContext();
4681 for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
4682 QualType ElemTy = CurField->getType();
4683 Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
4684 Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4685 Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
4686 llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
4687 /*Volatile=*/false,
4688 CGFContext.getPointerType(ElemTy),
4689 CI->getLocation());
4690 if (CI->capturesVariableByCopy() &&
4691 !CI->getCapturedVar()->getType()->isAnyPointerType()) {
4692 Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
4693 CI->getLocation());
4694 }
4695 Args.emplace_back(Arg);
4696 }
4697 }
4698
4699 emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
4700 CGF.FinishFunction();
4701 return Fn;
4702}
4703
4704void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
4705 const Decl *D) {
4706 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
4707 return;
4708
4709  assert(D && "Expected function or captured|block decl.");
4710  assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
4711         "Function is registered already.");
4712  assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
4713         "Team is set but not processed.");
4714 const Stmt *Body = nullptr;
4715 bool NeedToDelayGlobalization = false;
4716 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
4717 Body = FD->getBody();
4718 } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
4719 Body = BD->getBody();
4720 } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
4721 Body = CD->getBody();
4722 NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
4723 if (NeedToDelayGlobalization &&
4724 getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
4725 return;
4726 }
4727 if (!Body)
4728 return;
4729 CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
4730 VarChecker.Visit(Body);
4731 const RecordDecl *GlobalizedVarsRecord =
4732 VarChecker.getGlobalizedRecord(IsInTTDRegion);
4733 TeamAndReductions.first = nullptr;
4734 TeamAndReductions.second.clear();
4735 ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
4736 VarChecker.getEscapedVariableLengthDecls();
4737 if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
4738 return;
4739 auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
4740 I->getSecond().MappedParams =
4741 std::make_unique<CodeGenFunction::OMPMapVars>();
4742 I->getSecond().GlobalRecord = GlobalizedVarsRecord;
4743 I->getSecond().EscapedParameters.insert(
4744 VarChecker.getEscapedParameters().begin(),
4745 VarChecker.getEscapedParameters().end());
4746 I->getSecond().EscapedVariableLengthDecls.append(
4747 EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
4748 DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
4749 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
4750    assert(VD->isCanonicalDecl() && "Expected canonical declaration");
4751 const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
4752 Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
4753 }
4754 if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
4755 CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
4756 VarChecker.Visit(Body);
4757 I->getSecond().SecondaryGlobalRecord =
4758 VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
4759 I->getSecond().SecondaryLocalVarData.emplace();
4760 DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
4761 for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
4762      assert(VD->isCanonicalDecl() && "Expected canonical declaration");
4763 const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
4764 Data.insert(
4765 std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
4766 }
4767 }
4768 if (!NeedToDelayGlobalization) {
4769 emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
4770 struct GlobalizationScope final : EHScopeStack::Cleanup {
4771 GlobalizationScope() = default;
4772
4773 void Emit(CodeGenFunction &CGF, Flags flags) override {
4774 static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
4775 .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
4776 }
4777 };
4778 CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
4779 }
4780}
4781
4782Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
4783 const VarDecl *VD) {
4784 if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
  [1] Assuming 'VD' is null
4785 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
4786 auto AS = LangAS::Default;
4787 switch (A->getAllocatorType()) {
4788 // Use the default allocator here as by default local vars are
4789 // threadlocal.
4790 case OMPAllocateDeclAttr::OMPNullMemAlloc:
4791 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
4792 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
4793 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
4794 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
4795 // Follow the user decision - use default allocation.
4796 return Address::invalid();
4797 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
4798    // TODO: implement support for user-defined allocators.
4799 return Address::invalid();
4800 case OMPAllocateDeclAttr::OMPConstMemAlloc:
4801 AS = LangAS::cuda_constant;
4802 break;
4803 case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
4804 AS = LangAS::cuda_shared;
4805 break;
4806 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
4807 case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
4808 break;
4809 }
4810 llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
4811 auto *GV = new llvm::GlobalVariable(
4812 CGM.getModule(), VarTy, /*isConstant=*/false,
4813 llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
4814 VD->getName(),
4815 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
4816 CGM.getContext().getTargetAddressSpace(AS));
4817 CharUnits Align = CGM.getContext().getDeclAlign(VD);
4818 GV->setAlignment(Align.getAsAlign());
4819 return Address(
4820 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4821 GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
4822 VD->getType().getAddressSpace()))),
4823 Align);
4824 }
4825
4826 if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
  [2] Taking false branch
4827 return Address::invalid();
4828
4829 VD = VD->getCanonicalDecl();
  [3] Called C++ object pointer is null
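  // Analyzer: 'VD' was assumed null at step [1] above, so this
  // getCanonicalDecl() call dereferences a null pointer. A minimal fix sketch
  // (hypothetical, not part of the source) would bail out first:
  //   if (!VD)
  //     return Address::invalid();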
4830 auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
4831 if (I == FunctionGlobalizedDecls.end())
4832 return Address::invalid();
4833 auto VDI = I->getSecond().LocalVarData.find(VD);
4834 if (VDI != I->getSecond().LocalVarData.end())
4835 return VDI->second.PrivateAddr;
4836 if (VD->hasAttrs()) {
4837 for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
4838 E(VD->attr_end());
4839 IT != E; ++IT) {
4840 auto VDI = I->getSecond().LocalVarData.find(
4841 cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
4842 ->getCanonicalDecl());
4843 if (VDI != I->getSecond().LocalVarData.end())
4844 return VDI->second.PrivateAddr;
4845 }
4846 }
4847
4848 return Address::invalid();
4849}
4850
4851void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) {
4852 FunctionGlobalizedDecls.erase(CGF.CurFn);
4853 CGOpenMPRuntime::functionFinished(CGF);
4854}
4855
4856void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk(
4857 CodeGenFunction &CGF, const OMPLoopDirective &S,
4858 OpenMPDistScheduleClauseKind &ScheduleKind,
4859 llvm::Value *&Chunk) const {
4860 auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
4861 if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
4862 ScheduleKind = OMPC_DIST_SCHEDULE_static;
4863 Chunk = CGF.EmitScalarConversion(
4864 RT.getGPUNumThreads(CGF),
4865 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
4866 S.getIterationVariable()->getType(), S.getBeginLoc());
4867 return;
4868 }
4869 CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
4870 CGF, S, ScheduleKind, Chunk);
4871}
4872
4873void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk(
4874 CodeGenFunction &CGF, const OMPLoopDirective &S,
4875 OpenMPScheduleClauseKind &ScheduleKind,
4876 const Expr *&ChunkExpr) const {
4877 ScheduleKind = OMPC_SCHEDULE_static;
4878 // Chunk size is 1 in this case.
4879 llvm::APInt ChunkSize(32, 1);
4880 ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
4881 CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
4882 SourceLocation());
4883}
4884
4885void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
4886 CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
4887  assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
4888         " Expected target-based directive.");
4889 const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
4890 for (const CapturedStmt::Capture &C : CS->captures()) {
4891 // Capture variables captured by reference in lambdas for target-based
4892 // directives.
4893 if (!C.capturesVariable())
4894 continue;
4895 const VarDecl *VD = C.getCapturedVar();
4896 const auto *RD = VD->getType()
4897 .getCanonicalType()
4898 .getNonReferenceType()
4899 ->getAsCXXRecordDecl();
4900 if (!RD || !RD->isLambda())
4901 continue;
4902 Address VDAddr = CGF.GetAddrOfLocalVar(VD);
4903 LValue VDLVal;
4904 if (VD->getType().getCanonicalType()->isReferenceType())
4905 VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
4906 else
4907 VDLVal = CGF.MakeAddrLValue(
4908 VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
4909 llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
4910 FieldDecl *ThisCapture = nullptr;
4911 RD->getCaptureFields(Captures, ThisCapture);
4912 if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
4913 LValue ThisLVal =
4914 CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
4915 llvm::Value *CXXThis = CGF.LoadCXXThis();
4916 CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
4917 }
4918 for (const LambdaCapture &LC : RD->captures()) {
4919 if (LC.getCaptureKind() != LCK_ByRef)
4920 continue;
4921 const VarDecl *VD = LC.getCapturedVar();
4922 if (!CS->capturesVariable(VD))
4923 continue;
4924 auto It = Captures.find(VD);
4925 assert(It != Captures.end() && "Found lambda capture without field.");
4926 LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
4927 Address VDAddr = CGF.GetAddrOfLocalVar(VD);
4928 if (VD->getType().getCanonicalType()->isReferenceType())
4929 VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
4930 VD->getType().getCanonicalType())
4931 .getAddress(CGF);
4932 CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
4933 }
4934 }
4935}
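
The function above patches by-reference lambda captures for target regions: the host lambda stores host addresses in its capture fields, so codegen re-stores the corresponding device addresses (and the device this, if captured) before the device body runs. A minimal, hypothetical user-code pattern it handles:

// Hypothetical user code: 'x' is captured by reference in 'fn'.  The capture
// field holds a host address; adjustTargetSpecificDataForLambdas rewrites it
// to the device copy of 'x' so the call inside the target region is valid.
#include <cstdio>

int main() {
  int x = 1;
  auto fn = [&x] { x += 41; };
  #pragma omp target map(tofrom: x)
  fn();
  std::printf("%d\n", x); // expected: 42
  return 0;
}
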
4936
4937unsigned CGOpenMPRuntimeGPU::getDefaultFirstprivateAddressSpace() const {
4938 return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
4939}
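
Firstprivate copies handed to target regions therefore default to the CUDA constant address space: they are read-only on the device and identical for every thread, a good fit for the constant cache's broadcast behavior. A hypothetical, standard-OpenMP sketch of code that would benefit:

// Hypothetical user code: the firstprivate 'n' is read-only and uniform
// across device threads, so placing it in CUDA constant memory (as
// getDefaultFirstprivateAddressSpace requests) lets the hardware broadcast
// it from the constant cache.
void fill(int *p, int n) {
  #pragma omp target teams distribute parallel for firstprivate(n) map(from: p[0:n])
  for (int i = 0; i < n; ++i)
    p[i] = n;
}
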
4940
4941bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
4942 LangAS &AS) {
4943 if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
4944 return false;
4945 const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
4946 switch (A->getAllocatorType()) {
4947 case OMPAllocateDeclAttr::OMPNullMemAlloc:
4948 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
4949 // Not supported, fallback to the default mem space.
4950 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
4951 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
4952 case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
4953 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
4954 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
4955 AS = LangAS::Default;
4956 return true;
4957 case OMPAllocateDeclAttr::OMPConstMemAlloc:
4958 AS = LangAS::cuda_constant;
4959 return true;
4960 case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
4961 AS = LangAS::cuda_shared;
4962 return true;
4963 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
4964 llvm_unreachable("Expected predefined allocator for the variables with the "::llvm::llvm_unreachable_internal("Expected predefined allocator for the variables with the "
"static storage.", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp"
, 4965)
4965 "static storage.")::llvm::llvm_unreachable_internal("Expected predefined allocator for the variables with the "
"static storage.", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp"
, 4965)
;
4966 }
4967 return false;
4968}
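
The mapping above is: omp_const_mem_alloc → CUDA constant memory, omp_pteam_mem_alloc → CUDA shared (per-team) memory, the remaining predefined allocators → the default global space, and user-defined allocators are rejected for statics. A hypothetical usage that exercises it:

// Hypothetical user code: per the switch above, 'c' is emitted in CUDA
// constant memory and 't' in CUDA shared (per-team) memory; allocators such
// as omp_large_cap_mem_alloc would fall back to the default address space.
#include <omp.h>

#pragma omp declare target
int c = 7;
#pragma omp allocate(c) allocator(omp_const_mem_alloc)
int t;
#pragma omp allocate(t) allocator(omp_pteam_mem_alloc)
#pragma omp end declare target
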
4969
4970// Get the current CudaArch, ignoring any unknown values.
4971static CudaArch getCudaArch(CodeGenModule &CGM) {
4972 if (!CGM.getTarget().hasFeature("ptx"))
4973 return CudaArch::UNKNOWN;
4974 for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
4975 if (Feature.getValue()) {
4976 CudaArch Arch = StringToCudaArch(Feature.getKey());
4977 if (Arch != CudaArch::UNKNOWN)
4978 return Arch;
4979 }
4980 }
4981 return CudaArch::UNKNOWN;
4982}
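
getCudaArch scans the enabled target features for one whose name parses as a CUDA architecture (for NVPTX, a feature such as "sm_70" is set from the target -march). A simplified, self-contained model of that lookup, using a plain map in place of clang's FeatureMap (the names here are assumptions for illustration, not clang's real API):

// Simplified model, not clang's real API: walk enabled features and return
// the first one that parses as a known architecture, else Unknown.
#include <map>
#include <string>

enum class Arch { Unknown, SM_60, SM_70 };

static Arch parseArch(const std::string &Name) {
  if (Name == "sm_60") return Arch::SM_60;
  if (Name == "sm_70") return Arch::SM_70;
  return Arch::Unknown; // mirrors StringToCudaArch's fallback
}

static Arch detectArch(const std::map<std::string, bool> &Features) {
  for (const auto &F : Features) {
    if (!F.second) // skip disabled features, as the loop above does
      continue;
    Arch A = parseArch(F.first);
    if (A != Arch::Unknown)
      return A;
  }
  return Arch::Unknown;
}
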
4983
4984/// Check whether the target architecture supports unified addressing, which
4985/// is a prerequisite for the OpenMP requires clause "unified_shared_memory".
4986void CGOpenMPRuntimeGPU::processRequiresDirective(
4987 const OMPRequiresDecl *D) {
4988 for (const OMPClause *Clause : D->clauselists()) {
4989 if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
4990 CudaArch Arch = getCudaArch(CGM);
4991 switch (Arch) {
4992 case CudaArch::SM_20:
4993 case CudaArch::SM_21:
4994 case CudaArch::SM_30:
4995 case CudaArch::SM_32:
4996 case CudaArch::SM_35:
4997 case CudaArch::SM_37:
4998 case CudaArch::SM_50:
4999 case CudaArch::SM_52:
5000 case CudaArch::SM_53:
5001 case CudaArch::SM_60:
5002 case CudaArch::SM_61:
5003 case CudaArch::SM_62: {
5004 SmallString<256> Buffer;
5005 llvm::raw_svector_ostream Out(Buffer);
5006 Out << "Target architecture " << CudaArchToString(Arch)
5007 << " does not support unified addressing";
5008 CGM.Error(Clause->getBeginLoc(), Out.str());
5009 return;
5010 }
5011 case CudaArch::SM_70:
5012 case CudaArch::SM_72:
5013 case CudaArch::SM_75:
5014 case CudaArch::SM_80:
5015 case CudaArch::GFX600:
5016 case CudaArch::GFX601:
5017 case CudaArch::GFX700:
5018 case CudaArch::GFX701:
5019 case CudaArch::GFX702:
5020 case CudaArch::GFX703:
5021 case CudaArch::GFX704:
5022 case CudaArch::GFX801:
5023 case CudaArch::GFX802:
5024 case CudaArch::GFX803:
5025 case CudaArch::GFX810:
5026 case CudaArch::GFX900:
5027 case CudaArch::GFX902:
5028 case CudaArch::GFX904:
5029 case CudaArch::GFX906:
5030 case CudaArch::GFX908:
5031 case CudaArch::GFX909:
5032 case CudaArch::GFX1010:
5033 case CudaArch::GFX1011:
5034 case CudaArch::GFX1012:
5035 case CudaArch::GFX1030:
5036 case CudaArch::GFX1031:
5037 case CudaArch::UNKNOWN:
5038 break;
5039 case CudaArch::LAST:
5040 llvm_unreachable("Unexpected Cuda arch.")::llvm::llvm_unreachable_internal("Unexpected Cuda arch.", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp"
, 5040)
;
5041 }
5042 }
5043 }
5044 CGOpenMPRuntime::processRequiresDirective(D);
5045}
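
Hypothetical user code that this check accepts or rejects depending on the GPU: requesting unified_shared_memory while compiling for sm_62 or older hits the "does not support unified addressing" error above, while sm_70 and newer (and the listed AMD GPUs) compile:

// Hypothetical user code: with unified_shared_memory required, host pointers
// are usable directly in target regions, so no map clauses are needed; on a
// pre-sm_70 NVPTX target, processRequiresDirective above reports
// "Target architecture sm_60 does not support unified addressing".
#pragma omp requires unified_shared_memory

void saxpy(float *x, float *y, float a, int n) {
  #pragma omp target teams distribute parallel for
  for (int i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}
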
5046
5047/// Get number of SMs and number of blocks per SM.
5048static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
5049 std::pair<unsigned, unsigned> Data;
5050 if (CGM.getLangOpts().OpenMPCUDANumSMs)
5051 Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
5052 if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
5053 Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
5054 if (Data.first && Data.second)
5055 return Data;
5056 switch (getCudaArch(CGM)) {
5057 case CudaArch::SM_20:
5058 case CudaArch::SM_21:
5059 case CudaArch::SM_30:
5060 case CudaArch::SM_32:
5061 case CudaArch::SM_35:
5062 case CudaArch::SM_37:
5063 case CudaArch::SM_50:
5064 case CudaArch::SM_52:
5065 case CudaArch::SM_53:
5066 return {16, 16};
5067 case CudaArch::SM_60:
5068 case CudaArch::SM_61:
5069 case CudaArch::SM_62:
5070 return {56, 32};
5071 case CudaArch::SM_70:
5072 case CudaArch::SM_72:
5073 case CudaArch::SM_75:
5074 case CudaArch::SM_80:
5075 return {84, 32};
5076 case CudaArch::GFX600:
5077 case CudaArch::GFX601:
5078 case CudaArch::GFX700:
5079 case CudaArch::GFX701:
5080 case CudaArch::GFX702:
5081 case CudaArch::GFX703:
5082 case CudaArch::GFX704:
5083 case CudaArch::GFX801:
5084 case CudaArch::GFX802:
5085 case CudaArch::GFX803:
5086 case CudaArch::GFX810:
5087 case CudaArch::GFX900:
5088 case CudaArch::GFX902:
5089 case CudaArch::GFX904:
5090 case CudaArch::GFX906:
5091 case CudaArch::GFX908:
5092 case CudaArch::GFX909:
5093 case CudaArch::GFX1010:
5094 case CudaArch::GFX1011:
5095 case CudaArch::GFX1012:
5096 case CudaArch::GFX1030:
5097 case CudaArch::GFX1031:
5098 case CudaArch::UNKNOWN:
5099 break;
5100 case CudaArch::LAST:
5101 llvm_unreachable("Unexpected Cuda arch.");
5102 }
5103 llvm_unreachable("Unexpected NVPTX target without ptx feature.")::llvm::llvm_unreachable_internal("Unexpected NVPTX target without ptx feature."
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp"
, 5103)
;
5104}
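
Note that the explicit overrides are honored only when both are set; a partial override falls through to the per-architecture table, which overwrites both fields. A self-contained sketch of that selection logic (the simplified enum and names are assumptions, not clang's API):

// Simplified model of getSMsBlocksPerSM: user overrides win only as a pair;
// otherwise (number of SMs, blocks per SM) both come from the arch table,
// matching the switch above.
#include <utility>

enum class SM { SM_35, SM_60, SM_70 };

static std::pair<unsigned, unsigned>
smsBlocksPerSM(unsigned NumSMs, unsigned BlocksPerSM, SM Arch) {
  if (NumSMs && BlocksPerSM)
    return {NumSMs, BlocksPerSM};
  switch (Arch) {
  case SM::SM_35: return {16, 16};  // Fermi/Kepler/Maxwell-era defaults
  case SM::SM_60: return {56, 32};  // Pascal
  case SM::SM_70: return {84, 32};  // Volta and newer
  }
  return {0, 0}; // unreachable for valid Arch values
}
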
5105
5106void CGOpenMPRuntimeGPU::clear() {
5107 if (!GlobalizedRecords.empty() &&
5108 !CGM.getLangOpts().OpenMPCUDATargetParallel) {
5109 ASTContext &C = CGM.getContext();
5110 llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
5111 llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
5112 RecordDecl *StaticRD = C.buildImplicitRecord(
5113 "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
5114 StaticRD->startDefinition();
5115 RecordDecl *SharedStaticRD = C.buildImplicitRecord(
5116 "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
5117 SharedStaticRD->startDefinition();
5118 for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
5119 if (Records.Records.empty())
5120 continue;
5121 unsigned Size = 0;
5122 unsigned RecAlignment = 0;
5123 for (const RecordDecl *RD : Records.Records) {
5124 QualType RDTy = C.getRecordType(RD);
5125 unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
5126 RecAlignment = std::max(RecAlignment, Alignment);
5127 unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
5128 Size =
5129 llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
5130 }
5131 Size = llvm::alignTo(Size, RecAlignment);
5132 llvm::APInt ArySize(/*numBits=*/64, Size);
5133 QualType SubTy = C.getConstantArrayType(
5134 C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
5135 const bool UseSharedMemory = Size <= SharedMemorySize;
5136 auto *Field =
5137 FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
5138 SourceLocation(), SourceLocation(), nullptr, SubTy,
5139 C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
5140 /*BW=*/nullptr, /*Mutable=*/false,
5141 /*InitStyle=*/ICIS_NoInit);
5142 Field->setAccess(AS_public);
5143 if (UseSharedMemory) {
5144 SharedStaticRD->addDecl(Field);
5145 SharedRecs.push_back(&Records);
5146 } else {
5147 StaticRD->addDecl(Field);
5148 GlobalRecs.push_back(&Records);
5149 }
5150 Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
5151 Records.UseSharedMemory->setInitializer(
5152 llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
5153 }
5154 // Allocate SharedMemorySize buffer for the shared memory.
5155 // FIXME: nvlink does not handle weak linkage correctly (objects with
5156 // different sizes are reported as erroneous).
5157 // Restore this code as soon as nvlink is fixed.
5158 if (!SharedStaticRD->field_empty()) {
5159 llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
5160 QualType SubTy = C.getConstantArrayType(
5161 C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
5162 auto *Field = FieldDecl::Create(
5163 C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
5164 C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
5165 /*BW=*/nullptr, /*Mutable=*/false,
5166 /*InitStyle=*/ICIS_NoInit);
5167 Field->setAccess(AS_public);
5168 SharedStaticRD->addDecl(Field);
5169 }
5170 SharedStaticRD->completeDefinition();
5171 if (!SharedStaticRD->field_empty()) {
5172 QualType StaticTy = C.getRecordType(SharedStaticRD);
5173 llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
5174 auto *GV = new llvm::GlobalVariable(
5175 CGM.getModule(), LLVMStaticTy,
5176 /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
5177 llvm::Constant::getNullValue(LLVMStaticTy),
5178 "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
5179 llvm::GlobalValue::NotThreadLocal,
5180 C.getTargetAddressSpace(LangAS::cuda_shared));
5181 auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
5182 GV, CGM.VoidPtrTy);
5183 for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
5184 Rec->Buffer->replaceAllUsesWith(Replacement);
5185 Rec->Buffer->eraseFromParent();
5186 }
5187 }
5188 StaticRD->completeDefinition();
5189 if (!StaticRD->field_empty()) {
5190 QualType StaticTy = C.getRecordType(StaticRD);
5191 std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
5192 llvm::APInt Size1(32, SMsBlockPerSM.second);
5193 QualType Arr1Ty =
5194 C.getConstantArrayType(StaticTy, Size1, nullptr, ArrayType::Normal,
5195 /*IndexTypeQuals=*/0);
5196 llvm::APInt Size2(32, SMsBlockPerSM.first);
5197 QualType Arr2Ty =
5198 C.getConstantArrayType(Arr1Ty, Size2, nullptr, ArrayType::Normal,
5199 /*IndexTypeQuals=*/0);
5200 llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
5201 // FIXME: nvlink does not handle weak linkage correctly (objects with
5202 // different sizes are reported as erroneous).
5203 // Restore CommonLinkage as soon as nvlink is fixed.
5204 auto *GV = new llvm::GlobalVariable(
5205 CGM.getModule(), LLVMArr2Ty,
5206 /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
5207 llvm::Constant::getNullValue(LLVMArr2Ty),
5208 "_openmp_static_glob_rd_$_");
5209 auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
5210 GV, CGM.VoidPtrTy);
5211 for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
5212 Rec->Buffer->replaceAllUsesWith(Replacement);
5213 Rec->Buffer->eraseFromParent();
5214 }
5215 }
5216 }
5217 if (!TeamsReductions.empty()) {
5218 ASTContext &C = CGM.getContext();
5219 RecordDecl *StaticRD = C.buildImplicitRecord(
5220 "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
5221 StaticRD->startDefinition();
5222 for (const RecordDecl *TeamReductionRec : TeamsReductions) {
5223 QualType RecTy = C.getRecordType(TeamReductionRec);
5224 auto *Field = FieldDecl::Create(
5225 C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
5226 C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
5227 /*BW=*/nullptr, /*Mutable=*/false,
5228 /*InitStyle=*/ICIS_NoInit);
5229 Field->setAccess(AS_public);
5230 StaticRD->addDecl(Field);
5231 }
5232 StaticRD->completeDefinition();
5233 QualType StaticTy = C.getRecordType(StaticRD);
5234 llvm::Type *LLVMReductionsBufferTy =
5235 CGM.getTypes().ConvertTypeForMem(StaticTy);
5236 // FIXME: nvlink does not handle weak linkage correctly (objects with
5237 // different sizes are reported as erroneous).
5238 // Restore CommonLinkage as soon as nvlink is fixed.
5239 auto *GV = new llvm::GlobalVariable(
5240 CGM.getModule(), LLVMReductionsBufferTy,
5241 /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
5242 llvm::Constant::getNullValue(LLVMReductionsBufferTy),
5243 "_openmp_teams_reductions_buffer_$_");
5244 KernelTeamsReductionPtr->setInitializer(
5245 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
5246 CGM.VoidPtrTy));
5247 }
5248 CGOpenMPRuntime::clear();
5249}
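
For reference, the size accumulation used when laying records into the globalized buffers above is Size = alignTo(alignTo(Size, Alignment) + RecSize, Alignment): each record starts at the next aligned offset, and the running total is re-aligned after it. A tiny standalone check of that arithmetic (a local alignTo stands in for llvm::alignTo so the example is self-contained):

// Minimal sketch of the layout arithmetic in clear(): place a record at the
// next Alignment-aligned offset, then round the running size up again.
#include <cassert>
#include <cstdint>

static uint64_t alignTo(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) / Align * Align; // round up to a multiple
}

int main() {
  uint64_t Size = 0;
  Size = alignTo(alignTo(Size, 8) + 12, 8); // 12-byte record, align 8 -> 16
  Size = alignTo(alignTo(Size, 4) + 5, 4);  // 5-byte record, align 4 -> 24
  assert(Size == 24);
  return 0;
}
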