LLVM 20.0.0git
OMPIRBuilder.h
1//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the OpenMPIRBuilder class and helpers used as a convenient
10// way to create LLVM instructions for OpenMP directives.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
15#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
16
17#include "llvm/Analysis/MemorySSAUpdater.h"
18#include "llvm/Frontend/Atomic/Atomic.h"
19#include "llvm/Frontend/OpenMP/OMPConstants.h"
20#include "llvm/Frontend/OpenMP/OMPGridValues.h"
21#include "llvm/IR/DebugLoc.h"
22#include "llvm/IR/IRBuilder.h"
23#include "llvm/IR/Module.h"
24#include "llvm/Support/Allocator.h"
25#include "llvm/TargetParser/Triple.h"
26#include <forward_list>
27#include <map>
28#include <optional>
29
30namespace llvm {
31class CanonicalLoopInfo;
32struct TargetRegionEntryInfo;
33class OffloadEntriesInfoManager;
34class OpenMPIRBuilder;
35
36/// Move the instructions after an InsertPoint to the beginning of another
37/// BasicBlock.
38///
39/// The instructions after \p IP are moved to the beginning of \p New which must
40/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
41/// \p New will be added such that there is no semantic change. Otherwise, the
42/// \p IP insert block remains degenerate and it is up to the caller to insert a
43/// terminator.
44void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New,
45 bool CreateBranch);
46
47/// Splice a BasicBlock at an IRBuilder's current insertion point. Its new
48/// insert location will stick to after the instruction before the insertion
49/// point (instead of moving with the instruction the InsertPoint stores
50/// internally).
51void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch);
52
53/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
54/// (missing the terminator).
55///
56/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
57/// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch
58/// is true, a branch to the new successor will be created such that
59/// semantically there is no change; otherwise the block of the insertion point
60/// remains degenerate and it is the caller's responsibility to insert a
61/// terminator. Returns the new successor block.
62BasicBlock *splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
63 llvm::Twine Name = {});
64
65/// Split a BasicBlock at \p Builder's insertion point, even if the block is
66/// degenerate (missing the terminator). Its new insert location will stick to
67/// after the instruction before the insertion point (instead of moving with the
68/// instruction the InsertPoint stores internally).
69BasicBlock *splitBB(IRBuilderBase &Builder, bool CreateBranch,
70 llvm::Twine Name = {});
71
72/// Split a BasicBlock at \p Builder's insertion point, even if the block is
73/// degenerate (missing the terminator). Its new insert location will stick to
74/// after the instruction before the insertion point (instead of moving with the
75/// instruction the InsertPoint stores internally).
76BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch, llvm::Twine Name);
77
78/// Like splitBB, but reuses the current block's name for the new name.
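///
/// A minimal usage sketch (illustrative only; the IRBuilder \p Builder is an
/// assumption, not part of this interface):
/// \code
///   // Split the block at the current insertion point and branch into the
///   // new successor, which reuses the old block's name plus ".split".
///   BasicBlock *Cont = splitBBWithSuffix(Builder, /*CreateBranch=*/true);
/// \endcode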
79BasicBlock *splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
80 llvm::Twine Suffix = ".split");
81
82/// Captures attributes that affect generating LLVM-IR using the
83/// OpenMPIRBuilder and related classes. Note that not all attributes are
84/// required for all classes or functions. In some use cases the configuration
85/// is not necessary at all, because the only functions that are called
86/// are ones that are not dependent on the configuration.
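///
/// A minimal sketch of populating a configuration (illustrative only; which
/// fields a frontend sets depends on its use case):
/// \code
///   OpenMPIRBuilderConfig Config;
///   Config.IsTargetDevice = false; // generating code for the OpenMP host
///   Config.IsGPU = false;
///   Config.OpenMPOffloadMandatory = false;
/// \endcode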
87class OpenMPIRBuilderConfig {
88public:
89 /// Flag to define whether to generate code for the role of the OpenMP host
90 /// (if set to false) or device (if set to true) in an offloading context. It
91 /// is set when the -fopenmp-is-target-device compiler frontend option is
92 /// specified.
93 std::optional<bool> IsTargetDevice;
94
95 /// Flag for specifying if the compilation is done for an accelerator. It is
96 /// set according to the architecture of the target triple and currently only
97 /// true when targeting AMDGPU or NVPTX. Today, these targets can only perform
98 /// the role of an OpenMP target device, so `IsTargetDevice` must also be true
99 /// if `IsGPU` is true. This restriction might be lifted if an accelerator-
100 /// like target with the ability to work as the OpenMP host is added, or if
101 /// the capabilities of the currently supported GPU architectures are
102 /// expanded.
103 std::optional<bool> IsGPU;
104
105 /// Flag for specifying if LLVMUsed information should be emitted.
106 std::optional<bool> EmitLLVMUsedMetaInfo;
107
108 /// Flag for specifying if offloading is mandatory.
109 std::optional<bool> OpenMPOffloadMandatory;
110
111 /// First separator used between the initial two parts of a name.
112 std::optional<StringRef> FirstSeparator;
113 /// Separator used between all remaining consecutive parts of a name.
114 std::optional<StringRef> Separator;
115
116 // Grid Value for the GPU target
117 std::optional<omp::GV> GridValue;
118
119 /// When compilation is being done for the OpenMP host (i.e. `IsTargetDevice =
120 /// false`), this contains the list of offloading triples associated, if any.
122 SmallVector<Triple> TargetTriples;
123 OpenMPIRBuilderConfig();
124 OpenMPIRBuilderConfig(bool IsTargetDevice, bool IsGPU,
125 bool OpenMPOffloadMandatory,
126 bool HasRequiresReverseOffload,
127 bool HasRequiresUnifiedAddress,
128 bool HasRequiresUnifiedSharedMemory,
129 bool HasRequiresDynamicAllocators);
130
131 // Getter functions that assert if the required values are not present.
132 bool isTargetDevice() const {
133 assert(IsTargetDevice.has_value() && "IsTargetDevice is not set");
134 return *IsTargetDevice;
135 }
136
137 bool isGPU() const {
138 assert(IsGPU.has_value() && "IsGPU is not set");
139 return *IsGPU;
140 }
141
142 bool openMPOffloadMandatory() const {
143 assert(OpenMPOffloadMandatory.has_value() &&
144 "OpenMPOffloadMandatory is not set");
145 return *OpenMPOffloadMandatory;
146 }
147
148 omp::GV getGridValue() const {
149 assert(GridValue.has_value() && "GridValue is not set");
150 return *GridValue;
151 }
152
153 bool hasRequiresFlags() const { return RequiresFlags; }
154 bool hasRequiresReverseOffload() const;
155 bool hasRequiresUnifiedAddress() const;
156 bool hasRequiresUnifiedSharedMemory() const;
157 bool hasRequiresDynamicAllocators() const;
158
159 /// Returns requires directive clauses as flags compatible with those expected
160 /// by libomptarget.
161 int64_t getRequiresFlags() const;
162
163 // Returns the FirstSeparator if set, otherwise use the default separator
164 // depending on isGPU
165 StringRef firstSeparator() const {
166 if (FirstSeparator.has_value())
167 return *FirstSeparator;
168 if (isGPU())
169 return "_";
170 return ".";
171 }
172
173 // Returns the Separator if set, otherwise use the default separator depending
174 // on isGPU
175 StringRef separator() const {
176 if (Separator.has_value())
177 return *Separator;
178 if (isGPU())
179 return "$";
180 return ".";
181 }
182
183 void setIsTargetDevice(bool Value) { IsTargetDevice = Value; }
184 void setIsGPU(bool Value) { IsGPU = Value; }
190
195
196private:
197 /// Flags for specifying which requires directive clauses are present.
198 int64_t RequiresFlags;
199};
200
201/// Data structure to contain the information needed to uniquely identify
202/// a target entry.
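///
/// For example (illustrative values only), an entry for a region inside
/// function "foo" at line 42 could be described as:
/// \code
///   TargetRegionEntryInfo EntryInfo("foo", /*DeviceID=*/7, /*FileID=*/13,
///                                   /*Line=*/42);
/// \endcode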
204 /// The prefix used for kernel names.
205 static constexpr const char *KernelNamePrefix = "__omp_offloading_";
206
207 std::string ParentName;
208 unsigned DeviceID;
209 unsigned FileID;
210 unsigned Line;
211 unsigned Count;
212
213 TargetRegionEntryInfo() : DeviceID(0), FileID(0), Line(0), Count(0) {}
214 TargetRegionEntryInfo(StringRef ParentName, unsigned DeviceID,
215 unsigned FileID, unsigned Line, unsigned Count = 0)
216 : ParentName(ParentName), DeviceID(DeviceID), FileID(FileID), Line(Line),
217 Count(Count) {}
218
219 static void getTargetRegionEntryFnName(SmallVectorImpl<char> &Name,
220 StringRef ParentName,
221 unsigned DeviceID, unsigned FileID,
222 unsigned Line, unsigned Count);
223
224 bool operator<(const TargetRegionEntryInfo &RHS) const {
225 return std::make_tuple(ParentName, DeviceID, FileID, Line, Count) <
226 std::make_tuple(RHS.ParentName, RHS.DeviceID, RHS.FileID, RHS.Line,
227 RHS.Count);
228 }
229};
230
231/// Class that manages information about offload code regions and data
232class OffloadEntriesInfoManager {
233 /// Number of entries registered so far.
234 OpenMPIRBuilder *OMPBuilder;
235 unsigned OffloadingEntriesNum = 0;
236
237public:
238 /// Base class of the entries info.
239 class OffloadEntryInfo {
240 public:
241 /// Kind of a given entry.
242 enum OffloadingEntryInfoKinds : unsigned {
243 /// Entry is a target region.
244 OffloadingEntryInfoTargetRegion = 0,
245 /// Entry is a declare target variable.
246 OffloadingEntryInfoDeviceGlobalVar = 1,
247 /// Invalid entry info.
248 OffloadingEntryInfoInvalid = ~0u
249 };
250
251 protected:
253 explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
254 explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
255 uint32_t Flags)
256 : Flags(Flags), Order(Order), Kind(Kind) {}
257 ~OffloadEntryInfo() = default;
258
259 public:
260 bool isValid() const { return Order != ~0u; }
261 unsigned getOrder() const { return Order; }
262 OffloadingEntryInfoKinds getKind() const { return Kind; }
263 uint32_t getFlags() const { return Flags; }
264 void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
265 Constant *getAddress() const { return cast_or_null<Constant>(Addr); }
266 void setAddress(Constant *V) {
267 assert(!Addr.pointsToAliveValue() && "Address has been set before!");
268 Addr = V;
269 }
270 static bool classof(const OffloadEntryInfo *Info) { return true; }
271
272 private:
273 /// Address of the entity that has to be mapped for offloading.
274 WeakTrackingVH Addr;
275
276 /// Flags associated with the device global.
277 uint32_t Flags = 0u;
278
279 /// Order this entry was emitted.
280 unsigned Order = ~0u;
281
282 OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
283 };
284
285 /// Return true if there are no entries defined.
286 bool empty() const;
287 /// Return number of entries defined so far.
288 unsigned size() const { return OffloadingEntriesNum; }
289
290 OffloadEntriesInfoManager(OpenMPIRBuilder *builder) : OMPBuilder(builder) {}
291
292 //
293 // Target region entries related.
294 //
295
296 /// Kind of the target registry entry.
298 /// Mark the entry as target region.
300 };
301
302 /// Target region entries info.
303 class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
304 /// Address that can be used as the ID of the entry.
305 Constant *ID = nullptr;
306
307 public:
310 explicit OffloadEntryInfoTargetRegion(unsigned Order, Constant *Addr,
311 Constant *ID,
314 ID(ID) {
316 }
317
318 Constant *getID() const { return ID; }
319 void setID(Constant *V) {
320 assert(!ID && "ID has been set before!");
321 ID = V;
322 }
323 static bool classof(const OffloadEntryInfo *Info) {
324 return Info->getKind() == OffloadingEntryInfoTargetRegion;
325 }
326 };
327
328 /// Initialize target region entry.
329 /// This is ONLY needed for DEVICE compilation.
331 unsigned Order);
332 /// Register target region entry.
336 /// Return true if a target region entry with the provided information
337 /// exists.
339 bool IgnoreAddressId = false) const;
340
341 // Return the Name based on \a EntryInfo using the next available Count.
343 const TargetRegionEntryInfo &EntryInfo);
344
345 /// Applies action \a Action on all registered entries.
346 typedef function_ref<void(const TargetRegionEntryInfo &EntryInfo,
347 const OffloadEntryInfoTargetRegion &)>
349 void
351
352 //
353 // Device global variable entries related.
354 //
355
356 /// Kind of the global variable entry.
358 /// Mark the entry as a declare target to.
360 /// Mark the entry as a declare target link.
362 /// Mark the entry as a declare target enter.
364 /// Mark the entry as having no declare target entry kind.
366 /// Mark the entry as a declare target indirect global.
368 /// Mark the entry as a register requires global.
370 };
371
372 /// Kind of device clause for declare target variables
373 /// and functions.
374 /// NOTE: Currently not used as part of a variable entry;
375 /// used by Flang and Clang to interface with the variable
376 /// related registration functions.
378 /// The target is marked for all devices
380 /// The target is marked for non-host devices
382 /// The target is marked for host devices
384 /// The target is marked as having no clause
386 };
387
388 /// Device global variable entries info.
389 class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
390 /// Type of the global variable.
391 int64_t VarSize;
392 GlobalValue::LinkageTypes Linkage;
393 const std::string VarName;
394
395 public:
398 explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
401 explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, Constant *Addr,
402 int64_t VarSize,
405 const std::string &VarName)
407 VarSize(VarSize), Linkage(Linkage), VarName(VarName) {
409 }
410
411 int64_t getVarSize() const { return VarSize; }
412 StringRef getVarName() const { return VarName; }
413 void setVarSize(int64_t Size) { VarSize = Size; }
414 GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
415 void setLinkage(GlobalValue::LinkageTypes LT) { Linkage = LT; }
416 static bool classof(const OffloadEntryInfo *Info) {
417 return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
418 }
419 };
420
421 /// Initialize device global variable entry.
422 /// This is ONLY used for DEVICE compilation.
425 unsigned Order);
426
427 /// Register device global variable entry.
429 int64_t VarSize,
432 /// Checks if the variable with the given name has been registered already.
433 bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
434 return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
435 }
436 /// Applies action \a Action on all registered entries.
437 typedef function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)>
441
442private:
443 /// Return the count of entries at a particular source location.
444 unsigned
445 getTargetRegionEntryInfoCount(const TargetRegionEntryInfo &EntryInfo) const;
446
447 /// Update the count of entries at a particular source location.
448 void
449 incrementTargetRegionEntryInfoCount(const TargetRegionEntryInfo &EntryInfo);
450
451 static TargetRegionEntryInfo
452 getTargetRegionEntryCountKey(const TargetRegionEntryInfo &EntryInfo) {
453 return TargetRegionEntryInfo(EntryInfo.ParentName, EntryInfo.DeviceID,
454 EntryInfo.FileID, EntryInfo.Line, 0);
455 }
456
457 // Count of entries at a location.
458 std::map<TargetRegionEntryInfo, unsigned> OffloadEntriesTargetRegionCount;
459
460 // Storage for target region entries kind.
461 typedef std::map<TargetRegionEntryInfo, OffloadEntryInfoTargetRegion>
462 OffloadEntriesTargetRegionTy;
463 OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
464 /// Storage for device global variable entries kind. The storage is to be
465 /// indexed by mangled name.
467 OffloadEntriesDeviceGlobalVarTy;
468 OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
469};
470
471/// An interface to create LLVM-IR for OpenMP directives.
472///
473/// Each OpenMP directive has a corresponding public generator method.
474class OpenMPIRBuilder {
475public:
476 /// Create a new OpenMPIRBuilder operating on the given module \p M. This will
477 /// not have an effect on \p M (see initialize()).
478 OpenMPIRBuilder(Module &M)
479 : M(M), Builder(M.getContext()), OffloadInfoManager(this),
480 T(Triple(M.getTargetTriple())) {}
481 ~OpenMPIRBuilder();
482
483 class AtomicInfo : public llvm::AtomicInfo<IRBuilder<>> {
484 llvm::Value *AtomicVar;
485
486 public:
493 AtomicVar(AtomicVar) {}
494
495 llvm::Value *getAtomicPointer() const override { return AtomicVar; }
498 const llvm::Twine &Name) const override {
499 llvm::AllocaInst *allocaInst = Builder->CreateAlloca(Ty);
500 allocaInst->setName(Name);
501 return allocaInst;
502 }
503 };
504 /// Initialize the internal state; this will put structure types and
505 /// potentially other helpers into the underlying module. Must be called
506 /// before any other method and only once! This internal state includes types
507 /// used in the OpenMPIRBuilder generated from OMPKinds.def.
508 void initialize();
509
511
512 /// Finalize the underlying module, e.g., by outlining regions.
513 /// \param Fn The function to be finalized. If not used,
514 /// all functions are finalized.
515 void finalize(Function *Fn = nullptr);
516
517 /// Add attributes known for \p FnID to \p Fn.
519
520 /// Type used throughout for insertion points.
521 using InsertPointTy = IRBuilder<>::InsertPoint;
522
523 /// Type used to represent an insertion point or an error value.
524 using InsertPointOrErrorTy = Expected<InsertPointTy>;
525
526 /// Create a name using the platform-specific separators.
527 /// \param Parts parts of the final name that need separation
528 /// The created name has a first separator between the first and second part
529 /// and a second separator between all other parts.
530 /// E.g. with FirstSeparator "$" and Separator "." and
531 /// parts: "p1", "p2", "p3", "p4"
532 /// The resulting name is "p1$p2.p3.p4"
533 /// The separators are retrieved from the OpenMPIRBuilderConfig.
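///
/// A minimal sketch (illustrative only; \p OMPBuilder is an assumption):
/// \code
///   // With FirstSeparator "$" and Separator "." this yields "p1$p2.p3.p4".
///   std::string Name =
///       OMPBuilder.createPlatformSpecificName({"p1", "p2", "p3", "p4"});
/// \endcode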
534 std::string createPlatformSpecificName(ArrayRef<StringRef> Parts) const;
535
536 /// Callback type for variable finalization (think destructors).
537 ///
538 /// \param CodeGenIP is the insertion point at which the finalization code
539 /// should be placed.
540 ///
541 /// A finalize callback knows about all objects that need finalization, e.g.
542 /// destruction, when the scope of the currently generated construct is left
543 /// at the time, and location, the callback is invoked.
544 using FinalizeCallbackTy = std::function<Error(InsertPointTy CodeGenIP)>;
545
546 struct FinalizationInfo {
547 /// The finalization callback provided by the last in-flight invocation of
548 /// createXXXX for the directive of kind DK.
549 FinalizeCallbackTy FiniCB;
550
551 /// The directive kind of the innermost directive that has an associated
552 /// region which might require finalization when it is left.
553 omp::Directive DK;
554
555 /// Flag to indicate if the directive is cancellable.
556 bool IsCancellable;
557 };
558
559 /// Push a finalization callback on the finalization stack.
560 ///
561 /// NOTE: Temporary solution until Clang CG is gone.
562 void pushFinalizationCB(const FinalizationInfo &FI) {
563 FinalizationStack.push_back(FI);
564 }
565
566 /// Pop the last finalization callback from the finalization stack.
567 ///
568 /// NOTE: Temporary solution until Clang CG is gone.
569 void popFinalizationCB() { FinalizationStack.pop_back(); }
570
571 /// Callback type for body (=inner region) code generation
572 ///
573 /// The callback takes code locations as arguments, each describing a
574 /// location where additional instructions can be inserted.
575 ///
576 /// The CodeGenIP may be in the middle of a basic block or point to the end of
577 /// it. The basic block may have a terminator or be degenerate. The callback
578 /// function may just insert instructions at that position, but also split the
579 /// block (without the Before argument of BasicBlock::splitBasicBlock such
580 /// that the identity of the split predecessor block is preserved) and insert
581 /// additional control flow, including branches that do not lead back to what
582 /// follows the CodeGenIP. Note that since the callback is allowed to split
583 /// the block, callers must assume that InsertPoints to positions in the
584 /// BasicBlock after CodeGenIP including CodeGenIP itself are invalidated. If
585 /// such InsertPoints need to be preserved, it can split the block itself
586 /// before calling the callback.
587 ///
588 /// AllocaIP and CodeGenIP must not point to the same position.
589 ///
590 /// \param AllocaIP is the insertion point at which new alloca instructions
591 /// should be placed. The BasicBlock it is pointing to must
592 /// not be split.
593 /// \param CodeGenIP is the insertion point at which the body code should be
594 /// placed.
595 ///
596 /// \return an error, if any were triggered during execution.
597 using BodyGenCallbackTy =
598 function_ref<Error(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;
599
600 // This is created primarily for the sections construct, as llvm::function_ref
601 // (BodyGenCallbackTy) is not storable (as described in the comments of the
602 // function_ref class): function_ref contains a non-owning reference
603 // to the callable.
604 ///
605 /// \return an error, if any were triggered during execution.
606 using StorableBodyGenCallbackTy =
607 std::function<Error(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;
608
609 /// Callback type for loop body code generation.
610 ///
611 /// \param CodeGenIP is the insertion point where the loop's body code must be
612 /// placed. This will be a dedicated BasicBlock with a
613 /// conditional branch from the loop condition check and
614 /// terminated with an unconditional branch to the loop
615 /// latch.
616 /// \param IndVar is the induction variable usable at the insertion point.
617 ///
618 /// \return an error, if any were triggered during execution.
619 using LoopBodyGenCallbackTy =
620 function_ref<Error(InsertPointTy CodeGenIP, Value *IndVar)>;
621
622 /// Callback type for variable privatization (think copy & default
623 /// constructor).
624 ///
625 /// \param AllocaIP is the insertion point at which new alloca instructions
626 /// should be placed.
627 /// \param CodeGenIP is the insertion point at which the privatization code
628 /// should be placed.
629 /// \param Original The value being copied/created, should not be used in the
630 /// generated IR.
631 /// \param Inner The equivalent of \p Original that should be used in the
632 /// generated IR; this is equal to \p Original if the value is
633 /// a pointer and can thus be passed directly, otherwise it is
634 /// an equivalent but different value.
635 /// \param ReplVal The replacement value, thus a copy or new created version
636 /// of \p Inner.
637 ///
638 /// \returns The new insertion point where code generation continues and
639 /// \p ReplVal the replacement value.
640 using PrivatizeCallbackTy = function_ref<InsertPointTy(
641 InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
642 Value &Inner, Value *&ReplVal)>;
643
644 /// Description of an LLVM-IR insertion point (IP) and a debug/source location
645 /// (filename, line, column, ...).
646 struct LocationDescription {
647 LocationDescription(const IRBuilderBase &IRB)
648 : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
649 LocationDescription(const InsertPointTy &IP) : IP(IP) {}
650 LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
651 : IP(IP), DL(DL) {}
652 InsertPointTy IP;
653 DebugLoc DL;
654 };
655
656 /// Emitter methods for OpenMP directives.
657 ///
658 ///{
659
660 /// Generator for '#omp barrier'
661 ///
662 /// \param Loc The location where the barrier directive was encountered.
663 /// \param Kind The kind of directive that caused the barrier.
664 /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
665 /// \param CheckCancelFlag Flag to indicate a cancel barrier return value
666 /// should be checked and acted upon.
667 /// \param ThreadID Optional parameter to pass in any existing ThreadID value.
668 ///
669 /// \returns The insertion point after the barrier.
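///
/// A minimal usage sketch (illustrative only; \p OMPBuilder and \p Builder
/// are assumptions, and error handling follows the Expected-style return
/// used throughout this builder):
/// \code
///   OpenMPIRBuilder::LocationDescription Loc(Builder);
///   OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
///       OMPBuilder.createBarrier(Loc, omp::Directive::OMPD_barrier);
///   if (!AfterIP)
///     return AfterIP.takeError();
///   Builder.restoreIP(*AfterIP);
/// \endcode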
670 InsertPointOrErrorTy createBarrier(const LocationDescription &Loc,
671 omp::Directive Kind,
672 bool ForceSimpleCall = false,
673 bool CheckCancelFlag = true);
674
675 /// Generator for '#omp cancel'
676 ///
677 /// \param Loc The location where the directive was encountered.
678 /// \param IfCondition The evaluated 'if' clause expression, if any.
679 /// \param CanceledDirective The kind of directive that is canceled.
680 ///
681 /// \returns The insertion point after the barrier.
682 InsertPointOrErrorTy createCancel(const LocationDescription &Loc,
683 Value *IfCondition,
684 omp::Directive CanceledDirective);
685
686 /// Generator for '#omp parallel'
687 ///
688 /// \param Loc The insert and source location description.
689 /// \param AllocaIP The insertion points to be used for alloca instructions.
690 /// \param BodyGenCB Callback that will generate the region code.
691 /// \param PrivCB Callback to copy a given variable (think copy constructor).
692 /// \param FiniCB Callback to finalize variable copies.
693 /// \param IfCondition The evaluated 'if' clause expression, if any.
694 /// \param NumThreads The evaluated 'num_threads' clause expression, if any.
695 /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
696 /// \param IsCancellable Flag to indicate a cancellable parallel region.
697 ///
698 /// \returns The insertion position *after* the parallel.
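///
/// A minimal usage sketch (illustrative only; \p Loc, \p AllocaIP and the
/// callbacks are assumptions about what a frontend might pass):
/// \code
///   auto BodyGenCB = [&](InsertPointTy AllocaIP,
///                        InsertPointTy CodeGenIP) -> Error {
///     Builder.restoreIP(CodeGenIP);
///     // ... emit the parallel region body here ...
///     return Error::success();
///   };
///   auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
///                     Value &Orig, Value &Inner,
///                     Value *&ReplVal) -> InsertPointTy {
///     ReplVal = &Inner; // no privatization performed in this sketch
///     return CodeGenIP;
///   };
///   auto FiniCB = [&](InsertPointTy CodeGenIP) -> Error {
///     return Error::success();
///   };
///   InsertPointOrErrorTy AfterIP = OMPBuilder.createParallel(
///       Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, /*IfCondition=*/nullptr,
///       /*NumThreads=*/nullptr, omp::ProcBindKind::OMP_PROC_BIND_default,
///       /*IsCancellable=*/false);
/// \endcode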
699 InsertPointOrErrorTy
700 createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
701 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
702 FinalizeCallbackTy FiniCB, Value *IfCondition,
703 Value *NumThreads, omp::ProcBindKind ProcBind,
704 bool IsCancellable);
705
706 /// Generator for the control flow structure of an OpenMP canonical loop.
707 ///
708 /// This generator operates on the logical iteration space of the loop, i.e.
709 /// the caller only has to provide a loop trip count of the loop as defined by
710 /// base language semantics. The trip count is interpreted as an unsigned
711 /// integer. The induction variable passed to \p BodyGenCB will be of the same
712 /// type and run from 0 to \p TripCount - 1. It is up to the callback to
713 /// convert the logical iteration variable to the loop counter variable in the
714 /// loop body.
715 ///
716 /// \param Loc The insert and source location description. The insert
717 /// location can be between two instructions or the end of a
718 /// degenerate block (e.g. a BB under construction).
719 /// \param BodyGenCB Callback that will generate the loop body code.
720 /// \param TripCount Number of iterations the loop body is executed.
721 /// \param Name Base name used to derive BB and instruction names.
722 ///
723 /// \returns An object representing the created control flow structure which
724 /// can be used for loop-associated directives.
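///
/// A minimal usage sketch (illustrative only; \p TripCount is an integer
/// Value provided by the caller, and error handling follows the
/// Expected-style return used throughout this builder):
/// \code
///   auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IV) -> Error {
///     Builder.restoreIP(CodeGenIP);
///     // ... use IV, the logical iteration number, to emit the body ...
///     return Error::success();
///   };
///   auto LoopInfo = OMPBuilder.createCanonicalLoop(Loc, LoopBodyGenCB,
///                                                  TripCount);
///   if (!LoopInfo)
///     return LoopInfo.takeError();
///   CanonicalLoopInfo *CLI = *LoopInfo;
/// \endcode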
725 Expected<CanonicalLoopInfo *>
726 createCanonicalLoop(const LocationDescription &Loc,
727 LoopBodyGenCallbackTy BodyGenCB, Value *TripCount,
728 const Twine &Name = "loop");
729
730 /// Generator for the control flow structure of an OpenMP canonical loop.
731 ///
732 /// Instead of a logical iteration space, this allows specifying user-defined
733 /// loop counter values using increment, upper- and lower bounds. To
734 /// disambiguate the terminology when counting downwards, instead of lower
735 /// bounds we use \p Start for the loop counter value in the first body
736 /// iteration.
737 ///
738 /// Consider the following limitations:
739 ///
740 /// * A loop counter space over all integer values of its bit-width cannot be
741 /// represented (e.g. using uint8_t, a loop trip count of 256 cannot be
742 /// stored into an 8-bit integer):
743 ///
744 /// DO I = 0, 255, 1
745 ///
746 /// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
747 /// effectively counting downwards:
748 ///
749 /// for (uint8_t i = 100u; i > 0; i += 127u)
750 ///
751 ///
752 /// TODO: May need to add additional parameters to represent:
753 ///
754 /// * Allow representing downcounting with unsigned integers.
755 ///
756 /// * Sign of the step and the comparison operator might disagree:
757 ///
758 /// for (int i = 0; i < 42; i -= 1u)
759 ///
760 //
761 /// \param Loc The insert and source location description.
762 /// \param BodyGenCB Callback that will generate the loop body code.
763 /// \param Start Value of the loop counter for the first iteration.
764 /// \param Stop Loop counter values past this will stop the loop.
765 /// \param Step Loop counter increment after each iteration; negative
766 /// means counting down.
767 /// \param IsSigned Whether Start, Stop and Step are signed integers.
768 /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
769 /// counter.
770 /// \param ComputeIP Insertion point for instructions computing the trip
771 /// count. Can be used to ensure the trip count is available
772 /// at the outermost loop of a loop nest. If not set,
773 /// defaults to the preheader of the generated loop.
774 /// \param Name Base name used to derive BB and instruction names.
775 ///
776 /// \returns An object representing the created control flow structure which
777 /// can be used for loop-associated directives.
778 Expected<CanonicalLoopInfo *> createCanonicalLoop(
779 const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
780 Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
781 InsertPointTy ComputeIP = {}, const Twine &Name = "loop");
782
783 /// Collapse a loop nest into a single loop.
784 ///
785 /// Merges loops of a loop nest into a single CanonicalLoopNest representation
786 /// that has the same number of innermost loop iterations as the origin loop
787 /// nest. The induction variables of the input loops are derived from the
788 /// collapsed loop's induction variable. This is intended to be used to
789 /// implement OpenMP's collapse clause. Before applying a directive,
790 /// collapseLoops normalizes a loop nest to contain only a single loop and the
791 /// directive's implementation does not need to handle multiple loops itself.
792 /// This does not remove the need for directives to handle loop-nest aspects
793 /// themselves, such as the ordered(<n>) clause or the simd schedule-clause
794 /// modifier of the worksharing-loop directive.
795 ///
796 /// Example:
797 /// \code
798 /// for (int i = 0; i < 7; ++i) // Canonical loop "i"
799 /// for (int j = 0; j < 9; ++j) // Canonical loop "j"
800 /// body(i, j);
801 /// \endcode
802 ///
803 /// After collapsing with Loops={i,j}, the loop is changed to
804 /// \code
805 /// for (int ij = 0; ij < 63; ++ij) {
806 /// int i = ij / 9;
807 /// int j = ij % 9;
808 /// body(i, j);
809 /// }
810 /// \endcode
811 ///
812 /// In the current implementation, the following limitations apply:
813 ///
814 /// * All input loops have an induction variable of the same type.
815 ///
816 /// * The collapsed loop will have the same trip count integer type as the
817 /// input loops. Therefore it is possible that the collapsed loop cannot
818 /// represent all iterations of the input loops. For instance, assuming a
819 /// 32 bit integer type, and two input loops both iterating 2^16 times, the
820 /// theoretical trip count of the collapsed loop would be 2^32 iterations,
821 /// which cannot be represented in a 32-bit integer. Behavior is undefined
822 /// in this case.
823 ///
824 /// * The trip counts of every input loop must be available at \p ComputeIP.
825 /// Non-rectangular loops are not yet supported.
826 ///
827 /// * At each nest level, code between a surrounding loop and its nested loop
828 /// is hoisted into the loop body, and such code will be executed more
829 /// often than before collapsing (or not at all if any inner loop iteration
830 /// has a trip count of 0). This is permitted by the OpenMP specification.
831 ///
832 /// \param DL Debug location for instructions added for collapsing,
833 /// such as instructions to compute/derive the input loop's
834 /// induction variables.
835 /// \param Loops Loops in the loop nest to collapse. Loops are specified
836 /// from outermost-to-innermost and every control flow of a
837 /// loop's body must pass through its directly nested loop.
838 /// \param ComputeIP Where additional instructions that compute the collapsed
839 /// trip count are inserted. If not set, defaults to before the generated
840 /// loop.
841 ///
842 /// \returns The CanonicalLoopInfo object representing the collapsed loop.
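///
/// A minimal call sketch (illustrative only; \p OuterLoop and \p InnerLoop
/// are CanonicalLoopInfo objects of a perfect loop nest, and the default
/// insertion point is used for the trip-count computation):
/// \code
///   CanonicalLoopInfo *Collapsed =
///       OMPBuilder.collapseLoops(DL, {OuterLoop, InnerLoop}, {});
/// \endcode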
843 CanonicalLoopInfo *
844 collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
845 InsertPointTy ComputeIP);
846
847 /// Get the default alignment value for given target
848 ///
849 /// \param TargetTriple Target triple
850 /// \param Features StringMap which describes extra CPU features
851 static unsigned getOpenMPDefaultSimdAlign(const Triple &TargetTriple,
852 const StringMap<bool> &Features);
853
854 /// Retrieve (or create if non-existent) the address of a declare
855 /// target variable, used in conjunction with registerTargetGlobalVariable
856 /// to create declare target global variables.
857 ///
858 /// \param CaptureClause - enumerator corresponding to the OpenMP capture
859 /// clause used in conjunction with the variable being registered (link,
860 /// to, enter).
861 /// \param DeviceClause - enumerator corresponding to the OpenMP capture
862 /// clause used in conjunction with the variable being registered (nohost,
863 /// host, any)
864 /// \param IsDeclaration - boolean stating if the variable being registered
865 /// is a declaration-only and not a definition
866 /// \param IsExternallyVisible - boolean stating if the variable is externally
867 /// visible
868 /// \param EntryInfo - Unique entry information for the value generated
869 /// using getTargetEntryUniqueInfo, used to name generated pointer references
870 /// to the declare target variable
871 /// \param MangledName - the mangled name of the variable being registered
872 /// \param GeneratedRefs - references generated by invocations of
873 /// registerTargetGlobalVariable invoked from getAddrOfDeclareTargetVar,
874 /// these are required by Clang for book keeping.
875 /// \param OpenMPSIMD - if OpenMP SIMD mode is currently enabled
876 /// \param TargetTriple - The OpenMP device target triple we are compiling
877 /// for
878 /// \param LlvmPtrTy - The type of the variable we are generating or
879 /// retrieving an address for
880 /// \param GlobalInitializer - a lambda function which creates a constant
881 /// used for initializing a pointer reference to the variable in certain
882 /// cases. If a nullptr is passed, it will default to utilising the original
883 /// variable to initialize the pointer reference.
884 /// \param VariableLinkage - a lambda function which returns the variables
885 /// linkage type, if unspecified and a nullptr is given, it will instead
886 /// utilise the linkage stored on the existing global variable in the
887 /// LLVMModule.
891 bool IsDeclaration, bool IsExternallyVisible,
892 TargetRegionEntryInfo EntryInfo, StringRef MangledName,
893 std::vector<GlobalVariable *> &GeneratedRefs, bool OpenMPSIMD,
894 std::vector<Triple> TargetTriple, Type *LlvmPtrTy,
895 std::function<Constant *()> GlobalInitializer,
896 std::function<GlobalValue::LinkageTypes()> VariableLinkage);
897
898 /// Registers a target variable for device or host.
899 ///
900 /// \param CaptureClause - enumerator corresponding to the OpenMP capture
901 /// clause used in conjunction with the variable being registered (link,
902 /// to, enter).
903 /// \param DeviceClause - enumerator corresponding to the OpenMP capture
904 /// clause used in conjunction with the variable being registered (nohost,
905 /// host, any)
906 /// \param IsDeclaration - boolean stating if the variable being registered
907 /// is a declaration-only and not a definition
908 /// \param IsExternallyVisible - boolean stating if the variable is externally
909 /// visible
910 /// \param EntryInfo - Unique entry information for the value generated
911 /// using getTargetEntryUniqueInfo, used to name generated pointer references
912 /// to the declare target variable
913 /// \param MangledName - the mangled name of the variable being registered
914 /// \param GeneratedRefs - references generated by invocations of
915 /// registerTargetGlobalVariable these are required by Clang for book
916 /// keeping.
917 /// \param OpenMPSIMD - if OpenMP SIMD mode is currently enabled
918 /// \param TargetTriple - The OpenMP device target triple we are compiling
919 /// for
920 /// \param GlobalInitializer - a lambda function which creates a constant
921 /// used for initializing a pointer reference to the variable in certain
922 /// cases. If a nullptr is passed, it will default to utilising the original
923 /// variable to initialize the pointer reference.
924 /// \param VariableLinkage - a lambda function which returns the variables
925 /// linkage type, if unspecified and a nullptr is given, it will instead
926 /// utilise the linkage stored on the existing global variable in the
927 /// LLVMModule.
928 /// \param LlvmPtrTy - The type of the variable we are generating or
929 /// retrieving an address for
930 /// \param Addr - the original llvm value (addr) of the variable to be
931 /// registered
935 bool IsDeclaration, bool IsExternallyVisible,
936 TargetRegionEntryInfo EntryInfo, StringRef MangledName,
937 std::vector<GlobalVariable *> &GeneratedRefs, bool OpenMPSIMD,
938 std::vector<Triple> TargetTriple,
939 std::function<Constant *()> GlobalInitializer,
940 std::function<GlobalValue::LinkageTypes()> VariableLinkage,
941 Type *LlvmPtrTy, Constant *Addr);
942
943 /// Get the offset of the OMP_MAP_MEMBER_OF field.
944 unsigned getFlagMemberOffset();
945
946 /// Get OMP_MAP_MEMBER_OF flag with extra bits reserved based on
947 /// the position given.
948 /// \param Position - A value indicating the position of the parent
949 /// of the member in the kernel argument structure, often retrieved
950 /// by the parent's position in the combined information vectors used
951 /// to generate the structure itself. Multiple children (members of)
952 /// with the same parent will use the same returned member flag.
953 omp::OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position);
954
955 /// Given an initial flag set, this function modifies it to contain
956 /// the passed in MemberOfFlag generated from the getMemberOfFlag
957 /// function. The results are dependent on the existing flag bits
958 /// set in the original flag set.
959 /// \param Flags - The original set of flags to be modified with the
960 /// passed in MemberOfFlag.
961 /// \param MemberOfFlag - A modified OMP_MAP_MEMBER_OF flag, adjusted
962 /// slightly based on the getMemberOfFlag which adjusts the flag bits
963 /// based on the member's position in its parent.
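///
/// A minimal usage sketch (illustrative only; the base flag chosen here is
/// arbitrary):
/// \code
///   omp::OpenMPOffloadMappingFlags Flags =
///       omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
///   omp::OpenMPOffloadMappingFlags MemberFlag =
///       OMPBuilder.getMemberOfFlag(/*Position=*/0);
///   OMPBuilder.setCorrectMemberOfFlag(Flags, MemberFlag);
/// \endcode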
964 void setCorrectMemberOfFlag(omp::OpenMPOffloadMappingFlags &Flags,
965 omp::OpenMPOffloadMappingFlags MemberOfFlag);
966
967private:
968 /// Modifies the canonical loop to be a statically-scheduled workshare loop
969 /// which is executed on the device
970 ///
971 /// This takes a \p CLI representing a canonical loop, such as the one
972 /// created by \see createCanonicalLoop and emits additional instructions to
973 /// turn it into a workshare loop. In particular, it calls to an OpenMP
974 /// runtime function in the preheader that calls the OpenMP device RTL
975 /// function which handles worksharing of the loop body iterations.
976 ///
977 /// \param DL Debug location for instructions added for the
978 /// workshare-loop construct itself.
979 /// \param CLI A descriptor of the canonical loop to workshare.
980 /// \param AllocaIP An insertion point for Alloca instructions usable in the
981 /// preheader of the loop.
982 /// \param LoopType Information about type of loop worksharing.
983 /// It corresponds to type of loop workshare OpenMP pragma.
984 ///
985 /// \returns Point where to insert code after the workshare construct.
986 InsertPointTy applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI,
987 InsertPointTy AllocaIP,
988 omp::WorksharingLoopType LoopType);
989
990 /// Modifies the canonical loop to be a statically-scheduled workshare loop.
991 ///
992 /// This takes a \p LoopInfo representing a canonical loop, such as the one
993 /// created by \p createCanonicalLoop and emits additional instructions to
994 /// turn it into a workshare loop. In particular, it calls to an OpenMP
995 /// runtime function in the preheader to obtain the loop bounds to be used in
996 /// the current thread, updates the relevant instructions in the canonical
997 /// loop and calls to an OpenMP runtime finalization function after the loop.
998 ///
999 /// \param DL Debug location for instructions added for the
1000 /// workshare-loop construct itself.
1001 /// \param CLI A descriptor of the canonical loop to workshare.
1002 /// \param AllocaIP An insertion point for Alloca instructions usable in the
1003 /// preheader of the loop.
1004 /// \param NeedsBarrier Indicates whether a barrier must be inserted after
1005 /// the loop.
1006 ///
1007 /// \returns Point where to insert code after the workshare construct.
1008 InsertPointOrErrorTy applyStaticWorkshareLoop(DebugLoc DL,
1009 CanonicalLoopInfo *CLI,
1010 InsertPointTy AllocaIP,
1011 bool NeedsBarrier);
1012
1013 /// Modifies the canonical loop to be a statically-scheduled workshare loop with a
1014 /// user-specified chunk size.
1015 ///
1016 /// \param DL Debug location for instructions added for the
1017 /// workshare-loop construct itself.
1018 /// \param CLI A descriptor of the canonical loop to workshare.
1019 /// \param AllocaIP An insertion point for Alloca instructions usable in
1020 /// the preheader of the loop.
1021 /// \param NeedsBarrier Indicates whether a barrier must be inserted after the
1022 /// loop.
1023 /// \param ChunkSize The user-specified chunk size.
1024 ///
1025 /// \returns Point where to insert code after the workshare construct.
1026 InsertPointOrErrorTy applyStaticChunkedWorkshareLoop(DebugLoc DL,
1027 CanonicalLoopInfo *CLI,
1028 InsertPointTy AllocaIP,
1029 bool NeedsBarrier,
1030 Value *ChunkSize);
1031
1032 /// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
1033 ///
1034 /// This takes a \p LoopInfo representing a canonical loop, such as the one
1035 /// created by \p createCanonicalLoop and emits additional instructions to
1036 /// turn it into a workshare loop. In particular, it calls to an OpenMP
1037 /// runtime function in the preheader to obtain, and then in each iteration
1038 /// to update the loop counter.
1039 ///
1040 /// \param DL Debug location for instructions added for the
1041 /// workshare-loop construct itself.
1042 /// \param CLI A descriptor of the canonical loop to workshare.
1043 /// \param AllocaIP An insertion point for Alloca instructions usable in the
1044 /// preheader of the loop.
1045 /// \param SchedType Type of scheduling to be passed to the init function.
1046 /// \param NeedsBarrier Indicates whether a barrier must be inserted after
1047 /// the loop.
1048 /// \param Chunk The size of loop chunk considered as a unit when
1049 /// scheduling. If \p nullptr, defaults to 1.
1050 ///
1051 /// \returns Point where to insert code after the workshare construct.
1052 InsertPointOrErrorTy applyDynamicWorkshareLoop(DebugLoc DL,
1053 CanonicalLoopInfo *CLI,
1054 InsertPointTy AllocaIP,
1055 omp::OMPScheduleType SchedType,
1056 bool NeedsBarrier,
1057 Value *Chunk = nullptr);
1058
1059 /// Create alternative version of the loop to support if clause
1060 ///
1061 /// The OpenMP if clause can require generating a second loop. This loop
1062 /// will be executed when the if clause condition is not met. createIfVersion
1063 /// adds a branch instruction to the copied loop if \p IfCond is not met.
1064 ///
1065 /// \param Loop Original loop which should be versioned.
1066 /// \param IfCond Value which corresponds to if clause condition
1067 /// \param VMap Value to value map to define relation between
1068 /// original and copied loop values and loop blocks.
1069 /// \param NamePrefix Optional name prefix for if.then and if.else blocks.
1070 void createIfVersion(CanonicalLoopInfo *Loop, Value *IfCond,
1071 ValueToValueMapTy &VMap, const Twine &NamePrefix = "");
1072
1073public:
1074 /// Modifies the canonical loop to be a workshare loop.
1075 ///
1076 /// This takes a \p LoopInfo representing a canonical loop, such as the one
1077 /// created by \p createCanonicalLoop and emits additional instructions to
1078 /// turn it into a workshare loop. In particular, it calls to an OpenMP
1079 /// runtime function in the preheader to obtain the loop bounds to be used in
1080 /// the current thread, updates the relevant instructions in the canonical
1081 /// loop and calls to an OpenMP runtime finalization function after the loop.
1082 ///
1083 /// The concrete transformation is done by applyStaticWorkshareLoop,
1084 /// applyStaticChunkedWorkshareLoop, or applyDynamicWorkshareLoop, depending
1085 /// on the value of \p SchedKind and \p ChunkSize.
1086 ///
1087 /// \param DL Debug location for instructions added for the
1088 /// workshare-loop construct itself.
1089 /// \param CLI A descriptor of the canonical loop to workshare.
1090 /// \param AllocaIP An insertion point for Alloca instructions usable in the
1091 /// preheader of the loop.
1092 /// \param NeedsBarrier Indicates whether a barrier must be inserted after
1093 /// the loop.
1094 /// \param SchedKind Scheduling algorithm to use.
1095 /// \param ChunkSize The chunk size for the inner loop.
1096 /// \param HasSimdModifier Whether the simd modifier is present in the
1097 /// schedule clause.
1098 /// \param HasMonotonicModifier Whether the monotonic modifier is present in
1099 /// the schedule clause.
1100 /// \param HasNonmonotonicModifier Whether the nonmonotonic modifier is
1101 /// present in the schedule clause.
1102 /// \param HasOrderedClause Whether the (parameterless) ordered clause is
1103 /// present.
1104 /// \param LoopType Information about type of loop worksharing.
1105 /// It corresponds to type of loop workshare OpenMP pragma.
1106 ///
1107 /// \returns Point where to insert code after the workshare construct.
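///
/// A minimal usage sketch (illustrative only; \p CLI is a loop previously
/// created with createCanonicalLoop, and the default schedule is used):
/// \code
///   InsertPointOrErrorTy AfterIP = OMPBuilder.applyWorkshareLoop(
///       DL, CLI, AllocaIP, /*NeedsBarrier=*/true);
///   if (!AfterIP)
///     return AfterIP.takeError();
/// \endcode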
1108 InsertPointOrErrorTy applyWorkshareLoop(
1109 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
1110 bool NeedsBarrier,
1111 llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default,
1112 Value *ChunkSize = nullptr, bool HasSimdModifier = false,
1113 bool HasMonotonicModifier = false, bool HasNonmonotonicModifier = false,
1114 bool HasOrderedClause = false,
1115 omp::WorksharingLoopType LoopType =
1116 omp::WorksharingLoopType::ForStaticLoop);
1117
1118 /// Tile a loop nest.
1119 ///
1120 /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
1121 /// \p Loops must be perfectly nested, from outermost to innermost loop
1122 /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
1123 /// of every loop and every tile sizes must be usable in the outermost
1124 /// loop's preheader. This implies that the loop nest is rectangular.
1125 ///
1126 /// Example:
1127 /// \code
1128 /// for (int i = 0; i < 15; ++i) // Canonical loop "i"
1129 /// for (int j = 0; j < 14; ++j) // Canonical loop "j"
1130 /// body(i, j);
1131 /// \endcode
1132 ///
1133 /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
1134 /// \code
1135 /// for (int i1 = 0; i1 < 3; ++i1)
1136 /// for (int j1 = 0; j1 < 2; ++j1)
1137 /// for (int i2 = 0; i2 < 5; ++i2)
1138 /// for (int j2 = 0; j2 < 7; ++j2)
1139 /// body(i1*5+i2, j1*7+j2);
1140 /// \endcode
1141 ///
1142 /// The returned vector contains the loops {i1,j1,i2,j2}. The loops i1 and j1
1143 /// are referred to as the floor loops, and i2 and j2 are the tile loops. Tiling also
1144 /// handles non-constant trip counts, non-constant tile sizes and trip counts
1145 /// that are not multiples of the tile size. In the latter case the tile loop
1146 /// of the last floor-loop iteration will have fewer iterations than specified
1147 /// as its tile size.
1148 ///
1149 ///
1150 /// @param DL Debug location for instructions added by tiling, for
1151 /// instance the floor- and tile trip count computation.
1152 /// @param Loops Loops to tile. The CanonicalLoopInfo objects are
1153 /// invalidated by this method, i.e. should not be used after
1154 /// tiling.
1155 /// @param TileSizes For each loop in \p Loops, the tile size for that
1156 /// dimension.
1157 ///
1158 /// \returns A list of generated loops. Contains twice as many loops as the
1159 /// input loop nest; the first half are the floor loops and the
1160 /// second half are the tile loops.
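///
/// A minimal call sketch (illustrative only; the tile sizes are arbitrary):
/// \code
///   Value *TileSize = Builder.getInt32(32);
///   std::vector<CanonicalLoopInfo *> Tiled =
///       OMPBuilder.tileLoops(DL, {LoopI, LoopJ}, {TileSize, TileSize});
///   // Tiled contains {FloorI, FloorJ, TileI, TileJ}.
/// \endcode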
1161 std::vector<CanonicalLoopInfo *>
1162 tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
1163 ArrayRef<Value *> TileSizes);
1164
1165 /// Fully unroll a loop.
1166 ///
1167 /// Instead of unrolling the loop immediately (and duplicating its body
1168 /// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
1169 /// metadata.
1170 ///
1171 /// \param DL Debug location for instructions added by unrolling.
1172 /// \param Loop The loop to unroll. The loop will be invalidated.
1173 void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);
1174
1175 /// Fully or partially unroll a loop. How the loop is unrolled is determined
1176 /// using LLVM's LoopUnrollPass.
1177 ///
1178 /// \param DL Debug location for instructions added by unrolling.
1179 /// \param Loop The loop to unroll. The loop will be invalidated.
1180 void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);
1181
1182 /// Partially unroll a loop.
1183 ///
1184 /// The CanonicalLoopInfo of the unrolled loop for use with chained
1185 /// loop-associated directive can be requested using \p UnrolledCLI. Not
1186 /// needing the CanonicalLoopInfo allows more efficient code generation by
1187 /// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
1188 /// A loop-associated directive applied to the unrolled loop needs to know the
1189 /// new trip count which means that if using a heuristically determined unroll
1190 /// factor (\p Factor == 0), that factor must be computed immediately. We are
1191 /// using the same logic as the LoopUnrollPass to derive the unroll factor,
1192 /// but which assumes that some canonicalization has taken place (e.g.
1193 /// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
1194 /// better when the unrolled loop's CanonicalLoopInfo is not needed.
1195 ///
1196 /// \param DL Debug location for instructions added by unrolling.
1197 /// \param Loop The loop to unroll. The loop will be invalidated.
1198 /// \param Factor The factor to unroll the loop by. A factor of 0
1199 /// indicates that a heuristic should be used to determine
1200 /// the unroll-factor.
1201 /// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
1202 /// partially unrolled loop. Otherwise, uses loop metadata
1203 /// to defer unrolling to the LoopUnrollPass.
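///
/// A minimal call sketch (illustrative only):
/// \code
///   CanonicalLoopInfo *Unrolled = nullptr;
///   OMPBuilder.unrollLoopPartial(DL, Loop, /*Factor=*/4, &Unrolled);
/// \endcode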
1204 void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
1205 CanonicalLoopInfo **UnrolledCLI);
1206
1207 /// Add metadata to simd-ize a loop. If IfCond is not nullptr, the loop
1208 /// is cloned. The metadata which prevents vectorization is added to
1209 /// the cloned loop. The cloned loop is executed when ifCond is evaluated
1210 /// to false.
1211 ///
1212 /// \param Loop The loop to simd-ize.
1213 /// \param AlignedVars The map which contains pairs of the pointer
1214 /// and its corresponding alignment.
1215 /// \param IfCond The value which corresponds to the if clause
1216 /// condition.
1217 /// \param Order The enum to map order clause.
1218 /// \param Simdlen The Simdlen length to apply to the simd loop.
1219 /// \param Safelen The Safelen length to apply to the simd loop.
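///
/// A minimal call sketch (illustrative only; no if clause, no alignment
/// hints, and the order-clause value shown is an assumption):
/// \code
///   OMPBuilder.applySimd(CLI, /*AlignedVars=*/{}, /*IfCond=*/nullptr,
///                        omp::OrderKind::OMP_ORDER_unknown,
///                        /*Simdlen=*/nullptr, /*Safelen=*/nullptr);
/// \endcode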
1220 void applySimd(CanonicalLoopInfo *Loop,
1221 MapVector<Value *, Value *> AlignedVars, Value *IfCond,
1222 omp::OrderKind Order, ConstantInt *Simdlen,
1223 ConstantInt *Safelen);
1224
1225 /// Generator for '#omp flush'
1226 ///
1227 /// \param Loc The location where the flush directive was encountered
1228 void createFlush(const LocationDescription &Loc);
1229
1230 /// Generator for '#omp taskwait'
1231 ///
1232 /// \param Loc The location where the taskwait directive was encountered.
1233 void createTaskwait(const LocationDescription &Loc);
1234
1235 /// Generator for '#omp taskyield'
1236 ///
1237 /// \param Loc The location where the taskyield directive was encountered.
1238 void createTaskyield(const LocationDescription &Loc);
1239
1240 /// A struct to pack the relevant information for an OpenMP depend clause.
1241 struct DependData {
1245 explicit DependData() = default;
1247 Value *DepVal)
1249 };
1250
1251 /// Generator for `#omp task`
1252 ///
1253 /// \param Loc The location where the task construct was encountered.
1254 /// \param AllocaIP The insertion point to be used for alloca instructions.
1255 /// \param BodyGenCB Callback that will generate the region code.
1256 /// \param Tied True if the task is tied, false if the task is untied.
1257 /// \param Final i1 value which is `true` if the task is final, `false` if the
1258 /// task is not final.
1259 /// \param IfCondition i1 value. If it evaluates to `false`, an undeferred
1260 /// task is generated, and the encountering thread must
1261 /// suspend the current task region, for which execution
1262 /// cannot be resumed until execution of the structured
1263 /// block that is associated with the generated task is
1264 /// completed.
1265 /// \param EventHandle If present, signifies the event handle as part of
1266 /// the detach clause
1267 /// \param Mergeable If the given task is `mergeable`
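///
/// A minimal usage sketch (illustrative only; a tied task with no additional
/// clauses, where \p TaskBodyGenCB emits the task body):
/// \code
///   InsertPointOrErrorTy AfterIP =
///       OMPBuilder.createTask(Loc, AllocaIP, TaskBodyGenCB, /*Tied=*/true);
///   if (!AfterIP)
///     return AfterIP.takeError();
/// \endcode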
1268 InsertPointOrErrorTy
1269 createTask(const LocationDescription &Loc, InsertPointTy AllocaIP,
1270 BodyGenCallbackTy BodyGenCB, bool Tied = true,
1271 Value *Final = nullptr, Value *IfCondition = nullptr,
1272 SmallVector<DependData> Dependencies = {}, bool Mergeable = false,
1273 Value *EventHandle = nullptr);
1274
1275 /// Generator for the taskgroup construct
1276 ///
1277 /// \param Loc The location where the taskgroup construct was encountered.
1278 /// \param AllocaIP The insertion point to be used for alloca instructions.
1279 /// \param BodyGenCB Callback that will generate the region code.
1280 InsertPointOrErrorTy createTaskgroup(const LocationDescription &Loc,
1281 InsertPointTy AllocaIP,
1282 BodyGenCallbackTy BodyGenCB);
1283
1284 using FileIdentifierInfoCallbackTy =
1285 std::function<std::tuple<std::string, uint64_t>()>;
1286
1287 /// Creates a unique info for a target entry when provided a filename and
1288 /// line number from \p CallBack.
1289 ///
1290 /// \param CallBack A callback function which should return the filename the entry
1291 /// resides in as well as the line number for the target entry
1292 /// \param ParentName The name of the parent the target entry resides in, if
1293 /// any.
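///
/// A minimal usage sketch (illustrative only; the file name and line number
/// returned by the callback are placeholders):
/// \code
///   auto FileInfoCB = []() {
///     return std::make_tuple(std::string("input.f90"), uint64_t(42));
///   };
///   TargetRegionEntryInfo EntryInfo =
///       OMPBuilder.getTargetEntryUniqueInfo(FileInfoCB, /*ParentName=*/"foo");
/// \endcode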
1294 static TargetRegionEntryInfo
1295 getTargetEntryUniqueInfo(FileIdentifierInfoCallbackTy CallBack,
1296 StringRef ParentName = "");
1297
1298 /// Enum class for the ReductionGen CallBack type to be used.
1300
1301 /// ReductionGen CallBack for Clang
1302 ///
1303 /// \param CodeGenIP InsertPoint for CodeGen.
1304 /// \param Index Index of the ReductionInfo to generate code for.
1305 /// \param LHSPtr Optionally used by Clang to return the LHSPtr it used for
1306 /// codegen, used for fixup later.
1307 /// \param RHSPtr Optionally used by Clang to
1308 /// return the RHSPtr it used for codegen, used for fixup later.
1309 /// \param CurFn Optionally used by Clang to pass in the Current Function as
1310 /// Clang context may be old.
1311 using ReductionGenClangCBTy =
1312 std::function<InsertPointTy(InsertPointTy CodeGenIP, unsigned Index,
1313 Value **LHS, Value **RHS, Function *CurFn)>;
1314
1315 /// ReductionGen CallBack for MLIR
1316 ///
1317 /// \param CodeGenIP InsertPoint for CodeGen.
1318 /// \param LHS Pass in the LHS Value to be used for CodeGen.
1319 /// \param RHS Pass in the RHS Value to be used for CodeGen.
1321 InsertPointTy CodeGenIP, Value *LHS, Value *RHS, Value *&Res)>;
1322
1323 /// Functions used to generate atomic reductions. Such functions take two
1324 /// Values representing pointers to LHS and RHS of the reduction, as well as
1325 /// the element type of these pointers. They are expected to atomically
1326 /// update the LHS to the reduced value.
1328 InsertPointTy, Type *, Value *, Value *)>;
1329
1330 /// Enum class for reduction evaluation types scalar, complex and aggregate.
1332
1333 /// Information about an OpenMP reduction.
1344 : ElementType(nullptr), Variable(nullptr),
1347
1348 /// Reduction element type, must match pointee type of variable.
1350
1351 /// Reduction variable of pointer type.
1353
1354 /// Thread-private partial reduction variable.
1356
1357 /// Reduction evaluation kind - scalar, complex or aggregate.
1359
1360 /// Callback for generating the reduction body. The IR produced by this will
1361 /// be used to combine two values in a thread-safe context, e.g., under
1362 /// lock or within the same thread, and therefore need not be atomic.
1364
1365 /// Clang callback for generating the reduction body. The IR produced by
1366 /// this will be used to combine two values in a thread-safe context, e.g.,
1367 /// under lock or within the same thread, and therefore need not be atomic.
1369
1370 /// Callback for generating the atomic reduction body, may be null. The IR
1371 /// produced by this will be used to atomically combine two values during
1372 /// reduction. If null, the implementation will use the non-atomic version
1373 /// along with the appropriate synchronization mechanisms.
1375 };
1376
1377 enum class CopyAction : unsigned {
1378 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
1379 // the warp using shuffle instructions.
1380 RemoteLaneToThread,
1381 // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
1382 ThreadCopy,
1383 };
1384
1389 };
1390
1391 /// Supporting functions for Reductions CodeGen.
1392private:
1393 /// Emit the llvm.used metadata.
1394 void emitUsed(StringRef Name, std::vector<llvm::WeakTrackingVH> &List);
1395
1396 /// Get the id of the current thread on the GPU.
1397 Value *getGPUThreadID();
1398
1399 /// Get the GPU warp size.
1400 Value *getGPUWarpSize();
1401
1402 /// Get the id of the warp in the block.
1403 /// We assume that the warp size is 32, which is always the case
1404 /// on the NVPTX device, to generate more efficient code.
1405 Value *getNVPTXWarpID();
1406
1407 /// Get the id of the current lane in the Warp.
1408 /// We assume that the warp size is 32, which is always the case
1409 /// on the NVPTX device, to generate more efficient code.
1410 Value *getNVPTXLaneID();
1411
1412 /// Cast value to the specified type.
1413 Value *castValueToType(InsertPointTy AllocaIP, Value *From, Type *ToType);
1414
1415 /// This function creates calls to one of two shuffle functions to copy
1416 /// variables between lanes in a warp.
1417 Value *createRuntimeShuffleFunction(InsertPointTy AllocaIP, Value *Element,
1418 Type *ElementType, Value *Offset);
1419
1420 /// Function to shuffle over the value from the remote lane.
1421 void shuffleAndStore(InsertPointTy AllocaIP, Value *SrcAddr, Value *DstAddr,
1422 Type *ElementType, Value *Offset,
1423 Type *ReductionArrayTy);
1424
1425 /// Emit instructions to copy a Reduce list, which contains partially
1426 /// aggregated values, in the specified direction.
1427 void emitReductionListCopy(
1428 InsertPointTy AllocaIP, CopyAction Action, Type *ReductionArrayTy,
1429 ArrayRef<ReductionInfo> ReductionInfos, Value *SrcBase, Value *DestBase,
1430 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr});
1431
1432 /// Emit a helper that reduces data across two OpenMP threads (lanes)
1433 /// in the same warp. It uses shuffle instructions to copy over data from
1434 /// a remote lane's stack. The reduction algorithm performed is specified
1435 /// by the fourth parameter.
1436 ///
1437 /// Algorithm Versions.
1438 /// Full Warp Reduce (argument value 0):
1439 /// This algorithm assumes that all 32 lanes are active and gathers
1440 /// data from these 32 lanes, producing a single resultant value.
1441 /// Contiguous Partial Warp Reduce (argument value 1):
1442 /// This algorithm assumes that only a *contiguous* subset of lanes
1443 /// are active. This happens for the last warp in a parallel region
1444 /// when the user specified num_threads is not an integer multiple of
1445 /// 32. This contiguous subset always starts with the zeroth lane.
1446 /// Partial Warp Reduce (argument value 2):
1447 /// This algorithm gathers data from any number of lanes at any position.
1448 /// All reduced values are stored in the lowest possible lane. The set
1449 /// of problems every algorithm addresses is a superset of those
1450 /// addressable by algorithms with a lower version number. Overhead
1451 /// increases as algorithm version increases.
1452 ///
1453 /// Terminology
1454 /// Reduce element:
1455 /// Reduce element refers to the individual data field with primitive
1456 /// data types to be combined and reduced across threads.
1457 /// Reduce list:
1458 /// Reduce list refers to a collection of local, thread-private
1459 /// reduce elements.
1460 /// Remote Reduce list:
1461 /// Remote Reduce list refers to a collection of remote (relative to
1462 /// the current thread) reduce elements.
1463 ///
1464 /// We distinguish between three states of threads that are important to
1465 /// the implementation of this function.
1466 /// Alive threads:
1467 /// Threads in a warp executing the SIMT instruction, as distinguished from
1468 /// threads that are inactive due to divergent control flow.
1469 /// Active threads:
1470 /// The minimal set of threads that has to be alive upon entry to this
1471 /// function. The computation is correct iff active threads are alive.
1472 /// Some threads are alive but they are not active because they do not
1473 /// contribute to the computation in any useful manner. Turning them off
1474 /// may introduce control flow overheads without any tangible benefits.
1475 /// Effective threads:
1476 /// In order to comply with the argument requirements of the shuffle
1477 /// function, we must keep all lanes holding data alive. But at most
1478 /// half of them perform value aggregation; we refer to this half of
1479 /// threads as effective. The other half simply hands off its
1480 /// data.
1481 ///
1482 /// Procedure
1483 /// Value shuffle:
1484 /// In this step active threads transfer data from higher lane positions
1485 /// in the warp to lower lane positions, creating Remote Reduce list.
1486 /// Value aggregation:
1487 /// In this step, effective threads combine their thread local Reduce list
1488 /// with Remote Reduce list and store the result in the thread local
1489 /// Reduce list.
1490 /// Value copy:
1491 /// In this step, we deal with the assumption made by algorithm 2
1492 /// (i.e. contiguity assumption). When we have an odd number of lanes
1493 /// active, say 2k+1, only k threads will be effective and therefore k
1494 /// new values will be produced. However, the Reduce list owned by the
1495 /// (2k+1)th thread is ignored in the value aggregation. Therefore
1496 /// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
1497 /// that the contiguity assumption still holds.
1498 ///
1499 /// \param ReductionInfos Array type containing the ReductionOps.
1500 /// \param ReduceFn The reduction function.
1501 /// \param FuncAttrs Optional param to specify any function attributes that
1502 /// need to be copied to the new function.
1503 ///
1504 /// \return The ShuffleAndReduce function.
1505 Function *emitShuffleAndReduceFunction(
1507 Function *ReduceFn, AttributeList FuncAttrs);
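// A rough C-level sketch of the helper this emits (illustrative only; the
// real helper is generated as LLVM IR and the predicate/copy logic depends
// on the algorithm version described above):
//
//   void shuffle_and_reduce(void *reduce_data, short lane_id,
//                           short lane_offset, short algo_version) {
//     void *remote_data = alloca(reduce_list_size);
//     // Value shuffle: gather the remote lane's Reduce list.
//     shuffle_and_store(reduce_data, remote_data, lane_offset);
//     // Value aggregation: only effective lanes combine.
//     if (should_reduce(lane_id, lane_offset, algo_version))
//       reduce_fn(reduce_data, remote_data);
//     // Value copy: restore the contiguity assumption of algorithm 2.
//     if (should_copy(lane_id, lane_offset, algo_version))
//       copy_reduce_list(remote_data, reduce_data);
//   }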
1508
1509 /// This function emits a helper that gathers Reduce lists from the first
1510 /// lane of every active warp to lanes in the first warp.
1511 ///
1512 /// void inter_warp_copy_func(void* reduce_data, num_warps)
1513 /// shared smem[warp_size];
1514 /// For all data entries D in reduce_data:
1515 /// sync
1516 /// If (I am the first lane in each warp)
1517 /// Copy my local D to smem[warp_id]
1518 /// sync
1519 /// if (I am the first warp)
1520 /// Copy smem[thread_id] to my local D
1521 ///
1522 /// \param Loc The insert and source location description.
1523 /// \param ReductionInfos Array type containing the ReductionOps.
1524 /// \param FuncAttrs Optional param to specify any function attributes that
1525 /// need to be copied to the new function.
1526 ///
1527 /// \return The InterWarpCopy function.
1529 emitInterWarpCopyFunction(const LocationDescription &Loc,
1530 ArrayRef<ReductionInfo> ReductionInfos,
1531 AttributeList FuncAttrs);
1532
1533 /// This function emits a helper that copies all the reduction variables from
1534 /// the team into the provided global buffer for the reduction variables.
1535 ///
1536 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
1537 /// For all data entries D in reduce_data:
1538 /// Copy local D to buffer.D[Idx]
1539 ///
1540 /// \param ReductionInfos Array type containing the ReductionOps.
1541 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1542 /// \param FuncAttrs Optional param to specify any function attributes that
1543 /// need to be copied to the new function.
1544 ///
1545 /// \return The ListToGlobalCopy function.
1546 Function *emitListToGlobalCopyFunction(ArrayRef<ReductionInfo> ReductionInfos,
1547 Type *ReductionsBufferTy,
1548 AttributeList FuncAttrs);
1549
1550 /// This function emits a helper that copies all the reduction variables from
1551 /// the provided global buffer back into the team's local reduction variables.
1552 ///
1553 /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
1554 ///   For all data entries D in reduce_data:
1555 ///     Copy buffer.D[Idx] to local D;
1556 ///
1557 /// \param ReductionInfos Array type containing the ReductionOps.
1558 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1559 /// \param FuncAttrs Optional param to specify any function attributes that
1560 /// need to be copied to the new function.
1561 ///
1562 /// \return The GlobalToList function.
1563 Function *emitGlobalToListCopyFunction(ArrayRef<ReductionInfo> ReductionInfos,
1564 Type *ReductionsBufferTy,
1565 AttributeList FuncAttrs);
1566
1567 /// This function emits a helper that reduces all the reduction variables from
1568 /// the team into the provided global buffer for the reduction variables.
1569 ///
1570 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
1571 /// void *GlobPtrs[];
1572 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
1573 /// ...
1574 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
1575 /// reduce_function(GlobPtrs, reduce_data);
1576 ///
1577 /// \param ReductionInfos Array type containing the ReductionOps.
1578 /// \param ReduceFn The reduction function.
1579 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1580 /// \param FuncAttrs Optional param to specify any function attributes that
1581 /// need to be copied to the new function.
1582 ///
1583 /// \return The ListToGlobalReduce function.
1584 Function *
1585 emitListToGlobalReduceFunction(ArrayRef<ReductionInfo> ReductionInfos,
1586 Function *ReduceFn, Type *ReductionsBufferTy,
1587 AttributeList FuncAttrs);
1588
1589 /// This function emits a helper that reduces the reduction variables stored in
1590 /// the provided global buffer into the team's local reduction variables.
1591 ///
1592 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
1593 /// void *GlobPtrs[];
1594 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
1595 /// ...
1596 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
1597 /// reduce_function(reduce_data, GlobPtrs);
1598 ///
1599 /// \param ReductionInfos Array type containing the ReductionOps.
1600 /// \param ReduceFn The reduction function.
1601 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1602 /// \param FuncAttrs Optional param to specify any function attributes that
1603 /// need to be copied to the new function.
1604 ///
1605 /// \return The GlobalToListReduce function.
1606 Function *
1607 emitGlobalToListReduceFunction(ArrayRef<ReductionInfo> ReductionInfos,
1608 Function *ReduceFn, Type *ReductionsBufferTy,
1609 AttributeList FuncAttrs);
1610
1611 /// Get the function name of a reduction function.
1612 std::string getReductionFuncName(StringRef Name) const;
1613
1614 /// Emits reduction function.
1615 /// \param ReducerName Name of the function calling the reduction.
1616 /// \param ReductionInfos Array type containing the ReductionOps.
1617 /// \param ReductionGenCBKind Optional param to specify Clang or MLIR
1618 /// CodeGenCB kind.
1619 /// \param FuncAttrs Optional param to specify any function attributes that
1620 /// need to be copied to the new function.
1621 ///
1622 /// \return The reduction function.
1623 Expected<Function *> createReductionFunction(
1624 StringRef ReducerName, ArrayRef<ReductionInfo> ReductionInfos,
1626 AttributeList FuncAttrs = {});
1627
1628public:
1629 ///
1630 /// Design of OpenMP reductions on the GPU
1631 ///
1632 /// Consider a typical OpenMP program with one or more reduction
1633 /// clauses:
1634 ///
1635 /// float foo;
1636 /// double bar;
1637 /// #pragma omp target teams distribute parallel for \
1638 /// reduction(+:foo) reduction(*:bar)
1639 /// for (int i = 0; i < N; i++) {
1640 /// foo += A[i]; bar *= B[i];
1641 /// }
1642 ///
1643 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
1644 /// all teams. In our OpenMP implementation on the NVPTX device an
1645 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
1646 /// within a team are mapped to CUDA threads within a threadblock.
1647 /// Our goal is to efficiently aggregate values across all OpenMP
1648 /// threads such that:
1649 ///
1650 /// - the compiler and runtime are logically concise, and
1651 /// - the reduction is performed efficiently in a hierarchical
1652 /// manner as follows: within OpenMP threads in the same warp,
1653 /// across warps in a threadblock, and finally across teams on
1654 /// the NVPTX device.
1655 ///
1656 /// Introduction to Decoupling
1657 ///
1658 /// We would like to decouple the compiler and the runtime so that the
1659 /// latter is ignorant of the reduction variables (number, data types)
1660 /// and the reduction operators. This allows a simpler interface
1661 /// and implementation while still attaining good performance.
1662 ///
1663 /// Pseudocode for the aforementioned OpenMP program generated by the
1664 /// compiler is as follows:
1665 ///
1666 /// 1. Create private copies of reduction variables on each OpenMP
1667 /// thread: 'foo_private', 'bar_private'
1668 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
1669 /// to it and writes the result in 'foo_private' and 'bar_private'
1670 /// respectively.
1671 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
1672 /// and store the result on the team master:
1673 ///
1674 /// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
1675 /// reduceData, shuffleReduceFn, interWarpCpyFn)
1676 ///
1677 /// where:
1678 /// struct ReduceData {
1679 ///          float *foo;
1680 /// double *bar;
1681 /// } reduceData
1682 /// reduceData.foo = &foo_private
1683 /// reduceData.bar = &bar_private
1684 ///
1685 /// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
1686 /// auxiliary functions generated by the compiler that operate on
1687 /// variables of type 'ReduceData'. They aid the runtime perform
1688 /// algorithmic steps in a data agnostic manner.
1689 ///
1690 /// 'shuffleReduceFn' is a pointer to a function that reduces data
1691 /// of type 'ReduceData' across two OpenMP threads (lanes) in the
1692 /// same warp. It takes the following arguments as input:
1693 ///
1694 /// a. variable of type 'ReduceData' on the calling lane,
1695 /// b. its lane_id,
1696 /// c. an offset relative to the current lane_id to generate a
1697 /// remote_lane_id. The remote lane contains the second
1698 /// variable of type 'ReduceData' that is to be reduced.
1699 /// d. an algorithm version parameter determining which reduction
1700 /// algorithm to use.
1701 ///
1702 /// 'shuffleReduceFn' retrieves data from the remote lane using
1703 /// efficient GPU shuffle intrinsics and reduces, using the
1704 /// algorithm specified by the 4th parameter, the two operands
1705 /// element-wise. The result is written to the first operand.
1706 ///
1707 /// Different reduction algorithms are implemented in different
1708 /// runtime functions, all calling 'shuffleReduceFn' to perform
1709 /// the essential reduction step. Therefore, based on the 4th
1710 /// parameter, this function behaves slightly differently to
1711 /// cooperate with the runtime to ensure correctness under
1712 /// different circumstances.
1713 ///
1714 /// 'InterWarpCpyFn' is a pointer to a function that transfers
1715 /// reduced variables across warps. It tunnels, through CUDA
1716 /// shared memory, the thread-private data of type 'ReduceData'
1717 /// from lane 0 of each warp to a lane in the first warp.
1718 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
1719 /// The last team writes the global reduced value to memory.
1720 ///
1721 /// ret = __kmpc_nvptx_teams_reduce_nowait(...,
1722 /// reduceData, shuffleReduceFn, interWarpCpyFn,
1723 /// scratchpadCopyFn, loadAndReduceFn)
1724 ///
1725 /// 'scratchpadCopyFn' is a helper that stores reduced
1726 /// data from the team master to a scratchpad array in
1727 /// global memory.
1728 ///
1729 /// 'loadAndReduceFn' is a helper that loads data from
1730 /// the scratchpad array and reduces it with the input
1731 /// operand.
1732 ///
1733 /// These compiler generated functions hide address
1734 /// calculation and alignment information from the runtime.
1735 /// 5. if ret == 1:
1736 /// The team master of the last team stores the reduced
1737 /// result to the globals in memory.
1738 /// foo += reduceData.foo; bar *= reduceData.bar
1739 ///
1740 ///
1741 /// Warp Reduction Algorithms
1742 ///
1743 /// On the warp level, we have three algorithms implemented in the
1744 /// OpenMP runtime depending on the number of active lanes:
1745 ///
1746 /// Full Warp Reduction
1747 ///
1748 /// The reduce algorithm within a warp where all lanes are active
1749 /// is implemented in the runtime as follows:
1750 ///
1751 /// full_warp_reduce(void *reduce_data,
1752 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
1753 /// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
1754 /// ShuffleReduceFn(reduce_data, 0, offset, 0);
1755 /// }
1756 ///
1757 /// The algorithm completes in log(2, WARPSIZE) steps.
1758 ///
1759 /// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
1760 /// not used; we therefore save instructions by not retrieving lane_id
1761 /// from the corresponding special registers. The 4th parameter, which
1762 /// represents the version of the algorithm being used, is set to 0 to
1763 /// signify full warp reduction.
1764 ///
1765 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
1766 ///
1767 /// #reduce_elem refers to an element in the local lane's data structure
1768 /// #remote_elem is retrieved from a remote lane
1769 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
1770 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
1771 ///
1772 /// Contiguous Partial Warp Reduction
1773 ///
1774 /// This reduce algorithm is used within a warp where only the first
1775 /// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
1776 /// number of OpenMP threads in a parallel region is not a multiple of
1777 /// WARPSIZE. The algorithm is implemented in the runtime as follows:
1778 ///
1779 /// void
1780 /// contiguous_partial_reduce(void *reduce_data,
1781 /// kmp_ShuffleReductFctPtr ShuffleReduceFn,
1782 /// int size, int lane_id) {
1783 /// int curr_size;
1784 /// int offset;
1785 /// curr_size = size;
1786 ///        offset = curr_size/2;
1787 /// while (offset>0) {
1788 /// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
1789 /// curr_size = (curr_size+1)/2;
1790 /// offset = curr_size/2;
1791 /// }
1792 /// }
1793 ///
1794 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
1795 ///
1796 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
1797 /// if (lane_id < offset)
1798 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
1799 /// else
1800 /// reduce_elem = remote_elem
1801 ///
1802 /// This algorithm assumes that the data to be reduced are located in a
1803 /// contiguous subset of lanes starting from the first. When there is
1804 /// an odd number of active lanes, the data in the last lane is not
1805 /// aggregated with any other lane's data but is instead copied over.
1806 ///
1807 /// Dispersed Partial Warp Reduction
1808 ///
1809 /// This algorithm is used within a warp when any discontiguous subset of
1810 /// lanes are active. It is used to implement the reduction operation
1811 /// across lanes in an OpenMP simd region or in a nested parallel region.
1812 ///
1813 /// void
1814 /// dispersed_partial_reduce(void *reduce_data,
1815 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
1816 /// int size, remote_id;
1817 /// int logical_lane_id = number_of_active_lanes_before_me() * 2;
1818 /// do {
1819 /// remote_id = next_active_lane_id_right_after_me();
1820 ///        # the above function returns 0 if no active lane
1821 /// # is present right after the current lane.
1822 /// size = number_of_active_lanes_in_this_warp();
1823 /// logical_lane_id /= 2;
1824 /// ShuffleReduceFn(reduce_data, logical_lane_id,
1825 /// remote_id-1-threadIdx.x, 2);
1826 /// } while (logical_lane_id % 2 == 0 && size > 1);
1827 /// }
1828 ///
1829 /// There is no assumption made about the initial state of the reduction.
1830 /// Any number of lanes (>=1) could be active at any position. The reduction
1831 /// result is returned in the first active lane.
1832 ///
1833 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
1834 ///
1835 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
1836 /// if (lane_id % 2 == 0 && offset > 0)
1837 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
1838 /// else
1839 /// reduce_elem = remote_elem
1840 ///
1841 ///
1842 /// Intra-Team Reduction
1843 ///
1844 /// This function, as implemented in the runtime call
1845 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
1846 /// threads in a team. It first reduces within a warp using the
1847 /// aforementioned algorithms. We then proceed to gather all such
1848 /// reduced values at the first warp.
1849 ///
1850 /// The runtime makes use of the function 'InterWarpCpyFn', which copies
1851 /// data from each of the warp masters (zeroth lane of each warp, where
1852 /// warp-reduced data is held) to the zeroth warp. This step reduces (in
1853 /// a mathematical sense) the problem of reduction across warp masters in
1854 /// a block to the problem of warp reduction.
1855 ///
1856 ///
1857 /// Inter-Team Reduction
1858 ///
1859 /// Once a team has reduced its data to a single value, it is stored in
1860 /// a global scratchpad array. Since each team has a distinct slot, this
1861 /// can be done without locking.
1862 ///
1863 /// The last team to write to the scratchpad array proceeds to reduce the
1864 /// scratchpad array. One or more workers in the last team use the helper
1865 /// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
1866 /// the k'th worker reduces every k'th element.
1867 ///
1868 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
1869 /// reduce across workers and compute a globally reduced value.
1870 ///
1871 /// \param Loc The location where the reduction was
1872 ///                        encountered. Must be within the associated
1873 /// directive and after the last local access to the
1874 /// reduction variables.
1875 /// \param AllocaIP An insertion point suitable for allocas usable
1876 /// in reductions.
1877 /// \param CodeGenIP             An insertion point suitable for code generation.
1878 /// \param ReductionInfos        A list of info on each reduction variable.
1879 /// \param IsNoWait              Optional flag set if the reduction is marked as
1880 ///                              nowait.
1882 /// \param IsTeamsReduction Optional flag set if it is a teams
1883 /// reduction.
1884 /// \param HasDistribute Optional flag set if it is a
1885 /// distribute reduction.
1886 /// \param GridValue Optional GPU grid value.
1887 /// \param ReductionBufNum Optional OpenMPCUDAReductionBufNumValue to be
1888 /// used for teams reduction.
1889 /// \param SrcLocInfo Source location information global.
1890 InsertPointOrErrorTy createReductionsGPU(
1891 const LocationDescription &Loc, InsertPointTy AllocaIP,
1892 InsertPointTy CodeGenIP, ArrayRef<ReductionInfo> ReductionInfos,
1893 bool IsNoWait = false, bool IsTeamsReduction = false,
1894 bool HasDistribute = false,
1896 std::optional<omp::GV> GridValue = {}, unsigned ReductionBufNum = 1024,
1897 Value *SrcLocInfo = nullptr);
1898
1899 // TODO: provide atomic and non-atomic reduction generators for reduction
1900 // operators defined by the OpenMP specification.
1901
1902 /// Generator for '#omp reduction'.
1903 ///
1904 /// Emits the IR instructing the runtime to perform the specific kind of
1905 /// reductions. Expects reduction variables to have been privatized and
1906 /// initialized to reduction-neutral values separately. Emits the calls to
1907 /// runtime functions as well as the reduction function and the basic blocks
1908 /// performing the reduction atomically and non-atomically.
1909 ///
1910 /// The code emitted for the following:
1911 ///
1912 /// \code
1913 /// type var_1;
1914 /// type var_2;
1915 /// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
1916 /// /* body */;
1917 /// \endcode
1918 ///
1919 /// corresponds to the following sketch.
1920 ///
1921 /// \code
1922 /// void _outlined_par() {
1923 /// // N is the number of different reductions.
1924 /// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
1925 /// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
1926 /// _omp_reduction_func,
1927 /// _gomp_critical_user.reduction.var)) {
1928 /// case 1: {
1929 /// var_1 = var_1 <reduction-op> privatized_var_1;
1930 /// var_2 = var_2 <reduction-op> privatized_var_2;
1931 /// // ...
1932 /// __kmpc_end_reduce(...);
1933 /// break;
1934 /// }
1935 /// case 2: {
1936 /// _Atomic<ReductionOp>(var_1, privatized_var_1);
1937 /// _Atomic<ReductionOp>(var_2, privatized_var_2);
1938 /// // ...
1939 /// break;
1940 /// }
1941 /// default: break;
1942 /// }
1943 /// }
1944 ///
1945 /// void _omp_reduction_func(void **lhs, void **rhs) {
1946 /// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
1947 /// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
1948 /// // ...
1949 /// }
1950 /// \endcode
1951 ///
1952 /// \param Loc The location where the reduction was
1953 ///                        encountered. Must be within the associated
1954 /// directive and after the last local access to the
1955 /// reduction variables.
1956 /// \param AllocaIP An insertion point suitable for allocas usable
1957 /// in reductions.
1958 /// \param ReductionInfos A list of info on each reduction variable.
1959 /// \param IsNoWait A flag set if the reduction is marked as nowait.
1960 /// \param IsByRef        Flags, one per reduction variable, set if the
1961 ///                        reduction is performed by reference rather than by value.
1962 InsertPointOrErrorTy createReductions(const LocationDescription &Loc,
1963 InsertPointTy AllocaIP,
1964 ArrayRef<ReductionInfo> ReductionInfos,
1965 ArrayRef<bool> IsByRef,
1966 bool IsNoWait = false);
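// A minimal usage sketch, assuming `OMPBuilder` is an OpenMPIRBuilder,
// `Builder` is its underlying IRBuilder, and `Infos` holds one ReductionInfo
// per privatized reduction variable (all names here are illustrative):
//
//   SmallVector<bool> IsByRef(Infos.size(), false);
//   OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
//       OMPBuilder.createReductions(Loc, AllocaIP, Infos, IsByRef,
//                                   /*IsNoWait=*/false);
//   if (!AfterIP)
//     return AfterIP.takeError();
//   Builder.restoreIP(*AfterIP);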
1967
1968 ///}
1969
1970 /// Return the insertion point used by the underlying IRBuilder.
1972
1973 /// Update the internal location to \p Loc.
1974 bool updateToLocation(const LocationDescription &Loc) {
1975 Builder.restoreIP(Loc.IP);
1977 return Loc.IP.getBlock() != nullptr;
1978 }
1979
1980 /// Return the function declaration for the runtime function with \p FnID.
1983
1985
1986 /// Return the (LLVM-IR) string describing the source location \p LocStr.
1987 Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize);
1988
1989 /// Return the (LLVM-IR) string describing the default source location.
1991
1992 /// Return the (LLVM-IR) string describing the source location identified by
1993 /// the arguments.
1994 Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
1995 unsigned Line, unsigned Column,
1996 uint32_t &SrcLocStrSize);
1997
1998 /// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
1999 /// fallback if \p DL does not specify the function name.
2001 Function *F = nullptr);
2002
2003 /// Return the (LLVM-IR) string describing the source location \p Loc.
2004 Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
2005 uint32_t &SrcLocStrSize);
2006
2007 /// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
2008 /// TODO: Create an enum class for the Reserve2Flags
2009 Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
2010 omp::IdentFlag Flags = omp::IdentFlag(0),
2011 unsigned Reserve2Flags = 0);
2012
2013 /// Create a hidden global flag \p Name in the module with initial value \p
2014 /// Value.
2016
2017 /// Generate control flow and cleanup for cancellation.
2018 ///
2019 /// \param CancelFlag Flag indicating if the cancellation is performed.
2020 /// \param CanceledDirective The kind of directive that is canceled.
2021 /// \param ExitCB Extra code to be generated in the exit block.
2022 ///
2023 /// \return an error, if any were triggered during execution.
2025 omp::Directive CanceledDirective,
2026 FinalizeCallbackTy ExitCB = {});
2027
2028 /// Generate a target region entry call.
2029 ///
2030 /// \param Loc The location at which the request originated and is fulfilled.
2031 /// \param AllocaIP The insertion point to be used for alloca instructions.
2032 /// \param Return Return value of the created function returned by reference.
2033 /// \param DeviceID Identifier for the device via the 'device' clause.
2034 /// \param NumTeams Number of teams for the region via the 'num_teams' clause
2035 /// or 0 if unspecified and -1 if there is no 'teams' clause.
2036 /// \param NumThreads Number of threads via the 'thread_limit' clause.
2037 /// \param HostPtr Pointer to the host-side pointer of the target kernel.
2038 /// \param KernelArgs Array of arguments to the kernel.
2039 InsertPointTy emitTargetKernel(const LocationDescription &Loc,
2040 InsertPointTy AllocaIP, Value *&Return,
2041 Value *Ident, Value *DeviceID, Value *NumTeams,
2042 Value *NumThreads, Value *HostPtr,
2043 ArrayRef<Value *> KernelArgs);
2044
2045 /// Generate a flush runtime call.
2046 ///
2047 /// \param Loc The location at which the request originated and is fulfilled.
2048 void emitFlush(const LocationDescription &Loc);
2049
2050 /// The finalization stack made up of finalize callbacks currently in-flight,
2051 /// wrapped into FinalizationInfo objects that also reference the finalization
2052 /// target block and the kind of cancellable directive.
2054
2055 /// Return true if the last entry in the finalization stack is of kind \p DK
2056 /// and cancellable.
2057 bool isLastFinalizationInfoCancellable(omp::Directive DK) {
2058 return !FinalizationStack.empty() &&
2059 FinalizationStack.back().IsCancellable &&
2060 FinalizationStack.back().DK == DK;
2061 }
2062
2063 /// Generate a taskwait runtime call.
2064 ///
2065 /// \param Loc The location at which the request originated and is fulfilled.
2066 void emitTaskwaitImpl(const LocationDescription &Loc);
2067
2068 /// Generate a taskyield runtime call.
2069 ///
2070 /// \param Loc The location at which the request originated and is fulfilled.
2071 void emitTaskyieldImpl(const LocationDescription &Loc);
2072
2073 /// Return the current thread ID.
2074 ///
2075 /// \param Ident The ident (ident_t*) describing the query origin.
2077
2078 /// The OpenMPIRBuilder Configuration
2080
2081 /// The underlying LLVM-IR module
2083
2084 /// The LLVM-IR Builder used to create IR.
2086
2087 /// Map to remember source location strings
2089
2090 /// Map to remember existing ident_t*.
2092
2093 /// Info manager to keep track of target regions.
2095
2096 /// The target triple of the underlying module.
2097 const Triple T;
2098
2099 /// Helper that contains information about regions we need to outline
2100 /// during finalization.
2102 using PostOutlineCBTy = std::function<void(Function &)>;
2106
2107 /// Collect all blocks in between EntryBB and ExitBB in both the given
2108 /// vector and set.
2110 SmallVectorImpl<BasicBlock *> &BlockVector);
2111
2112 /// Return the function that contains the region to be outlined.
2113 Function *getFunction() const { return EntryBB->getParent(); }
2114 };
2115
2116 /// Collection of regions that need to be outlined during finalization.
2118
2119 /// A collection of candidate target functions whose constant allocas we will
2120 /// attempt to raise on a call to finalize, after all currently enqueued
2121 /// outline infos have been processed.
2123
2124 /// Collection of owned canonical loop objects that eventually need to be
2125 /// freed.
2126 std::forward_list<CanonicalLoopInfo> LoopInfos;
2127
2128 /// Add a new region that will be outlined later.
2129 void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }
2130
2131 /// An ordered map of auto-generated variables to their unique names.
2132 /// It stores variables with the following names: 1) ".gomp_critical_user_" +
2133 /// <critical_section_name> + ".var" for "omp critical" directives; 2)
2134 /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
2135 /// variables.
2137
2138 /// Computes the size of type in bytes.
2139 Value *getSizeInBytes(Value *BasePtr);
2140
2141 // Emit a branch from the current block to the Target block only if
2142 // the current block does not already have a terminator.
2144
2145 // If BB has no use then delete it and return. Else place BB after the current
2146 // block, if possible, or else at the end of the function. Also add a branch
2147 // from current block to BB if current block does not have a terminator.
2148 void emitBlock(BasicBlock *BB, Function *CurFn, bool IsFinished = false);
2149
2150 /// Emits code for OpenMP 'if' clause using specified \a BodyGenCallbackTy
2151 /// Here is the logic:
2152 /// if (Cond) {
2153 /// ThenGen();
2154 /// } else {
2155 /// ElseGen();
2156 /// }
2157 ///
2158 /// \return an error, if any were triggered during execution.
2160 BodyGenCallbackTy ElseGen, InsertPointTy AllocaIP = {});
2161
2162 /// Create the global variable holding the offload mappings information.
2164 std::string VarName);
2165
2166 /// Create the global variable holding the offload names information.
2169 std::string VarName);
2170
2173 AllocaInst *Args = nullptr;
2175 };
2176
2177 /// Create the allocas instruction used in call to mapper functions.
2179 InsertPointTy AllocaIP, unsigned NumOperands,
2181
2182 /// Create the call for the target mapper function.
2183 /// \param Loc The source location description.
2184 /// \param MapperFunc Function to be called.
2185 /// \param SrcLocInfo Source location information global.
2186 /// \param MaptypesArg The argument types.
2187 /// \param MapnamesArg The argument names.
2188 /// \param MapperAllocas The AllocaInst used for the call.
2189 /// \param DeviceID Device ID for the call.
2190 /// \param NumOperands Number of operands in the call.
2191 void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
2192 Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
2193 struct MapperAllocas &MapperAllocas, int64_t DeviceID,
2194 unsigned NumOperands);
2195
2196 /// Container for the arguments used to pass data to the runtime library.
2198 /// The array of base pointer passed to the runtime library.
2200 /// The array of section pointers passed to the runtime library.
2202 /// The array of sizes passed to the runtime library.
2203 Value *SizesArray = nullptr;
2204 /// The array of map types passed to the runtime library for the beginning
2205 /// of the region or for the entire region if there are no separate map
2206 /// types for the region end.
2208 /// The array of map types passed to the runtime library for the end of the
2209 /// region, or nullptr if there are no separate map types for the region
2210 /// end.
2212 /// The array of user-defined mappers passed to the runtime library.
2214 /// The array of original declaration names of mapped pointers sent to the
2215 /// runtime library for debugging
2217
2218 explicit TargetDataRTArgs() {}
2227 };
2228
2229 /// Data structure that contains the needed information to construct the
2230 /// kernel args vector.
2232 /// Number of arguments passed to the runtime library.
2233 unsigned NumTargetItems = 0;
2234 /// Arguments passed to the runtime library
2236 /// The number of iterations
2238 /// The number of teams.
2240 /// The number of threads.
2242 /// The size of the dynamic shared memory.
2244 /// True if the kernel has 'no wait' clause.
2245 bool HasNoWait = false;
2246
2247 // Constructors for TargetKernelArgs.
2252 bool HasNoWait)
2257 };
2258
2259 /// Create the kernel args vector used by emitTargetKernel. This function
2260 /// creates various constant values that are used in the resulting args
2261 /// vector.
2262 static void getKernelArgsVector(TargetKernelArgs &KernelArgs,
2264 SmallVector<Value *> &ArgsVector);
2265
2266 /// Struct that keeps the information that should be kept throughout
2267 /// a 'target data' region.
2270 /// Set to true if device pointer information has to be obtained.
2270 bool RequiresDevicePointerInfo = false;
2271 /// Set to true if Clang emits separate runtime calls for the beginning and
2272 /// end of the region. These calls might have separate map type arrays.
2273 bool SeparateBeginEndCalls = false;
2274
2275 public:
2277
2280
2281 /// Indicate whether any user-defined mapper exists.
2282 bool HasMapper = false;
2283 /// The total number of pointers passed to the runtime library.
2284 unsigned NumberOfPtrs = 0u;
2285
2286 bool EmitDebug = false;
2287
2288 /// Whether the `target ... data` directive has a `nowait` clause.
2289 bool HasNoWait = false;
2290
2291 explicit TargetDataInfo() {}
2292 explicit TargetDataInfo(bool RequiresDevicePointerInfo,
2293 bool SeparateBeginEndCalls)
2294 : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
2295 SeparateBeginEndCalls(SeparateBeginEndCalls) {}
2296 /// Clear information about the data arrays.
2299 HasMapper = false;
2300 NumberOfPtrs = 0u;
2301 }
2302 /// Return true if the current target data information has valid arrays.
2303 bool isValid() {
2307 }
2308 bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
2309 bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
2310 };
2311
2319
2320 /// This structure contains combined information generated for mappable
2321 /// clauses, including base pointers, pointers, sizes, map types, user-defined
2322 /// mappers, and non-contiguous information.
2323 struct MapInfosTy {
2325 bool IsNonContiguous = false;
2330 };
2338
2339 /// Append arrays in \a CurInfo.
2340 void append(MapInfosTy &CurInfo) {
2342 CurInfo.BasePointers.end());
2343 Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
2345 CurInfo.DevicePointers.end());
2346 Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
2347 Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
2348 Names.append(CurInfo.Names.begin(), CurInfo.Names.end());
2350 CurInfo.NonContigInfo.Dims.end());
2352 CurInfo.NonContigInfo.Offsets.end());
2354 CurInfo.NonContigInfo.Counts.end());
2356 CurInfo.NonContigInfo.Strides.end());
2357 }
2358 };
2359
2360 /// Callback function type for functions emitting the host fallback code that
2361 /// is executed when the kernel launch fails. It takes an insertion point as
2362 /// parameter where the code should be emitted. It returns an insertion point
2363 /// that points right after the emitted code.
2366
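// A minimal sketch of such a fallback callback, assuming the host fallback is
// a plain call to an outlined host version of the region, `OutlinedFn`
// (names and captured values are illustrative):
//
//   auto EmitTargetCallFallbackCB = [&](InsertPointTy IP) {
//     Builder.restoreIP(IP);
//     Builder.CreateCall(OutlinedFn, OutlinedFnArgs);
//     return Builder.saveIP();
//   };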
2367 /// Generate a target region entry call and host fallback call.
2368 ///
2369 /// \param Loc The location at which the request originated and is fulfilled.
2370 /// \param OutlinedFnID The outlined function ID.
2371 /// \param EmitTargetCallFallbackCB Call back function to generate host
2372 /// fallback code.
2373 /// \param Args Data structure holding information about the kernel arguments.
2374 /// \param DeviceID Identifier for the device via the 'device' clause.
2375 /// \param RTLoc Source location identifier
2376 /// \param AllocaIP The insertion point to be used for alloca instructions.
2378 emitKernelLaunch(const LocationDescription &Loc, Value *OutlinedFnID,
2379 EmitFallbackCallbackTy EmitTargetCallFallbackCB,
2380 TargetKernelArgs &Args, Value *DeviceID, Value *RTLoc,
2381 InsertPointTy AllocaIP);
2382
2383 /// Callback type for generating the bodies of device directives that require
2384 /// outer target tasks (e.g. in case of having `nowait` or `depend` clauses).
2385 ///
2386 /// \param DeviceID The ID of the device on which the target region will
2387 /// execute.
2388 /// \param RTLoc Source location identifier
2389 /// \param TargetTaskAllocaIP Insertion point for the alloca block of the
2390 /// generated task.
2391 ///
2392 /// \return an error, if any were triggered during execution.
2394 function_ref<Error(Value *DeviceID, Value *RTLoc,
2395 IRBuilderBase::InsertPoint TargetTaskAllocaIP)>;
2396
2397 /// Generate a target-task for the target construct
2398 ///
2399 /// \param TaskBodyCB Callback to generate the actual body of the target task.
2400 /// \param DeviceID Identifier for the device via the 'device' clause.
2401 /// \param RTLoc Source location identifier
2402 /// \param AllocaIP The insertion point to be used for alloca instructions.
2403 /// \param Dependencies Vector of DependData objects holding information of
2404 /// dependencies as specified by the 'depend' clause.
2405 /// \param HasNoWait True if the target construct had 'nowait' on it, false
2406 /// otherwise
2408 TargetTaskBodyCallbackTy TaskBodyCB, Value *DeviceID, Value *RTLoc,
2411 bool HasNoWait);
2412
2413 /// Emit the arguments to be passed to the runtime library based on the
2414 /// arrays of base pointers, pointers, sizes, map types, and mappers. If
2415 /// ForEndCall, emit map types to be passed for the end of the region instead
2416 /// of the beginning.
2420 bool ForEndCall = false);
2421
2422 /// Emit an array of struct descriptors to be assigned to the offload args.
2424 InsertPointTy CodeGenIP,
2425 MapInfosTy &CombinedInfo,
2427
2428 /// Emit the arrays used to pass the captures and map information to the
2429 /// offloading runtime library. If there is no map or capture information,
2430 /// return nullptr by reference. Accepts a reference to a MapInfosTy object
2431 /// that contains information generated for mappable clauses,
2432 /// including base pointers, pointers, sizes, map types, user-defined mappers.
2434 InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo,
2435 TargetDataInfo &Info, bool IsNonContiguous = false,
2436 function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr,
2437 function_ref<Value *(unsigned int)> CustomMapperCB = nullptr);
2438
2439 /// Allocates memory for and populates the arrays required for offloading
2440 /// (offload_{baseptrs|ptrs|mappers|sizes|maptypes|mapnames}). Then, it
2441 /// emits their base addresses as arguments to be passed to the runtime
2442 /// library. In essence, this function is a combination of
2443 /// emitOffloadingArrays and emitOffloadingArraysArgument and should arguably
2444 /// be preferred by clients of OpenMPIRBuilder.
2446 InsertPointTy AllocaIP, InsertPointTy CodeGenIP, TargetDataInfo &Info,
2447 TargetDataRTArgs &RTArgs, MapInfosTy &CombinedInfo,
2448 bool IsNonContiguous = false, bool ForEndCall = false,
2449 function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr,
2450 function_ref<Value *(unsigned int)> CustomMapperCB = nullptr);
2451
2452 /// Creates offloading entry for the provided entry ID \a ID, address \a
2453 /// Addr, size \a Size, and flags \a Flags.
2455 int32_t Flags, GlobalValue::LinkageTypes,
2456 StringRef Name = "");
2457
2458 /// The kind of errors that can occur when emitting the offload entries and
2459 /// metadata.
2465
2466 /// Callback function type
2468 std::function<void(EmitMetadataErrorKind, TargetRegionEntryInfo)>;
2469
2470 // Emit the offloading entries and metadata so that the device codegen side
2471 // can easily figure out what to emit. The produced metadata looks like
2472 // this:
2473 //
2474 // !omp_offload.info = !{!1, ...}
2475 //
2476 // We only generate metadata for functions that contain target regions.
2478 EmitMetadataErrorReportFunctionTy &ErrorReportFunction);
2479
2480public:
2481 /// Generator for __kmpc_copyprivate
2482 ///
2483 /// \param Loc The source location description.
2484 /// \param BufSize Number of elements in the buffer.
2485 /// \param CpyBuf List of pointers to data to be copied.
2486 /// \param CpyFn function to call for copying data.
2487 /// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
2488 ///
2489 /// \return The insertion position *after* the CopyPrivate call.
2490
2491 InsertPointTy createCopyPrivate(const LocationDescription &Loc,
2492 llvm::Value *BufSize, llvm::Value *CpyBuf,
2493 llvm::Value *CpyFn, llvm::Value *DidIt);
2494
2495 /// Generator for '#omp single'
2496 ///
2497 /// \param Loc The source location description.
2498 /// \param BodyGenCB Callback that will generate the region code.
2499 /// \param FiniCB Callback to finalize variable copies.
2500 /// \param IsNowait If false, a barrier is emitted.
2501 /// \param CPVars copyprivate variables.
2502 /// \param CPFuncs copy functions to use for each copyprivate variable.
2503 ///
2504 /// \returns The insertion position *after* the single call.
2505 InsertPointOrErrorTy createSingle(const LocationDescription &Loc,
2506 BodyGenCallbackTy BodyGenCB,
2507 FinalizeCallbackTy FiniCB, bool IsNowait,
2508 ArrayRef<llvm::Value *> CPVars = {},
2509 ArrayRef<llvm::Function *> CPFuncs = {});
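// A minimal usage sketch for a '#pragma omp single' region, assuming the body
// and finalization callbacks follow the callback types used throughout this
// builder (all names are illustrative):
//
//   auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) {
//     Builder.restoreIP(CodeGenIP);
//     // ... emit the single-thread body here ...
//     return Error::success();
//   };
//   auto FiniCB = [&](InsertPointTy IP) { return Error::success(); };
//   OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
//       OMPBuilder.createSingle(Loc, BodyGenCB, FiniCB, /*IsNowait=*/false);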
2510
2511 /// Generator for '#omp master'
2512 ///
2513 /// \param Loc The insert and source location description.
2514 /// \param BodyGenCB Callback that will generate the region code.
2515 /// \param FiniCB Callback to finalize variable copies.
2516 ///
2517 /// \returns The insertion position *after* the master.
2518 InsertPointOrErrorTy createMaster(const LocationDescription &Loc,
2519 BodyGenCallbackTy BodyGenCB,
2520 FinalizeCallbackTy FiniCB);
2521
2522 /// Generator for '#omp masked'
2523 ///
2524 /// \param Loc The insert and source location description.
2525 /// \param BodyGenCB Callback that will generate the region code.
2526 /// \param FiniCB Callback to finalize variable copies.
2527 ///
2528 /// \returns The insertion position *after* the masked.
2529 InsertPointOrErrorTy createMasked(const LocationDescription &Loc,
2530 BodyGenCallbackTy BodyGenCB,
2531 FinalizeCallbackTy FiniCB, Value *Filter);
2532
2533 /// Generator for '#omp critical'
2534 ///
2535 /// \param Loc The insert and source location description.
2536 /// \param BodyGenCB Callback that will generate the region body code.
2537 /// \param FiniCB Callback to finalize variable copies.
2538 /// \param CriticalName name of the lock used by the critical directive
2539 /// \param HintInst Hint Instruction for hint clause associated with critical
2540 ///
2541 /// \returns The insertion position *after* the critical.
2542 InsertPointOrErrorTy createCritical(const LocationDescription &Loc,
2543 BodyGenCallbackTy BodyGenCB,
2544 FinalizeCallbackTy FiniCB,
2545 StringRef CriticalName, Value *HintInst);
2546
2547 /// Generator for '#omp ordered depend (source | sink)'
2548 ///
2549 /// \param Loc The insert and source location description.
2550 /// \param AllocaIP The insertion point to be used for alloca instructions.
2551 /// \param NumLoops The number of loops in depend clause.
2552 /// \param StoreValues The values to be stored at the dependence vector address.
2553 /// \param Name The name of alloca instruction.
2554 /// \param IsDependSource If true, depend source; otherwise, depend sink.
2555 ///
2556 /// \return The insertion position *after* the ordered.
2557 InsertPointTy createOrderedDepend(const LocationDescription &Loc,
2558 InsertPointTy AllocaIP, unsigned NumLoops,
2559 ArrayRef<llvm::Value *> StoreValues,
2560 const Twine &Name, bool IsDependSource);
2561
2562 /// Generator for '#omp ordered [threads | simd]'
2563 ///
2564 /// \param Loc The insert and source location description.
2565 /// \param BodyGenCB Callback that will generate the region code.
2566 /// \param FiniCB Callback to finalize variable copies.
2567 /// \param IsThreads If true, the construct has the 'threads' clause or no
2568 /// clause; otherwise, it has the 'simd' clause.
2569 ///
2570 /// \returns The insertion position *after* the ordered.
2571 InsertPointOrErrorTy createOrderedThreadsSimd(const LocationDescription &Loc,
2572 BodyGenCallbackTy BodyGenCB,
2573 FinalizeCallbackTy FiniCB,
2574 bool IsThreads);
2575
2576 /// Generator for '#omp sections'
2577 ///
2578 /// \param Loc The insert and source location description.
2579 /// \param AllocaIP The insertion points to be used for alloca instructions.
2580 /// \param SectionCBs Callbacks that will generate body of each section.
2581 /// \param PrivCB Callback to copy a given variable (think copy constructor).
2582 /// \param FiniCB Callback to finalize variable copies.
2583 /// \param IsCancellable Flag to indicate a cancellable parallel region.
2584 /// \param IsNowait If true, the barrier ensuring that all sections have
2585 /// executed before moving forward is not generated.
2586 /// \returns The insertion position *after* the sections.
2588 createSections(const LocationDescription &Loc, InsertPointTy AllocaIP,
2589 ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
2591 bool IsCancellable, bool IsNowait);
2592
2593 /// Generator for '#omp section'
2594 ///
2595 /// \param Loc The insert and source location description.
2596 /// \param BodyGenCB Callback that will generate the region body code.
2597 /// \param FiniCB Callback to finalize variable copies.
2598 /// \returns The insertion position *after* the section.
2599 InsertPointOrErrorTy createSection(const LocationDescription &Loc,
2600 BodyGenCallbackTy BodyGenCB,
2601 FinalizeCallbackTy FiniCB);
2602
2603 /// Generator for `#omp teams`
2604 ///
2605 /// \param Loc The location where the teams construct was encountered.
2606 /// \param BodyGenCB Callback that will generate the region code.
2607 /// \param NumTeamsLower Lower bound on the number of teams. If this is nullptr,
2608 /// it is as if the lower bound were specified as equal to the upper bound. If
2609 /// this is non-null, then the upper bound must also be non-null.
2610 /// \param NumTeamsUpper Upper bound on the number of teams.
2611 /// \param ThreadLimit Limit on the number of threads that may participate in a
2612 /// contention group created by each team.
2613 /// \param IfExpr The integer argument value of the 'if' condition on the
2614 /// teams clause.
2616 createTeams(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
2617 Value *NumTeamsLower = nullptr, Value *NumTeamsUpper = nullptr,
2618 Value *ThreadLimit = nullptr, Value *IfExpr = nullptr);
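// A minimal usage sketch for '#pragma omp teams num_teams(N)', reusing a body
// callback like the one shown for createSingle above (names are illustrative):
//
//   OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
//       OMPBuilder.createTeams(Loc, BodyGenCB, /*NumTeamsLower=*/nullptr,
//                              /*NumTeamsUpper=*/NumTeamsVal,
//                              /*ThreadLimit=*/nullptr, /*IfExpr=*/nullptr);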
2619
2620 /// Generate conditional branch and relevant BasicBlocks through which private
2621 /// threads copy the 'copyin' variables from Master copy to threadprivate
2622 /// copies.
2623 ///
2624 /// \param IP insertion block for copyin conditional
2625 /// \param MasterVarPtr a pointer to the master variable
2626 /// \param PrivateVarPtr a pointer to the threadprivate variable
2627 /// \param IntPtrTy Pointer size type
2628 /// \param BranchtoEnd Create a branch between the copyin.not.master blocks
2629 /// and the copy.in.end block
2630 ///
2631 /// \returns The insertion point where the copying operation is to be emitted.
2633 Value *PrivateAddr,
2634 llvm::IntegerType *IntPtrTy,
2635 bool BranchtoEnd = true);
2636
2637 /// Create a runtime call for kmpc_Alloc
2638 ///
2639 /// \param Loc The insert and source location description.
2640 /// \param Size Size of allocated memory space
2641 /// \param Allocator Allocator information instruction
2642 /// \param Name Name of call Instruction for OMP_alloc
2643 ///
2644 /// \returns CallInst to the OMP_Alloc call
2645 CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
2646 Value *Allocator, std::string Name = "");
2647
2648 /// Create a runtime call for kmpc_free
2649 ///
2650 /// \param Loc The insert and source location description.
2651 /// \param Addr Address of memory space to be freed
2652 /// \param Allocator Allocator information instruction
2653 /// \param Name Name of call Instruction for OMP_Free
2654 ///
2655 /// \returns CallInst to the OMP_Free call
2656 CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
2657 Value *Allocator, std::string Name = "");
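// Illustrative pairing of the two allocation runtime calls (the size and
// allocator values are placeholders):
//
//   CallInst *Buf = OMPBuilder.createOMPAlloc(Loc, SizeVal, AllocatorVal, "buf");
//   // ... use Buf within the region ...
//   OMPBuilder.createOMPFree(Loc, Buf, AllocatorVal);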
2658
2659 /// Create a runtime call for kmpc_threadprivate_cached
2660 ///
2661 /// \param Loc The insert and source location description.
2662 /// \param Pointer pointer to data to be cached
2663 /// \param Size size of data to be cached
2664 /// \param Name Name of call Instruction for callinst
2665 ///
2666 /// \returns CallInst to the thread private cache call.
2667 CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
2670 const llvm::Twine &Name = Twine(""));
2671
2672 /// Create a runtime call for __tgt_interop_init
2673 ///
2674 /// \param Loc The insert and source location description.
2675 /// \param InteropVar variable to be allocated
2676 /// \param InteropType type of interop operation
2677 /// \param Device Device to which offloading will occur
2678 /// \param NumDependences number of dependence variables
2679 /// \param DependenceAddress pointer to dependence variables
2680 /// \param HaveNowaitClause does nowait clause exist
2681 ///
2682 /// \returns CallInst to the __tgt_interop_init call
2683 CallInst *createOMPInteropInit(const LocationDescription &Loc,
2684 Value *InteropVar,
2685 omp::OMPInteropType InteropType, Value *Device,
2686 Value *NumDependences,
2687 Value *DependenceAddress,
2688 bool HaveNowaitClause);
2689
2690 /// Create a runtime call for __tgt_interop_destroy
2691 ///
2692 /// \param Loc The insert and source location description.
2693 /// \param InteropVar variable to be allocated
2694 /// \param Device Device to which offloading will occur
2695 /// \param NumDependences number of dependence variables
2696 /// \param DependenceAddress pointer to dependence variables
2697 /// \param HaveNowaitClause does nowait clause exist
2698 ///
2699 /// \returns CallInst to the __tgt_interop_destroy call
2700 CallInst *createOMPInteropDestroy(const LocationDescription &Loc,
2701 Value *InteropVar, Value *Device,
2702 Value *NumDependences,
2703 Value *DependenceAddress,
2704 bool HaveNowaitClause);
2705
2706 /// Create a runtime call for __tgt_interop_use
2707 ///
2708 /// \param Loc The insert and source location description.
2709 /// \param InteropVar variable to be allocated
2710 /// \param Device Device to which offloading will occur
2711 /// \param NumDependences number of dependence variables
2712 /// \param DependenceAddress pointer to dependence variables
2713 /// \param HaveNowaitClause does nowait clause exist
2714 ///
2715 /// \returns CallInst to the __tgt_interop_use call
2716 CallInst *createOMPInteropUse(const LocationDescription &Loc,
2717 Value *InteropVar, Value *Device,
2718 Value *NumDependences, Value *DependenceAddress,
2719 bool HaveNowaitClause);
2720
2721 /// The `omp target` interface
2722 ///
2723 /// For more information about the usage of this interface,
2724 /// \see openmp/libomptarget/deviceRTLs/common/include/target.h
2725 ///
2726 ///{
2727
2728 /// Create a runtime call for kmpc_target_init
2729 ///
2730 /// \param Loc The insert and source location description.
2731 /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
2732 /// \param MinThreads Minimal number of threads, or 0.
2733 /// \param MaxThreads Maximal number of threads, or 0.
2734 /// \param MinTeams Minimal number of teams, or 0.
2735 /// \param MaxTeams Maximal number of teams, or 0.
2736 InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
2737 int32_t MinThreadsVal = 0,
2738 int32_t MaxThreadsVal = 0,
2739 int32_t MinTeamsVal = 0,
2740 int32_t MaxTeamsVal = 0);
2741
2742 /// Create a runtime call for kmpc_target_deinit
2743 ///
2744 /// \param Loc The insert and source location description.
2745 /// \param TeamsReductionDataSize The maximal size of all the reduction data
2746 /// for teams reduction.
2747 /// \param TeamsReductionBufferLength The number of elements (each of up to
2748 /// \p TeamsReductionDataSize size), in the teams reduction buffer.
2749 void createTargetDeinit(const LocationDescription &Loc,
2750 int32_t TeamsReductionDataSize = 0,
2751 int32_t TeamsReductionBufferLength = 1024);
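// Illustrative pairing when emitting a device kernel entry in SPMD mode
// (the location and kernel body are placeholders):
//
//   InsertPointTy BodyIP = OMPBuilder.createTargetInit(Loc, /*IsSPMD=*/true);
//   Builder.restoreIP(BodyIP);
//   // ... emit the kernel body ...
//   OMPBuilder.createTargetDeinit(Loc);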
2752
2753 ///}
2754
2755 /// Helpers to read/write kernel annotations from the IR.
2756 ///
2757 ///{
2758
2759 /// Read/write the bounds on threads for \p Kernel. Read will return 0 if none
2760 /// is set.
2761 static std::pair<int32_t, int32_t>
2762 readThreadBoundsForKernel(const Triple &T, Function &Kernel);
2763 static void writeThreadBoundsForKernel(const Triple &T, Function &Kernel,
2764 int32_t LB, int32_t UB);
2765
2766 /// Read/write the bounds on teams for \p Kernel. Read will return 0 if none
2767 /// is set.
2768 static std::pair<int32_t, int32_t> readTeamBoundsForKernel(const Triple &T,
2769 Function &Kernel);
2770 static void writeTeamsForKernel(const Triple &T, Function &Kernel, int32_t LB,
2771 int32_t UB);
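// For example, a driver could read and clamp the kernel thread bounds roughly
// as follows (the clamp value is an illustrative placeholder):
//
//   auto [MinThreads, MaxThreads] =
//       OpenMPIRBuilder::readThreadBoundsForKernel(T, *Kernel);
//   OpenMPIRBuilder::writeThreadBoundsForKernel(T, *Kernel, MinThreads,
//                                               std::min<int32_t>(MaxThreads, 256));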
2772 ///}
2773
2774private:
2775 // Sets the function attributes expected for the outlined function
2776 void setOutlinedTargetRegionFunctionAttributes(Function *OutlinedFn);
2777
2778 // Creates the function ID/Address for the given outlined function.
2779 // In the case of an embedded device function the address of the function is
2780 // used, in the case of a non-offload function a constant is created.
2781 Constant *createOutlinedFunctionID(Function *OutlinedFn,
2782 StringRef EntryFnIDName);
2783
2784 // Creates the region entry address for the outlined function
2785 Constant *createTargetRegionEntryAddr(Function *OutlinedFunction,
2786 StringRef EntryFnName);
2787
2788public:
2789 /// Functions used to generate a function with the given name.
2791 std::function<Expected<Function *>(StringRef FunctionName)>;
2792
2793 /// Create a unique name for the entry function using the source location
2794 /// information of the current target region. The name will be something like:
2795 ///
2796 /// __omp_offloading_DD_FFFF_PP_lBB[_CC]
2797 ///
2798 /// where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
2799 /// mangled name of the function that encloses the target region and BB is the
2800 /// line number of the target region. CC is a count added when more than one
2801 /// region is located at the same location.
2802 ///
2803 /// If this target outline function is not an offload entry, we don't need to
2804 /// register it. This may happen if it is guarded by an if clause that is
2805 /// false at compile time, or no target archs have been specified.
2806 ///
2807 /// The created target region ID is used by the runtime library to identify
2808 /// the current target region, so it only has to be unique and not
2809 /// necessarily point to anything. It could be the pointer to the outlined
2810 /// function that implements the target region, but we aren't using that so
2811 /// that the compiler doesn't need to keep that, and could therefore inline
2812 /// the host function if proven worthwhile during optimization. On the other
2813 /// hand, if emitting code for the device, the ID has to be the function
2814 /// address so that it can be retrieved from the offloading entry and launched
2815 /// by the runtime library. We also mark the outlined function to have
2816 /// external linkage in case we are emitting code for the device, because
2817 /// these functions will be entry points to the device.
2818 ///
2819 /// \param InfoManager The info manager keeping track of the offload entries
2820 /// \param EntryInfo The entry information about the function
2821 /// \param GenerateFunctionCallback The callback function to generate the code
2822 /// \param OutlinedFunction Pointer to the outlined function
2823 /// \param EntryFnIDName Name of the ID to be created
2825 FunctionGenCallback &GenerateFunctionCallback,
2826 bool IsOffloadEntry, Function *&OutlinedFn,
2827 Constant *&OutlinedFnID);
2828
2829 /// Registers the given function and sets up the attributes of the function.
2830 /// Returns the FunctionID.
2831 ///
2832 /// \param InfoManager The info manager keeping track of the offload entries
2833 /// \param EntryInfo The entry information about the function
2834 /// \param OutlinedFunction Pointer to the outlined function
2835 /// \param EntryFnName Name of the outlined function
2836 /// \param EntryFnIDName Name of the ID to be created
2837 Constant *registerTargetRegionFunction(TargetRegionEntryInfo &EntryInfo,
2838 Function *OutlinedFunction,
2839 StringRef EntryFnName,
2840 StringRef EntryFnIDName);
2841
2842 /// Type of BodyGen to use for region codegen
2843 ///
2844 /// Priv: If device pointer privatization is required, emit the body of the
2845 /// region here. It will have to be duplicated: with and without
2846 /// privatization.
2847 /// DupNoPriv: If we need device pointer privatization, we need
2848 /// to emit the body of the region with no privatization in the 'else' branch
2849 /// of the conditional.
2850 /// NoPriv: If we don't require privatization of device
2851 /// pointers, we emit the body in between the runtime calls. This avoids
2852 /// duplicating the body code.
2853 enum BodyGenTy { Priv, DupNoPriv, NoPriv };
2854
2855 /// Callback type for creating the map infos for the kernel parameters.
2856 /// \param CodeGenIP is the insertion point where code should be generated,
2857 /// if any.
2858 using GenMapInfoCallbackTy =
2859 function_ref<MapInfosTy &(InsertPointTy CodeGenIP)>;
2860
2861private:
2862 /// Emit the array initialization or deletion portion for user-defined mapper
2863 /// code generation. First, it evaluates whether an array section is mapped
2864 /// and whether the \a MapType instructs to delete this section. If \a IsInit
2865 /// is true, and \a MapType indicates to not delete this array, array
2866 /// initialization code is generated. If \a IsInit is false, and \a MapType
2867 /// indicates to delete this array, array deletion code is generated.
2868 void emitUDMapperArrayInitOrDel(Function *MapperFn, llvm::Value *MapperHandle,
2869 llvm::Value *Base, llvm::Value *Begin,
2870 llvm::Value *Size, llvm::Value *MapType,
2871 llvm::Value *MapName, TypeSize ElementSize,
2872 llvm::BasicBlock *ExitBB, bool IsInit);
2873
2874public:
2875 /// Emit the user-defined mapper function. The code generation follows the
2876 /// pattern in the example below.
2877 /// \code
2878 /// void .omp_mapper.<type_name>.<mapper_id>.(void *rt_mapper_handle,
2879 /// void *base, void *begin,
2880 /// int64_t size, int64_t type,
2881 /// void *name = nullptr) {
2882 /// // Allocate space for an array section first or add a base/begin for
2883 /// // pointer dereference.
2884 /// if ((size > 1 || (base != begin && maptype.IsPtrAndObj)) &&
2885 /// !maptype.IsDelete)
2886 /// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
2887 /// size*sizeof(Ty), clearToFromMember(type));
2888 /// // Map members.
2889 /// for (unsigned i = 0; i < size; i++) {
2890 /// // For each component specified by this mapper:
2891 /// for (auto c : begin[i]->all_components) {
2892 /// if (c.hasMapper())
2893 /// (*c.Mapper())(rt_mapper_handle, c.arg_base, c.arg_begin,
2894 /// c.arg_size,
2895 /// c.arg_type, c.arg_name);
2896 /// else
2897 /// __tgt_push_mapper_component(rt_mapper_handle, c.arg_base,
2898 /// c.arg_begin, c.arg_size, c.arg_type,
2899 /// c.arg_name);
2900 /// }
2901 /// }
2902 /// // Delete the array section.
2903 /// if (size > 1 && maptype.IsDelete)
2904 /// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
2905 /// size*sizeof(Ty), clearToFromMember(type));
2906 /// }
2907 /// \endcode
2908 ///
2909 /// \param PrivAndGenMapInfoCB Callback that privatizes code and populates the
2910 /// MapInfos and returns.
2911 /// \param ElemTy DeclareMapper element type.
2912 /// \param FuncName Optional param to specify mapper function name.
2913 /// \param CustomMapperCB Optional callback to generate code related to
2914 /// custom mappers.
2916 function_ref<MapInfosTy &(InsertPointTy CodeGenIP, llvm::Value *PtrPHI,
2917 llvm::Value *BeginArg)>
2918 PrivAndGenMapInfoCB,
2919 llvm::Type *ElemTy, StringRef FuncName,
2920 function_ref<bool(unsigned int, Function **)> CustomMapperCB = nullptr);
2921
2922 /// Generator for '#omp target data'
2923 ///
2924 /// \param Loc The location where the target data construct was encountered.
2925 /// \param AllocaIP The insertion points to be used for alloca instructions.
2926 /// \param CodeGenIP The insertion point at which the target directive code
2927 /// should be placed.
2928 /// \param IsBegin If true then emits begin mapper call otherwise emits
2929 /// end mapper call.
2930 /// \param DeviceID Stores the DeviceID from the device clause.
2931 /// \param IfCond Value which corresponds to the if clause condition.
2932 /// \param Info Stores all information related to the Target Data directive.
2933 /// \param GenMapInfoCB Callback that populates the MapInfos and returns.
2934 /// \param BodyGenCB Optional Callback to generate the region code.
2935 /// \param DeviceAddrCB Optional callback to generate code related to
2936 /// use_device_ptr and use_device_addr.
2937 /// \param CustomMapperCB Optional callback to generate code related to
2938 /// custom mappers.
2940 const LocationDescription &Loc, InsertPointTy AllocaIP,
2941 InsertPointTy CodeGenIP, Value *DeviceID, Value *IfCond,
2943 omp::RuntimeFunction *MapperFunc = nullptr,
2945 BodyGenTy BodyGenType)>
2946 BodyGenCB = nullptr,
2947 function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr,
2948 function_ref<Value *(unsigned int)> CustomMapperCB = nullptr,
2949 Value *SrcLocInfo = nullptr);
2950
2951 using TargetBodyGenCallbackTy = function_ref<InsertPointOrErrorTy(
2952 InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;
2953
2954 using TargetGenArgAccessorsCallbackTy = function_ref<InsertPointOrErrorTy(
2955 Argument &Arg, Value *Input, Value *&RetVal, InsertPointTy AllocaIP,
2956 InsertPointTy CodeGenIP)>;
2957
2958 /// Generator for '#omp target'
2959 ///
2960 /// \param Loc The location where the target construct was encountered.
2961 /// \param IsOffloadEntry whether it is an offload entry.
2962 /// \param CodeGenIP The insertion point where the call to the outlined
2963 /// function should be emitted.
2964 /// \param EntryInfo The entry information about the function.
2965 /// \param NumTeams Number of teams specified in the num_teams clause.
2966 /// \param NumThreads Number of threads specified in the thread_limit clause.
2967 /// \param Inputs The input values to the region that will be passed
2968 /// as arguments to the outlined function.
2969 /// \param BodyGenCB Callback that will generate the region code.
2970 /// \param ArgAccessorFuncCB Callback that will generate accessor
2971 /// instructions for passed in target arguments where necessary
2972 /// \param Dependencies A vector of DependData objects that carry
2973 /// dependency information as passed in the depend clause
2974 /// \param HasNowait Whether the target construct has a `nowait` clause or not.
2975 InsertPointOrErrorTy createTarget(
2976 const LocationDescription &Loc, bool IsOffloadEntry,
2977 OpenMPIRBuilder::InsertPointTy AllocaIP,
2978 OpenMPIRBuilder::InsertPointTy CodeGenIP,
2979 TargetRegionEntryInfo &EntryInfo, ArrayRef<int32_t> NumTeams,
2980 ArrayRef<int32_t> NumThreads, SmallVectorImpl<Value *> &Inputs,
2981 GenMapInfoCallbackTy GenMapInfoCB, TargetBodyGenCallbackTy BodyGenCB,
2982 TargetGenArgAccessorsCallbackTy ArgAccessorFuncCB,
2983 SmallVector<DependData> Dependencies = {}, bool HasNowait = false);
2984
2985 /// Returns __kmpc_for_static_init_* runtime function for the specified
2986 /// size \a IVSize and sign \a IVSigned. Will create a distribute call
2987 /// __kmpc_distribute_static_init* if \a IsGPUDistribute is set.
2988 FunctionCallee createForStaticInitFunction(unsigned IVSize, bool IVSigned,
2989 bool IsGPUDistribute);
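// For example, a 32-bit signed induction variable without GPU distribution is
// expected to map to __kmpc_for_static_init_4 (illustrative sketch;
// `OMPBuilder` is an assumed, initialized OpenMPIRBuilder):
//   FunctionCallee StaticInit = OMPBuilder.createForStaticInitFunction(
//       /*IVSize=*/32, /*IVSigned=*/true, /*IsGPUDistribute=*/false);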
2990
2991 /// Returns __kmpc_dispatch_init_* runtime function for the specified
2992 /// size \a IVSize and sign \a IVSigned.
2993 FunctionCallee createDispatchInitFunction(unsigned IVSize, bool IVSigned);
2994
2995 /// Returns __kmpc_dispatch_next_* runtime function for the specified
2996 /// size \a IVSize and sign \a IVSigned.
2997 FunctionCallee createDispatchNextFunction(unsigned IVSize, bool IVSigned);
2998
2999 /// Returns __kmpc_dispatch_fini_* runtime function for the specified
3000 /// size \a IVSize and sign \a IVSigned.
3001 FunctionCallee createDispatchFiniFunction(unsigned IVSize, bool IVSigned);
3002
3003 /// Returns __kmpc_dispatch_deinit runtime function.
3004 FunctionCallee createDispatchDeinitFunction();
3005
3006 /// Declarations for LLVM-IR types (simple, array, function and structure) are
3007 /// generated below. Their names are defined and used in OpenMPKinds.def. Here
3008 /// we provide the declarations, the initializeTypes function will provide the
3009 /// values.
3010 ///
3011 ///{
3012#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
3013#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
3014 ArrayType *VarName##Ty = nullptr; \
3015 PointerType *VarName##PtrTy = nullptr;
3016#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
3017 FunctionType *VarName = nullptr; \
3018 PointerType *VarName##Ptr = nullptr;
3019#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
3020 StructType *VarName = nullptr; \
3021 PointerType *VarName##Ptr = nullptr;
3022#include "llvm/Frontend/OpenMP/OMPKinds.def"
3023
3024 ///}
3025
3026private:
3027 /// Create all simple and struct types exposed by the runtime and remember
3028 /// the llvm::PointerTypes of them for easy access later.
3029 void initializeTypes(Module &M);
3030
3031 /// Common interface for generating entry calls for OMP Directives.
3032 /// If the directive has a region/body, it will set the insertion
3033 /// point to the body
3034 ///
3035 /// \param OMPD Directive to generate entry blocks for
3036 /// \param EntryCall Call to the entry OMP Runtime Function
3037 /// \param ExitBB block where the region ends.
3038 /// \param Conditional indicate if the entry call result will be used
3039 /// to evaluate a conditional of whether a thread will execute
3040 /// body code or not.
3041 ///
3042 /// \return The insertion position in exit block
3043 InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
3044 BasicBlock *ExitBB,
3045 bool Conditional = false);
3046
3047 /// Common interface to finalize the region
3048 ///
3049 /// \param OMPD Directive to generate exiting code for
3050 /// \param FinIP Insertion point for emitting Finalization code and exit call
3051 /// \param ExitCall Call to the ending OMP Runtime Function
3052 /// \param HasFinalize indicate if the directive will require finalization
3053 /// and has a finalization callback in the stack that
3054 /// should be called.
3055 ///
3056 /// \return The insertion position in exit block
3057 InsertPointOrErrorTy emitCommonDirectiveExit(omp::Directive OMPD,
3058 InsertPointTy FinIP,
3059 Instruction *ExitCall,
3060 bool HasFinalize = true);
3061
3062 /// Common Interface to generate OMP inlined regions
3063 ///
3064 /// \param OMPD Directive to generate inlined region for
3065 /// \param EntryCall Call to the entry OMP Runtime Function
3066 /// \param ExitCall Call to the ending OMP Runtime Function
3067 /// \param BodyGenCB Body code generation callback.
3068 /// \param FiniCB Finalization Callback. Will be called when finalizing region
3069 /// \param Conditional indicate if the entry call result will be used
3070 /// to evaluate a conditional of whether a thread will execute
3071 /// body code or not.
3072 /// \param HasFinalize indicate if the directive will require finalization
3073 /// and has a finalization callback in the stack that
3074 /// should be called.
3075 /// \param IsCancellable if HasFinalize is set to true, indicate if the
3076 /// directive should be cancellable.
3077 /// \return The insertion point after the region
3078 InsertPointOrErrorTy
3079 EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
3080 Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
3081 FinalizeCallbackTy FiniCB, bool Conditional = false,
3082 bool HasFinalize = true, bool IsCancellable = false);
3083
3084 /// Concatenate the given name parts using platform-specific separators.
3085 /// \param Parts different parts of the final name that need separation
3086 /// \param FirstSeparator First separator used between the initial two
3087 /// parts of the name.
3088 /// \param Separator separator used between all of the rest consecutive
3089 /// parts of the name
3090 static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
3091 StringRef FirstSeparator,
3092 StringRef Separator);
3093
3094 /// Returns corresponding lock object for the specified critical region
3095 /// name. If the lock object does not exist it is created, otherwise the
3096 /// reference to the existing copy is returned.
3097 /// \param CriticalName Name of the critical region.
3098 ///
3099 Value *getOMPCriticalRegionLock(StringRef CriticalName);
3100
3101 /// Callback type for Atomic Expression update
3102 /// ex:
3103 /// \code{.cpp}
3104 /// unsigned x = 0;
3105 /// #pragma omp atomic update
3106 /// x = Expr(x_old); //Expr() is any legal operation
3107 /// \endcode
3108 ///
3109 /// \param XOld the value of the atomic memory address to use for update
3110 /// \param IRB reference to the IRBuilder to use
3111 ///
3112 /// \returns Value to update X to.
3113 using AtomicUpdateCallbackTy =
3114 const function_ref<Expected<Value *>(Value *XOld, IRBuilder<> &IRB)>;
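// Illustrative sketch (not part of the upstream header) of an update callback
// that doubles the old value:
//   auto UpdateOp = [](Value *XOld, IRBuilder<> &IRB) -> Expected<Value *> {
//     return IRB.CreateMul(XOld, IRB.getInt32(2)); // new value derived from XOld
//   };
// Such a callable can be passed wherever an AtomicUpdateCallbackTy is expected,
// e.g. to createAtomicUpdate or createAtomicCapture below.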
3115
3116private:
3117 enum AtomicKind { Read, Write, Update, Capture, Compare };
3118
3119 /// Determine whether to emit flush or not
3120 ///
3121 /// \param Loc The insert and source location description.
3122 /// \param AO The required atomic ordering
3123 /// \param AK The OpenMP atomic operation kind used.
3124 ///
3125 /// \returns whether a flush was emitted or not
3126 bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
3127 AtomicOrdering AO, AtomicKind AK);
3128
3129 /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
3130 /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
3131 /// Only Scalar data types.
3132 ///
3133 /// \param AllocaIP The insertion point to be used for alloca
3134 /// instructions.
3135 /// \param X The target atomic pointer to be updated
3136 /// \param XElemTy The element type of the atomic pointer.
3137 /// \param Expr The value to update X with.
3138 /// \param AO Atomic ordering of the generated atomic
3139 /// instructions.
3140 /// \param RMWOp The binary operation used for update. If the
3141 /// operation is not supported by atomicRMW,
3142 /// or belongs to {FADD, FSUB, BAD_BINOP},
3143 /// then a `cmpExch`-based atomic will be generated.
3144 /// \param UpdateOp Code generator for complex expressions that cannot be
3145 /// expressed through atomicrmw instruction.
3146 /// \param VolatileX true if \a X is volatile.
3147 /// \param IsXBinopExpr true if \a X is the left-hand side operand of the binary
3148 /// operation on the right-hand side of the update expression,
3149 /// false otherwise (e.g. true for X = X BinOp Expr).
3150 ///
3151 /// \returns A pair of the old value of X before the update, and the value
3152 /// used for the update.
3153 Expected<std::pair<Value *, Value *>>
3154 emitAtomicUpdate(InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
3155 AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
3156 AtomicUpdateCallbackTy &UpdateOp, bool VolatileX,
3157 bool IsXBinopExpr);
3158
3159 std::pair<llvm::LoadInst *, llvm::AllocaInst *>
3160 EmitAtomicLoadLibcall(Value *X, Type *XElemTy, llvm::AtomicOrdering AO,
3161 uint64_t AtomicSizeInBits);
3162
3163 std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeLibcall(
3164 Value *X, Type *XElemTy, uint64_t AtomicSizeInBits,
3165 llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
3166 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure);
3167
3168 /// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
3169 ///
3170 /// \return The instruction
3171 Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
3172 AtomicRMWInst::BinOp RMWOp);
3173
3174public:
3175 /// A struct to pack relevant information while generating atomic Ops
3176 struct AtomicOpValue {
3177 Value *Var = nullptr;
3178 Type *ElemTy = nullptr;
3179 bool IsSigned = false;
3180 bool IsVolatile = false;
3181 };
3182
3183 /// Emit atomic Read for : V = X --- Only Scalar data types.
3184 ///
3185 /// \param Loc The insert and source location description.
3186 /// \param X The target pointer to be atomically read
3187 /// \param V Memory address where to store atomically read
3188 /// value
3189 /// \param AO Atomic ordering of the generated atomic
3190 /// instructions.
3191 ///
3192 /// \return Insertion point after generated atomic read IR.
3193 InsertPointTy createAtomicRead(const LocationDescription &Loc,
3194 AtomicOpValue &X, AtomicOpValue &V,
3195 AtomicOrdering AO);
3196
3197 /// Emit atomic write for : X = Expr --- Only Scalar data types.
3198 ///
3199 /// \param Loc The insert and source location description.
3200 /// \param X The target pointer to be atomically written to
3201 /// \param Expr The value to store.
3202 /// \param AO Atomic ordering of the generated atomic
3203 /// instructions.
3204 ///
3205 /// \return Insertion point after generated atomic Write IR.
3206 InsertPointTy createAtomicWrite(const LocationDescription &Loc,
3207 AtomicOpValue &X, Value *Expr,
3208 AtomicOrdering AO);
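// Illustrative sketch (not part of the upstream header): atomically store 42
// to the 32-bit integer pointed to by `XAddr`; `OMPBuilder`, `Builder`, `Loc`
// and `XAddr` are assumed, and the returned insert point is assumed to be
// restorable via IRBuilder::restoreIP.
//   OpenMPIRBuilder::AtomicOpValue X{XAddr, Builder.getInt32Ty(),
//                                    /*IsSigned=*/true, /*IsVolatile=*/false};
//   Builder.restoreIP(OMPBuilder.createAtomicWrite(
//       Loc, X, Builder.getInt32(42), AtomicOrdering::Monotonic));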
3209
3210 /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
3211 /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
3212 /// Only Scalar data types.
3213 ///
3214 /// \param Loc The insert and source location description.
3215 /// \param AllocaIP The insertion point to be used for alloca instructions.
3216 /// \param X The target atomic pointer to be updated
3217 /// \param Expr The value to update X with.
3218 /// \param AO Atomic ordering of the generated atomic instructions.
3219 /// \param RMWOp The binary operation used for update. If the operation
3220 /// is not supported by atomicRMW, or belongs to
3221 /// {FADD, FSUB, BAD_BINOP}, then a `cmpExch`-based
3222 /// atomic will be generated.
3223 /// \param UpdateOp Code generator for complex expressions that cannot be
3224 /// expressed through atomicrmw instruction.
3225 /// \param IsXBinopExpr true if \a X is the left-hand side operand of the binary
3226 /// operation on the right-hand side of the update expression,
3227 /// false otherwise (e.g. true for X = X BinOp Expr).
3228 ///
3229 /// \return Insertion point after generated atomic update IR.
3230 InsertPointOrErrorTy
3231 createAtomicUpdate(const LocationDescription &Loc, InsertPointTy AllocaIP,
3232 AtomicOpValue &X, Value *Expr, AtomicOrdering AO,
3233 AtomicRMWInst::BinOp RMWOp,
3234 AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr);
3235
3236 /// Emit atomic update for constructs: --- Only Scalar data types
3237 /// V = X; X = X BinOp Expr ,
3238 /// X = X BinOp Expr; V = X,
3239 /// V = X; X = Expr BinOp X,
3240 /// X = Expr BinOp X; V = X,
3241 /// V = X; X = UpdateOp(X),
3242 /// X = UpdateOp(X); V = X,
3243 ///
3244 /// \param Loc The insert and source location description.
3245 /// \param AllocaIP The insertion point to be used for alloca instructions.
3246 /// \param X The target atomic pointer to be updated
3247 /// \param V Memory address where to store captured value
3248 /// \param Expr The value to update X with.
3249 /// \param AO Atomic ordering of the generated atomic instructions
3250 /// \param RMWOp The binary operation used for update. If the
3251 /// operation is not supported by atomicRMW, or belongs to
3252 /// {FADD, FSUB, BAD_BINOP}, then a cmpExch-based
3253 /// atomic will be generated.
3254 /// \param UpdateOp Code generator for complex expressions that cannot be
3255 /// expressed through atomicrmw instruction.
3256 /// \param UpdateExpr true if X is an in place update of the form
3257 /// X = X BinOp Expr or X = Expr BinOp X
3258 /// \param IsXBinopExpr true if X is the left-hand side operand of the binary
3259 /// operation on the right-hand side of the update expression,
3260 /// false otherwise (e.g. true for X = X BinOp Expr).
3261 /// \param IsPostfixUpdate true if original value of 'x' must be stored in
3262 /// 'v', not an updated one.
3263 ///
3264 /// \return Insertion point after generated atomic capture IR.
3265 InsertPointOrErrorTy
3266 createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP,
3267 AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
3268 AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
3269 AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
3270 bool IsPostfixUpdate, bool IsXBinopExpr);
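// Illustrative sketch (not part of the upstream header): `v = x; x = x + expr;`
// `OMPBuilder`, `Builder`, `Loc`, `AllocaIP`, `XAddr`, `VAddr` and `Expr` are
// assumed, and InsertPointOrErrorTy is assumed to behave like llvm::Expected.
//   auto AddOp = [&](Value *XOld, IRBuilder<> &IRB) -> Expected<Value *> {
//     return IRB.CreateAdd(XOld, Expr); // fallback when atomicrmw is unsuitable
//   };
//   OpenMPIRBuilder::AtomicOpValue X{XAddr, Builder.getInt32Ty(), true, false};
//   OpenMPIRBuilder::AtomicOpValue V{VAddr, Builder.getInt32Ty(), true, false};
//   auto AfterIP = OMPBuilder.createAtomicCapture(
//       Loc, AllocaIP, X, V, Expr, AtomicOrdering::Monotonic,
//       AtomicRMWInst::Add, AddOp, /*UpdateExpr=*/true,
//       /*IsPostfixUpdate=*/true, /*IsXBinopExpr=*/true);
//   if (!AfterIP)
//     return AfterIP.takeError();
//   Builder.restoreIP(*AfterIP);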
3271
3272 /// Emit atomic compare for constructs: --- Only scalar data types
3273 /// cond-expr-stmt:
3274 /// x = x ordop expr ? expr : x;
3275 /// x = expr ordop x ? expr : x;
3276 /// x = x == e ? d : x;
3277 /// x = e == x ? d : x; (this one is not in the spec)
3278 /// cond-update-stmt:
3279 /// if (x ordop expr) { x = expr; }
3280 /// if (expr ordop x) { x = expr; }
3281 /// if (x == e) { x = d; }
3282 /// if (e == x) { x = d; } (this one is not in the spec)
3283 /// conditional-update-capture-atomic:
3284 /// v = x; cond-update-stmt; (IsPostfixUpdate=true, IsFailOnly=false)
3285 /// cond-update-stmt; v = x; (IsPostfixUpdate=false, IsFailOnly=false)
3286 /// if (x == e) { x = d; } else { v = x; } (IsPostfixUpdate=false,
3287 /// IsFailOnly=true)
3288 /// r = x == e; if (r) { x = d; } (IsPostfixUpdate=false, IsFailOnly=false)
3289 /// r = x == e; if (r) { x = d; } else { v = x; } (IsPostfixUpdate=false,
3290 /// IsFailOnly=true)
3291 ///
3292 /// \param Loc The insert and source location description.
3293 /// \param X The target atomic pointer to be updated.
3294 /// \param V Memory address where to store captured value (for
3295 /// compare capture only).
3296 /// \param R Memory address where to store comparison result
3297 /// (for compare capture with '==' only).
3298 /// \param E The expected value ('e') for forms that use an
3299 /// equality comparison or an expression ('expr') for
3300 /// forms that use 'ordop' (logically an atomic maximum or
3301 /// minimum).
3302 /// \param D The desired value for forms that use an equality
3303 /// comparison. For forms that use 'ordop', it should be
3304 /// \p nullptr.
3305 /// \param AO Atomic ordering of the generated atomic instructions.
3306 /// \param Op Atomic compare operation. It can only be ==, <, or >.
3307 /// \param IsXBinopExpr True if the conditional statement is in the form where
3308 /// x is on LHS. It only matters for < or >.
3309 /// \param IsPostfixUpdate True if original value of 'x' must be stored in
3310 /// 'v', not an updated one (for compare capture
3311 /// only).
3312 /// \param IsFailOnly True if the original value of 'x' is stored to 'v'
3313 /// only when the comparison fails. This is only valid for
3314 /// the case the comparison is '=='.
3315 ///
3316 /// \return Insertion point after generated atomic capture IR.
3317 InsertPointTy createAtomicCompare(const LocationDescription &Loc,
3318 AtomicOpValue &X, AtomicOpValue &V,
3319 AtomicOpValue &R, Value *E, Value *D,
3320 AtomicOrdering AO, omp::OMPAtomicCompareOp Op,
3321 bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly);
3322 InsertPointTy createAtomicCompare(const LocationDescription &Loc,
3323 AtomicOpValue &X, AtomicOpValue &V,
3324 AtomicOpValue &R, Value *E, Value *D,
3325 AtomicOrdering AO,
3326 omp::OMPAtomicCompareOp Op,
3327 bool IsXBinopExpr, bool IsPostfixUpdate,
3328 bool IsFailOnly, AtomicOrdering Failure);
3329
3330 /// Create the control flow structure of a canonical OpenMP loop.
3331 ///
3332 /// The emitted loop will be disconnected, i.e. no edge to the loop's
3333 /// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
3334 /// IRBuilder location is not preserved.
3335 ///
3336 /// \param DL DebugLoc used for the instructions in the skeleton.
3337 /// \param TripCount Value to be used for the trip count.
3338 /// \param F Function in which to insert the BasicBlocks.
3339 /// \param PreInsertBefore Where to insert BBs that execute before the body,
3340 /// typically the body itself.
3341 /// \param PostInsertBefore Where to insert BBs that execute after the body.
3342 /// \param Name Base name used to derive BB
3343 /// and instruction names.
3344 ///
3345 /// \returns The CanonicalLoopInfo that represents the emitted loop.
3346 CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
3347 Function *F,
3348 BasicBlock *PreInsertBefore,
3349 BasicBlock *PostInsertBefore,
3350 const Twine &Name = {});
3351 /// OMP Offload Info Metadata name string
3352 const std::string ompOffloadInfoName = "omp_offload.info";
3353
3354 /// Loads all the offload entries information from the host IR
3355 /// metadata. This function is only meant to be used with device code
3356 /// generation.
3357 ///
3358 /// \param M Module to load Metadata info from. The Module passed may be
3359 /// loaded from a bitcode file, i.e., different from the OpenMPIRBuilder::M module.
3360 void loadOffloadInfoMetadata(Module &M);
3361
3362 /// Loads all the offload entries information from the host IR
3363 /// metadata read from the file passed in as the HostFilePath argument. This
3364 /// function is only meant to be used with device code generation.
3365 ///
3366 /// \param HostFilePath The path to the host IR file,
3367 /// used to load in offload metadata for the device, allowing host and device
3368 /// to maintain the same metadata mapping.
3369 void loadOffloadInfoMetadata(StringRef HostFilePath);
3370
3371 /// Gets (if a variable with the given name already exists) or creates an
3372 /// internal global variable with the specified Name. The created variable has
3373 /// linkage CommonLinkage by default and is initialized by null value.
3374 /// \param Ty Type of the global variable. If it already exists, the type
3375 /// must be the same.
3376 /// \param Name Name of the variable.
3377 GlobalVariable *getOrCreateInternalVariable(Type *Ty, const StringRef &Name,
3378 unsigned AddressSpace = 0);
3379};
3380
3381/// Class to represent the control flow structure of an OpenMP canonical loop.
3382///
3383/// The control-flow structure is standardized for easy consumption by
3384/// directives associated with loops. For instance, the worksharing-loop
3385/// construct may change this control flow such that each loop iteration is
3386/// executed on only one thread. The constraints of a canonical loop in brief
3387/// are:
3388///
3389/// * The number of loop iterations must have been computed before entering the
3390/// loop.
3391///
3392/// * Has an (unsigned) logical induction variable that starts at zero and
3393/// increments by one.
3394///
3395/// * The loop's CFG itself has no side-effects. The OpenMP specification
3396/// itself allows side-effects, but the order in which they happen, including
3397/// how often or whether at all, is unspecified. We expect that the frontend
3398/// will emit those side-effect instructions somewhere (e.g. before the loop)
3399/// such that the CanonicalLoopInfo itself can be side-effect free.
3400///
3401/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
3402/// execution of a loop body that satisfies these constraints. It does NOT
3403/// represent arbitrary SESE regions that happen to contain a loop. Do not use
3404/// CanonicalLoopInfo for such purposes.
3405///
3406/// The control flow can be described as follows:
3407///
3408/// Preheader
3409/// |
3410/// /-> Header
3411/// | |
3412/// | Cond---\
3413/// | | |
3414/// | Body |
3415/// | | | |
3416/// | <...> |
3417/// | | | |
3418/// \--Latch |
3419/// |
3420/// Exit
3421/// |
3422/// After
3423///
3424/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
3425/// including) and end at AfterIP (at the After's first instruction, excluding).
3426/// That is, instructions in the Preheader and After blocks (except the
3427/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
3428/// side-effects. Typically, the Preheader is used to compute the loop's trip
3429/// count. The instructions from BodyIP (at the Body block's first instruction,
3430/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
3431/// control and thus can have side-effects. The body block is the single entry
3432/// point into the loop body, which may contain arbitrary control flow as long
3433/// as all control paths eventually branch to the Latch block.
3434///
3435/// TODO: Consider adding another standardized BasicBlock between Body CFG and
3436/// Latch to guarantee that there is only a single edge to the latch. It would
3437/// make loop transformations easier by not needing to consider multiple
3438/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
3439/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
3440/// executes after each body iteration.
3441///
3442/// There must be no loop-carried dependencies through llvm::Values. This is
3443/// equivalent to the Latch having no PHINode and the Header's only PHINode being the one
3444/// for the induction variable.
3445///
3446/// All code in Header, Cond, Latch and Exit (plus the terminator of the
3447/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
3448/// by assertOK(). They are expected to not be modified unless explicitly
3449/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
3450/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
3451/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
3452/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
3453/// anymore as its underlying control flow may not exist anymore.
3454/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
3455/// may also return a new CanonicalLoopInfo that can be passed to other
3456/// loop-associated construct implementing methods. These loop-transforming
3457/// methods may either create a new CanonicalLoopInfo usually using
3458/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
3459/// modify one of the input CanonicalLoopInfo and return it as representing the
3460/// modified loop. Which of these is done is an implementation detail of the
3461/// transformation-implementing method and callers should always assume that the
3462/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
3463/// A returned CanonicalLoopInfo has the same structure and guarantees as the one
3464/// created by createCanonicalLoop, such that transforming methods do not have
3465/// to special case where the CanonicalLoopInfo originated from.
3466///
3467/// Generally, methods consuming CanonicalLoopInfo do not need an
3468/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
3469/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
3470/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
3471/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
3472/// any InsertPoint in the Preheader, After or Body block can still be used after
3473/// calling such a method.
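///
/// A minimal consumption sketch (illustrative only, not from the original
/// header; `Builder`, `ElemTy`, `BasePtr` and a valid `CLI` created e.g. by
/// createCanonicalLoop are assumed):
/// \code{.cpp}
///   Builder.restoreIP(CLI->getBodyIP());
///   Value *IV = CLI->getIndVar(); // 0-based logical iteration number
///   Value *Elem = Builder.CreateGEP(ElemTy, BasePtr, IV);
/// \endcode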
3474///
3475/// TODO: Provide mechanisms for exception handling and cancellation points.
3476///
3477/// Defined outside OpenMPIRBuilder because nested classes cannot be
3478/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
3479 class CanonicalLoopInfo {
3480 friend class OpenMPIRBuilder;
3481
3482private:
3483 BasicBlock *Header = nullptr;
3484 BasicBlock *Cond = nullptr;
3485 BasicBlock *Latch = nullptr;
3486 BasicBlock *Exit = nullptr;
3487
3488 /// Add the control blocks of this loop to \p BBs.
3489 ///
3490 /// This does not include any block from the body, including the one returned
3491 /// by getBody().
3492 ///
3493 /// FIXME: This currently includes the Preheader and After blocks even though
3494 /// their content is (mostly) not under CanonicalLoopInfo's control.
3495 /// Re-evaluate whether this makes sense.
3496 void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
3497
3498 /// Sets the number of loop iterations to the given value. This value must be
3499 /// valid in the condition block (i.e., defined in the preheader) and is
3500 /// interpreted as an unsigned integer.
3501 void setTripCount(Value *TripCount);
3502
3503 /// Replace all uses of the canonical induction variable in the loop body with
3504 /// a new one.
3505 ///
3506 /// The intended use case is to update the induction variable for an updated
3507 /// iteration space such that it can stay normalized in the 0...tripcount-1
3508 /// range.
3509 ///
3510 /// The \p Updater is called with the (presumably updated) current normalized
3511 /// induction variable and is expected to return the value that uses of the
3512 /// pre-updated induction values should use instead, typically dependent on
3513 /// the new induction variable. This is a lambda (instead of e.g. just passing
3514 /// the new value) to be able to distinguish the uses of the pre-updated
3515 /// induction variable and uses of the induction variable to compute the
3516 /// updated induction variable value.
3517 void mapIndVar(llvm::function_ref<Value *(Instruction *)> Updater);
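// A conceptual sketch of the Updater contract (illustrative only; mapIndVar
// is private and called from OpenMPIRBuilder's own loop transformations).
// Assuming `Builder` and a runtime offset `Start`, this rewrites every use of
// the old induction variable to `Start + IV`:
//   CLI->mapIndVar([&](Instruction *OldIV) -> Value * {
//     Builder.SetInsertPoint(OldIV->getNextNode());
//     return Builder.CreateAdd(Start, OldIV);
//   });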
3518
3519public:
3520 /// Returns whether this object currently represents the IR of a loop. If
3521 /// returning false, it may have been consumed by a loop transformation or not
3522 /// been initialized. Do not use it in this case.
3523 bool isValid() const { return Header; }
3524
3525 /// The preheader ensures that there is only a single edge entering the loop.
3526 /// Code that must be executed before any loop iteration can be emitted here,
3527 /// such as computing the loop trip count and begin lifetime markers. Code in
3528 /// the preheader is not considered part of the canonical loop.
3529 BasicBlock *getPreheader() const;
3530
3531 /// The header is the entry for each iteration. In the canonical control flow,
3532 /// it only contains the PHINode for the induction variable.
3533 BasicBlock *getHeader() const {
3534 assert(isValid() && "Requires a valid canonical loop");
3535 return Header;
3536 }
3537
3538 /// The condition block computes whether there is another loop iteration. If
3539 /// yes, branches to the body; otherwise to the exit block.
3540 BasicBlock *getCond() const {
3541 assert(isValid() && "Requires a valid canonical loop");
3542 return Cond;
3543 }
3544
3545 /// The body block is the single entry for a loop iteration and not controlled
3546 /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
3547 /// eventually branch to the \p Latch block.
3548 BasicBlock *getBody() const {
3549 assert(isValid() && "Requires a valid canonical loop");
3550 return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
3551 }
3552
3553 /// Reaching the latch indicates the end of the loop body code. In the
3554 /// canonical control flow, it only contains the increment of the induction
3555 /// variable.
3556 BasicBlock *getLatch() const {
3557 assert(isValid() && "Requires a valid canonical loop");
3558 return Latch;
3559 }
3560
3561 /// Reaching the exit indicates no more iterations are being executed.
3562 BasicBlock *getExit() const {
3563 assert(isValid() && "Requires a valid canonical loop");
3564 return Exit;
3565 }
3566
3567 /// The after block is intended for clean-up code such as lifetime end
3568 /// markers. It is separate from the exit block to ensure that, analogous to
3569 /// the preheader, it has just a single entry edge and is free from PHI
3570 /// nodes should there be multiple loop exits (such as from break
3571 /// statements/cancellations).
3572 BasicBlock *getAfter() const {
3573 assert(isValid() && "Requires a valid canonical loop");
3574 return Exit->getSingleSuccessor();
3575 }
3576
3577 /// Returns the llvm::Value containing the number of loop iterations. It must
3578 /// be valid in the preheader and always interpreted as an unsigned integer of
3579 /// any bit-width.
3580 Value *getTripCount() const {
3581 assert(isValid() && "Requires a valid canonical loop");
3582 Instruction *CmpI = &Cond->front();
3583 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
3584 return CmpI->getOperand(1);
3585 }
3586
3587 /// Returns the instruction representing the current logical induction
3588 /// variable. Always unsigned, always starting at 0 with an increment of one.
3589 Instruction *getIndVar() const {
3590 assert(isValid() && "Requires a valid canonical loop");
3591 Instruction *IndVarPHI = &Header->front();
3592 assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
3593 return IndVarPHI;
3594 }
3595
3596 /// Return the type of the induction variable (and the trip count).
3597 Type *getIndVarType() const {
3598 assert(isValid() && "Requires a valid canonical loop");
3599 return getIndVar()->getType();
3600 }
3601
3602 /// Return the insertion point for user code before the loop.
3603 OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
3604 assert(isValid() && "Requires a valid canonical loop");
3605 BasicBlock *Preheader = getPreheader();
3606 return {Preheader, std::prev(Preheader->end())};
3607 };
3608
3609 /// Return the insertion point for user code in the body.
3610 OpenMPIRBuilder::InsertPointTy getBodyIP() const {
3611 assert(isValid() && "Requires a valid canonical loop");
3612 BasicBlock *Body = getBody();
3613 return {Body, Body->begin()};
3614 };
3615
3616 /// Return the insertion point for user code after the loop.
3617 OpenMPIRBuilder::InsertPointTy getAfterIP() const {
3618 assert(isValid() && "Requires a valid canonical loop");
3619 BasicBlock *After = getAfter();
3620 return {After, After->begin()};
3621 };
3622
3623 Function *getFunction() const {
3624 assert(isValid() && "Requires a valid canonical loop");
3625 return Header->getParent();
3626 }
3627
3628 /// Consistency self-check.
3629 void assertOK() const;
3630
3631 /// Invalidate this loop. That is, the underlying IR does not fulfill the
3632 /// requirements of an OpenMP canonical loop anymore.
3633 void invalidate();
3634};
3635
3636} // end namespace llvm
3637
3638#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define Success
arc branch finalize
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file defines the BumpPtrAllocator interface.
BlockVerifier::State From
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
DXIL Finalize Linkage
uint64_t Addr
std::string Name
uint32_t Index
uint64_t Size
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
Hexagon Hardware Loops
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
This file defines constans and helpers used when dealing with OpenMP.
Provides definitions for Target specific Grid Values.
const SmallVectorImpl< MachineOperand > & Cond
Basic Register Allocator
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Value * RHS
Value * LHS
an instruction to allocate memory on the stack
Definition: Instructions.h:63
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:716
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator end()
Definition: BasicBlock.h:461
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:448
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
Class to represented the control flow structure of an OpenMP canonical loop.
Value * getTripCount() const
Returns the llvm::Value containing the number of loop iterations.
BasicBlock * getHeader() const
The header is the entry for each iteration.
void assertOK() const
Consistency self-check.
Type * getIndVarType() const
Return the type of the induction variable (and the trip count).
BasicBlock * getBody() const
The body block is the single entry for a loop iteration and not controlled by CanonicalLoopInfo.
bool isValid() const
Returns whether this object currently represents the IR of a loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const
Return the insertion point for user code after the loop.
OpenMPIRBuilder::InsertPointTy getBodyIP() const
Return the insertion point for user code in the body.
BasicBlock * getAfter() const
The after block is intended for clean-up code such as lifetime end markers.
Function * getFunction() const
void invalidate()
Invalidate this loop.
BasicBlock * getLatch() const
Reaching the latch indicates the end of the loop body code.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const
Return the insertion point for user code before the loop.
BasicBlock * getCond() const
The condition block computes whether there is another loop iteration.
BasicBlock * getExit() const
Reaching the exit indicates no more iterations are being executed.
BasicBlock * getPreheader() const
The preheader ensures that there is only a single edge entering the loop.
Instruction * getIndVar() const
Returns the instruction representing the current logical induction variable.
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
Tagged union holding either a T or a Error.
Definition: Error.h:481
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition: GlobalValue.h:51
InsertPoint - A saved insertion point.
Definition: IRBuilder.h:254
BasicBlock * getBlock() const
Definition: IRBuilder.h:269
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:91
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
Definition: IRBuilder.h:217
InsertPoint saveIP() const
Returns the current insert point.
Definition: IRBuilder.h:274
void restoreIP(InsertPoint IP)
Sets the current insert point to a previously-saved location.
Definition: IRBuilder.h:286
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2697
Class to represent integer types.
Definition: DerivedTypes.h:42
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:39
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
OffloadEntryInfoDeviceGlobalVar(unsigned Order, OMPTargetGlobalVarEntryKind Flags)
Definition: OMPIRBuilder.h:398
OffloadEntryInfoDeviceGlobalVar(unsigned Order, Constant *Addr, int64_t VarSize, OMPTargetGlobalVarEntryKind Flags, GlobalValue::LinkageTypes Linkage, const std::string &VarName)
Definition: OMPIRBuilder.h:401
static bool classof(const OffloadEntryInfo *Info)
Definition: OMPIRBuilder.h:416
static bool classof(const OffloadEntryInfo *Info)
Definition: OMPIRBuilder.h:323
OffloadEntryInfoTargetRegion(unsigned Order, Constant *Addr, Constant *ID, OMPTargetRegionEntryKind Flags)
Definition: OMPIRBuilder.h:310
@ OffloadingEntryInfoTargetRegion
Entry is a target region.
Definition: OMPIRBuilder.h:244
@ OffloadingEntryInfoDeviceGlobalVar
Entry is a declare target variable.
Definition: OMPIRBuilder.h:246
OffloadingEntryInfoKinds getKind() const
Definition: OMPIRBuilder.h:262
OffloadEntryInfo(OffloadingEntryInfoKinds Kind)
Definition: OMPIRBuilder.h:253
static bool classof(const OffloadEntryInfo *Info)
Definition: OMPIRBuilder.h:270
OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags)
Definition: OMPIRBuilder.h:254
Class that manages information about offload code regions and data.
Definition: OMPIRBuilder.h:232
function_ref< void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)> OffloadDeviceGlobalVarEntryInfoActTy
Applies action Action on all registered entries.
Definition: OMPIRBuilder.h:438
OMPTargetDeviceClauseKind
Kind of device clause for declare target variables and functions NOTE: Currently not used as a part o...
Definition: OMPIRBuilder.h:377
@ OMPTargetDeviceClauseNoHost
The target is marked for non-host devices.
Definition: OMPIRBuilder.h:381
@ OMPTargetDeviceClauseAny
The target is marked for all devices.
Definition: OMPIRBuilder.h:379
@ OMPTargetDeviceClauseNone
The target is marked as having no clause.
Definition: OMPIRBuilder.h:385
@ OMPTargetDeviceClauseHost
The target is marked for host devices.
Definition: OMPIRBuilder.h:383
void registerDeviceGlobalVarEntryInfo(StringRef VarName, Constant *Addr, int64_t VarSize, OMPTargetGlobalVarEntryKind Flags, GlobalValue::LinkageTypes Linkage)
Register device global variable entry.
void initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order)
Initialize device global variable entry.
void actOnDeviceGlobalVarEntriesInfo(const OffloadDeviceGlobalVarEntryInfoActTy &Action)
OMPTargetRegionEntryKind
Kind of the target registry entry.
Definition: OMPIRBuilder.h:297
@ OMPTargetRegionEntryTargetRegion
Mark the entry as target region.
Definition: OMPIRBuilder.h:299
OffloadEntriesInfoManager(OpenMPIRBuilder *builder)
Definition: OMPIRBuilder.h:290
void getTargetRegionEntryFnName(SmallVectorImpl< char > &Name, const TargetRegionEntryInfo &EntryInfo)
bool hasTargetRegionEntryInfo(TargetRegionEntryInfo EntryInfo, bool IgnoreAddressId=false) const
Return true if a target region entry with the provided information exists.
void registerTargetRegionEntryInfo(TargetRegionEntryInfo EntryInfo, Constant *Addr, Constant *ID, OMPTargetRegionEntryKind Flags)
Register target region entry.
void actOnTargetRegionEntriesInfo(const OffloadTargetRegionEntryInfoActTy &Action)
unsigned size() const
Return number of entries defined so far.
Definition: OMPIRBuilder.h:288
void initializeTargetRegionEntryInfo(const TargetRegionEntryInfo &EntryInfo, unsigned Order)
Initialize target region entry.
OMPTargetGlobalVarEntryKind
Kind of the global variable entry..
Definition: OMPIRBuilder.h:357
@ OMPTargetGlobalVarEntryEnter
Mark the entry as a declare target enter.
Definition: OMPIRBuilder.h:363
@ OMPTargetGlobalVarEntryNone
Mark the entry as having no declare target entry kind.
Definition: OMPIRBuilder.h:365
@ OMPTargetGlobalRegisterRequires
Mark the entry as a register requires global.
Definition: OMPIRBuilder.h:369
@ OMPTargetGlobalVarEntryIndirect
Mark the entry as a declare target indirect global.
Definition: OMPIRBuilder.h:367
@ OMPTargetGlobalVarEntryLink
Mark the entry as a to declare target link.
Definition: OMPIRBuilder.h:361
@ OMPTargetGlobalVarEntryTo
Mark the entry as a to declare target.
Definition: OMPIRBuilder.h:359
function_ref< void(const TargetRegionEntryInfo &EntryInfo, const OffloadEntryInfoTargetRegion &)> OffloadTargetRegionEntryInfoActTy
brief Applies action Action on all registered entries.
Definition: OMPIRBuilder.h:348
bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const
Checks if the variable with the given name has been registered already.
Definition: OMPIRBuilder.h:433
bool empty() const
Return true if a there are no entries defined.
Captures attributes that affect generating LLVM-IR using the OpenMPIRBuilder and related classes.
Definition: OMPIRBuilder.h:87
void setIsGPU(bool Value)
Definition: OMPIRBuilder.h:184
std::optional< bool > IsTargetDevice
Flag to define whether to generate code for the role of the OpenMP host (if set to false) or device (...
Definition: OMPIRBuilder.h:93
std::optional< bool > IsGPU
Flag for specifying if the compilation is done for an accelerator.
Definition: OMPIRBuilder.h:103
void setGridValue(omp::GV G)
Definition: OMPIRBuilder.h:189
std::optional< StringRef > FirstSeparator
First separator used between the initial two parts of a name.
Definition: OMPIRBuilder.h:112
StringRef separator() const
Definition: OMPIRBuilder.h:175
int64_t getRequiresFlags() const
Returns requires directive clauses as flags compatible with those expected by libomptarget.
void setFirstSeparator(StringRef FS)
Definition: OMPIRBuilder.h:187
StringRef firstSeparator() const
Definition: OMPIRBuilder.h:165
std::optional< bool > OpenMPOffloadMandatory
Flag for specifying if offloading is mandatory.
Definition: OMPIRBuilder.h:109
std::optional< bool > EmitLLVMUsedMetaInfo
Flag for specifying if LLVMUsed information should be emitted.
Definition: OMPIRBuilder.h:106
omp::GV getGridValue() const
Definition: OMPIRBuilder.h:148
SmallVector< Triple > TargetTriples
When compilation is being done for the OpenMP host (i.e.
Definition: OMPIRBuilder.h:121
void setHasRequiresReverseOffload(bool Value)
bool hasRequiresUnifiedSharedMemory() const
void setHasRequiresUnifiedSharedMemory(bool Value)
std::optional< StringRef > Separator
Separator used between all of the rest consecutive parts of s name.
Definition: OMPIRBuilder.h:114
bool hasRequiresDynamicAllocators() const
bool openMPOffloadMandatory() const
Definition: OMPIRBuilder.h:142
void setHasRequiresUnifiedAddress(bool Value)
void setOpenMPOffloadMandatory(bool Value)
Definition: OMPIRBuilder.h:186
void setIsTargetDevice(bool Value)
Definition: OMPIRBuilder.h:183
void setSeparator(StringRef S)
Definition: OMPIRBuilder.h:188
void setHasRequiresDynamicAllocators(bool Value)
void setEmitLLVMUsed(bool Value=true)
Definition: OMPIRBuilder.h:185
std::optional< omp::GV > GridValue
Definition: OMPIRBuilder.h:117
bool hasRequiresReverseOffload() const
bool hasRequiresUnifiedAddress() const
llvm::AllocaInst * CreateAlloca(llvm::Type *Ty, const llvm::Twine &Name) const override
Definition: OMPIRBuilder.h:497
AtomicInfo(IRBuilder<> *Builder, llvm::Type *Ty, uint64_t AtomicSizeInBits, uint64_t ValueSizeInBits, llvm::Align AtomicAlign, llvm::Align ValueAlign, bool UseLibcall, llvm::Value *AtomicVar)
Definition: OMPIRBuilder.h:487
void decorateWithTBAA(llvm::Instruction *I) override
Definition: OMPIRBuilder.h:496
llvm::Value * getAtomicPointer() const override
Definition: OMPIRBuilder.h:495
Struct that keeps the information that should be kept throughout a 'target data' region.
TargetDataInfo(bool RequiresDevicePointerInfo, bool SeparateBeginEndCalls)
SmallMapVector< const Value *, std::pair< Value *, Value * >, 4 > DevicePtrInfoMap
void clearArrayInfo()
Clear information about the data arrays.
unsigned NumberOfPtrs
The total number of pointers passed to the runtime library.
bool HasNoWait
Whether the target ... data directive has a nowait clause.
bool isValid()
Return true if the current target data information has valid arrays.
bool HasMapper
Indicate whether any user-defined mapper exists.
An interface to create LLVM-IR for OpenMP directives.
Definition: OMPIRBuilder.h:474
InsertPointOrErrorTy createOrderedThreadsSimd(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsThreads)
Generator for '#omp ordered [threads | simd]'.
Constant * getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize, omp::IdentFlag Flags=omp::IdentFlag(0), unsigned Reserve2Flags=0)
Return an ident_t* encoding the source location SrcLocStr and Flags.
FunctionCallee getOrCreateRuntimeFunction(Module &M, omp::RuntimeFunction FnID)
Return the function declaration for the runtime function with FnID.
InsertPointOrErrorTy createCancel(const LocationDescription &Loc, Value *IfCondition, omp::Directive CanceledDirective)
Generator for '#omp cancel'.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD, int32_t MinThreadsVal=0, int32_t MaxThreadsVal=0, int32_t MinTeamsVal=0, int32_t MaxTeamsVal=0)
The omp target interface.
ReductionGenCBKind
Enum class for the RedctionGen CallBack type to be used.
CanonicalLoopInfo * collapseLoops(DebugLoc DL, ArrayRef< CanonicalLoopInfo * > Loops, InsertPointTy ComputeIP)
Collapse a loop nest into a single loop.
void createTaskyield(const LocationDescription &Loc)
Generator for '#omp taskyield'.
std::function< Error(InsertPointTy CodeGenIP)> FinalizeCallbackTy
Callback type for variable finalization (think destructors).
Definition: OMPIRBuilder.h:544
void emitBranch(BasicBlock *Target)
InsertPointTy createAtomicWrite(const LocationDescription &Loc, AtomicOpValue &X, Value *Expr, AtomicOrdering AO)
Emit atomic write for : X = Expr — Only Scalar data types.
static void writeThreadBoundsForKernel(const Triple &T, Function &Kernel, int32_t LB, int32_t UB)
EvalKind
Enum class for reduction evaluation types scalar, complex and aggregate.
static TargetRegionEntryInfo getTargetEntryUniqueInfo(FileIdentifierInfoCallbackTy CallBack, StringRef ParentName="")
Creates a unique info for a target entry when provided a filename and line number from.
void emitTaskwaitImpl(const LocationDescription &Loc)
Generate a taskwait runtime call.
Constant * registerTargetRegionFunction(TargetRegionEntryInfo &EntryInfo, Function *OutlinedFunction, StringRef EntryFnName, StringRef EntryFnIDName)
Registers the given function and sets up the attribtues of the function Returns the FunctionID.
void initialize()
Initialize the internal state, this will put structures types and potentially other helpers into the ...
void createTargetDeinit(const LocationDescription &Loc, int32_t TeamsReductionDataSize=0, int32_t TeamsReductionBufferLength=1024)
Create a runtime call for kmpc_target_deinit.
InsertPointOrErrorTy createTaskgroup(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB)
Generator for the taskgroup construct.
void loadOffloadInfoMetadata(Module &M)
Loads all the offload entries information from the host IR metadata.
std::function< InsertPointOrErrorTy(InsertPointTy CodeGenIP, Value *LHS, Value *RHS, Value *&Res)> ReductionGenCBTy
ReductionGen CallBack for MLIR.
InsertPointOrErrorTy emitTargetTask(TargetTaskBodyCallbackTy TaskBodyCB, Value *DeviceID, Value *RTLoc, OpenMPIRBuilder::InsertPointTy AllocaIP, const SmallVector< llvm::OpenMPIRBuilder::DependData > &Dependencies, bool HasNoWait)
Generate a target-task for the target construct.
void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop)
Fully unroll a loop.
void emitFlush(const LocationDescription &Loc)
Generate a flush runtime call.
static std::pair< int32_t, int32_t > readThreadBoundsForKernel(const Triple &T, Function &Kernel)
}
OpenMPIRBuilderConfig Config
The OpenMPIRBuilder Configuration.
CallInst * createOMPInteropDestroy(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause)
Create a runtime call for __tgt_interop_destroy.
InsertPointTy createAtomicRead(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOrdering AO)
Emit atomic Read for : V = X — Only Scalar data types.
Error emitIfClause(Value *Cond, BodyGenCallbackTy ThenGen, BodyGenCallbackTy ElseGen, InsertPointTy AllocaIP={})
Emits code for OpenMP 'if' clause using specified BodyGenCallbackTy Here is the logic: if (Cond) { Th...
std::function< void(EmitMetadataErrorKind, TargetRegionEntryInfo)> EmitMetadataErrorReportFunctionTy
Callback function type.
void setConfig(OpenMPIRBuilderConfig C)
Definition: OMPIRBuilder.h:510
InsertPointOrErrorTy createSingle(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsNowait, ArrayRef< llvm::Value * > CPVars={}, ArrayRef< llvm::Function * > CPFuncs={})
Generator for '#omp single'.
InsertPointOrErrorTy createTeams(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, Value *NumTeamsLower=nullptr, Value *NumTeamsUpper=nullptr, Value *ThreadLimit=nullptr, Value *IfExpr=nullptr)
Generator for #omp teams
std::forward_list< CanonicalLoopInfo > LoopInfos
Collection of owned canonical loop objects that eventually need to be free'd.
void createTaskwait(const LocationDescription &Loc)
Generator for '#omp taskwait'.
CanonicalLoopInfo * createLoopSkeleton(DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore, BasicBlock *PostInsertBefore, const Twine &Name={})
Create the control flow structure of a canonical OpenMP loop.
std::string createPlatformSpecificName(ArrayRef< StringRef > Parts) const
Get the create a name using the platform specific separators.
FunctionCallee createDispatchNextFunction(unsigned IVSize, bool IVSigned)
Returns __kmpc_dispatch_next_* runtime function for the specified size IVSize and sign IVSigned.
static void getKernelArgsVector(TargetKernelArgs &KernelArgs, IRBuilderBase &Builder, SmallVector< Value * > &ArgsVector)
Create the kernel args vector used by emitTargetKernel.
void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop)
Fully or partially unroll a loop.
InsertPointOrErrorTy createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, omp::ProcBindKind ProcBind, bool IsCancellable)
Generator for '#omp parallel'.
omp::OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position)
Get OMP_MAP_MEMBER_OF flag with extra bits reserved based on the position given.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn)
Add attributes known for FnID to Fn.
Module & M
The underlying LLVM-IR module.
StringMap< Constant * > SrcLocStrMap
Map to remember source location strings.
void createMapperAllocas(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumOperands, struct MapperAllocas &MapperAllocas)
Create the allocas instruction used in call to mapper functions.
Constant * getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize)
Return the (LLVM-IR) string describing the source location LocStr.
void addOutlineInfo(OutlineInfo &&OI)
Add a new region that will be outlined later.
Error emitTargetRegionFunction(TargetRegionEntryInfo &EntryInfo, FunctionGenCallback &GenerateFunctionCallback, bool IsOffloadEntry, Function *&OutlinedFn, Constant *&OutlinedFnID)
Create a unique name for the entry function using the source location information of the current targ...
FunctionCallee createDispatchFiniFunction(unsigned IVSize, bool IVSigned)
Returns __kmpc_dispatch_fini_* runtime function for the specified size IVSize and sign IVSigned.
InsertPointOrErrorTy createTarget(const LocationDescription &Loc, bool IsOffloadEntry, OpenMPIRBuilder::InsertPointTy AllocaIP, OpenMPIRBuilder::InsertPointTy CodeGenIP, TargetRegionEntryInfo &EntryInfo, ArrayRef< int32_t > NumTeams, ArrayRef< int32_t > NumThreads, SmallVectorImpl< Value * > &Inputs, GenMapInfoCallbackTy GenMapInfoCB, TargetBodyGenCallbackTy BodyGenCB, TargetGenArgAccessorsCallbackTy ArgAccessorFuncCB, SmallVector< DependData > Dependencies={}, bool HasNowait=false)
Generator for '#omp target'.
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor, CanonicalLoopInfo **UnrolledCLI)
Partially unroll a loop.
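Not part of the header; a minimal sketch of unrollLoopPartial with an unroll factor of 4. `Loop` is assumed to be a canonical loop produced earlier by createCanonicalLoop.
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
using namespace llvm;
void unrollBy4(OpenMPIRBuilder &OMPBuilder, DebugLoc DL,
               CanonicalLoopInfo *Loop) {
  CanonicalLoopInfo *Unrolled = nullptr;
  // Request partial unrolling; the remaining outer canonical loop (if any)
  // is returned through the out-parameter.
  OMPBuilder.unrollLoopPartial(DL, Loop, /*Factor=*/4, &Unrolled);
  // `Unrolled` can be handed to further transformations, e.g. applyWorkshareLoop.
  (void)Unrolled;
}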
void emitTaskyieldImpl(const LocationDescription &Loc)
Generate a taskyield runtime call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc, Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg, struct MapperAllocas &MapperAllocas, int64_t DeviceID, unsigned NumOperands)
Create the call for the target mapper function.
std::function< Error(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)> StorableBodyGenCallbackTy
Definition: OMPIRBuilder.h:607
InsertPointTy createAtomicCompare(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOpValue &R, Value *E, Value *D, AtomicOrdering AO, omp::OMPAtomicCompareOp Op, bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly)
Emit atomic compare for constructs: — Only scalar data types cond-expr-stmt: x = x ordop expr ?...
InsertPointOrErrorTy createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, AtomicOpValue &V, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr)
Emit atomic update for constructs: — Only Scalar data types V = X; X = X BinOp Expr ,...
InsertPointTy createOrderedDepend(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumLoops, ArrayRef< llvm::Value * > StoreValues, const Twine &Name, bool IsDependSource)
Generator for '#omp ordered depend (source | sink)'.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, llvm::IntegerType *IntPtrTy, bool BranchtoEnd=true)
Generate conditional branch and relevant BasicBlocks through which private threads copy the 'copyin' ...
void emitOffloadingArrays(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo, TargetDataInfo &Info, bool IsNonContiguous=false, function_ref< void(unsigned int, Value *)> DeviceAddrCB=nullptr, function_ref< Value *(unsigned int)> CustomMapperCB=nullptr)
Emit the arrays used to pass the captures and map information to the offloading runtime library.
SmallVector< FinalizationInfo, 8 > FinalizationStack
The finalization stack made up of finalize callbacks currently in-flight, wrapped into FinalizationIn...
std::vector< CanonicalLoopInfo * > tileLoops(DebugLoc DL, ArrayRef< CanonicalLoopInfo * > Loops, ArrayRef< Value * > TileSizes)
Tile a loop nest.
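Not part of the header; a minimal sketch of tileLoops on a two-deep canonical loop nest with 32x32 tiles. `Outer` and `Inner` are assumed to be perfectly nested canonical loops from prior createCanonicalLoop calls, and the tile-size values are assumed to use the loops' trip-count type (i64 here).
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;
void tile2D(OpenMPIRBuilder &OMPBuilder, IRBuilder<> &Builder, DebugLoc DL,
            CanonicalLoopInfo *Outer, CanonicalLoopInfo *Inner) {
  Value *Tile = Builder.getInt64(32);
  // The result contains the canonical loops of the tiled nest (floor loops
  // followed by tile loops, per the usual tiling transformation).
  std::vector<CanonicalLoopInfo *> Tiled =
      OMPBuilder.tileLoops(DL, {Outer, Inner}, {Tile, Tile});
  (void)Tiled;
}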
CallInst * createOMPInteropInit(const LocationDescription &Loc, Value *InteropVar, omp::OMPInteropType InteropType, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause)
Create a runtime call for __tgt_interop_init.
SmallVector< OutlineInfo, 16 > OutlineInfos
Collection of regions that need to be outlined during finalization.
Function * getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID)
std::function< InsertPointOrErrorTy(InsertPointTy, Type *, Value *, Value *)> ReductionGenAtomicCBTy
Functions used to generate atomic reductions.
const Triple T
The target triple of the underlying module.
DenseMap< std::pair< Constant *, uint64_t >, Constant * > IdentMap
Map to remember existing ident_t*.
CallInst * createOMPFree(const LocationDescription &Loc, Value *Addr, Value *Allocator, std::string Name="")
Create a runtime call for kmpc_free.
FunctionCallee createForStaticInitFunction(unsigned IVSize, bool IVSigned, bool IsGPUDistribute)
Returns __kmpc_for_static_init_* runtime function for the specified size IVSize and sign IVSigned.
CallInst * createOMPAlloc(const LocationDescription &Loc, Value *Size, Value *Allocator, std::string Name="")
Create a runtime call for kmpc_Alloc.
void emitNonContiguousDescriptor(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo, TargetDataInfo &Info)
Emit an array of struct descriptors to be assigned to the offload args.
InsertPointOrErrorTy createSection(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB)
Generator for '#omp section'.
std::function< InsertPointTy(InsertPointTy CodeGenIP, unsigned Index, Value **LHS, Value **RHS, Function *CurFn)> ReductionGenClangCBTy
ReductionGen CallBack for Clang.
void emitBlock(BasicBlock *BB, Function *CurFn, bool IsFinished=false)
Value * getOrCreateThreadID(Value *Ident)
Return the current thread ID.
void emitOffloadingArraysAndArgs(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, TargetDataInfo &Info, TargetDataRTArgs &RTArgs, MapInfosTy &CombinedInfo, bool IsNonContiguous=false, bool ForEndCall=false, function_ref< void(unsigned int, Value *)> DeviceAddrCB=nullptr, function_ref< Value *(unsigned int)> CustomMapperCB=nullptr)
Allocates memory for and populates the arrays required for offloading (offload_{baseptrs|ptrs|mappers...
InsertPointOrErrorTy createMaster(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB)
Generator for '#omp master'.
void pushFinalizationCB(const FinalizationInfo &FI)
Push a finalization callback on the finalization stack.
Definition: OMPIRBuilder.h:562
Error emitCancelationCheckImpl(Value *CancelFlag, omp::Directive CanceledDirective, FinalizeCallbackTy ExitCB={})
Generate control flow and cleanup for cancellation.
InsertPointOrErrorTy emitKernelLaunch(const LocationDescription &Loc, Value *OutlinedFnID, EmitFallbackCallbackTy EmitTargetCallFallbackCB, TargetKernelArgs &Args, Value *DeviceID, Value *RTLoc, InsertPointTy AllocaIP)
Generate a target region entry call and host fallback call.
InsertPointTy getInsertionPoint()
StringMap< GlobalVariable *, BumpPtrAllocator > InternalVars
An ordered map of auto-generated variables to their unique names.
GlobalVariable * getOrCreateInternalVariable(Type *Ty, const StringRef &Name, unsigned AddressSpace=0)
Gets (if a variable with the given name already exists) or creates an internal global variable with the spe...
InsertPointOrErrorTy createReductionsGPU(const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef< ReductionInfo > ReductionInfos, bool IsNoWait=false, bool IsTeamsReduction=false, bool HasDistribute=false, ReductionGenCBKind ReductionGenCBKind=ReductionGenCBKind::MLIR, std::optional< omp::GV > GridValue={}, unsigned ReductionBufNum=1024, Value *SrcLocInfo=nullptr)
Design of OpenMP reductions on the GPU.
FunctionCallee createDispatchInitFunction(unsigned IVSize, bool IVSigned)
Returns __kmpc_dispatch_init_* runtime function for the specified size IVSize and sign IVSigned.
Function * emitUserDefinedMapper(function_ref< MapInfosTy &(InsertPointTy CodeGenIP, llvm::Value *PtrPHI, llvm::Value *BeginArg)> PrivAndGenMapInfoCB, llvm::Type *ElemTy, StringRef FuncName, function_ref< bool(unsigned int, Function **)> CustomMapperCB=nullptr)
Emit the user-defined mapper function.
CallInst * createOMPInteropUse(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause)
Create a runtime call for __tgt_interop_use.
IRBuilder<>::InsertPoint InsertPointTy
Type used throughout for insertion points.
Definition: OMPIRBuilder.h:521
InsertPointOrErrorTy createReductions(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< ReductionInfo > ReductionInfos, ArrayRef< bool > IsByRef, bool IsNoWait=false)
Generator for '#omp reduction'.
GlobalVariable * createOffloadMapnames(SmallVectorImpl< llvm::Constant * > &Names, std::string VarName)
Create the global variable holding the offload names information.
InsertPointOrErrorTy createTask(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, bool Tied=true, Value *Final=nullptr, Value *IfCondition=nullptr, SmallVector< DependData > Dependencies={}, bool Mergeable=false, Value *EventHandle=nullptr)
Generator for #omp task
std::function< Expected< Function * >(StringRef FunctionName)> FunctionGenCallback
Functions used to generate a function with the given name.
static void writeTeamsForKernel(const Triple &T, Function &Kernel, int32_t LB, int32_t UB)
InsertPointOrErrorTy createBarrier(const LocationDescription &Loc, omp::Directive Kind, bool ForceSimpleCall=false, bool CheckCancelFlag=true)
Emitter methods for OpenMP directives.
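Not part of the header; a minimal sketch of emitting an explicit `#pragma omp barrier` with createBarrier at the builder's current position, using the default ForceSimpleCall/CheckCancelFlag arguments.
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;
Error emitBarrier(OpenMPIRBuilder &OMPBuilder, IRBuilder<> &Builder) {
  OpenMPIRBuilder::LocationDescription Loc(Builder);
  OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
      OMPBuilder.createBarrier(Loc, omp::Directive::OMPD_barrier);
  if (!AfterIP)
    return AfterIP.takeError();
  // Continue emitting IR after the barrier call.
  Builder.restoreIP(*AfterIP);
  return Error::success();
}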
void setCorrectMemberOfFlag(omp::OpenMPOffloadMappingFlags &Flags, omp::OpenMPOffloadMappingFlags MemberOfFlag)
Given an initial flag set, this function modifies it to contain the passed in MemberOfFlag generated ...
Constant * getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize)
Return the (LLVM-IR) string describing the default source location.
InsertPointOrErrorTy createCritical(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst)
Generator for '#omp critical'.
void createOffloadEntry(Constant *ID, Constant *Addr, uint64_t Size, int32_t Flags, GlobalValue::LinkageTypes, StringRef Name="")
Creates offloading entry for the provided entry ID ID, address Addr, size Size, and flags Flags.
static unsigned getOpenMPDefaultSimdAlign(const Triple &TargetTriple, const StringMap< bool > &Features)
Get the default alignment value for given target.
unsigned getFlagMemberOffset()
Get the offset of the OMP_MAP_MEMBER_OF field.
void createOffloadEntriesAndInfoMetadata(EmitMetadataErrorReportFunctionTy &ErrorReportFunction)
void applySimd(CanonicalLoopInfo *Loop, MapVector< Value *, Value * > AlignedVars, Value *IfCond, omp::OrderKind Order, ConstantInt *Simdlen, ConstantInt *Safelen)
Add metadata to simd-ize a loop.
bool isLastFinalizationInfoCancellable(omp::Directive DK)
Return true if the last entry in the finalization stack is of kind DK and cancellable.
InsertPointTy emitTargetKernel(const LocationDescription &Loc, InsertPointTy AllocaIP, Value *&Return, Value *Ident, Value *DeviceID, Value *NumTeams, Value *NumThreads, Value *HostPtr, ArrayRef< Value * > KernelArgs)
Generate a target region entry call.
GlobalVariable * createOffloadMaptypes(SmallVectorImpl< uint64_t > &Mappings, std::string VarName)
Create the global variable holding the offload mappings information.
CallInst * createCachedThreadPrivate(const LocationDescription &Loc, llvm::Value *Pointer, llvm::ConstantInt *Size, const llvm::Twine &Name=Twine(""))
Create a runtime call for kmpc_threadprivate_cached.
IRBuilder Builder
The LLVM-IR Builder used to create IR.
GlobalValue * createGlobalFlag(unsigned Value, StringRef Name)
Create a hidden global flag Name in the module with initial value Value.
InsertPointOrErrorTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, bool NeedsBarrier, llvm::omp::ScheduleKind SchedKind=llvm::omp::OMP_SCHEDULE_Default, Value *ChunkSize=nullptr, bool HasSimdModifier=false, bool HasMonotonicModifier=false, bool HasNonmonotonicModifier=false, bool HasOrderedClause=false, omp::WorksharingLoopType LoopType=omp::WorksharingLoopType::ForStaticLoop)
Modifies the canonical loop to be a workshare loop.
void emitOffloadingArraysArgument(IRBuilderBase &Builder, OpenMPIRBuilder::TargetDataRTArgs &RTArgs, OpenMPIRBuilder::TargetDataInfo &Info, bool ForEndCall=false)
Emit the arguments to be passed to the runtime library based on the arrays of base pointers,...
InsertPointOrErrorTy createMasked(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, Value *Filter)
Generator for '#omp masked'.
Expected< CanonicalLoopInfo * > createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *TripCount, const Twine &Name="loop")
Generator for the control flow structure of an OpenMP canonical loop.
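Not part of the header; a minimal sketch that builds a canonical loop with createCanonicalLoop and then lowers it to a statically scheduled workshare loop with applyWorkshareLoop. The loop-body callback shape `Error(InsertPointTy CodeGenIP, Value *IV)` is an assumption; the other signatures follow the listing above.
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
using namespace llvm;
Error emitWorksharedLoop(OpenMPIRBuilder &OMPBuilder,
                         OpenMPIRBuilder::LocationDescription Loc,
                         OpenMPIRBuilder::InsertPointTy AllocaIP,
                         Value *TripCount, DebugLoc DL) {
  // Per-iteration body: emitted at CodeGenIP, indexed by the induction value IV.
  auto BodyGen = [](OpenMPIRBuilder::InsertPointTy /*CodeGenIP*/,
                    Value * /*IV*/) -> Error { return Error::success(); };
  Expected<CanonicalLoopInfo *> CLI =
      OMPBuilder.createCanonicalLoop(Loc, BodyGen, TripCount, "loop");
  if (!CLI)
    return CLI.takeError();
  // Turn the canonical loop into an '#omp for'-style workshare loop with an
  // implicit barrier at the end (default static schedule).
  OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
      OMPBuilder.applyWorkshareLoop(DL, *CLI, AllocaIP, /*NeedsBarrier=*/true);
  if (!AfterIP)
    return AfterIP.takeError();
  return Error::success();
}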
Value * getSizeInBytes(Value *BasePtr)
Computes the size of type in bytes.
function_ref< InsertPointOrErrorTy(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original, Value &Inner, Value *&ReplVal)> PrivatizeCallbackTy
Callback type for variable privatization (think copy & default constructor).
Definition: OMPIRBuilder.h:642
OpenMPIRBuilder(Module &M)
Create a new OpenMPIRBuilder operating on the given module M.
Definition: OMPIRBuilder.h:478
FunctionCallee createDispatchDeinitFunction()
Returns __kmpc_dispatch_deinit runtime function.
void registerTargetGlobalVariable(OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind CaptureClause, OffloadEntriesInfoManager::OMPTargetDeviceClauseKind DeviceClause, bool IsDeclaration, bool IsExternallyVisible, TargetRegionEntryInfo EntryInfo, StringRef MangledName, std::vector< GlobalVariable * > &GeneratedRefs, bool OpenMPSIMD, std::vector< Triple > TargetTriple, std::function< Constant *()> GlobalInitializer, std::function< GlobalValue::LinkageTypes()> VariableLinkage, Type *LlvmPtrTy, Constant *Addr)
Registers a target variable for device or host.
InsertPointOrErrorTy createTargetData(const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value *DeviceID, Value *IfCond, TargetDataInfo &Info, GenMapInfoCallbackTy GenMapInfoCB, omp::RuntimeFunction *MapperFunc=nullptr, function_ref< InsertPointOrErrorTy(InsertPointTy CodeGenIP, BodyGenTy BodyGenType)> BodyGenCB=nullptr, function_ref< void(unsigned int, Value *)> DeviceAddrCB=nullptr, function_ref< Value *(unsigned int)> CustomMapperCB=nullptr, Value *SrcLocInfo=nullptr)
Generator for '#omp target data'.
BodyGenTy
Type of BodyGen to use for region codegen.
InsertPointOrErrorTy createAtomicUpdate(const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr)
Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X For complex Operations: X = ...
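Not part of the header; a minimal sketch of createAtomicUpdate for `x += expr` on an i32 from `#pragma omp atomic update`. The AtomicUpdateCallbackTy shape `Expected<Value *>(Value *XOld, IRBuilder<> &)` and the AtomicOpValue field names are assumptions, not taken from the listing above.
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;
Error emitAtomicAdd(OpenMPIRBuilder &OMPBuilder, IRBuilder<> &Builder,
                    OpenMPIRBuilder::InsertPointTy AllocaIP, Value *XAddr,
                    Value *Expr) {
  OpenMPIRBuilder::AtomicOpValue X;
  X.Var = XAddr;
  X.ElemTy = Builder.getInt32Ty();
  // Fallback update expression used when a plain atomicrmw cannot be emitted.
  auto UpdateFn = [Expr](Value *XOld, IRBuilder<> &IRB) -> Expected<Value *> {
    return IRB.CreateAdd(XOld, Expr); // x = x + expr
  };
  OpenMPIRBuilder::AtomicUpdateCallbackTy UpdateCB(UpdateFn);
  OpenMPIRBuilder::InsertPointOrErrorTy AfterIP = OMPBuilder.createAtomicUpdate(
      OpenMPIRBuilder::LocationDescription(Builder), AllocaIP, X, Expr,
      AtomicOrdering::Monotonic, AtomicRMWInst::Add, UpdateCB,
      /*IsXBinopExpr=*/true);
  if (!AfterIP)
    return AfterIP.takeError();
  Builder.restoreIP(*AfterIP);
  return Error::success();
}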
SmallVector< llvm::Function *, 16 > ConstantAllocaRaiseCandidates
A collection of candidate target functions whose constant allocas will attempt to be raised on a cal...
OffloadEntriesInfoManager OffloadInfoManager
Info manager to keep track of target regions.
static std::pair< int32_t, int32_t > readTeamBoundsForKernel(const Triple &T, Function &Kernel)
Read/write the bounds on teams for Kernel.
std::function< std::tuple< std::string, uint64_t >()> FileIdentifierInfoCallbackTy
const std::string ompOffloadInfoName
OMP Offload Info Metadata name string.
Expected< InsertPointTy > InsertPointOrErrorTy
Type used to represent an insertion point or an error value.
Definition: OMPIRBuilder.h:524
InsertPointTy createCopyPrivate(const LocationDescription &Loc, llvm::Value *BufSize, llvm::Value *CpyBuf, llvm::Value *CpyFn, llvm::Value *DidIt)
Generator for __kmpc_copyprivate.
void popFinalizationCB()
Pop the last finalization callback from the finalization stack.
Definition: OMPIRBuilder.h:569
InsertPointOrErrorTy createSections(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< StorableBodyGenCallbackTy > SectionCBs, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait)
Generator for '#omp sections'.
function_ref< Error(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)> BodyGenCallbackTy
Callback type for body (=inner region) code generation.
Definition: OMPIRBuilder.h:598
bool updateToLocation(const LocationDescription &Loc)
Update the internal location to Loc.
void createFlush(const LocationDescription &Loc)
Generator for '#omp flush'.
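Not part of the header; a minimal sketch of emitting `#pragma omp flush` with createFlush at the builder's current position.
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;
void emitFlush(OpenMPIRBuilder &OMPBuilder, IRBuilder<> &Builder) {
  // createFlush emits the runtime call directly; no insertion point is returned.
  OMPBuilder.createFlush(OpenMPIRBuilder::LocationDescription(Builder));
}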
Constant * getAddrOfDeclareTargetVar(OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind CaptureClause, OffloadEntriesInfoManager::OMPTargetDeviceClauseKind DeviceClause, bool IsDeclaration, bool IsExternallyVisible, TargetRegionEntryInfo EntryInfo, StringRef MangledName, std::vector< GlobalVariable * > &GeneratedRefs, bool OpenMPSIMD, std::vector< Triple > TargetTriple, Type *LlvmPtrTy, std::function< Constant *()> GlobalInitializer, std::function< GlobalValue::LinkageTypes()> VariableLinkage)
Retrieve (or create if non-existent) the address of a declare target variable, used in conjunction wi...
EmitMetadataErrorKind
The kind of errors that can occur when emitting the offload entries and metadata.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:363
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
Definition: StringMap.h:128
size_type count(StringRef Key) const
count - Return 1 if the element is in the map, 0 otherwise.
Definition: StringMap.h:276
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Value * getOperand(unsigned i) const
Definition: User.h:228
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
Value handle that is nullable, but tries to track the Value.
Definition: ValueHandle.h:204
bool pointsToAliveValue() const
Definition: ValueHandle.h:224
An efficient, type-erasing, non-owning reference to a callable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition: ISDOpcodes.h:71
OpenMPOffloadMappingFlags
Values for bit flags used to specify the mapping type for offloading.
Definition: OMPConstants.h:195
IdentFlag
IDs for all omp runtime library ident_t flag encodings (see their definition in openmp/runtime/src/kmp...
Definition: OMPConstants.h:65
RTLDependenceKindTy
Dependence kind for RTL.
Definition: OMPConstants.h:273
RuntimeFunction
IDs for all omp runtime library (RTL) functions.
Definition: OMPConstants.h:45
WorksharingLoopType
A type of worksharing loop construct.
Definition: OMPConstants.h:283
OMPAtomicCompareOp
Atomic compare operations. Currently OpenMP only supports ==, >, and <.
Definition: OMPConstants.h:267
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
BasicBlock * splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch, llvm::Twine Suffix=".split")
Like splitBB, but reuses the current block's name for the new name.
@ Offset
Definition: DWP.cpp:480
void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New, bool CreateBranch)
Move the instruction after an InsertPoint to the beginning of another BasicBlock.
BasicBlock * splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch, llvm::Twine Name={})
Split a BasicBlock at an InsertPoint, even if the block is degenerate (missing the terminator).
AtomicOrdering
Atomic ordering for LLVM's memory model.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
a struct to pack relevant information while generating atomic Ops
A struct to pack the relevant information for an OpenMP depend clause.
DependData(omp::RTLDependenceKindTy DepKind, Type *DepValueType, Value *DepVal)
omp::RTLDependenceKindTy DepKind
bool IsCancellable
Flag to indicate if the directive is cancellable.
Definition: OMPIRBuilder.h:556
FinalizeCallbackTy FiniCB
The finalization callback provided by the last in-flight invocation of createXXXX for the directive o...
Definition: OMPIRBuilder.h:549
omp::Directive DK
The directive kind of the innermost directive that has an associated region which might require final...
Definition: OMPIRBuilder.h:553
Description of a LLVM-IR insertion point (IP) and a debug/source location (filename,...
Definition: OMPIRBuilder.h:646
LocationDescription(const InsertPointTy &IP)
Definition: OMPIRBuilder.h:649
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
Definition: OMPIRBuilder.h:650
LocationDescription(const IRBuilderBase &IRB)
Definition: OMPIRBuilder.h:647
This structure contains combined information generated for mappable clauses, including base pointers,...
void append(MapInfosTy &CurInfo)
Append arrays in CurInfo.
MapDeviceInfoArrayTy DevicePointers
StructNonContiguousInfo NonContigInfo
Helper that contains information about regions we need to outline during finalization.
void collectBlocks(SmallPtrSetImpl< BasicBlock * > &BlockSet, SmallVectorImpl< BasicBlock * > &BlockVector)
Collect all blocks in between EntryBB and ExitBB in both the given vector and set.
Function * getFunction() const
Return the function that contains the region to be outlined.
SmallVector< Value *, 2 > ExcludeArgsFromAggregate
std::function< void(Function &)> PostOutlineCBTy
Information about an OpenMP reduction.
EvalKind EvaluationKind
Reduction evaluation kind - scalar, complex or aggregate.
ReductionGenAtomicCBTy AtomicReductionGen
Callback for generating the atomic reduction body, may be null.
ReductionGenCBTy ReductionGen
Callback for generating the reduction body.
ReductionInfo(Value *PrivateVariable)
Value * Variable
Reduction variable of pointer type.
Value * PrivateVariable
Thread-private partial reduction variable.
ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable, EvalKind EvaluationKind, ReductionGenCBTy ReductionGen, ReductionGenClangCBTy ReductionGenClang, ReductionGenAtomicCBTy AtomicReductionGen)
ReductionGenClangCBTy ReductionGenClang
Clang callback for generating the reduction body.
Type * ElementType
Reduction element type, must match pointee type of variable.
Container for the arguments used to pass data to the runtime library.
Value * SizesArray
The array of sizes passed to the runtime library.
TargetDataRTArgs(Value *BasePointersArray, Value *PointersArray, Value *SizesArray, Value *MapTypesArray, Value *MapTypesArrayEnd, Value *MappersArray, Value *MapNamesArray)
Value * PointersArray
The array of section pointers passed to the runtime library.
Value * MappersArray
The array of user-defined mappers passed to the runtime library.
Value * MapTypesArrayEnd
The array of map types passed to the runtime library for the end of the region, or nullptr if there a...
Value * BasePointersArray
The array of base pointers passed to the runtime library.
Value * MapTypesArray
The array of map types passed to the runtime library for the beginning of the region or for the entir...
Value * MapNamesArray
The array of original declaration names of mapped pointers sent to the runtime library for debugging.
Data structure that contains the needed information to construct the kernel args vector.
TargetKernelArgs(unsigned NumTargetItems, TargetDataRTArgs RTArgs, Value *NumIterations, ArrayRef< Value * > NumTeams, ArrayRef< Value * > NumThreads, Value *DynCGGroupMem, bool HasNoWait)
Value * DynCGGroupMem
The size of the dynamic shared memory.
ArrayRef< Value * > NumThreads
The number of threads.
TargetDataRTArgs RTArgs
Arguments passed to the runtime library.
Value * NumIterations
The number of iterations.
unsigned NumTargetItems
Number of arguments passed to the runtime library.
bool HasNoWait
True if the kernel has 'no wait' clause.
ArrayRef< Value * > NumTeams
The number of teams.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254
Data structure to contain the information needed to uniquely identify a target entry.
Definition: OMPIRBuilder.h:203
static void getTargetRegionEntryFnName(SmallVectorImpl< char > &Name, StringRef ParentName, unsigned DeviceID, unsigned FileID, unsigned Line, unsigned Count)
static constexpr const char * KernelNamePrefix
The prefix used for kernel names.
Definition: OMPIRBuilder.h:205
bool operator<(const TargetRegionEntryInfo &RHS) const
Definition: OMPIRBuilder.h:224
TargetRegionEntryInfo(StringRef ParentName, unsigned DeviceID, unsigned FileID, unsigned Line, unsigned Count=0)
Definition: OMPIRBuilder.h:214
Defines various target-specific GPU grid values that must be consistent between host RTL (plugin),...
Definition: OMPGridValues.h:57