OMPIRBuilder.h (LLVM 20.0.0git)
1//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the OpenMPIRBuilder class and helpers used as a convenient
10// way to create LLVM instructions for OpenMP directives.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
15#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
16
21#include "llvm/IR/DebugLoc.h"
22#include "llvm/IR/IRBuilder.h"
23#include "llvm/IR/Module.h"
26#include <forward_list>
27#include <map>
28#include <optional>
29
30namespace llvm {
31class CanonicalLoopInfo;
32struct TargetRegionEntryInfo;
33class OffloadEntriesInfoManager;
34class OpenMPIRBuilder;
35
36/// Move the instructions after an InsertPoint to the beginning of another
37/// BasicBlock.
38///
39/// The instructions after \p IP are moved to the beginning of \p New which must
40/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
41/// \p New will be added such that there is no semantic change. Otherwise, the
42/// \p IP insert block remains degenerate and it is up to the caller to insert a
43/// terminator.
44void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New,
45 bool CreateBranch);
46
47/// Splice a BasicBlock at an IRBuilder's current insertion point. Its new
48/// insert location will stick to after the instruction before the insertion
49/// point (instead of moving with the instruction the InsertPoint stores
50/// internally).
51void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch);
52
53/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
54/// (missing the terminator).
55///
56/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
57/// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch
58/// is true, a branch to the new successor will be created such that
59/// semantically there is no change; otherwise the block of the insertion point
60/// remains degenerate and it is the caller's responsibility to insert a
61/// terminator. Returns the new successor block.
62BasicBlock *splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
63 llvm::Twine Name = {});
64
65/// Split a BasicBlock at \p Builder's insertion point, even if the block is
66/// degenerate (missing the terminator). Its new insert location will stick to
67/// after the instruction before the insertion point (instead of moving with the
68/// instruction the InsertPoint stores internally).
69BasicBlock *splitBB(IRBuilderBase &Builder, bool CreateBranch,
70 llvm::Twine Name = {});
71
72/// Split a BasicBlock at \p Builder's insertion point, even if the block is
73/// degenerate (missing the terminator). Its new insert location will stick to
74/// after the instruction before the insertion point (instead of moving with the
75/// instruction the InsertPoint stores internally).
76BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch, llvm::Twine Name);
77
78/// Like splitBB, but reuses the current block's name for the new name.
79BasicBlock *splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
80 llvm::Twine Suffix = ".split");
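// A minimal usage sketch (hypothetical; `SomeInst`, `Builder`, and `ContBB`
// are placeholders, not part of this interface):
//
//   IRBuilder<> Builder(SomeInst);
//   // Split off everything after the insertion point into a new block and
//   // branch to it; Builder keeps inserting into the original block, before
//   // the newly created branch.
//   BasicBlock *ContBB = splitBBWithSuffix(Builder, /*CreateBranch=*/true);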
81
82/// Captures attributes that affect generating LLVM-IR using the
83/// OpenMPIRBuilder and related classes. Note that not all attributes are
84/// required for all classes or functions. In some use cases the configuration
85/// is not necessary at all, because the only functions that are called
86/// are ones that are not dependent on the configuration.
88public:
89 /// Flag to define whether to generate code for the role of the OpenMP host
90 /// (if set to false) or device (if set to true) in an offloading context. It
91 /// is set when the -fopenmp-is-target-device compiler frontend option is
92 /// specified.
93 std::optional<bool> IsTargetDevice;
94
95 /// Flag for specifying if the compilation is done for an accelerator. It is
96 /// set according to the architecture of the target triple and currently only
97 /// true when targeting AMDGPU or NVPTX. Today, these targets can only perform
98 /// the role of an OpenMP target device, so `IsTargetDevice` must also be true
99 /// if `IsGPU` is true. This restriction might be lifted if an accelerator-
100 /// like target with the ability to work as the OpenMP host is added, or if
101 /// the capabilities of the currently supported GPU architectures are
102 /// expanded.
103 std::optional<bool> IsGPU;
104
105 /// Flag for specifying if LLVMUsed information should be emitted.
106 std::optional<bool> EmitLLVMUsedMetaInfo;
107
108 /// Flag for specifying if offloading is mandatory.
109 std::optional<bool> OpenMPOffloadMandatory;
110
111 /// First separator used between the initial two parts of a name.
112 std::optional<StringRef> FirstSeparator;
113 /// Separator used between all remaining consecutive parts of a name.
114 std::optional<StringRef> Separator;
115
116 // Grid Value for the GPU target
117 std::optional<omp::GV> GridValue;
118
119 /// When compilation is being done for the OpenMP host (i.e. `IsTargetDevice =
120 /// false`), this contains the list of associated offloading triples, if any.
122
126 bool HasRequiresReverseOffload,
127 bool HasRequiresUnifiedAddress,
128 bool HasRequiresUnifiedSharedMemory,
129 bool HasRequiresDynamicAllocators);
130
131 // Getter functions that assert if the required values are not present.
132 bool isTargetDevice() const {
133 assert(IsTargetDevice.has_value() && "IsTargetDevice is not set");
134 return *IsTargetDevice;
135 }
136
137 bool isGPU() const {
138 assert(IsGPU.has_value() && "IsGPU is not set");
139 return *IsGPU;
140 }
141
143 assert(OpenMPOffloadMandatory.has_value() &&
144 "OpenMPOffloadMandatory is not set");
146 }
147
149 assert(GridValue.has_value() && "GridValue is not set");
150 return *GridValue;
151 }
152
153 bool hasRequiresFlags() const { return RequiresFlags; }
154 bool hasRequiresReverseOffload() const;
155 bool hasRequiresUnifiedAddress() const;
157 bool hasRequiresDynamicAllocators() const;
158
159 /// Returns requires directive clauses as flags compatible with those expected
160 /// by libomptarget.
161 int64_t getRequiresFlags() const;
162
163 // Returns the FirstSeparator if set; otherwise uses the default separator
164 // depending on isGPU.
166 if (FirstSeparator.has_value())
167 return *FirstSeparator;
168 if (isGPU())
169 return "_";
170 return ".";
171 }
172
173 // Returns the Separator if set; otherwise uses the default separator depending
174 // on isGPU.
176 if (Separator.has_value())
177 return *Separator;
178 if (isGPU())
179 return "$";
180 return ".";
181 }
182
184 void setIsGPU(bool Value) { IsGPU = Value; }
190
195
196private:
197 /// Flags for specifying which requires directive clauses are present.
198 int64_t RequiresFlags;
199};
200
201/// Data structure to contain the information needed to uniquely identify
202/// a target entry.
204 /// The prefix used for kernel names.
205 static constexpr const char *KernelNamePrefix = "__omp_offloading_";
206
207 std::string ParentName;
208 unsigned DeviceID;
209 unsigned FileID;
210 unsigned Line;
211 unsigned Count;
212
215 unsigned FileID, unsigned Line, unsigned Count = 0)
217 Count(Count) {}
218
221 unsigned DeviceID, unsigned FileID,
222 unsigned Line, unsigned Count);
223
225 return std::make_tuple(ParentName, DeviceID, FileID, Line, Count) <
226 std::make_tuple(RHS.ParentName, RHS.DeviceID, RHS.FileID, RHS.Line,
227 RHS.Count);
228 }
229};
230
231/// Class that manages information about offload code regions and data
233 /// The OpenMPIRBuilder this entry manager belongs to.
234 OpenMPIRBuilder *OMPBuilder;
235 unsigned OffloadingEntriesNum = 0; // Number of entries registered so far.
236
237public:
238 /// Base class of the entries info.
240 public:
241 /// Kind of a given entry.
242 enum OffloadingEntryInfoKinds : unsigned {
243 /// Entry is a target region.
245 /// Entry is a declare target variable.
247 /// Invalid entry info.
249 };
250
251 protected:
253 explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
254 explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
255 uint32_t Flags)
256 : Flags(Flags), Order(Order), Kind(Kind) {}
257 ~OffloadEntryInfo() = default;
258
259 public:
260 bool isValid() const { return Order != ~0u; }
261 unsigned getOrder() const { return Order; }
262 OffloadingEntryInfoKinds getKind() const { return Kind; }
263 uint32_t getFlags() const { return Flags; }
264 void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
265 Constant *getAddress() const { return cast_or_null<Constant>(Addr); }
267 assert(!Addr.pointsToAliveValue() && "Address has been set before!");
268 Addr = V;
269 }
270 static bool classof(const OffloadEntryInfo *Info) { return true; }
271
272 private:
273 /// Address of the entity that has to be mapped for offloading.
274 WeakTrackingVH Addr;
275
276 /// Flags associated with the device global.
277 uint32_t Flags = 0u;
278
279 /// Order this entry was emitted.
280 unsigned Order = ~0u;
281
283 };
284
286 /// Return true if there are no entries defined.
286 bool empty() const;
287 /// Return number of entries defined so far.
288 unsigned size() const { return OffloadingEntriesNum; }
289
290 OffloadEntriesInfoManager(OpenMPIRBuilder *builder) : OMPBuilder(builder) {}
291
292 //
293 // Target region entries related.
294 //
295
296 /// Kind of the target registry entry.
298 /// Mark the entry as target region.
300 };
301
302 /// Target region entries info.
304 /// Address that can be used as the ID of the entry.
305 Constant *ID = nullptr;
306
307 public:
310 explicit OffloadEntryInfoTargetRegion(unsigned Order, Constant *Addr,
311 Constant *ID,
314 ID(ID) {
316 }
317
318 Constant *getID() const { return ID; }
319 void setID(Constant *V) {
320 assert(!ID && "ID has been set before!");
321 ID = V;
322 }
323 static bool classof(const OffloadEntryInfo *Info) {
324 return Info->getKind() == OffloadingEntryInfoTargetRegion;
325 }
326 };
327
328 /// Initialize target region entry.
329 /// This is ONLY needed for DEVICE compilation.
331 unsigned Order);
332 /// Register target region entry.
336 /// Return true if a target region entry with the provided information
337 /// exists.
339 bool IgnoreAddressId = false) const;
340
341 // Return the Name based on \a EntryInfo using the next available Count.
343 const TargetRegionEntryInfo &EntryInfo);
344
345 /// Applies action \a Action on all registered entries.
346 typedef function_ref<void(const TargetRegionEntryInfo &EntryInfo,
347 const OffloadEntryInfoTargetRegion &)>
349 void
351
352 //
353 // Device global variable entries related.
354 //
355
356 /// Kind of the global variable entry.
358 /// Mark the entry as a 'declare target to'.
360 /// Mark the entry as a 'declare target link'.
362 /// Mark the entry as a declare target enter.
364 /// Mark the entry as having no declare target entry kind.
366 /// Mark the entry as a declare target indirect global.
368 /// Mark the entry as a register requires global.
370 };
371
372 /// Kind of device clause for declare target variables
373 /// and functions.
374 /// NOTE: Currently not used as a part of a variable entry; it is used by
375 /// Flang and Clang to interface with the variable-related
376 /// registration functions.
378 /// The target is marked for all devices
380 /// The target is marked for non-host devices
382 /// The target is marked for host devices
384 /// The target is marked as having no clause
386 };
387
388 /// Device global variable entries info.
390 /// Size of the global variable.
391 int64_t VarSize;
393 const std::string VarName;
394
395 public:
398 explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
401 explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, Constant *Addr,
402 int64_t VarSize,
405 const std::string &VarName)
407 VarSize(VarSize), Linkage(Linkage), VarName(VarName) {
409 }
410
411 int64_t getVarSize() const { return VarSize; }
412 StringRef getVarName() const { return VarName; }
413 void setVarSize(int64_t Size) { VarSize = Size; }
414 GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
415 void setLinkage(GlobalValue::LinkageTypes LT) { Linkage = LT; }
416 static bool classof(const OffloadEntryInfo *Info) {
417 return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
418 }
419 };
420
421 /// Initialize device global variable entry.
422 /// This is ONLY used for DEVICE compilation.
425 unsigned Order);
426
427 /// Register device global variable entry.
429 int64_t VarSize,
432 /// Checks if the variable with the given name has been registered already.
434 return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
435 }
436 /// Applies action \a Action on all registered entries.
437 typedef function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)>
441
442private:
443 /// Return the count of entries at a particular source location.
444 unsigned
445 getTargetRegionEntryInfoCount(const TargetRegionEntryInfo &EntryInfo) const;
446
447 /// Update the count of entries at a particular source location.
448 void
449 incrementTargetRegionEntryInfoCount(const TargetRegionEntryInfo &EntryInfo);
450
452 getTargetRegionEntryCountKey(const TargetRegionEntryInfo &EntryInfo) {
453 return TargetRegionEntryInfo(EntryInfo.ParentName, EntryInfo.DeviceID,
454 EntryInfo.FileID, EntryInfo.Line, 0);
455 }
456
457 // Count of entries at a location.
458 std::map<TargetRegionEntryInfo, unsigned> OffloadEntriesTargetRegionCount;
459
460 // Storage for target region entries kind.
461 typedef std::map<TargetRegionEntryInfo, OffloadEntryInfoTargetRegion>
462 OffloadEntriesTargetRegionTy;
463 OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
464 /// Storage for device global variable entries kind. The storage is to be
465 /// indexed by mangled name.
467 OffloadEntriesDeviceGlobalVarTy;
468 OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
469};
470
471/// An interface to create LLVM-IR for OpenMP directives.
472///
473/// Each OpenMP directive has a corresponding public generator method.
475public:
476 /// Create a new OpenMPIRBuilder operating on the given module \p M. This will
477 /// not have an effect on \p M (see initialize)
479 : M(M), Builder(M.getContext()), OffloadInfoManager(this),
480 T(Triple(M.getTargetTriple())) {}
482
484 llvm::Value *AtomicVar;
485
486 public:
492 AtomicVar(AtomicVar) {}
493
494 llvm::Value *getAtomicPointer() const override { return AtomicVar; }
497 const llvm::Twine &Name) const override {
498 llvm::AllocaInst *allocaInst = Builder->CreateAlloca(Ty);
499 allocaInst->setName(Name);
500 return allocaInst;
501 }
502 };
503 /// Initialize the internal state; this will put structure types and
504 /// potentially other helpers into the underlying module. Must be called
505 /// before any other method and only once! This internal state includes types
506 /// used in the OpenMPIRBuilder generated from OMPKinds.def.
507 void initialize();
508
510
511 /// Finalize the underlying module, e.g., by outlining regions.
512 /// \param Fn The function to be finalized. If not used,
513 /// all functions are finalized.
514 void finalize(Function *Fn = nullptr);
515
516 /// Add attributes known for \p FnID to \p Fn.
518
519 /// Type used throughout for insertion points.
521
522 /// Type used to represent an insertion point or an error value.
524
525 /// Create a name using the platform-specific separators.
526 /// \param Parts parts of the final name that need separation
527 /// The created name has a first separator between the first and second part
528 /// and a second separator between all other parts.
529 /// E.g. with FirstSeparator "$" and Separator "." and
530 /// parts: "p1", "p2", "p3", "p4"
531 /// The resulting name is "p1$p2.p3.p4"
532 /// The separators are retrieved from the OpenMPIRBuilderConfig.
533 std::string createPlatformSpecificName(ArrayRef<StringRef> Parts) const;
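// Illustration of the naming scheme documented above (hypothetical call,
// assuming a configuration with FirstSeparator "$" and Separator "."):
//
//   // Yields "p1$p2.p3.p4".
//   std::string Name = createPlatformSpecificName({"p1", "p2", "p3", "p4"});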
534
535 /// Callback type for variable finalization (think destructors).
536 ///
537 /// \param CodeGenIP is the insertion point at which the finalization code
538 /// should be placed.
539 ///
540 /// A finalize callback knows about all objects that need finalization, e.g.,
541 /// destruction, when the scope of the currently generated construct is left,
542 /// at the time and location at which the callback is invoked.
543 using FinalizeCallbackTy = std::function<Error(InsertPointTy CodeGenIP)>;
544
546 /// The finalization callback provided by the last in-flight invocation of
547 /// createXXXX for the directive of kind DK.
549
550 /// The directive kind of the innermost directive that has an associated
551 /// region which might require finalization when it is left.
552 omp::Directive DK;
553
554 /// Flag to indicate if the directive is cancellable.
556 };
557
558 /// Push a finalization callback on the finalization stack.
559 ///
560 /// NOTE: Temporary solution until Clang CG is gone.
562 FinalizationStack.push_back(FI);
563 }
564
565 /// Pop the last finalization callback from the finalization stack.
566 ///
567 /// NOTE: Temporary solution until Clang CG is gone.
569
570 /// Callback type for body (=inner region) code generation
571 ///
572 /// The callback takes code locations as arguments, each describing a
573 /// location where additional instructions can be inserted.
574 ///
575 /// The CodeGenIP may be in the middle of a basic block or point to the end of
576 /// it. The basic block may have a terminator or be degenerate. The callback
577 /// function may just insert instructions at that position, but also split the
578 /// block (without the Before argument of BasicBlock::splitBasicBlock such
579 /// that the identity of the split predecessor block is preserved) and insert
580 /// additional control flow, including branches that do not lead back to what
581 /// follows the CodeGenIP. Note that since the callback is allowed to split
582 /// the block, callers must assume that InsertPoints to positions in the
583 /// BasicBlock after CodeGenIP including CodeGenIP itself are invalidated. If
584 /// such InsertPoints need to be preserved, the caller can split the block itself
585 /// before calling the callback.
586 ///
587 /// AllocaIP and CodeGenIP must not point to the same position.
588 ///
589 /// \param AllocaIP is the insertion point at which new alloca instructions
590 /// should be placed. The BasicBlock it is pointing to must
591 /// not be split.
592 /// \param CodeGenIP is the insertion point at which the body code should be
593 /// placed.
594 ///
595 /// \return an error, if any were triggered during execution.
597 function_ref<Error(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;
598
599 // This is created primarily for the sections construct, as llvm::function_ref
600 // (BodyGenCallbackTy) is not storable (as described in the comments of the
601 // function_ref class: function_ref contains a non-owning reference
602 // to the callable).
603 ///
604 /// \return an error, if any were triggered during execution.
606 std::function<Error(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;
607
608 /// Callback type for loop body code generation.
609 ///
610 /// \param CodeGenIP is the insertion point where the loop's body code must be
611 /// placed. This will be a dedicated BasicBlock with a
612 /// conditional branch from the loop condition check and
613 /// terminated with an unconditional branch to the loop
614 /// latch.
615 /// \param IndVar is the induction variable usable at the insertion point.
616 ///
617 /// \return an error, if any were triggered during execution.
619 function_ref<Error(InsertPointTy CodeGenIP, Value *IndVar)>;
620
621 /// Callback type for variable privatization (think copy & default
622 /// constructor).
623 ///
624 /// \param AllocaIP is the insertion point at which new alloca instructions
625 /// should be placed.
626 /// \param CodeGenIP is the insertion point at which the privatization code
627 /// should be placed.
628 /// \param Original The value being copied/created, should not be used in the
629 /// generated IR.
630 /// \param Inner The equivalent of \p Original that should be used in the
631 /// generated IR; this is equal to \p Original if the value is
632 /// a pointer and can thus be passed directly, otherwise it is
633 /// an equivalent but different value.
634 /// \param ReplVal The replacement value, thus a copy or new created version
635 /// of \p Inner.
636 ///
637 /// \returns The new insertion point where code generation continues and
638 /// \p ReplVal the replacement value.
640 InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
641 Value &Inner, Value *&ReplVal)>;
642
643 /// Description of an LLVM-IR insertion point (IP) and a debug/source location
644 /// (filename, line, column, ...).
647 : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
650 : IP(IP), DL(DL) {}
653 };
654
655 /// Emitter methods for OpenMP directives.
656 ///
657 ///{
658
659 /// Generator for '#omp barrier'
660 ///
661 /// \param Loc The location where the barrier directive was encountered.
662 /// \param Kind The kind of directive that caused the barrier.
663 /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
664 /// \param CheckCancelFlag Flag to indicate a cancel barrier return value
665 /// should be checked and acted upon.
666 /// \param ThreadID Optional parameter to pass in any existing ThreadID value.
667 ///
668 /// \returns The insertion point after the barrier.
670 omp::Directive Kind,
671 bool ForceSimpleCall = false,
672 bool CheckCancelFlag = true);
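// Hedged usage sketch for the generator above (OMPBuilder, Builder, and Loc
// are placeholders; assumes the InsertPointOrErrorTy result type declared
// earlier in this class):
//
//   OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
//       OMPBuilder.createBarrier(Loc, omp::Directive::OMPD_barrier);
//   if (!AfterIP)
//     return AfterIP.takeError();
//   Builder.restoreIP(*AfterIP);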
673
674 /// Generator for '#omp cancel'
675 ///
676 /// \param Loc The location where the directive was encountered.
677 /// \param IfCondition The evaluated 'if' clause expression, if any.
678 /// \param CanceledDirective The kind of directive that is cancelled.
679 ///
680 /// \returns The insertion point after the cancellation code.
682 Value *IfCondition,
683 omp::Directive CanceledDirective);
684
685 /// Generator for '#omp parallel'
686 ///
687 /// \param Loc The insert and source location description.
688 /// \param AllocaIP The insertion points to be used for alloca instructions.
689 /// \param BodyGenCB Callback that will generate the region code.
690 /// \param PrivCB Callback to copy a given variable (think copy constructor).
691 /// \param FiniCB Callback to finalize variable copies.
692 /// \param IfCondition The evaluated 'if' clause expression, if any.
693 /// \param NumThreads The evaluated 'num_threads' clause expression, if any.
694 /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
695 /// \param IsCancellable Flag to indicate a cancellable parallel region.
696 ///
697 /// \returns The insertion position *after* the parallel.
700 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
701 FinalizeCallbackTy FiniCB, Value *IfCondition,
702 Value *NumThreads, omp::ProcBindKind ProcBind,
703 bool IsCancellable);
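// Hedged sketch of driving the generator above (all identifiers are
// placeholders; real frontends supply their own callbacks):
//
//   auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) -> Error {
//     Builder.restoreIP(CodeGenIP);
//     // ... emit the parallel region body ...
//     return Error::success();
//   };
//   auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
//                     Value &Original, Value &Inner, Value *&ReplVal) {
//     ReplVal = &Inner; // No privatization in this sketch.
//     return CodeGenIP;
//   };
//   auto FiniCB = [&](InsertPointTy CodeGenIP) { return Error::success(); };
//   auto AfterIP = OMPBuilder.createParallel(
//       Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, /*IfCondition=*/nullptr,
//       /*NumThreads=*/nullptr, omp::ProcBindKind::OMP_PROC_BIND_default,
//       /*IsCancellable=*/false);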
704
705 /// Generator for the control flow structure of an OpenMP canonical loop.
706 ///
707 /// This generator operates on the logical iteration space of the loop, i.e.
708 /// the caller only has to provide a loop trip count of the loop as defined by
709 /// base language semantics. The trip count is interpreted as an unsigned
710 /// integer. The induction variable passed to \p BodyGenCB will be of the same
711 /// type and run from 0 to \p TripCount - 1. It is up to the callback to
712 /// convert the logical iteration variable to the loop counter variable in the
713 /// loop body.
714 ///
715 /// \param Loc The insert and source location description. The insert
716 /// location can be between two instructions or the end of a
717 /// degenerate block (e.g. a BB under construction).
718 /// \param BodyGenCB Callback that will generate the loop body code.
719 /// \param TripCount Number of iterations the loop body is executed.
720 /// \param Name Base name used to derive BB and instruction names.
721 ///
722 /// \returns An object representing the created control flow structure which
723 /// can be used for loop-associated directives.
726 LoopBodyGenCallbackTy BodyGenCB, Value *TripCount,
727 const Twine &Name = "loop");
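// Hedged sketch (Builder, Loc, and OMPBuilder are placeholders; assumes the
// Expected<CanonicalLoopInfo *> result of this generator):
//
//   Value *TripCount = Builder.getInt32(128);
//   auto BodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) -> Error {
//     Builder.restoreIP(CodeGenIP);
//     // ... emit the body; IndVar runs from 0 to TripCount - 1 ...
//     return Error::success();
//   };
//   Expected<CanonicalLoopInfo *> CLI =
//       OMPBuilder.createCanonicalLoop(Loc, BodyGenCB, TripCount);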
728
729 /// Generator for the control flow structure of an OpenMP canonical loop.
730 ///
731 /// Instead of a logical iteration space, this allows specifying user-defined
732 /// loop counter values using increment, upper- and lower bounds. To
733 /// disambiguate the terminology when counting downwards, instead of lower
734 /// bounds we use \p Start for the loop counter value in the first body
735 /// iteration.
736 ///
737 /// Consider the following limitations:
738 ///
739 /// * A loop counter space over all integer values of its bit-width cannot be
740 /// represented. E.g. using uint8_t, a loop with trip count 256 cannot be
741 /// represented, since 256 cannot be stored in an 8-bit integer:
742 ///
743 /// DO I = 0, 255, 1
744 ///
745 /// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
746 /// effectively counting downwards:
747 ///
748 /// for (uint8_t i = 100u; i > 0; i += 127u)
749 ///
750 ///
751 /// TODO: May need to add additional parameters to represent:
752 ///
753 /// * Allow representing downcounting with unsigned integers.
754 ///
755 /// * Sign of the step and the comparison operator might disagree:
756 ///
757 /// for (int i = 0; i < 42; i -= 1u)
758 ///
759 //
760 /// \param Loc The insert and source location description.
761 /// \param BodyGenCB Callback that will generate the loop body code.
762 /// \param Start Value of the loop counter for the first iteration.
763 /// \param Stop Loop counter values past this will stop the loop.
764 /// \param Step Loop counter increment after each iteration; negative
765 /// means counting down.
766 /// \param IsSigned Whether Start, Stop and Step are signed integers.
767 /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
768 /// counter.
769 /// \param ComputeIP Insertion point for instructions computing the trip
770 /// count. Can be used to ensure the trip count is available
771 /// at the outermost loop of a loop nest. If not set,
772 /// defaults to the preheader of the generated loop.
773 /// \param Name Base name used to derive BB and instruction names.
774 ///
775 /// \returns An object representing the created control flow structure which
776 /// can be used for loop-associated directives.
778 const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
779 Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
780 InsertPointTy ComputeIP = {}, const Twine &Name = "loop");
781
782 /// Collapse a loop nest into a single loop.
783 ///
784 /// Merges loops of a loop nest into a single CanonicalLoopInfo representation
785 /// that has the same number of innermost loop iterations as the original loop
786 /// nest. The induction variables of the input loops are derived from the
787 /// collapsed loop's induction variable. This is intended to be used to
788 /// implement OpenMP's collapse clause. Before applying a directive,
789 /// collapseLoops normalizes a loop nest to contain only a single loop and the
790 /// directive's implementation does not need to handle multiple loops itself.
791 /// This does not remove the need for directives to handle some loop nest
792 /// aspects themselves, such as the ordered(<n>) clause or the simd
793 /// schedule-clause modifier of the worksharing-loop directive.
794 ///
795 /// Example:
796 /// \code
797 /// for (int i = 0; i < 7; ++i) // Canonical loop "i"
798 /// for (int j = 0; j < 9; ++j) // Canonical loop "j"
799 /// body(i, j);
800 /// \endcode
801 ///
802 /// After collapsing with Loops={i,j}, the loop is changed to
803 /// \code
804 /// for (int ij = 0; ij < 63; ++ij) {
805 /// int i = ij / 9;
806 /// int j = ij % 9;
807 /// body(i, j);
808 /// }
809 /// \endcode
810 ///
811 /// In the current implementation, the following limitations apply:
812 ///
813 /// * All input loops have an induction variable of the same type.
814 ///
815 /// * The collapsed loop will have the same trip count integer type as the
816 /// input loops. Therefore it is possible that the collapsed loop cannot
817 /// represent all iterations of the input loops. For instance, assuming a
818 /// 32 bit integer type, and two input loops both iterating 2^16 times, the
819 /// theoretical trip count of the collapsed loop would be 2^32 iterations,
820 /// which cannot be represented in a 32-bit integer. Behavior is undefined
821 /// in this case.
822 ///
823 /// * The trip counts of every input loop must be available at \p ComputeIP.
824 /// Non-rectangular loops are not yet supported.
825 ///
826 /// * At each nest level, code between a surrounding loop and its nested loop
827 /// is hoisted into the loop body, and such code will be executed more
828 /// often than before collapsing (or not at all if any inner loop iteration
829 /// has a trip count of 0). This is permitted by the OpenMP specification.
830 ///
831 /// \param DL Debug location for instructions added for collapsing,
832 /// such as instructions to compute/derive the input loop's
833 /// induction variables.
834 /// \param Loops Loops in the loop nest to collapse. Loops are specified
835 /// from outermost-to-innermost and every control flow of a
836 /// loop's body must pass through its directly nested loop.
837 /// \param ComputeIP Where additional instructions that compute the collapsed
838 /// trip count are inserted. If not set, defaults to before the generated
839 /// loop.
840 ///
841 /// \returns The CanonicalLoopInfo object representing the collapsed loop.
844 InsertPointTy ComputeIP);
845
846 /// Get the default alignment value for a given target.
847 ///
848 /// \param TargetTriple Target triple
849 /// \param Features StringMap which describes extra CPU features
850 static unsigned getOpenMPDefaultSimdAlign(const Triple &TargetTriple,
851 const StringMap<bool> &Features);
852
853 /// Retrieve (or create if non-existent) the address of a declare
854 /// target variable, used in conjunction with registerTargetGlobalVariable
855 /// to create declare target global variables.
856 ///
857 /// \param CaptureClause - enumerator corresponding to the OpenMP capture
858 /// clause used in conjunction with the variable being registered (link,
859 /// to, enter).
860 /// \param DeviceClause - enumerator corresponding to the OpenMP device
861 /// clause used in conjunction with the variable being registered (nohost,
862 /// host, any)
863 /// \param IsDeclaration - boolean stating if the variable being registered
864 /// is a declaration-only and not a definition
865 /// \param IsExternallyVisible - boolean stating if the variable is externally
866 /// visible
867 /// \param EntryInfo - Unique entry information for the value generated
868 /// using getTargetEntryUniqueInfo, used to name generated pointer references
869 /// to the declare target variable
870 /// \param MangledName - the mangled name of the variable being registered
871 /// \param GeneratedRefs - references generated by invocations of
872 /// registerTargetGlobalVariable invoked from getAddrOfDeclareTargetVar;
873 /// these are required by Clang for bookkeeping.
874 /// \param OpenMPSIMD - if OpenMP SIMD mode is currently enabled
875 /// \param TargetTriple - The OpenMP device target triple we are compiling
876 /// for
877 /// \param LlvmPtrTy - The type of the variable we are generating or
878 /// retrieving an address for
879 /// \param GlobalInitializer - a lambda function which creates a constant
880 /// used for initializing a pointer reference to the variable in certain
881 /// cases. If a nullptr is passed, it will default to utilising the original
882 /// variable to initialize the pointer reference.
883 /// \param VariableLinkage - a lambda function which returns the variable's
884 /// linkage type; if unspecified and a nullptr is given, it will instead
885 /// utilise the linkage stored on the existing global variable in the
886 /// LLVMModule.
890 bool IsDeclaration, bool IsExternallyVisible,
891 TargetRegionEntryInfo EntryInfo, StringRef MangledName,
892 std::vector<GlobalVariable *> &GeneratedRefs, bool OpenMPSIMD,
893 std::vector<Triple> TargetTriple, Type *LlvmPtrTy,
894 std::function<Constant *()> GlobalInitializer,
895 std::function<GlobalValue::LinkageTypes()> VariableLinkage);
896
897 /// Registers a target variable for device or host.
898 ///
899 /// \param CaptureClause - enumerator corresponding to the OpenMP capture
900 /// clause used in conjunction with the variable being registered (link,
901 /// to, enter).
902 /// \param DeviceClause - enumerator corresponding to the OpenMP device
903 /// clause used in conjunction with the variable being registered (nohost,
904 /// host, any)
905 /// \param IsDeclaration - boolean stating if the variable being registered
906 /// is a declaration-only and not a definition
907 /// \param IsExternallyVisible - boolean stating if the variable is externally
908 /// visible
909 /// \param EntryInfo - Unique entry information for the value generated
910 /// using getTargetEntryUniqueInfo, used to name generated pointer references
911 /// to the declare target variable
912 /// \param MangledName - the mangled name of the variable being registered
913 /// \param GeneratedRefs - references generated by invocations of
914 /// registerTargetGlobalVariable; these are required by Clang for
915 /// bookkeeping.
916 /// \param OpenMPSIMD - if OpenMP SIMD mode is currently enabled
917 /// \param TargetTriple - The OpenMP device target triple we are compiling
918 /// for
919 /// \param GlobalInitializer - a lambda function which creates a constant
920 /// used for initializing a pointer reference to the variable in certain
921 /// cases. If a nullptr is passed, it will default to utilising the original
922 /// variable to initialize the pointer reference.
923 /// \param VariableLinkage - a lambda function which returns the variable's
924 /// linkage type; if unspecified and a nullptr is given, it will instead
925 /// utilise the linkage stored on the existing global variable in the
926 /// LLVMModule.
927 /// \param LlvmPtrTy - The type of the variable we are generating or
928 /// retrieving an address for
929 /// \param Addr - the original llvm value (addr) of the variable to be
930 /// registered
934 bool IsDeclaration, bool IsExternallyVisible,
935 TargetRegionEntryInfo EntryInfo, StringRef MangledName,
936 std::vector<GlobalVariable *> &GeneratedRefs, bool OpenMPSIMD,
937 std::vector<Triple> TargetTriple,
938 std::function<Constant *()> GlobalInitializer,
939 std::function<GlobalValue::LinkageTypes()> VariableLinkage,
940 Type *LlvmPtrTy, Constant *Addr);
941
942 /// Get the offset of the OMP_MAP_MEMBER_OF field.
943 unsigned getFlagMemberOffset();
944
945 /// Get OMP_MAP_MEMBER_OF flag with extra bits reserved based on
946 /// the position given.
947 /// \param Position - A value indicating the position of the parent
948 /// of the member in the kernel argument structure, often retrieved
949 /// by the parent's position in the combined information vectors used
950 /// to generate the structure itself. Multiple children (members of)
951 /// with the same parent will use the same returned member flag.
953
954 /// Given an initial flag set, this function modifies it to contain
955 /// the passed in MemberOfFlag generated from the getMemberOfFlag
956 /// function. The results are dependent on the existing flag bits
957 /// set in the original flag set.
958 /// \param Flags - The original set of flags to be modified with the
959 /// passed in MemberOfFlag.
960 /// \param MemberOfFlag - A modified OMP_MAP_MEMBER_OF flag, adjusted
961 /// slightly based on the getMemberOfFlag which adjusts the flag bits
962 /// based on the members position in its parent.
964 omp::OpenMPOffloadMappingFlags MemberOfFlag);
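// Hedged sketch (ParentIdx and ChildFlags are placeholders): mark a child
// mapping as a member of the parent at position ParentIdx in the combined
// information vectors.
//
//   omp::OpenMPOffloadMappingFlags MemberFlag =
//       OMPBuilder.getMemberOfFlag(ParentIdx);
//   OMPBuilder.setCorrectMemberOfFlag(ChildFlags, MemberFlag);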
965
966private:
967 /// Modifies the canonical loop to be a statically-scheduled workshare loop
968 /// which is executed on the device
969 ///
970 /// This takes a \p CLI representing a canonical loop, such as the one
971 /// created by \see createCanonicalLoop and emits additional instructions to
972 /// turn it into a workshare loop. In particular, it emits a call to an OpenMP
973 /// runtime function in the preheader which calls the OpenMP device RTL function
974 /// that handles worksharing of the loop body iterations.
975 ///
976 /// \param DL Debug location for instructions added for the
977 /// workshare-loop construct itself.
978 /// \param CLI A descriptor of the canonical loop to workshare.
979 /// \param AllocaIP An insertion point for Alloca instructions usable in the
980 /// preheader of the loop.
981 /// \param LoopType Information about type of loop worksharing.
982 /// It corresponds to type of loop workshare OpenMP pragma.
983 ///
984 /// \returns Point where to insert code after the workshare construct.
985 InsertPointTy applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI,
986 InsertPointTy AllocaIP,
987 omp::WorksharingLoopType LoopType);
988
989 /// Modifies the canonical loop to be a statically-scheduled workshare loop.
990 ///
991 /// This takes a \p LoopInfo representing a canonical loop, such as the one
992 /// created by \p createCanonicalLoop and emits additional instructions to
993 /// turn it into a workshare loop. In particular, it calls to an OpenMP
994 /// runtime function in the preheader to obtain the loop bounds to be used in
995 /// the current thread, updates the relevant instructions in the canonical
996 /// loop and calls to an OpenMP runtime finalization function after the loop.
997 ///
998 /// \param DL Debug location for instructions added for the
999 /// workshare-loop construct itself.
1000 /// \param CLI A descriptor of the canonical loop to workshare.
1001 /// \param AllocaIP An insertion point for Alloca instructions usable in the
1002 /// preheader of the loop.
1003 /// \param NeedsBarrier Indicates whether a barrier must be inserted after
1004 /// the loop.
1005 ///
1006 /// \returns Point where to insert code after the workshare construct.
1007 InsertPointOrErrorTy applyStaticWorkshareLoop(DebugLoc DL,
1008 CanonicalLoopInfo *CLI,
1009 InsertPointTy AllocaIP,
1010 bool NeedsBarrier);
1011
1012 /// Modifies the canonical loop to be a statically-scheduled workshare loop with a
1013 /// user-specified chunk size.
1014 ///
1015 /// \param DL Debug location for instructions added for the
1016 /// workshare-loop construct itself.
1017 /// \param CLI A descriptor of the canonical loop to workshare.
1018 /// \param AllocaIP An insertion point for Alloca instructions usable in
1019 /// the preheader of the loop.
1020 /// \param NeedsBarrier Indicates whether a barrier must be inserted after the
1021 /// loop.
1022 /// \param ChunkSize The user-specified chunk size.
1023 ///
1024 /// \returns Point where to insert code after the workshare construct.
1025 InsertPointOrErrorTy applyStaticChunkedWorkshareLoop(DebugLoc DL,
1026 CanonicalLoopInfo *CLI,
1027 InsertPointTy AllocaIP,
1028 bool NeedsBarrier,
1029 Value *ChunkSize);
1030
1031 /// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
1032 ///
1033 /// This takes a \p LoopInfo representing a canonical loop, such as the one
1034 /// created by \p createCanonicalLoop and emits additional instructions to
1035 /// turn it into a workshare loop. In particular, it calls to an OpenMP
1036 /// runtime function in the preheader to obtain, and then in each iteration
1037 /// to update the loop counter.
1038 ///
1039 /// \param DL Debug location for instructions added for the
1040 /// workshare-loop construct itself.
1041 /// \param CLI A descriptor of the canonical loop to workshare.
1042 /// \param AllocaIP An insertion point for Alloca instructions usable in the
1043 /// preheader of the loop.
1044 /// \param SchedType Type of scheduling to be passed to the init function.
1045 /// \param NeedsBarrier Indicates whether a barrier must be inserted after
1046 /// the loop.
1047 /// \param Chunk The size of loop chunk considered as a unit when
1048 /// scheduling. If \p nullptr, defaults to 1.
1049 ///
1050 /// \returns Point where to insert code after the workshare construct.
1051 InsertPointOrErrorTy applyDynamicWorkshareLoop(DebugLoc DL,
1052 CanonicalLoopInfo *CLI,
1053 InsertPointTy AllocaIP,
1054 omp::OMPScheduleType SchedType,
1055 bool NeedsBarrier,
1056 Value *Chunk = nullptr);
1057
1058 /// Create an alternative version of the loop to support the if clause.
1059 ///
1060 /// The OpenMP if clause may require generating a second loop. This loop
1061 /// will be executed when the if clause condition is not met. createIfVersion
1062 /// adds a branch instruction to the copied loop if \p IfCond is not met.
1063 ///
1064 /// \param Loop Original loop which should be versioned.
1065 /// \param IfCond Value which corresponds to if clause condition
1066 /// \param VMap Value to value map to define relation between
1067 /// original and copied loop values and loop blocks.
1068 /// \param NamePrefix Optional name prefix for if.then if.else blocks.
1069 void createIfVersion(CanonicalLoopInfo *Loop, Value *IfCond,
1070 ValueToValueMapTy &VMap, const Twine &NamePrefix = "");
1071
1072public:
1073 /// Modifies the canonical loop to be a workshare loop.
1074 ///
1075 /// This takes a \p LoopInfo representing a canonical loop, such as the one
1076 /// created by \p createCanonicalLoop and emits additional instructions to
1077 /// turn it into a workshare loop. In particular, it calls to an OpenMP
1078 /// runtime function in the preheader to obtain the loop bounds to be used in
1079 /// the current thread, updates the relevant instructions in the canonical
1080 /// loop and calls to an OpenMP runtime finalization function after the loop.
1081 ///
1082 /// The concrete transformation is done by applyStaticWorkshareLoop,
1083 /// applyStaticChunkedWorkshareLoop, or applyDynamicWorkshareLoop, depending
1084 /// on the value of \p SchedKind and \p ChunkSize.
1085 ///
1086 /// \param DL Debug location for instructions added for the
1087 /// workshare-loop construct itself.
1088 /// \param CLI A descriptor of the canonical loop to workshare.
1089 /// \param AllocaIP An insertion point for Alloca instructions usable in the
1090 /// preheader of the loop.
1091 /// \param NeedsBarrier Indicates whether a barrier must be inserted after
1092 /// the loop.
1093 /// \param SchedKind Scheduling algorithm to use.
1094 /// \param ChunkSize The chunk size for the inner loop.
1095 /// \param HasSimdModifier Whether the simd modifier is present in the
1096 /// schedule clause.
1097 /// \param HasMonotonicModifier Whether the monotonic modifier is present in
1098 /// the schedule clause.
1099 /// \param HasNonmonotonicModifier Whether the nonmonotonic modifier is
1100 /// present in the schedule clause.
1101 /// \param HasOrderedClause Whether the (parameterless) ordered clause is
1102 /// present.
1103 /// \param LoopType Information about type of loop worksharing.
1104 /// It corresponds to type of loop workshare OpenMP pragma.
1105 ///
1106 /// \returns Point where to insert code after the workshare construct.
1109 bool NeedsBarrier,
1110 llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default,
1111 Value *ChunkSize = nullptr, bool HasSimdModifier = false,
1112 bool HasMonotonicModifier = false, bool HasNonmonotonicModifier = false,
1113 bool HasOrderedClause = false,
1114 omp::WorksharingLoopType LoopType =
1116
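// Hedged sketch (DL, CLI, AllocaIP, and OMPBuilder are placeholders): lower a
// canonical loop into a worksharing loop with the default schedule and a
// trailing barrier.
//
//   InsertPointOrErrorTy AfterIP =
//       OMPBuilder.applyWorkshareLoop(DL, CLI, AllocaIP, /*NeedsBarrier=*/true);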
1117 /// Tile a loop nest.
1118 ///
1119 /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
1120 /// \p Loops must be perfectly nested, from outermost to innermost loop
1121 /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
1122 /// of every loop and every tile size must be usable in the outermost
1123 /// loop's preheader. This implies that the loop nest is rectangular.
1124 ///
1125 /// Example:
1126 /// \code
1127 /// for (int i = 0; i < 15; ++i) // Canonical loop "i"
1128 /// for (int j = 0; j < 14; ++j) // Canonical loop "j"
1129 /// body(i, j);
1130 /// \endcode
1131 ///
1132 /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
1133 /// \code
1134 /// for (int i1 = 0; i1 < 3; ++i1)
1135 /// for (int j1 = 0; j1 < 2; ++j1)
1136 /// for (int i2 = 0; i2 < 5; ++i2)
1137 /// for (int j2 = 0; j2 < 7; ++j2)
1138 /// body(i1*5+i2, j1*7+j2);
1139 /// \endcode
1140 ///
1141 /// The returned vector contains the loops {i1,j1,i2,j2}. The loops i1 and j1 are
1142 /// referred to as the floor loops, and the loops i2 and j2 are the tile loops. Tiling also
1143 /// handles non-constant trip counts, non-constant tile sizes and trip counts
1144 /// that are not multiples of the tile size. In the latter case the tile loop
1145 /// of the last floor-loop iteration will have fewer iterations than specified
1146 /// as its tile size.
1147 ///
1148 ///
1149 /// @param DL Debug location for instructions added by tiling, for
1150 /// instance the floor- and tile trip count computation.
1151 /// @param Loops Loops to tile. The CanonicalLoopInfo objects are
1152 /// invalidated by this method, i.e. should not be used after
1153 /// tiling.
1154 /// @param TileSizes For each loop in \p Loops, the tile size for that
1155 /// dimension.
1156 ///
1157 /// \returns A list of generated loops. Contains twice as many loops as the
1158 /// input loop nest; the first half are the floor loops and the
1159 /// second half are the tile loops.
1160 std::vector<CanonicalLoopInfo *>
1162 ArrayRef<Value *> TileSizes);
1163
1164 /// Fully unroll a loop.
1165 ///
1166 /// Instead of unrolling the loop immediately (and duplicating its body
1167 /// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
1168 /// metadata.
1169 ///
1170 /// \param DL Debug location for instructions added by unrolling.
1171 /// \param Loop The loop to unroll. The loop will be invalidated.
1173
1174 /// Fully or partially unroll a loop. How the loop is unrolled is determined
1175 /// using LLVM's LoopUnrollPass.
1176 ///
1177 /// \param DL Debug location for instructions added by unrolling.
1178 /// \param Loop The loop to unroll. The loop will be invalidated.
1180
1181 /// Partially unroll a loop.
1182 ///
1183 /// The CanonicalLoopInfo of the unrolled loop for use with chained
1184 /// loop-associated directive can be requested using \p UnrolledCLI. Not
1185 /// needing the CanonicalLoopInfo allows more efficient code generation by
1186 /// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
1187 /// A loop-associated directive applied to the unrolled loop needs to know the
1188 /// new trip count which means that if using a heuristically determined unroll
1189 /// factor (\p Factor == 0), that factor must be computed immediately. We are
1190 /// using the same logic as the LoopUnrollPass to derive the unroll factor,
1191 /// which assumes that some canonicalization has taken place (e.g.
1192 /// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
1193 /// better when the unrolled loop's CanonicalLoopInfo is not needed.
1194 ///
1195 /// \param DL Debug location for instructions added by unrolling.
1196 /// \param Loop The loop to unroll. The loop will be invalidated.
1197 /// \param Factor The factor to unroll the loop by. A factor of 0
1198 /// indicates that a heuristic should be used to determine
1199 /// the unroll-factor.
1200 /// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
1201 /// partially unrolled loop. Otherwise, uses loop metadata
1202 /// to defer unrolling to the LoopUnrollPass.
1203 void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
1204 CanonicalLoopInfo **UnrolledCLI);
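// Hedged sketch (DL, CLI, and OMPBuilder are placeholders): partially unroll
// a canonical loop by a factor of 4 and keep the resulting CanonicalLoopInfo
// for a chained loop-associated directive.
//
//   CanonicalLoopInfo *UnrolledCLI = nullptr;
//   OMPBuilder.unrollLoopPartial(DL, CLI, /*Factor=*/4, &UnrolledCLI);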
1205
1206 /// Add metadata to simd-ize a loop. If IfCond is not nullptr, the loop
1207 /// is cloned. The metadata which prevents vectorization is added to
1208 /// the cloned loop. The cloned loop is executed when IfCond evaluates
1209 /// to false.
1210 ///
1211 /// \param Loop The loop to simd-ize.
1212 /// \param AlignedVars The map which contains pairs of the pointer
1213 /// and its corresponding alignment.
1214 /// \param IfCond The value which corresponds to the if clause
1215 /// condition.
1216 /// \param Order The enum to map order clause.
1217 /// \param Simdlen The Simdlen length to apply to the simd loop.
1218 /// \param Safelen The Safelen length to apply to the simd loop.
1220 MapVector<Value *, Value *> AlignedVars, Value *IfCond,
1221 omp::OrderKind Order, ConstantInt *Simdlen,
1222 ConstantInt *Safelen);
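// Hedged sketch (CLI and Order are placeholders; Order is some omp::OrderKind
// value): attach simd metadata with simdlen(8), no if clause, and no aligned
// variables.
//
//   MapVector<Value *, Value *> AlignedVars;
//   OMPBuilder.applySimd(CLI, AlignedVars, /*IfCond=*/nullptr, Order,
//                        /*Simdlen=*/Builder.getInt64(8),
//                        /*Safelen=*/nullptr);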
1223
1224 /// Generator for '#omp flush'
1225 ///
1226 /// \param Loc The location where the flush directive was encountered
1227 void createFlush(const LocationDescription &Loc);
1228
1229 /// Generator for '#omp taskwait'
1230 ///
1231 /// \param Loc The location where the taskwait directive was encountered.
1232 void createTaskwait(const LocationDescription &Loc);
1233
1234 /// Generator for '#omp taskyield'
1235 ///
1236 /// \param Loc The location where the taskyield directive was encountered.
1237 void createTaskyield(const LocationDescription &Loc);
1238
1239 /// A struct to pack the relevant information for an OpenMP depend clause.
1240 struct DependData {
1244 explicit DependData() = default;
1246 Value *DepVal)
1248 };
1249
1250 /// Generator for `#omp task`
1251 ///
1252 /// \param Loc The location where the task construct was encountered.
1253 /// \param AllocaIP The insertion point to be used for alloca instructions.
1254 /// \param BodyGenCB Callback that will generate the region code.
1255 /// \param Tied True if the task is tied, false if the task is untied.
1256 /// \param Final i1 value which is `true` if the task is final, `false` if the
1257 /// task is not final.
1258 /// \param IfCondition i1 value. If it evaluates to `false`, an undeferred
1259 /// task is generated, and the encountering thread must
1260 /// suspend the current task region, for which execution
1261 /// cannot be resumed until execution of the structured
1262 /// block that is associated with the generated task is
1263 /// completed.
1264 /// \param EventHandle If present, signifies the event handle as part of
1265 /// the detach clause
1266 /// \param Mergeable If the given task is `mergeable`
1268 createTask(const LocationDescription &Loc, InsertPointTy AllocaIP,
1269 BodyGenCallbackTy BodyGenCB, bool Tied = true,
1270 Value *Final = nullptr, Value *IfCondition = nullptr,
1271 SmallVector<DependData> Dependencies = {}, bool Mergeable = false,
1272 Value *EventHandle = nullptr);
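// Hedged sketch (placeholders throughout): emit a tied task whose body is
// provided by BodyGenCB, with no final, if, or depend clauses.
//
//   auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) -> Error {
//     Builder.restoreIP(CodeGenIP);
//     // ... emit the task body ...
//     return Error::success();
//   };
//   InsertPointOrErrorTy AfterIP =
//       OMPBuilder.createTask(Loc, AllocaIP, BodyGenCB, /*Tied=*/true);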
1273
1274 /// Generator for the taskgroup construct
1275 ///
1276 /// \param Loc The location where the taskgroup construct was encountered.
1277 /// \param AllocaIP The insertion point to be used for alloca instructions.
1278 /// \param BodyGenCB Callback that will generate the region code.
1279 InsertPointOrErrorTy createTaskgroup(const LocationDescription &Loc,
1280 InsertPointTy AllocaIP,
1281 BodyGenCallbackTy BodyGenCB);
1282
1284 std::function<std::tuple<std::string, uint64_t>()>;
1285
1286 /// Creates a unique info for a target entry when provided a filename and
1287 /// line number from.
1288 ///
1289 /// \param CallBack A callback function which should return filename the entry
1290 /// resides in as well as the line number for the target entry
1291 /// \param ParentName The name of the parent the target entry resides in, if
1292 /// any.
1295 StringRef ParentName = "");
1296
1297 /// Enum class for the ReductionGen CallBack type to be used.
1299
1300 /// ReductionGen CallBack for Clang
1301 ///
1302 /// \param CodeGenIP InsertPoint for CodeGen.
1303 /// \param Index Index of the ReductionInfo to generate code for.
1304 /// \param LHSPtr Optionally used by Clang to return the LHSPtr it used for
1305 /// codegen, used for fixup later.
1306 /// \param RHSPtr Optionally used by Clang to
1307 /// return the RHSPtr it used for codegen, used for fixup later.
1308 /// \param CurFn Optionally used by Clang to pass in the Current Function as
1309 /// Clang context may be old.
1311 std::function<InsertPointTy(InsertPointTy CodeGenIP, unsigned Index,
1312 Value **LHS, Value **RHS, Function *CurFn)>;
1313
1314 /// ReductionGen CallBack for MLIR
1315 ///
1316 /// \param CodeGenIP InsertPoint for CodeGen.
1317 /// \param LHS Pass in the LHS Value to be used for CodeGen.
1318 /// \param RHS Pass in the RHS Value to be used for CodeGen.
1320 InsertPointTy CodeGenIP, Value *LHS, Value *RHS, Value *&Res)>;
1321
1322 /// Functions used to generate atomic reductions. Such functions take two
1323 /// Values representing pointers to LHS and RHS of the reduction, as well as
1324 /// the element type of these pointers. They are expected to atomically
1325 /// update the LHS to the reduced value.
1327 InsertPointTy, Type *, Value *, Value *)>;
1328
1329 /// Enum class for reduction evaluation types scalar, complex and aggregate.
1331
1332 /// Information about an OpenMP reduction.
1343 : ElementType(nullptr), Variable(nullptr),
1346
1347 /// Reduction element type, must match pointee type of variable.
1349
1350 /// Reduction variable of pointer type.
1352
1353 /// Thread-private partial reduction variable.
1355
1356 /// Reduction evaluation kind - scalar, complex or aggregate.
1358
1359 /// Callback for generating the reduction body. The IR produced by this will
1360 /// be used to combine two values in a thread-safe context, e.g., under
1361 /// lock or within the same thread, and therefore need not be atomic.
1363
1364 /// Clang callback for generating the reduction body. The IR produced by
1365 /// this will be used to combine two values in a thread-safe context, e.g.,
1366 /// under lock or within the same thread, and therefore need not be atomic.
1368
1369 /// Callback for generating the atomic reduction body, may be null. The IR
1370 /// produced by this will be used to atomically combine two values during
1371 /// reduction. If null, the implementation will use the non-atomic version
1372 /// along with the appropriate synchronization mechanisms.
1374 };
1375
1376 enum class CopyAction : unsigned {
1377 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
1378 // the warp using shuffle instructions.
1380 // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
1381 ThreadCopy,
1382 };
1383
1388 };
1389
1390 /// Supporting functions for Reductions CodeGen.
1391private:
1392 /// Emit the llvm.used metadata.
1393 void emitUsed(StringRef Name, std::vector<llvm::WeakTrackingVH> &List);
1394
1395 /// Get the id of the current thread on the GPU.
1396 Value *getGPUThreadID();
1397
1398 /// Get the GPU warp size.
1399 Value *getGPUWarpSize();
1400
1401 /// Get the id of the warp in the block.
1402 /// We assume that the warp size is 32, which is always the case
1403 /// on the NVPTX device, to generate more efficient code.
1404 Value *getNVPTXWarpID();
1405
1406 /// Get the id of the current lane in the Warp.
1407 /// We assume that the warp size is 32, which is always the case
1408 /// on the NVPTX device, to generate more efficient code.
1409 Value *getNVPTXLaneID();
1410
1411 /// Cast value to the specified type.
1412 Value *castValueToType(InsertPointTy AllocaIP, Value *From, Type *ToType);
1413
1414 /// This function creates calls to one of two shuffle functions to copy
1415 /// variables between lanes in a warp.
1416 Value *createRuntimeShuffleFunction(InsertPointTy AllocaIP, Value *Element,
1417 Type *ElementType, Value *Offset);
1418
1419 /// Function to shuffle over the value from the remote lane.
1420 void shuffleAndStore(InsertPointTy AllocaIP, Value *SrcAddr, Value *DstAddr,
1421 Type *ElementType, Value *Offset,
1422 Type *ReductionArrayTy);
1423
1424 /// Emit instructions to copy a Reduce list, which contains partially
1425 /// aggregated values, in the specified direction.
1426 void emitReductionListCopy(
1427 InsertPointTy AllocaIP, CopyAction Action, Type *ReductionArrayTy,
1428 ArrayRef<ReductionInfo> ReductionInfos, Value *SrcBase, Value *DestBase,
1429 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr});
1430
1431 /// Emit a helper that reduces data across two OpenMP threads (lanes)
1432 /// in the same warp. It uses shuffle instructions to copy over data from
1433 /// a remote lane's stack. The reduction algorithm performed is specified
1434 /// by the fourth parameter.
1435 ///
1436 /// Algorithm Versions.
1437 /// Full Warp Reduce (argument value 0):
1438 /// This algorithm assumes that all 32 lanes are active and gathers
1439 /// data from these 32 lanes, producing a single resultant value.
1440 /// Contiguous Partial Warp Reduce (argument value 1):
1441 /// This algorithm assumes that only a *contiguous* subset of lanes
1442 /// are active. This happens for the last warp in a parallel region
1443 /// when the user specified num_threads is not an integer multiple of
1444 /// 32. This contiguous subset always starts with the zeroth lane.
1445 /// Partial Warp Reduce (argument value 2):
1446 /// This algorithm gathers data from any number of lanes at any position.
1447 /// All reduced values are stored in the lowest possible lane. The set
1448 /// of problems every algorithm addresses is a superset of those
1449 /// addressable by algorithms with a lower version number. Overhead
1450 /// increases as algorithm version increases.
1451 ///
1452 /// Terminology
1453 /// Reduce element:
1454 /// Reduce element refers to the individual data field with primitive
1455 /// data types to be combined and reduced across threads.
1456 /// Reduce list:
1457 /// Reduce list refers to a collection of local, thread-private
1458 /// reduce elements.
1459 /// Remote Reduce list:
1460 /// Remote Reduce list refers to a collection of remote (relative to
1461 /// the current thread) reduce elements.
1462 ///
1463 /// We distinguish between three states of threads that are important to
1464 /// the implementation of this function.
1465 /// Alive threads:
1466 /// Threads in a warp executing the SIMT instruction, as distinguished from
1467 /// threads that are inactive due to divergent control flow.
1468 /// Active threads:
1469 /// The minimal set of threads that has to be alive upon entry to this
1470 /// function. The computation is correct iff active threads are alive.
1471 /// Some threads are alive but they are not active because they do not
1472 /// contribute to the computation in any useful manner. Turning them off
1473 /// may introduce control flow overheads without any tangible benefits.
1474 /// Effective threads:
1475 /// In order to comply with the argument requirements of the shuffle
1476 /// function, we must keep all lanes holding data alive. But at most
1477 /// half of them perform value aggregation; we refer to this half of
1478 /// threads as effective. The other half is simply handing off their
1479 /// data.
1480 ///
1481 /// Procedure
1482 /// Value shuffle:
1483 /// In this step active threads transfer data from higher lane positions
1484 /// in the warp to lower lane positions, creating Remote Reduce list.
1485 /// Value aggregation:
1486 /// In this step, effective threads combine their thread local Reduce list
1487 /// with Remote Reduce list and store the result in the thread local
1488 /// Reduce list.
1489 /// Value copy:
1490 /// In this step, we deal with the assumption made by algorithm 2
1491 /// (i.e. contiguity assumption). When we have an odd number of lanes
1492 /// active, say 2k+1, only k threads will be effective and therefore k
1493 /// new values will be produced. However, the Reduce list owned by the
1494 /// (2k+1)th thread is ignored in the value aggregation. Therefore
1495 /// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
1496 /// that the contiguity assumption still holds.
1497 ///
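/// Putting the three steps together, the emitted helper has roughly the
/// following shape (an illustrative sketch, not a literal transcription of
/// the generated IR; 'should_reduce' and 'should_copy' are placeholder
/// predicates derived from the algorithm version, lane id and offset):
///
/// \code
/// void shuffle_and_reduce(void *reduce_data, int16_t lane_id,
///                         int16_t remote_lane_offset, int16_t algo_ver) {
///   // Value shuffle: build the Remote Reduce list, element by element.
///   for each element E in reduce_data:
///     remote_E = shuffle_down(E, remote_lane_offset, WARPSIZE);
///   // Value aggregation: only effective threads combine.
///   if (should_reduce(algo_ver, lane_id, remote_lane_offset))
///     ReduceFn(reduce_data, remote_reduce_data);
///   // Value copy: restore the contiguity assumption of algorithm 2.
///   else if (should_copy(algo_ver, lane_id, remote_lane_offset))
///     copy remote_reduce_data into reduce_data;
/// }
/// \endcode
///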
1498 /// \param ReductionInfos Array type containing the ReductionOps.
1499 /// \param ReduceFn The reduction function.
1500 /// \param FuncAttrs Optional param to specify any function attributes that
1501 /// need to be copied to the new function.
1502 ///
1503 /// \return The ShuffleAndReduce function.
1504 Function *emitShuffleAndReduceFunction(
1506 Function *ReduceFn, AttributeList FuncAttrs);
1507
1508 /// This function emits a helper that gathers Reduce lists from the first
1509 /// lane of every active warp to lanes in the first warp.
1510 ///
1511 /// void inter_warp_copy_func(void* reduce_data, num_warps)
1512 /// shared smem[warp_size];
1513 /// For all data entries D in reduce_data:
1514 /// sync
1515 /// If (I am the first lane in each warp)
1516 /// Copy my local D to smem[warp_id]
1517 /// sync
1518 /// if (I am the first warp)
1519 /// Copy smem[thread_id] to my local D
1520 ///
1521 /// \param Loc The insert and source location description.
1522 /// \param ReductionInfos Array type containing the ReductionOps.
1523 /// \param FuncAttrs Optional param to specify any function attributes that
1524 /// need to be copied to the new function.
1525 ///
1526 /// \return The InterWarpCopy function.
1528 emitInterWarpCopyFunction(const LocationDescription &Loc,
1529 ArrayRef<ReductionInfo> ReductionInfos,
1530 AttributeList FuncAttrs);
1531
1532 /// This function emits a helper that copies all the reduction variables from
1533 /// the team into the provided global buffer for the reduction variables.
1534 ///
1535 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
1536 /// For all data entries D in reduce_data:
1537 /// Copy local D to buffer.D[Idx]
1538 ///
1539 /// \param ReductionInfos Array type containing the ReductionOps.
1540 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1541 /// \param FuncAttrs Optional param to specify any function attributes that
1542 /// need to be copied to the new function.
1543 ///
1544 /// \return The ListToGlobalCopy function.
1545 Function *emitListToGlobalCopyFunction(ArrayRef<ReductionInfo> ReductionInfos,
1546 Type *ReductionsBufferTy,
1547 AttributeList FuncAttrs);
1548
1549 /// This function emits a helper that copies all the reduction variables from
1550 /// the provided global buffer back into the team's reduction variables.
1551 ///
1552 /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
1553 /// For all data entries D in reduce_data:
1554 /// Copy buffer.D[Idx] to local D;
1555 ///
1556 /// \param ReductionInfos Array type containing the ReductionOps.
1557 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1558 /// \param FuncAttrs Optional param to specify any function attributes that
1559 /// need to be copied to the new function.
1560 ///
1561 /// \return The GlobalToListCopy function.
1562 Function *emitGlobalToListCopyFunction(ArrayRef<ReductionInfo> ReductionInfos,
1563 Type *ReductionsBufferTy,
1564 AttributeList FuncAttrs);
1565
1566 /// This function emits a helper that reduces all the reduction variables from
1567 /// the team into the provided global buffer for the reduction variables.
1568 ///
1569 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
1570 /// void *GlobPtrs[];
1571 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
1572 /// ...
1573 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
1574 /// reduce_function(GlobPtrs, reduce_data);
1575 ///
1576 /// \param ReductionInfos Array type containing the ReductionOps.
1577 /// \param ReduceFn The reduction function.
1578 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1579 /// \param FuncAttrs Optional param to specify any function attributes that
1580 /// need to be copied to the new function.
1581 ///
1582 /// \return The ListToGlobalReduce function.
1583 Function *
1584 emitListToGlobalReduceFunction(ArrayRef<ReductionInfo> ReductionInfos,
1585 Function *ReduceFn, Type *ReductionsBufferTy,
1586 AttributeList FuncAttrs);
1587
1588 /// This function emits a helper that reduces the reduction variables in the
1589 /// provided global buffer into the team's local reduction variables.
1590 ///
1591 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
1592 /// void *GlobPtrs[];
1593 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
1594 /// ...
1595 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
1596 /// reduce_function(reduce_data, GlobPtrs);
1597 ///
1598 /// \param ReductionInfos Array type containing the ReductionOps.
1599 /// \param ReduceFn The reduction function.
1600 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1601 /// \param FuncAttrs Optional param to specify any function attributes that
1602 /// need to be copied to the new function.
1603 ///
1604 /// \return The GlobalToListReduce function.
1605 Function *
1606 emitGlobalToListReduceFunction(ArrayRef<ReductionInfo> ReductionInfos,
1607 Function *ReduceFn, Type *ReductionsBufferTy,
1608 AttributeList FuncAttrs);
1609
1610 /// Get the function name of a reduction function.
1611 std::string getReductionFuncName(StringRef Name) const;
1612
1613 /// Emits the reduction function.
1614 /// \param ReducerName Name of the function calling the reduction.
1615 /// \param ReductionInfos Array type containing the ReductionOps.
1616 /// \param ReductionGenCBKind Optional param to specify Clang or MLIR
1617 /// CodeGenCB kind.
1618 /// \param FuncAttrs Optional param to specify any function attributes that
1619 /// need to be copied to the new function.
1620 ///
1621 /// \return The reduction function.
1622 Expected<Function *> createReductionFunction(
1623 StringRef ReducerName, ArrayRef<ReductionInfo> ReductionInfos,
1625 AttributeList FuncAttrs = {});
1626
1627public:
1628 ///
1629 /// Design of OpenMP reductions on the GPU
1630 ///
1631 /// Consider a typical OpenMP program with one or more reduction
1632 /// clauses:
1633 ///
1634 /// float foo;
1635 /// double bar;
1636 /// #pragma omp target teams distribute parallel for \
1637 /// reduction(+:foo) reduction(*:bar)
1638 /// for (int i = 0; i < N; i++) {
1639 /// foo += A[i]; bar *= B[i];
1640 /// }
1641 ///
1642 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
1643 /// all teams. In our OpenMP implementation on the NVPTX device an
1644 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
1645 /// within a team are mapped to CUDA threads within a threadblock.
1646 /// Our goal is to efficiently aggregate values across all OpenMP
1647 /// threads such that:
1648 ///
1649 /// - the compiler and runtime are logically concise, and
1650 /// - the reduction is performed efficiently in a hierarchical
1651 /// manner as follows: within OpenMP threads in the same warp,
1652 /// across warps in a threadblock, and finally across teams on
1653 /// the NVPTX device.
1654 ///
1655 /// Introduction to Decoupling
1656 ///
1657 /// We would like to decouple the compiler and the runtime so that the
1658 /// latter is ignorant of the reduction variables (number, data types)
1659 /// and the reduction operators. This allows a simpler interface
1660 /// and implementation while still attaining good performance.
1661 ///
1662 /// Pseudocode for the aforementioned OpenMP program generated by the
1663 /// compiler is as follows:
1664 ///
1665 /// 1. Create private copies of reduction variables on each OpenMP
1666 /// thread: 'foo_private', 'bar_private'
1667 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
1668 /// to it and writes the result in 'foo_private' and 'bar_private'
1669 /// respectively.
1670 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
1671 /// and store the result on the team master:
1672 ///
1673 /// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
1674 /// reduceData, shuffleReduceFn, interWarpCpyFn)
1675 ///
1676 /// where:
1677 /// struct ReduceData {
1678 /// double *foo;
1679 /// double *bar;
1680 /// } reduceData
1681 /// reduceData.foo = &foo_private
1682 /// reduceData.bar = &bar_private
1683 ///
1684 /// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
1685 /// auxiliary functions generated by the compiler that operate on
1686 /// variables of type 'ReduceData'. They help the runtime perform the
1687 /// algorithmic steps in a data-agnostic manner.
1688 ///
1689 /// 'shuffleReduceFn' is a pointer to a function that reduces data
1690 /// of type 'ReduceData' across two OpenMP threads (lanes) in the
1691 /// same warp. It takes the following arguments as input:
1692 ///
1693 /// a. variable of type 'ReduceData' on the calling lane,
1694 /// b. its lane_id,
1695 /// c. an offset relative to the current lane_id to generate a
1696 /// remote_lane_id. The remote lane contains the second
1697 /// variable of type 'ReduceData' that is to be reduced.
1698 /// d. an algorithm version parameter determining which reduction
1699 /// algorithm to use.
1700 ///
1701 /// 'shuffleReduceFn' retrieves data from the remote lane using
1702 /// efficient GPU shuffle intrinsics and reduces, using the
1703 /// algorithm specified by the 4th parameter, the two operands
1704 /// element-wise. The result is written to the first operand.
1705 ///
1706 /// Different reduction algorithms are implemented in different
1707 /// runtime functions, all calling 'shuffleReduceFn' to perform
1708 /// the essential reduction step. Therefore, based on the 4th
1709 /// parameter, this function behaves slightly differently to
1710 /// cooperate with the runtime to ensure correctness under
1711 /// different circumstances.
1712 ///
1713 /// 'InterWarpCpyFn' is a pointer to a function that transfers
1714 /// reduced variables across warps. It tunnels, through CUDA
1715 /// shared memory, the thread-private data of type 'ReduceData'
1716 /// from lane 0 of each warp to a lane in the first warp.
1717 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
1718 /// The last team writes the global reduced value to memory.
1719 ///
1720 /// ret = __kmpc_nvptx_teams_reduce_nowait(...,
1721 /// reduceData, shuffleReduceFn, interWarpCpyFn,
1722 /// scratchpadCopyFn, loadAndReduceFn)
1723 ///
1724 /// 'scratchpadCopyFn' is a helper that stores reduced
1725 /// data from the team master to a scratchpad array in
1726 /// global memory.
1727 ///
1728 /// 'loadAndReduceFn' is a helper that loads data from
1729 /// the scratchpad array and reduces it with the input
1730 /// operand.
1731 ///
1732 /// These compiler generated functions hide address
1733 /// calculation and alignment information from the runtime.
1734 /// 5. if ret == 1:
1735 /// The team master of the last team stores the reduced
1736 /// result to the globals in memory.
1737 /// foo += reduceData.foo; bar *= reduceData.bar
1738 ///
1739 ///
1740 /// Warp Reduction Algorithms
1741 ///
1742 /// On the warp level, we have three algorithms implemented in the
1743 /// OpenMP runtime depending on the number of active lanes:
1744 ///
1745 /// Full Warp Reduction
1746 ///
1747 /// The reduce algorithm within a warp where all lanes are active
1748 /// is implemented in the runtime as follows:
1749 ///
1750 /// full_warp_reduce(void *reduce_data,
1751 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
1752 /// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
1753 /// ShuffleReduceFn(reduce_data, 0, offset, 0);
1754 /// }
1755 ///
1756 /// The algorithm completes in log(2, WARPSIZE) steps.
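/// For example, with WARPSIZE == 32 the loop runs five times, with offsets
/// 16, 8, 4, 2 and 1.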
1757 ///
1758 /// 'ShuffleReduceFn' is called here with lane_id set to 0 because it is
1759 /// not used; we therefore save instructions by not retrieving lane_id
1760 /// from the corresponding special registers. The 4th parameter, which
1761 /// represents the version of the algorithm being used, is set to 0 to
1762 /// signify full warp reduction.
1763 ///
1764 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
1765 ///
1766 /// #reduce_elem refers to an element in the local lane's data structure
1767 /// #remote_elem is retrieved from a remote lane
1768 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
1769 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
1770 ///
1771 /// Contiguous Partial Warp Reduction
1772 ///
1773 /// This reduce algorithm is used within a warp where only the first
1774 /// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
1775 /// number of OpenMP threads in a parallel region is not a multiple of
1776 /// WARPSIZE. The algorithm is implemented in the runtime as follows:
1777 ///
1778 /// void
1779 /// contiguous_partial_reduce(void *reduce_data,
1780 /// kmp_ShuffleReductFctPtr ShuffleReduceFn,
1781 /// int size, int lane_id) {
1782 /// int curr_size;
1783 /// int offset;
1784 /// curr_size = size;
1785 /// offset = curr_size/2;
1786 /// while (offset > 0) {
1787 /// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
1788 /// curr_size = (curr_size+1)/2;
1789 /// offset = curr_size/2;
1790 /// }
1791 /// }
1792 ///
1793 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
1794 ///
1795 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
1796 /// if (lane_id < offset)
1797 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
1798 /// else
1799 /// reduce_elem = remote_elem
1800 ///
1801 /// This algorithm assumes that the data to be reduced are located in a
1802 /// contiguous subset of lanes starting from the first. When there is
1803 /// an odd number of active lanes, the data in the last lane is not
1804 /// aggregated with any other lane's data but is instead copied over.
1805 ///
1806 /// Dispersed Partial Warp Reduction
1807 ///
1808 /// This algorithm is used within a warp when any discontiguous subset of
1809 /// lanes are active. It is used to implement the reduction operation
1810 /// across lanes in an OpenMP simd region or in a nested parallel region.
1811 ///
1812 /// void
1813 /// dispersed_partial_reduce(void *reduce_data,
1814 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
1815 /// int size, remote_id;
1816 /// int logical_lane_id = number_of_active_lanes_before_me() * 2;
1817 /// do {
1818 /// remote_id = next_active_lane_id_right_after_me();
1819 /// # the above function returns 0 if no active lane
1820 /// # is present right after the current lane.
1821 /// size = number_of_active_lanes_in_this_warp();
1822 /// logical_lane_id /= 2;
1823 /// ShuffleReduceFn(reduce_data, logical_lane_id,
1824 /// remote_id-1-threadIdx.x, 2);
1825 /// } while (logical_lane_id % 2 == 0 && size > 1);
1826 /// }
1827 ///
1828 /// There is no assumption made about the initial state of the reduction.
1829 /// Any number of lanes (>=1) could be active at any position. The reduction
1830 /// result is returned in the first active lane.
1831 ///
1832 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
1833 ///
1834 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
1835 /// if (lane_id % 2 == 0 && offset > 0)
1836 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
1837 /// else
1838 /// reduce_elem = remote_elem
1839 ///
1840 ///
1841 /// Intra-Team Reduction
1842 ///
1843 /// This function, as implemented in the runtime call
1844 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
1845 /// threads in a team. It first reduces within a warp using the
1846 /// aforementioned algorithms. We then proceed to gather all such
1847 /// reduced values at the first warp.
1848 ///
1849 /// The runtime makes use of the function 'InterWarpCpyFn', which copies
1850 /// data from each of the "warp masters" (zeroth lane of each warp, where
1851 /// warp-reduced data is held) to the zeroth warp. This step reduces (in
1852 /// a mathematical sense) the problem of reduction across warp masters in
1853 /// a block to the problem of warp reduction.
1854 ///
1855 ///
1856 /// Inter-Team Reduction
1857 ///
1858 /// Once a team has reduced its data to a single value, it is stored in
1859 /// a global scratchpad array. Since each team has a distinct slot, this
1860 /// can be done without locking.
1861 ///
1862 /// The last team to write to the scratchpad array proceeds to reduce the
1863 /// scratchpad array. One or more workers in the last team use the helper
1864 /// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
1865 /// the k'th worker reduces every k'th element.
1866 ///
1867 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
1868 /// reduce across workers and compute a globally reduced value.
1869 ///
1870 /// \param Loc The location where the reduction was
1871 /// encountered. Must be within the associated
1872 /// directive and after the last local access to the
1873 /// reduction variables.
1874 /// \param AllocaIP An insertion point suitable for allocas usable
1875 /// in reductions.
1876 /// \param CodeGenIP An insertion point suitable for code
1877 /// generation.
1878 /// \param ReductionInfos A list of info on each reduction variable.
1879 /// \param IsNoWait Optional flag set if the reduction is
1880 /// marked as nowait.
1881 /// \param IsTeamsReduction Optional flag set if it is a teams
1882 /// reduction.
1883 /// \param HasDistribute Optional flag set if it is a
1884 /// distribute reduction.
1885 /// \param GridValue Optional GPU grid value.
1886 /// \param ReductionBufNum Optional OpenMPCUDAReductionBufNumValue to be
1887 /// used for teams reduction.
1888 /// \param SrcLocInfo Source location information global.
1890 const LocationDescription &Loc, InsertPointTy AllocaIP,
1891 InsertPointTy CodeGenIP, ArrayRef<ReductionInfo> ReductionInfos,
1892 bool IsNoWait = false, bool IsTeamsReduction = false,
1893 bool HasDistribute = false,
1895 std::optional<omp::GV> GridValue = {}, unsigned ReductionBufNum = 1024,
1896 Value *SrcLocInfo = nullptr);
1897
1898 // TODO: provide atomic and non-atomic reduction generators for reduction
1899 // operators defined by the OpenMP specification.
1900
1901 /// Generator for '#omp reduction'.
1902 ///
1903 /// Emits the IR instructing the runtime to perform the specific kind of
1904 /// reductions. Expects reduction variables to have been privatized and
1905 /// initialized to reduction-neutral values separately. Emits the calls to
1906 /// runtime functions as well as the reduction function and the basic blocks
1907 /// performing the reduction atomically and non-atomically.
1908 ///
1909 /// The code emitted for the following:
1910 ///
1911 /// \code
1912 /// type var_1;
1913 /// type var_2;
1914 /// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
1915 /// /* body */;
1916 /// \endcode
1917 ///
1918 /// corresponds to the following sketch.
1919 ///
1920 /// \code
1921 /// void _outlined_par() {
1922 /// // N is the number of different reductions.
1923 /// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
1924 /// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
1925 /// _omp_reduction_func,
1926 /// _gomp_critical_user.reduction.var)) {
1927 /// case 1: {
1928 /// var_1 = var_1 <reduction-op> privatized_var_1;
1929 /// var_2 = var_2 <reduction-op> privatized_var_2;
1930 /// // ...
1931 /// __kmpc_end_reduce(...);
1932 /// break;
1933 /// }
1934 /// case 2: {
1935 /// _Atomic<ReductionOp>(var_1, privatized_var_1);
1936 /// _Atomic<ReductionOp>(var_2, privatized_var_2);
1937 /// // ...
1938 /// break;
1939 /// }
1940 /// default: break;
1941 /// }
1942 /// }
1943 ///
1944 /// void _omp_reduction_func(void **lhs, void **rhs) {
1945 /// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
1946 /// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
1947 /// // ...
1948 /// }
1949 /// \endcode
1950 ///
1951 /// \param Loc The location where the reduction was
1952 /// encountered. Must be within the associated
1953 /// directive and after the last local access to the
1954 /// reduction variables.
1955 /// \param AllocaIP An insertion point suitable for allocas usable
1956 /// in reductions.
1957 /// \param ReductionInfos A list of info on each reduction variable.
1958 /// \param IsNoWait A flag set if the reduction is marked as nowait.
1959 /// \param IsByRef Per-reduction flags indicating whether the reduction
1960 /// is performed by reference or by direct value.
1961 InsertPointOrErrorTy createReductions(const LocationDescription &Loc,
1962 InsertPointTy AllocaIP,
1963 ArrayRef<ReductionInfo> ReductionInfos,
1964 ArrayRef<bool> IsByRef,
1965 bool IsNoWait = false);
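// A minimal usage sketch, assuming the reduction variables have already been
// privatized and initialized, that 'OMPBuilder', 'Loc' and 'AllocaIP' are the
// surrounding frontend's builder, location and alloca insertion point, and
// that InsertPointOrErrorTy behaves like an Expected-style wrapper:
//
//   SmallVector<OpenMPIRBuilder::ReductionInfo> Infos = /* one per item */;
//   SmallVector<bool> IsByRef(Infos.size(), false);
//   InsertPointOrErrorTy AfterIP =
//       OMPBuilder.createReductions(Loc, AllocaIP, Infos, IsByRef,
//                                   /*IsNoWait=*/false);
//   if (!AfterIP)
//     return AfterIP.takeError();  // propagate codegen errors
//   Builder.restoreIP(*AfterIP);   // continue emitting after the reduction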
1966
1967 ///}
1968
1969 /// Return the insertion point used by the underlying IRBuilder.
1971
1972 /// Update the internal location to \p Loc.
1974 Builder.restoreIP(Loc.IP);
1976 return Loc.IP.getBlock() != nullptr;
1977 }
1978
1979 /// Return the function declaration for the runtime function with \p FnID.
1982
1984
1985 /// Return the (LLVM-IR) string describing the source location \p LocStr.
1986 Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize);
1987
1988 /// Return the (LLVM-IR) string describing the default source location.
1990
1991 /// Return the (LLVM-IR) string describing the source location identified by
1992 /// the arguments.
1993 Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
1994 unsigned Line, unsigned Column,
1995 uint32_t &SrcLocStrSize);
1996
1997 /// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
1998 /// fallback if \p DL does not specify the function name.
2000 Function *F = nullptr);
2001
2002 /// Return the (LLVM-IR) string describing the source location \p Loc.
2003 Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
2004 uint32_t &SrcLocStrSize);
2005
2006 /// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
2007 /// TODO: Create a enum class for the Reserve2Flags
2008 Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
2009 omp::IdentFlag Flags = omp::IdentFlag(0),
2010 unsigned Reserve2Flags = 0);
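// Typical usage pattern for the two helpers above (sketch; 'Loc' stands for
// the current LocationDescription):
//
//   uint32_t SrcLocStrSize;
//   Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
//   Constant *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);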
2011
2012 /// Create a hidden global flag \p Name in the module with initial value \p
2013 /// Value.
2015
2016 /// Generate control flow and cleanup for cancellation.
2017 ///
2018 /// \param CancelFlag Flag indicating if the cancellation is performed.
2019 /// \param CanceledDirective The kind of directive that is canceled.
2020 /// \param ExitCB Extra code to be generated in the exit block.
2021 ///
2022 /// \return an error, if any were triggered during execution.
2024 omp::Directive CanceledDirective,
2025 FinalizeCallbackTy ExitCB = {});
2026
2027 /// Generate a target region entry call.
2028 ///
2029 /// \param Loc The location at which the request originated and is fulfilled.
2030 /// \param AllocaIP The insertion point to be used for alloca instructions.
2031 /// \param Return Return value of the created function returned by reference.
2032 /// \param DeviceID Identifier for the device via the 'device' clause.
2033 /// \param NumTeams Number of teams for the region via the 'num_teams' clause
2034 /// or 0 if unspecified and -1 if there is no 'teams' clause.
2035 /// \param NumThreads Number of threads via the 'thread_limit' clause.
2036 /// \param HostPtr Pointer to the host-side pointer of the target kernel.
2037 /// \param KernelArgs Array of arguments to the kernel.
2038 InsertPointTy emitTargetKernel(const LocationDescription &Loc,
2039 InsertPointTy AllocaIP, Value *&Return,
2040 Value *Ident, Value *DeviceID, Value *NumTeams,
2041 Value *NumThreads, Value *HostPtr,
2042 ArrayRef<Value *> KernelArgs);
2043
2044 /// Generate a flush runtime call.
2045 ///
2046 /// \param Loc The location at which the request originated and is fulfilled.
2047 void emitFlush(const LocationDescription &Loc);
2048
2049 /// The finalization stack made up of finalize callbacks currently in-flight,
2050 /// wrapped into FinalizationInfo objects that also reference the finalization
2051 /// target block and the kind of cancellable directive.
2053
2054 /// Return true if the last entry in the finalization stack is of kind \p DK
2055 /// and cancellable.
2056 bool isLastFinalizationInfoCancellable(omp::Directive DK) {
2057 return !FinalizationStack.empty() &&
2058 FinalizationStack.back().IsCancellable &&
2059 FinalizationStack.back().DK == DK;
2060 }
2061
2062 /// Generate a taskwait runtime call.
2063 ///
2064 /// \param Loc The location at which the request originated and is fulfilled.
2065 void emitTaskwaitImpl(const LocationDescription &Loc);
2066
2067 /// Generate a taskyield runtime call.
2068 ///
2069 /// \param Loc The location at which the request originated and is fulfilled.
2070 void emitTaskyieldImpl(const LocationDescription &Loc);
2071
2072 /// Return the current thread ID.
2073 ///
2074 /// \param Ident The ident (ident_t*) describing the query origin.
2076
2077 /// The OpenMPIRBuilder Configuration
2079
2080 /// The underlying LLVM-IR module
2082
2083 /// The LLVM-IR Builder used to create IR.
2085
2086 /// Map to remember source location strings
2088
2089 /// Map to remember existing ident_t*.
2091
2092 /// Info manager to keep track of target regions.
2094
2095 /// The target triple of the underlying module.
2096 const Triple T;
2097
2098 /// Helper that contains information about regions we need to outline
2099 /// during finalization.
2101 using PostOutlineCBTy = std::function<void(Function &)>;
2105
2106 /// Collect all blocks in between EntryBB and ExitBB in both the given
2107 /// vector and set.
2109 SmallVectorImpl<BasicBlock *> &BlockVector);
2110
2111 /// Return the function that contains the region to be outlined.
2112 Function *getFunction() const { return EntryBB->getParent(); }
2113 };
2114
2115 /// Collection of regions that need to be outlined during finalization.
2117
2118 /// A collection of candidate target functions whose constant allocas will
2119 /// be raised on a call to finalize, after all currently enqueued
2120 /// outline infos have been processed.
2122
2123 /// Collection of owned canonical loop objects that eventually need to be
2124 /// freed.
2125 std::forward_list<CanonicalLoopInfo> LoopInfos;
2126
2127 /// Add a new region that will be outlined later.
2128 void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }
2129
2130 /// An ordered map of auto-generated variables to their unique names.
2131 /// It stores variables with the following names: 1) ".gomp_critical_user_" +
2132 /// <critical_section_name> + ".var" for "omp critical" directives; 2)
2133 /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
2134 /// variables.
2136
2137 /// Computes the size of type in bytes.
2138 Value *getSizeInBytes(Value *BasePtr);
2139
2140 // Emit a branch from the current block to the Target block only if
2141 // the current block does not already have a terminator.
2143
2144 // If BB has no use then delete it and return. Else place BB after the current
2145 // block, if possible, or else at the end of the function. Also add a branch
2146 // from current block to BB if current block does not have a terminator.
2147 void emitBlock(BasicBlock *BB, Function *CurFn, bool IsFinished = false);
2148
2149 /// Emits code for OpenMP 'if' clause using specified \a BodyGenCallbackTy
2150 /// Here is the logic:
2151 /// if (Cond) {
2152 /// ThenGen();
2153 /// } else {
2154 /// ElseGen();
2155 /// }
2156 ///
2157 /// \return an error, if any were triggered during execution.
2159 BodyGenCallbackTy ElseGen, InsertPointTy AllocaIP = {});
2160
2161 /// Create the global variable holding the offload mappings information.
2163 std::string VarName);
2164
2165 /// Create the global variable holding the offload names information.
2168 std::string VarName);
2169
2172 AllocaInst *Args = nullptr;
2174 };
2175
2176 /// Create the allocas instruction used in call to mapper functions.
2178 InsertPointTy AllocaIP, unsigned NumOperands,
2180
2181 /// Create the call for the target mapper function.
2182 /// \param Loc The source location description.
2183 /// \param MapperFunc Function to be called.
2184 /// \param SrcLocInfo Source location information global.
2185 /// \param MaptypesArg The argument types.
2186 /// \param MapnamesArg The argument names.
2187 /// \param MapperAllocas The AllocaInst used for the call.
2188 /// \param DeviceID Device ID for the call.
2189 /// \param NumOperands Number of operands in the call.
2190 void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
2191 Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
2192 struct MapperAllocas &MapperAllocas, int64_t DeviceID,
2193 unsigned NumOperands);
2194
2195 /// Container for the arguments used to pass data to the runtime library.
2197 /// The array of base pointers passed to the runtime library.
2199 /// The array of section pointers passed to the runtime library.
2201 /// The array of sizes passed to the runtime library.
2202 Value *SizesArray = nullptr;
2203 /// The array of map types passed to the runtime library for the beginning
2204 /// of the region or for the entire region if there are no separate map
2205 /// types for the region end.
2207 /// The array of map types passed to the runtime library for the end of the
2208 /// region, or nullptr if there are no separate map types for the region
2209 /// end.
2211 /// The array of user-defined mappers passed to the runtime library.
2213 /// The array of original declaration names of mapped pointers sent to the
2214 /// runtime library for debugging
2216
2217 explicit TargetDataRTArgs() {}
2226 };
2227
2228 /// Data structure that contains the needed information to construct the
2229 /// kernel args vector.
2231 /// Number of arguments passed to the runtime library.
2232 unsigned NumTargetItems = 0;
2233 /// Arguments passed to the runtime library
2235 /// The number of iterations
2237 /// The number of teams.
2239 /// The number of threads.
2241 /// The size of the dynamic shared memory.
2243 /// True if the kernel has 'no wait' clause.
2244 bool HasNoWait = false;
2245
2246 // Constructors for TargetKernelArgs.
2251 bool HasNoWait)
2256 };
2257
2258 /// Create the kernel args vector used by emitTargetKernel. This function
2259 /// creates various constant values that are used in the resulting args
2260 /// vector.
2261 static void getKernelArgsVector(TargetKernelArgs &KernelArgs,
2263 SmallVector<Value *> &ArgsVector);
2264
2265 /// Struct that keeps the information that should be kept throughout
2266 /// a 'target data' region.
2268 /// Set to true if device pointer information has to be obtained.
2269 bool RequiresDevicePointerInfo = false;
2270 /// Set to true if Clang emits separate runtime calls for the beginning and
2271 /// end of the region. These calls might have separate map type arrays.
2272 bool SeparateBeginEndCalls = false;
2273
2274 public:
2276
2279
2280 /// Indicate whether any user-defined mapper exists.
2281 bool HasMapper = false;
2282 /// The total number of pointers passed to the runtime library.
2283 unsigned NumberOfPtrs = 0u;
2284
2285 bool EmitDebug = false;
2286
2287 /// Whether the `target ... data` directive has a `nowait` clause.
2288 bool HasNoWait = false;
2289
2290 explicit TargetDataInfo() {}
2291 explicit TargetDataInfo(bool RequiresDevicePointerInfo,
2292 bool SeparateBeginEndCalls)
2293 : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
2294 SeparateBeginEndCalls(SeparateBeginEndCalls) {}
2295 /// Clear information about the data arrays.
2298 HasMapper = false;
2299 NumberOfPtrs = 0u;
2300 }
2301 /// Return true if the current target data information has valid arrays.
2302 bool isValid() {
2306 }
2307 bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
2308 bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
2309 };
2310
2318
2319 /// This structure contains combined information generated for mappable
2320 /// clauses, including base pointers, pointers, sizes, map types, user-defined
2321 /// mappers, and non-contiguous information.
2322 struct MapInfosTy {
2324 bool IsNonContiguous = false;
2329 };
2337
2338 /// Append arrays in \a CurInfo.
2339 void append(MapInfosTy &CurInfo) {
2341 CurInfo.BasePointers.end());
2342 Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
2344 CurInfo.DevicePointers.end());
2345 Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
2346 Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
2347 Names.append(CurInfo.Names.begin(), CurInfo.Names.end());
2349 CurInfo.NonContigInfo.Dims.end());
2351 CurInfo.NonContigInfo.Offsets.end());
2353 CurInfo.NonContigInfo.Counts.end());
2355 CurInfo.NonContigInfo.Strides.end());
2356 }
2357 };
2358
2359 /// Callback function type for functions emitting the host fallback code that
2360 /// is executed when the kernel launch fails. It takes an insertion point as
2361 /// parameter where the code should be emitted. It returns an insertion point
2362 /// that points right after the emitted code.
2365
2366 /// Generate a target region entry call and host fallback call.
2367 ///
2368 /// \param Loc The location at which the request originated and is fulfilled.
2369 /// \param OutlinedFnID The outlined function ID.
2370 /// \param EmitTargetCallFallbackCB Call back function to generate host
2371 /// fallback code.
2372 /// \param Args Data structure holding information about the kernel arguments.
2373 /// \param DeviceID Identifier for the device via the 'device' clause.
2374 /// \param RTLoc Source location identifier
2375 /// \param AllocaIP The insertion point to be used for alloca instructions.
2377 emitKernelLaunch(const LocationDescription &Loc, Value *OutlinedFnID,
2378 EmitFallbackCallbackTy EmitTargetCallFallbackCB,
2379 TargetKernelArgs &Args, Value *DeviceID, Value *RTLoc,
2380 InsertPointTy AllocaIP);
2381
2382 /// Callback type for generating the bodies of device directives that require
2383 /// outer target tasks (e.g. in case of having `nowait` or `depend` clauses).
2384 ///
2385 /// \param DeviceID The ID of the device on which the target region will
2386 /// execute.
2387 /// \param RTLoc Source location identifier
2388 /// \param TargetTaskAllocaIP Insertion point for the alloca block of the
2389 /// generated task.
2390 ///
2391 /// \return an error, if any were triggered during execution.
2393 function_ref<Error(Value *DeviceID, Value *RTLoc,
2394 IRBuilderBase::InsertPoint TargetTaskAllocaIP)>;
2395
2396 /// Generate a target-task for the target construct
2397 ///
2398 /// \param TaskBodyCB Callback to generate the actual body of the target task.
2399 /// \param DeviceID Identifier for the device via the 'device' clause.
2400 /// \param RTLoc Source location identifier
2401 /// \param AllocaIP The insertion point to be used for alloca instructions.
2402 /// \param Dependencies Vector of DependData objects holding information of
2403 /// dependencies as specified by the 'depend' clause.
2404 /// \param HasNoWait True if the target construct had 'nowait' on it, false
2405 /// otherwise
2407 TargetTaskBodyCallbackTy TaskBodyCB, Value *DeviceID, Value *RTLoc,
2410 bool HasNoWait);
2411
2412 /// Emit the arguments to be passed to the runtime library based on the
2413 /// arrays of base pointers, pointers, sizes, map types, and mappers. If
2414 /// ForEndCall, emit map types to be passed for the end of the region instead
2415 /// of the beginning.
2419 bool ForEndCall = false);
2420
2421 /// Emit an array of struct descriptors to be assigned to the offload args.
2423 InsertPointTy CodeGenIP,
2424 MapInfosTy &CombinedInfo,
2426
2427 /// Emit the arrays used to pass the captures and map information to the
2428 /// offloading runtime library. If there is no map or capture information,
2429 /// return nullptr by reference. Accepts a reference to a MapInfosTy object
2430 /// that contains information generated for mappable clauses,
2431 /// including base pointers, pointers, sizes, map types, user-defined mappers.
2433 InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo,
2434 TargetDataInfo &Info, bool IsNonContiguous = false,
2435 function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr,
2436 function_ref<Value *(unsigned int)> CustomMapperCB = nullptr);
2437
2438 /// Allocates memory for and populates the arrays required for offloading
2439 /// (offload_{baseptrs|ptrs|mappers|sizes|maptypes|mapnames}). Then, it
2440 /// emits their base addresses as arguments to be passed to the runtime
2441 /// library. In essence, this function is a combination of
2442 /// emitOffloadingArrays and emitOffloadingArraysArgument and should arguably
2443 /// be preferred by clients of OpenMPIRBuilder.
2445 InsertPointTy AllocaIP, InsertPointTy CodeGenIP, TargetDataInfo &Info,
2446 TargetDataRTArgs &RTArgs, MapInfosTy &CombinedInfo,
2447 bool IsNonContiguous = false, bool ForEndCall = false,
2448 function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr,
2449 function_ref<Value *(unsigned int)> CustomMapperCB = nullptr);
2450
2451 /// Creates offloading entry for the provided entry ID \a ID, address \a
2452 /// Addr, size \a Size, and flags \a Flags.
2454 int32_t Flags, GlobalValue::LinkageTypes,
2455 StringRef Name = "");
2456
2457 /// The kind of errors that can occur when emitting the offload entries and
2458 /// metadata.
2464
2465 /// Callback function type
2467 std::function<void(EmitMetadataErrorKind, TargetRegionEntryInfo)>;
2468
2469 // Emit the offloading entries and metadata so that the device codegen side
2470 // can easily figure out what to emit. The produced metadata looks like
2471 // this:
2472 //
2473 // !omp_offload.info = !{!1, ...}
2474 //
2475 // We only generate metadata for functions that contain target regions.
2477 EmitMetadataErrorReportFunctionTy &ErrorReportFunction);
2478
2479public:
2480 /// Generator for __kmpc_copyprivate
2481 ///
2482 /// \param Loc The source location description.
2483 /// \param BufSize Number of elements in the buffer.
2484 /// \param CpyBuf List of pointers to data to be copied.
2485 /// \param CpyFn function to call for copying data.
2486 /// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
2487 ///
2488 /// \return The insertion position *after* the CopyPrivate call.
2489
2491 llvm::Value *BufSize, llvm::Value *CpyBuf,
2492 llvm::Value *CpyFn, llvm::Value *DidIt);
2493
2494 /// Generator for '#omp single'
2495 ///
2496 /// \param Loc The source location description.
2497 /// \param BodyGenCB Callback that will generate the region code.
2498 /// \param FiniCB Callback to finalize variable copies.
2499 /// \param IsNowait If false, a barrier is emitted.
2500 /// \param CPVars copyprivate variables.
2501 /// \param CPFuncs copy functions to use for each copyprivate variable.
2502 ///
2503 /// \returns The insertion position *after* the single call.
2505 BodyGenCallbackTy BodyGenCB,
2506 FinalizeCallbackTy FiniCB, bool IsNowait,
2507 ArrayRef<llvm::Value *> CPVars = {},
2508 ArrayRef<llvm::Function *> CPFuncs = {});
2509
2510 /// Generator for '#omp master'
2511 ///
2512 /// \param Loc The insert and source location description.
2513 /// \param BodyGenCB Callback that will generate the region code.
2514 /// \param FiniCB Callback to finalize variable copies.
2515 ///
2516 /// \returns The insertion position *after* the master.
2517 InsertPointOrErrorTy createMaster(const LocationDescription &Loc,
2518 BodyGenCallbackTy BodyGenCB,
2519 FinalizeCallbackTy FiniCB);
2520
2521 /// Generator for '#omp masked'
2522 ///
2523 /// \param Loc The insert and source location description.
2524 /// \param BodyGenCB Callback that will generate the region code.
2525 /// \param FiniCB Callback to finalize variable copies.
2526 ///
2527 /// \returns The insertion position *after* the masked.
2528 InsertPointOrErrorTy createMasked(const LocationDescription &Loc,
2529 BodyGenCallbackTy BodyGenCB,
2530 FinalizeCallbackTy FiniCB, Value *Filter);
2531
2532 /// Generator for '#omp critical'
2533 ///
2534 /// \param Loc The insert and source location description.
2535 /// \param BodyGenCB Callback that will generate the region body code.
2536 /// \param FiniCB Callback to finalize variable copies.
2537 /// \param CriticalName name of the lock used by the critical directive
2538 /// \param HintInst Hint Instruction for hint clause associated with critical
2539 ///
2540 /// \returns The insertion position *after* the critical.
2541 InsertPointOrErrorTy createCritical(const LocationDescription &Loc,
2542 BodyGenCallbackTy BodyGenCB,
2543 FinalizeCallbackTy FiniCB,
2544 StringRef CriticalName, Value *HintInst);
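// A usage sketch; the exact callback shapes are given by the
// BodyGenCallbackTy/FinalizeCallbackTy typedefs in this class, and the
// Error-returning lambdas below are an assumption for illustration:
//
//   auto BodyGenCB = [&](InsertPointTy AllocaIP,
//                        InsertPointTy CodeGenIP) -> Error {
//     Builder.restoreIP(CodeGenIP);
//     // ... emit the body of the critical region ...
//     return Error::success();
//   };
//   auto FiniCB = [&](InsertPointTy CodeGenIP) -> Error {
//     return Error::success();
//   };
//   InsertPointOrErrorTy AfterIP = OMPBuilder.createCritical(
//       Loc, BodyGenCB, FiniCB, "user_lock", /*HintInst=*/nullptr);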
2545
2546 /// Generator for '#omp ordered depend (source | sink)'
2547 ///
2548 /// \param Loc The insert and source location description.
2549 /// \param AllocaIP The insertion point to be used for alloca instructions.
2550 /// \param NumLoops The number of loops in depend clause.
2551 /// \param StoreValues The values to be stored in the vector address.
2552 /// \param Name The name of alloca instruction.
2553 /// \param IsDependSource If true, depend source; otherwise, depend sink.
2554 ///
2555 /// \return The insertion position *after* the ordered.
2556 InsertPointTy createOrderedDepend(const LocationDescription &Loc,
2557 InsertPointTy AllocaIP, unsigned NumLoops,
2558 ArrayRef<llvm::Value *> StoreValues,
2559 const Twine &Name, bool IsDependSource);
2560
2561 /// Generator for '#omp ordered [threads | simd]'
2562 ///
2563 /// \param Loc The insert and source location description.
2564 /// \param BodyGenCB Callback that will generate the region code.
2565 /// \param FiniCB Callback to finalize variable copies.
2566 /// \param IsThreads If true, with the threads clause or without a clause;
2567 /// otherwise, with the simd clause.
2568 ///
2569 /// \returns The insertion position *after* the ordered.
2570 InsertPointOrErrorTy createOrderedThreadsSimd(const LocationDescription &Loc,
2571 BodyGenCallbackTy BodyGenCB,
2572 FinalizeCallbackTy FiniCB,
2573 bool IsThreads);
2574
2575 /// Generator for '#omp sections'
2576 ///
2577 /// \param Loc The insert and source location description.
2578 /// \param AllocaIP The insertion points to be used for alloca instructions.
2579 /// \param SectionCBs Callbacks that will generate body of each section.
2580 /// \param PrivCB Callback to copy a given variable (think copy constructor).
2581 /// \param FiniCB Callback to finalize variable copies.
2582 /// \param IsCancellable Flag to indicate a cancellable parallel region.
2583 /// \param IsNowait If true, the barrier that ensures all sections have
2584 /// executed before moving forward is not generated.
2585 /// \returns The insertion position *after* the sections.
2587 createSections(const LocationDescription &Loc, InsertPointTy AllocaIP,
2588 ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
2590 bool IsCancellable, bool IsNowait);
2591
2592 /// Generator for '#omp section'
2593 ///
2594 /// \param Loc The insert and source location description.
2595 /// \param BodyGenCB Callback that will generate the region body code.
2596 /// \param FiniCB Callback to finalize variable copies.
2597 /// \returns The insertion position *after* the section.
2598 InsertPointOrErrorTy createSection(const LocationDescription &Loc,
2599 BodyGenCallbackTy BodyGenCB,
2600 FinalizeCallbackTy FiniCB);
2601
2602 /// Generator for `#omp teams`
2603 ///
2604 /// \param Loc The location where the teams construct was encountered.
2605 /// \param BodyGenCB Callback that will generate the region code.
2606 /// \param NumTeamsLower Lower bound on the number of teams. If this is
2607 /// nullptr, it is as if the lower bound were equal to the upper bound. If
2608 /// this is non-null, then the upper bound must also be non-null.
2609 /// \param NumTeamsUpper Upper bound on the number of teams.
2610 /// \param ThreadLimit Limit on the number of threads that may participate
2611 /// in a contention group created by each team.
2612 /// \param IfExpr is the integer argument value of the if condition on the
2613 /// teams clause.
2615 createTeams(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
2616 Value *NumTeamsLower = nullptr, Value *NumTeamsUpper = nullptr,
2617 Value *ThreadLimit = nullptr, Value *IfExpr = nullptr);
2618
2619 /// Generate conditional branch and relevant BasicBlocks through which private
2620 /// threads copy the 'copyin' variables from Master copy to threadprivate
2621 /// copies.
2622 ///
2623 /// \param IP insertion block for copyin conditional
2624 /// \param MasterVarPtr a pointer to the master variable
2625 /// \param PrivateVarPtr a pointer to the threadprivate variable
2626 /// \param IntPtrTy Pointer size type
2627 /// \param BranchtoEnd Create a branch between the copyin.not.master blocks
2628 /// and copy.in.end block.
2629 ///
2630 /// \returns The insertion point where the copying operation is to be emitted.
2632 Value *PrivateAddr,
2633 llvm::IntegerType *IntPtrTy,
2634 bool BranchtoEnd = true);
2635
2636 /// Create a runtime call for kmpc_Alloc
2637 ///
2638 /// \param Loc The insert and source location description.
2639 /// \param Size Size of allocated memory space
2640 /// \param Allocator Allocator information instruction
2641 /// \param Name Name of call Instruction for OMP_alloc
2642 ///
2643 /// \returns CallInst to the OMP_Alloc call
2644 CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
2645 Value *Allocator, std::string Name = "");
2646
2647 /// Create a runtime call for kmpc_free
2648 ///
2649 /// \param Loc The insert and source location description.
2650 /// \param Addr Address of memory space to be freed
2651 /// \param Allocator Allocator information instruction
2652 /// \param Name Name of call Instruction for OMP_Free
2653 ///
2654 /// \returns CallInst to the OMP_Free call
2655 CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
2656 Value *Allocator, std::string Name = "");
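// The two calls are typically paired (sketch; 'Size' and 'ThreadAllocator'
// are hypothetical values obtained elsewhere by the caller):
//
//   Value *Buf = OMPBuilder.createOMPAlloc(Loc, Size, ThreadAllocator, "buf");
//   // ... use Buf ...
//   OMPBuilder.createOMPFree(Loc, Buf, ThreadAllocator, "free_buf");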
2657
2658 /// Create a runtime call for kmpc_threadprivate_cached
2659 ///
2660 /// \param Loc The insert and source location description.
2661 /// \param Pointer pointer to data to be cached
2662 /// \param Size size of data to be cached
2663 /// \param Name Name of call Instruction for callinst
2664 ///
2665 /// \returns CallInst to the thread private cache call.
2666 CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
2669 const llvm::Twine &Name = Twine(""));
2670
2671 /// Create a runtime call for __tgt_interop_init
2672 ///
2673 /// \param Loc The insert and source location description.
2674 /// \param InteropVar variable to be allocated
2675 /// \param InteropType type of interop operation
2676 /// \param Device device to which offloading will occur
2677 /// \param NumDependences number of dependence variables
2678 /// \param DependenceAddress pointer to dependence variables
2679 /// \param HaveNowaitClause does nowait clause exist
2680 ///
2681 /// \returns CallInst to the __tgt_interop_init call
2682 CallInst *createOMPInteropInit(const LocationDescription &Loc,
2683 Value *InteropVar,
2684 omp::OMPInteropType InteropType, Value *Device,
2685 Value *NumDependences,
2686 Value *DependenceAddress,
2687 bool HaveNowaitClause);
2688
2689 /// Create a runtime call for __tgt_interop_destroy
2690 ///
2691 /// \param Loc The insert and source location description.
2692 /// \param InteropVar variable to be allocated
2693 /// \param Device device to which offloading will occur
2694 /// \param NumDependences number of dependence variables
2695 /// \param DependenceAddress pointer to dependence variables
2696 /// \param HaveNowaitClause does nowait clause exist
2697 ///
2698 /// \returns CallInst to the __tgt_interop_destroy call
2699 CallInst *createOMPInteropDestroy(const LocationDescription &Loc,
2700 Value *InteropVar, Value *Device,
2701 Value *NumDependences,
2702 Value *DependenceAddress,
2703 bool HaveNowaitClause);
2704
2705 /// Create a runtime call for __tgt_interop_use
2706 ///
2707 /// \param Loc The insert and source location description.
2708 /// \param InteropVar variable to be allocated
2709 /// \param Device device to which offloading will occur
2710 /// \param NumDependences number of dependence variables
2711 /// \param DependenceAddress pointer to dependence variables
2712 /// \param HaveNowaitClause does nowait clause exist
2713 ///
2714 /// \returns CallInst to the __tgt_interop_use call
2715 CallInst *createOMPInteropUse(const LocationDescription &Loc,
2716 Value *InteropVar, Value *Device,
2717 Value *NumDependences, Value *DependenceAddress,
2718 bool HaveNowaitClause);
2719
2720 /// The `omp target` interface
2721 ///
2722 /// For more information about the usage of this interface,
2723 /// \see openmp/libomptarget/deviceRTLs/common/include/target.h
2724 ///
2725 ///{
2726
2727 /// Create a runtime call for kmpc_target_init
2728 ///
2729 /// \param Loc The insert and source location description.
2730 /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
2731 /// \param MinThreads Minimal number of threads, or 0.
2732 /// \param MaxThreads Maximal number of threads, or 0.
2733 /// \param MinTeams Minimal number of teams, or 0.
2734 /// \param MaxTeams Maximal number of teams, or 0.
2735 InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
2736 int32_t MinThreadsVal = 0,
2737 int32_t MaxThreadsVal = 0,
2738 int32_t MinTeamsVal = 0,
2739 int32_t MaxTeamsVal = 0);
2740
2741 /// Create a runtime call for kmpc_target_deinit
2742 ///
2743 /// \param Loc The insert and source location description.
2744 /// \param TeamsReductionDataSize The maximal size of all the reduction data
2745 /// for teams reduction.
2746 /// \param TeamsReductionBufferLength The number of elements (each of up to
2747 /// \p TeamsReductionDataSize size), in the teams reduction buffer.
2748 void createTargetDeinit(const LocationDescription &Loc,
2749 int32_t TeamsReductionDataSize = 0,
2750 int32_t TeamsReductionBufferLength = 1024);
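// In a generated device kernel the two calls bracket the emitted region
// (sketch; 'OMPBuilder', 'Builder' and 'Loc' are the surrounding frontend's
// state):
//
//   InsertPointTy IP = OMPBuilder.createTargetInit(Loc, /*IsSPMD=*/true);
//   Builder.restoreIP(IP);
//   // ... emit the target region body ...
//   OMPBuilder.createTargetDeinit(Loc);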
2751
2752 ///}
2753
2754 /// Helpers to read/write kernel annotations from the IR.
2755 ///
2756 ///{
2757
2758 /// Read/write bounds on threads for \p Kernel. Read will return 0 if none
2759 /// is set.
2760 static std::pair<int32_t, int32_t>
2761 readThreadBoundsForKernel(const Triple &T, Function &Kernel);
2762 static void writeThreadBoundsForKernel(const Triple &T, Function &Kernel,
2763 int32_t LB, int32_t UB);
2764
2765 /// Read/write bounds on teams for \p Kernel. Read will return 0 if none
2766 /// is set.
2767 static std::pair<int32_t, int32_t> readTeamBoundsForKernel(const Triple &T,
2768 Function &Kernel);
2769 static void writeTeamsForKernel(const Triple &T, Function &Kernel, int32_t LB,
2770 int32_t UB);
2771 ///}
2772
2773private:
2774 // Sets the function attributes expected for the outlined function
2775 void setOutlinedTargetRegionFunctionAttributes(Function *OutlinedFn);
2776
2777 // Creates the function ID/Address for the given outlined function.
2778 // In the case of an embedded device function the address of the function is
2779 // used; in the case of a non-offload function, a constant is created.
2780 Constant *createOutlinedFunctionID(Function *OutlinedFn,
2781 StringRef EntryFnIDName);
2782
2783 // Creates the region entry address for the outlined function
2784 Constant *createTargetRegionEntryAddr(Function *OutlinedFunction,
2785 StringRef EntryFnName);
2786
2787public:
2788 /// Functions used to generate a function with the given name.
2790 std::function<Expected<Function *>(StringRef FunctionName)>;
2791
2792 /// Create a unique name for the entry function using the source location
2793 /// information of the current target region. The name will be something like:
2794 ///
2795 /// __omp_offloading_DD_FFFF_PP_lBB[_CC]
2796 ///
2797 /// where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
2798 /// mangled name of the function that encloses the target region and BB is the
2799 /// line number of the target region. CC is a count added when more than one
2800 /// region is located at the same location.
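/// For example, the first target region on line 42 of a function 'foo' would
/// get a name of the form '__omp_offloading_<DD>_<FFFF>_foo_l42' (device and
/// file IDs shown symbolically here).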
2801 ///
2802 /// If this target outline function is not an offload entry, we don't need to
2803 /// register it. This may happen if it is guarded by an if clause that is
2804 /// false at compile time, or no target archs have been specified.
2805 ///
2806 /// The created target region ID is used by the runtime library to identify
2807 /// the current target region, so it only has to be unique and not
2808 /// necessarily point to anything. It could be the pointer to the outlined
2809 /// function that implements the target region, but we aren't using that so
2810 /// that the compiler doesn't need to keep it around and can therefore inline
2811 /// the host function if proven worthwhile during optimization. On the other
2812 /// hand, if emitting code for the device, the ID has to be the function
2813 /// address so that it can be retrieved from the offloading entry and launched
2814 /// by the runtime library. We also mark the outlined function to have
2815 /// external linkage in case we are emitting code for the device, because
2816 /// these functions will be entry points to the device.
2817 ///
2818 /// \param InfoManager The info manager keeping track of the offload entries
2819 /// \param EntryInfo The entry information about the function
2820 /// \param GenerateFunctionCallback The callback function to generate the code
2821 /// \param OutlinedFunction Pointer to the outlined function
2822 /// \param EntryFnIDName Name of the ID to be created
2824 FunctionGenCallback &GenerateFunctionCallback,
2825 bool IsOffloadEntry, Function *&OutlinedFn,
2826 Constant *&OutlinedFnID);
2827
2828 /// Registers the given function and sets up the attributes of the function.
2829 /// Returns the FunctionID.
2830 ///
2831 /// \param InfoManager The info manager keeping track of the offload entries
2832 /// \param EntryInfo The entry information about the function
2833 /// \param OutlinedFunction Pointer to the outlined function
2834 /// \param EntryFnName Name of the outlined function
2835 /// \param EntryFnIDName Name of the ID to be created
2836 Constant *registerTargetRegionFunction(TargetRegionEntryInfo &EntryInfo,
2837 Function *OutlinedFunction,
2838 StringRef EntryFnName,
2839 StringRef EntryFnIDName);
2840
2841 /// Type of BodyGen to use for region codegen
2842 ///
2843 /// Priv: If device pointer privatization is required, emit the body of the
2844 /// region here. It will have to be duplicated: with and without
2845 /// privatization.
2846 /// DupNoPriv: If we need device pointer privatization, we need
2847 /// to emit the body of the region with no privatization in the 'else' branch
2848 /// of the conditional.
2849 /// NoPriv: If we don't require privatization of device
2850 /// pointers, we emit the body in between the runtime calls. This avoids
2851 /// duplicating the body code.
2852 enum BodyGenTy { Priv, DupNoPriv, NoPriv };
2853
2854 /// Callback type for creating the map infos for the kernel parameters.
2855 /// \param CodeGenIP is the insertion point where code should be generated,
2856 /// if any.
2857 using GenMapInfoCallbackTy =
2858 function_ref<MapInfosTy &(InsertPointTy CodeGenIP)>;
2859
2860private:
2861 /// Emit the array initialization or deletion portion for user-defined mapper
2862 /// code generation. First, it evaluates whether an array section is mapped
2863 /// and whether the \a MapType instructs to delete this section. If \a IsInit
2864 /// is true, and \a MapType indicates to not delete this array, array
2865 /// initialization code is generated. If \a IsInit is false, and \a MapType
2866 /// indicates to delete this array, array deletion code is generated.
2867 void emitUDMapperArrayInitOrDel(Function *MapperFn, llvm::Value *MapperHandle,
2868 llvm::Value *Base, llvm::Value *Begin,
2869 llvm::Value *Size, llvm::Value *MapType,
2870 llvm::Value *MapName, TypeSize ElementSize,
2871 llvm::BasicBlock *ExitBB, bool IsInit);
2872
2873public:
2874 /// Emit the user-defined mapper function. The code generation follows the
2875 /// pattern in the example below.
2876 /// \code
2877 /// void .omp_mapper.<type_name>.<mapper_id>.(void *rt_mapper_handle,
2878 /// void *base, void *begin,
2879 /// int64_t size, int64_t type,
2880 /// void *name = nullptr) {
2881 /// // Allocate space for an array section first or add a base/begin for
2882 /// // pointer dereference.
2883 /// if ((size > 1 || (base != begin && maptype.IsPtrAndObj)) &&
2884 /// !maptype.IsDelete)
2885 /// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
2886 /// size*sizeof(Ty), clearToFromMember(type));
2887 /// // Map members.
2888 /// for (unsigned i = 0; i < size; i++) {
2889 /// // For each component specified by this mapper:
2890 /// for (auto c : begin[i]->all_components) {
2891 /// if (c.hasMapper())
2892 /// (*c.Mapper())(rt_mapper_handle, c.arg_base, c.arg_begin,
2893 /// c.arg_size,
2894 /// c.arg_type, c.arg_name);
2895 /// else
2896 /// __tgt_push_mapper_component(rt_mapper_handle, c.arg_base,
2897 /// c.arg_begin, c.arg_size, c.arg_type,
2898 /// c.arg_name);
2899 /// }
2900 /// }
2901 /// // Delete the array section.
2902 /// if (size > 1 && maptype.IsDelete)
2903 /// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
2904 /// size*sizeof(Ty), clearToFromMember(type));
2905 /// }
2906 /// \endcode
2907 ///
2908 /// \param PrivAndGenMapInfoCB Callback that privatizes code and populates the
2909 /// MapInfos and returns.
2910 /// \param ElemTy DeclareMapper element type.
2911 /// \param FuncName Optional param to specify mapper function name.
2912 /// \param CustomMapperCB Optional callback to generate code related to
2913 /// custom mappers.
2914 Function *emitUserDefinedMapper(
2915 function_ref<MapInfosTy &(InsertPointTy CodeGenIP, llvm::Value *PtrPHI,
2916 llvm::Value *BeginArg)>
2917 PrivAndGenMapInfoCB,
2918 llvm::Type *ElemTy, StringRef FuncName,
2919 function_ref<bool(unsigned int, Function **)> CustomMapperCB = nullptr);
2920
2921 /// Generator for '#omp target data'
2922 ///
2923 /// \param Loc The location where the target data construct was encountered.
2924 /// \param AllocaIP The insertion points to be used for alloca instructions.
2925 /// \param CodeGenIP The insertion point at which the target directive code
2926 /// should be placed.
2927 /// \param IsBegin If true, emits the begin mapper call; otherwise emits
2928 /// the end mapper call.
2929 /// \param DeviceID Stores the DeviceID from the device clause.
2930 /// \param IfCond Value which corresponds to the if clause condition.
2931 /// \param Info Stores all information related to the Target Data directive.
2932 /// \param GenMapInfoCB Callback that populates the MapInfos and returns.
2933 /// \param BodyGenCB Optional Callback to generate the region code.
2934 /// \param DeviceAddrCB Optional callback to generate code related to
2935 /// use_device_ptr and use_device_addr.
2936 /// \param CustomMapperCB Optional callback to generate code related to
2937 /// custom mappers.
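///
/// A condensed usage sketch (assuming `OMPBuilder`, `Loc`, the insertion
/// points, `DeviceID`, and a frontend-populated `CombinedInfo`; the callback
/// signatures follow the parameters documented above and may differ in
/// detail):
/// \code{.cpp}
///   OpenMPIRBuilder::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/false,
///                                        /*SeparateBeginEndCalls=*/true);
///   auto GenMapInfoCB = [&](OpenMPIRBuilder::InsertPointTy CodeGenIP)
///       -> OpenMPIRBuilder::MapInfosTy & { return CombinedInfo; };
///   auto BodyGenCB = [&](OpenMPIRBuilder::InsertPointTy CodeGenIP,
///                        OpenMPIRBuilder::BodyGenTy BodyGenType)
///       -> OpenMPIRBuilder::InsertPointOrErrorTy {
///     // Emit the region body for the Priv / DupNoPriv / NoPriv variant.
///     return CodeGenIP;
///   };
///   auto AfterIP = OMPBuilder.createTargetData(
///       Loc, AllocaIP, CodeGenIP, DeviceID, /*IfCond=*/nullptr, Info,
///       GenMapInfoCB, /*MapperFunc=*/nullptr, BodyGenCB);
///   // Check/propagate errors, then continue at the returned insert point.
/// \endcode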
2938 InsertPointOrErrorTy createTargetData(
2939 const LocationDescription &Loc, InsertPointTy AllocaIP,
2940 InsertPointTy CodeGenIP, Value *DeviceID, Value *IfCond,
2941 TargetDataInfo &Info, GenMapInfoCallbackTy GenMapInfoCB,
2942 omp::RuntimeFunction *MapperFunc = nullptr,
2943 function_ref<InsertPointOrErrorTy(InsertPointTy CodeGenIP,
2944 BodyGenTy BodyGenType)>
2945 BodyGenCB = nullptr,
2946 function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr,
2947 function_ref<Value *(unsigned int)> CustomMapperCB = nullptr,
2948 Value *SrcLocInfo = nullptr);
2949
2950 using TargetBodyGenCallbackTy = function_ref<InsertPointOrErrorTy(
2951 InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;
2952
2953 using TargetGenArgAccessorsCallbackTy = function_ref<InsertPointOrErrorTy(
2954 Argument &Arg, Value *Input, Value *&RetVal, InsertPointTy AllocaIP,
2955 InsertPointTy CodeGenIP)>;
2956
2957 /// Generator for '#omp target'
2958 ///
2959 /// \param Loc The location where the target construct was encountered.
2960 /// \param IsOffloadEntry whether it is an offload entry.
2961 /// \param CodeGenIP The insertion point where the call to the outlined
2962 /// function should be emitted.
2963 /// \param EntryInfo The entry information about the function.
2964 /// \param NumTeams Number of teams specified in the num_teams clause.
2965 /// \param NumThreads Number of threads specified in the thread_limit clause.
2966 /// \param Inputs The input values to the region that will be passed
2967 /// as arguments to the outlined function.
2968 /// \param BodyGenCB Callback that will generate the region code.
2969 /// \param ArgAccessorFuncCB Callback that will generate accessors
2970 /// instructions for passed-in target arguments where necessary.
2971 /// \param Dependencies A vector of DependData objects that carry
2972 /// dependency information as passed in the depend clause.
2973 /// \param HasNowait Whether the target construct has a `nowait` clause or not.
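///
/// A condensed usage sketch (assuming `OMPBuilder`, `Loc`, `EntryInfo`, the
/// insertion points, the captured `Inputs`, the three callbacks, and the
/// frontend-chosen `NumTeamsVal`/`NumThreadsVal` already exist):
/// \code{.cpp}
///   auto AfterIP = OMPBuilder.createTarget(
///       Loc, /*IsOffloadEntry=*/true, AllocaIP, CodeGenIP, EntryInfo,
///       /*NumTeams=*/{NumTeamsVal}, /*NumThreads=*/{NumThreadsVal}, Inputs,
///       GenMapInfoCB, BodyGenCB, ArgAccessorFuncCB);
///   // Check/propagate errors, then continue at the returned insert point.
/// \endcode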
2974 InsertPointOrErrorTy createTarget(
2975 const LocationDescription &Loc, bool IsOffloadEntry,
2976 OpenMPIRBuilder::InsertPointTy AllocaIP,
2977 OpenMPIRBuilder::InsertPointTy CodeGenIP,
2978 TargetRegionEntryInfo &EntryInfo, ArrayRef<int32_t> NumTeams,
2979 ArrayRef<int32_t> NumThreads, SmallVectorImpl<Value *> &Inputs,
2980 GenMapInfoCallbackTy GenMapInfoCB, TargetBodyGenCallbackTy BodyGenCB,
2981 TargetGenArgAccessorsCallbackTy ArgAccessorFuncCB,
2982 SmallVector<DependData> Dependencies = {}, bool HasNowait = false);
2983
2984 /// Returns __kmpc_for_static_init_* runtime function for the specified
2985 /// size \a IVSize and sign \a IVSigned. Will create a distribute call
2986 /// __kmpc_distribute_static_init* if \a IsGPUDistribute is set.
2987 FunctionCallee createForStaticInitFunction(unsigned IVSize, bool IVSigned,
2988 bool IsGPUDistribute);
2989
2990 /// Returns __kmpc_dispatch_init_* runtime function for the specified
2991 /// size \a IVSize and sign \a IVSigned.
2992 FunctionCallee createDispatchInitFunction(unsigned IVSize, bool IVSigned);
2993
2994 /// Returns __kmpc_dispatch_next_* runtime function for the specified
2995 /// size \a IVSize and sign \a IVSigned.
2996 FunctionCallee createDispatchNextFunction(unsigned IVSize, bool IVSigned);
2997
2998 /// Returns __kmpc_dispatch_fini_* runtime function for the specified
2999 /// size \a IVSize and sign \a IVSigned.
3000 FunctionCallee createDispatchFiniFunction(unsigned IVSize, bool IVSigned);
3001
3002 /// Returns __kmpc_dispatch_deinit runtime function.
3003 FunctionCallee createDispatchDeinitFunction();
3004
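/// A minimal sketch of how these getters are typically used (assuming
/// `OMPBuilder`, the IRBuilder `Builder`, and already-computed `Args`); they
/// only return the runtime function declaration, and emitting the call is up
/// to the caller:
/// \code{.cpp}
///   FunctionCallee InitFn = OMPBuilder.createDispatchInitFunction(
///       /*IVSize=*/32, /*IVSigned=*/true);
///   Builder.CreateCall(InitFn, Args);
/// \endcode
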
3005 /// Declarations for LLVM-IR types (simple, array, function and structure) are
3006 /// generated below. Their names are defined and used in OpenMPKinds.def. Here
3007 /// we provide the declarations, the initializeTypes function will provide the
3008 /// values.
3009 ///
3010 ///{
3011#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
3012#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
3013 ArrayType *VarName##Ty = nullptr; \
3014 PointerType *VarName##PtrTy = nullptr;
3015#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
3016 FunctionType *VarName = nullptr; \
3017 PointerType *VarName##Ptr = nullptr;
3018#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
3019 StructType *VarName = nullptr; \
3020 PointerType *VarName##Ptr = nullptr;
3021#include "llvm/Frontend/OpenMP/OMPKinds.def"
3022
3023 ///}
3024
3025private:
3026 /// Create all simple and struct types exposed by the runtime and remember
3027 /// the llvm::PointerTypes of them for easy access later.
3028 void initializeTypes(Module &M);
3029
3030 /// Common interface for generating entry calls for OMP Directives.
3031 /// If the directive has a region/body, it will set the insertion
3032 /// point to the body.
3033 ///
3034 /// \param OMPD Directive to generate entry blocks for
3035 /// \param EntryCall Call to the entry OMP Runtime Function
3036 /// \param ExitBB block where the region ends.
3037 /// \param Conditional indicate if the entry call result will be used
3038 /// to evaluate a conditional of whether a thread will execute
3039 /// body code or not.
3040 ///
3041 /// \return The insertion position in exit block
3042 InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
3043 BasicBlock *ExitBB,
3044 bool Conditional = false);
3045
3046 /// Common interface to finalize the region
3047 ///
3048 /// \param OMPD Directive to generate exiting code for
3049 /// \param FinIP Insertion point for emitting Finalization code and exit call
3050 /// \param ExitCall Call to the ending OMP Runtime Function
3051 /// \param HasFinalize indicate if the directive will require finalization
3052 /// and has a finalization callback in the stack that
3053 /// should be called.
3054 ///
3055 /// \return The insertion position in exit block
3056 InsertPointOrErrorTy emitCommonDirectiveExit(omp::Directive OMPD,
3057 InsertPointTy FinIP,
3058 Instruction *ExitCall,
3059 bool HasFinalize = true);
3060
3061 /// Common Interface to generate OMP inlined regions
3062 ///
3063 /// \param OMPD Directive to generate inlined region for
3064 /// \param EntryCall Call to the entry OMP Runtime Function
3065 /// \param ExitCall Call to the ending OMP Runtime Function
3066 /// \param BodyGenCB Body code generation callback.
3067 /// \param FiniCB Finalization Callback. Will be called when finalizing region
3068 /// \param Conditional indicate if the entry call result will be used
3069 /// to evaluate a conditional of whether a thread will execute
3070 /// body code or not.
3071 /// \param HasFinalize indicate if the directive will require finalization
3072 /// and has a finalization callback in the stack that
3073 /// should be called.
3074 /// \param IsCancellable if HasFinalize is set to true, indicate if the
3075 /// directive should be cancellable.
3076 /// \return The insertion point after the region
3077 InsertPointOrErrorTy
3078 EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
3079 Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
3080 FinalizeCallbackTy FiniCB, bool Conditional = false,
3081 bool HasFinalize = true, bool IsCancellable = false);
3082
3083 /// Get the platform-specific name separator.
3084 /// \param Parts Different parts of the final name that need separation
3085 /// \param FirstSeparator First separator used between the initial two
3086 /// parts of the name.
3087 /// \param Separator Separator used between all of the remaining consecutive
3088 /// parts of the name.
3089 static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
3090 StringRef FirstSeparator,
3091 StringRef Separator);
3092
3093 /// Returns corresponding lock object for the specified critical region
3094 /// name. If the lock object does not exist it is created, otherwise the
3095 /// reference to the existing copy is returned.
3096 /// \param CriticalName Name of the critical region.
3097 ///
3098 Value *getOMPCriticalRegionLock(StringRef CriticalName);
3099
3100 /// Callback type for Atomic Expression update
3101 /// ex:
3102 /// \code{.cpp}
3103 /// unsigned x = 0;
3104 /// #pragma omp atomic update
3105 /// x = Expr(x_old); //Expr() is any legal operation
3106 /// \endcode
3107 ///
3108 /// \param XOld the value of the atomic memory address to use for update
3109 /// \param IRB reference to the IRBuilder to use
3110 ///
3111 /// \returns Value to update X to.
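///
/// A small sketch of such a callback (`x = x_old + 1`); any IRBuilder-created
/// expression over \p XOld can be returned:
/// \code{.cpp}
///   auto UpdateOp = [&](Value *XOld, IRBuilder<> &IRB) -> Expected<Value *> {
///     return IRB.CreateAdd(XOld, IRB.getInt32(1));
///   };
/// \endcode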
3112 using AtomicUpdateCallbackTy =
3113 const function_ref<Expected<Value *>(Value *XOld, IRBuilder<> &IRB)>;
3114
3115private:
3116 enum AtomicKind { Read, Write, Update, Capture, Compare };
3117
3118 /// Determine whether to emit flush or not
3119 ///
3120 /// \param Loc The insert and source location description.
3121 /// \param AO The required atomic ordering
3122 /// \param AK The OpenMP atomic operation kind used.
3123 ///
3124 /// \returns whether a flush was emitted or not
3125 bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
3126 AtomicOrdering AO, AtomicKind AK);
3127
3128 /// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X.
3129 /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
3130 /// Only Scalar data types.
3131 ///
3132 /// \param AllocaIP The insertion point to be used for alloca
3133 /// instructions.
3134 /// \param X The target atomic pointer to be updated
3135 /// \param XElemTy The element type of the atomic pointer.
3136 /// \param Expr The value to update X with.
3137 /// \param AO Atomic ordering of the generated atomic
3138 /// instructions.
3139 /// \param RMWOp The binary operation used for the update. If the
3140 /// operation is not supported by atomicRMW, or belongs
3141 /// to {FADD, FSUB, BAD_BINOP}, then a `cmpExch`-based
3142 /// atomic will be generated.
3143 /// \param UpdateOp Code generator for complex expressions that cannot be
3144 /// expressed through atomicrmw instruction.
3145 /// \param VolatileX true if \a X is volatile
3146 /// \param IsXBinopExpr true if \a X is the left operand of the binary
3147 /// operation on the right-hand side of the update
3148 /// expression, false otherwise (e.g. true for X = X BinOp Expr).
3149 ///
3150 /// \returns A pair of the old value of X before the update, and the value
3151 /// used for the update.
3152 Expected<std::pair<Value *, Value *>>
3153 emitAtomicUpdate(InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
3154 AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
3155 AtomicUpdateCallbackTy &UpdateOp, bool VolatileX,
3156 bool IsXBinopExpr);
3157
3158 /// Emit the binary op described by \p RMWOp, using \p Src1 and \p Src2.
3159 ///
3160 /// \return The instruction
3161 Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
3162 AtomicRMWInst::BinOp RMWOp);
3163
3164public:
3165 /// A struct to pack relevant information while generating atomic Ops
3166 struct AtomicOpValue {
3167 Value *Var = nullptr;
3168 Type *ElemTy = nullptr;
3169 bool IsSigned = false;
3170 bool IsVolatile = false;
3171 };
3172
3173 /// Emit atomic Read for : V = X --- Only Scalar data types.
3174 ///
3175 /// \param Loc The insert and source location description.
3176 /// \param X The target pointer to be atomically read
3177 /// \param V Memory address where to store atomically read
3178 /// value
3179 /// \param AO Atomic ordering of the generated atomic
3180 /// instructions.
3181 ///
3182 /// \return Insertion point after generated atomic read IR.
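///
/// A minimal sketch (assuming `OMPBuilder`, `Builder`, `Loc`, the pointers
/// `XPtr`/`VPtr`, and `Int32Ty` are set up by the caller):
/// \code{.cpp}
///   OpenMPIRBuilder::AtomicOpValue X{XPtr, Int32Ty, /*IsSigned=*/true,
///                                    /*IsVolatile=*/false};
///   OpenMPIRBuilder::AtomicOpValue V{VPtr, Int32Ty, /*IsSigned=*/true,
///                                    /*IsVolatile=*/false};
///   Builder.restoreIP(
///       OMPBuilder.createAtomicRead(Loc, X, V, AtomicOrdering::Monotonic));
/// \endcode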
3183 InsertPointTy createAtomicRead(const LocationDescription &Loc,
3184 AtomicOpValue &X, AtomicOpValue &V,
3185 AtomicOrdering AO);
3186
3187 /// Emit atomic write for : X = Expr --- Only Scalar data types.
3188 ///
3189 /// \param Loc The insert and source location description.
3190 /// \param X The target pointer to be atomically written to
3191 /// \param Expr The value to store.
3192 /// \param AO Atomic ordering of the generated atomic
3193 /// instructions.
3194 ///
3195 /// \return Insertion point after generated atomic Write IR.
3196 InsertPointTy createAtomicWrite(const LocationDescription &Loc,
3197 AtomicOpValue &X, Value *Expr,
3198 AtomicOrdering AO);
3199
3200 /// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X.
3201 /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
3202 /// Only Scalar data types.
3203 ///
3204 /// \param Loc The insert and source location description.
3205 /// \param AllocaIP The insertion point to be used for alloca instructions.
3206 /// \param X The target atomic pointer to be updated
3207 /// \param Expr The value to update X with.
3208 /// \param AO Atomic ordering of the generated atomic instructions.
3209 /// \param RMWOp The binary operation used for the update. If the operation
3210 /// is not supported by atomicRMW, or belongs to
3211 /// {FADD, FSUB, BAD_BINOP}, then a `cmpExch`-based
3212 /// atomic will be generated.
3213 /// \param UpdateOp Code generator for complex expressions that cannot be
3214 /// expressed through atomicrmw instruction.
3215 /// \param IsXBinopExpr true if \a X is the left operand of the binary
3216 /// operation on the right-hand side of the update
3217 /// expression, false otherwise (e.g. true for X = X BinOp Expr).
3218 ///
3219 /// \return Insertion point after generated atomic update IR.
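///
/// A minimal sketch (reusing an `UpdateOp` callback like the one shown for
/// AtomicUpdateCallbackTy; `X`, `Expr`, and the insertion points are assumed
/// to be set up by the caller):
/// \code{.cpp}
///   auto AfterIP = OMPBuilder.createAtomicUpdate(
///       Loc, AllocaIP, X, Expr, AtomicOrdering::Monotonic,
///       AtomicRMWInst::Add, UpdateOp, /*IsXBinopExpr=*/true);
///   if (!AfterIP)
///     return AfterIP.takeError();
///   Builder.restoreIP(*AfterIP);
/// \endcode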
3220 InsertPointOrErrorTy
3221 createAtomicUpdate(const LocationDescription &Loc, InsertPointTy AllocaIP,
3222 AtomicOpValue &X, Value *Expr, AtomicOrdering AO,
3223 AtomicRMWInst::BinOp RMWOp,
3224 AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr);
3225
3226 /// Emit atomic update for constructs: --- Only Scalar data types
3227 /// V = X; X = X BinOp Expr ,
3228 /// X = X BinOp Expr; V = X,
3229 /// V = X; X = Expr BinOp X,
3230 /// X = Expr BinOp X; V = X,
3231 /// V = X; X = UpdateOp(X),
3232 /// X = UpdateOp(X); V = X,
3233 ///
3234 /// \param Loc The insert and source location description.
3235 /// \param AllocaIP The insertion point to be used for alloca instructions.
3236 /// \param X The target atomic pointer to be updated
3237 /// \param V Memory address where to store captured value
3238 /// \param Expr The value to update X with.
3239 /// \param AO Atomic ordering of the generated atomic instructions
3240 /// \param RMWOp The binary operation used for the update. If the
3241 /// operation is not supported by atomicRMW, or belongs to
3242 /// {FADD, FSUB, BAD_BINOP}, then a cmpExch-based
3243 /// atomic will be generated.
3244 /// \param UpdateOp Code generator for complex expressions that cannot be
3245 /// expressed through atomicrmw instruction.
3246 /// \param UpdateExpr true if X is an in place update of the form
3247 /// X = X BinOp Expr or X = Expr BinOp X
3248 /// \param IsXBinopExpr true if X is the left operand of the binary
3249 /// operation on the right-hand side of the update
3250 /// expression, false otherwise (e.g. true for X = X BinOp Expr).
3251 /// \param IsPostfixUpdate true if original value of 'x' must be stored in
3252 /// 'v', not an updated one.
3253 ///
3254 /// \return Insertion point after generated atomic capture IR.
3255 InsertPointOrErrorTy
3256 createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP,
3257 AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
3258 AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
3259 AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
3260 bool IsPostfixUpdate, bool IsXBinopExpr);
3261
3262 /// Emit atomic compare for constructs: --- Only scalar data types
3263 /// cond-expr-stmt:
3264 /// x = x ordop expr ? expr : x;
3265 /// x = expr ordop x ? expr : x;
3266 /// x = x == e ? d : x;
3267 /// x = e == x ? d : x; (this one is not in the spec)
3268 /// cond-update-stmt:
3269 /// if (x ordop expr) { x = expr; }
3270 /// if (expr ordop x) { x = expr; }
3271 /// if (x == e) { x = d; }
3272 /// if (e == x) { x = d; } (this one is not in the spec)
3273 /// conditional-update-capture-atomic:
3274 /// v = x; cond-update-stmt; (IsPostfixUpdate=true, IsFailOnly=false)
3275 /// cond-update-stmt; v = x; (IsPostfixUpdate=false, IsFailOnly=false)
3276 /// if (x == e) { x = d; } else { v = x; } (IsPostfixUpdate=false,
3277 /// IsFailOnly=true)
3278 /// r = x == e; if (r) { x = d; } (IsPostfixUpdate=false, IsFailOnly=false)
3279 /// r = x == e; if (r) { x = d; } else { v = x; } (IsPostfixUpdate=false,
3280 /// IsFailOnly=true)
3281 ///
3282 /// \param Loc The insert and source location description.
3283 /// \param X The target atomic pointer to be updated.
3284 /// \param V Memory address where to store captured value (for
3285 /// compare capture only).
3286 /// \param R Memory address where to store comparison result
3287 /// (for compare capture with '==' only).
3288 /// \param E The expected value ('e') for forms that use an
3289 /// equality comparison or an expression ('expr') for
3290 /// forms that use 'ordop' (logically an atomic maximum or
3291 /// minimum).
3292 /// \param D The desired value for forms that use an equality
3293 /// comparison. For forms that use 'ordop', it should be
3294 /// \p nullptr.
3295 /// \param AO Atomic ordering of the generated atomic instructions.
3296 /// \param Op Atomic compare operation. It can only be ==, <, or >.
3297 /// \param IsXBinopExpr True if the conditional statement is in the form where
3298 /// x is on LHS. It only matters for < or >.
3299 /// \param IsPostfixUpdate True if original value of 'x' must be stored in
3300 /// 'v', not an updated one (for compare capture
3301 /// only).
3302 /// \param IsFailOnly True if the original value of 'x' is stored to 'v'
3303 /// only when the comparison fails. This is only valid for
3304 /// the case the comparison is '=='.
3305 ///
3306 /// \return Insertion point after generated atomic compare IR.
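///
/// A minimal sketch of the equality form `if (x == e) { x = d; }` without
/// capture (assuming `X`, `EVal`, and `DVal`; the unused capture operands are
/// passed as default-constructed AtomicOpValues here):
/// \code{.cpp}
///   OpenMPIRBuilder::AtomicOpValue Unused;
///   Builder.restoreIP(OMPBuilder.createAtomicCompare(
///       Loc, X, /*V=*/Unused, /*R=*/Unused, /*E=*/EVal, /*D=*/DVal,
///       AtomicOrdering::Monotonic, omp::OMPAtomicCompareOp::EQ,
///       /*IsXBinopExpr=*/true, /*IsPostfixUpdate=*/false,
///       /*IsFailOnly=*/false));
/// \endcode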
3307 InsertPointTy
3308 createAtomicCompare(const LocationDescription &Loc, AtomicOpValue &X,
3309 AtomicOpValue &V, AtomicOpValue &R, Value *E, Value *D,
3310 AtomicOrdering AO, omp::OMPAtomicCompareOp Op,
3311 bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly);
3312 InsertPointTy createAtomicCompare(const LocationDescription &Loc,
3313 AtomicOpValue &X, AtomicOpValue &V,
3314 AtomicOpValue &R, Value *E, Value *D,
3315 AtomicOrdering AO,
3316 omp::OMPAtomicCompareOp Op,
3317 bool IsXBinopExpr, bool IsPostfixUpdate,
3318 bool IsFailOnly, AtomicOrdering Failure);
3319
3320 /// Create the control flow structure of a canonical OpenMP loop.
3321 ///
3322 /// The emitted loop will be disconnected, i.e. no edge to the loop's
3323 /// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
3324 /// IRBuilder location is not preserved.
3325 ///
3326 /// \param DL DebugLoc used for the instructions in the skeleton.
3327 /// \param TripCount Value to be used for the trip count.
3328 /// \param F Function in which to insert the BasicBlocks.
3329 /// \param PreInsertBefore Where to insert BBs that execute before the body,
3330 /// typically the body itself.
3331 /// \param PostInsertBefore Where to insert BBs that execute after the body.
3332 /// \param Name Base name used to derive BB
3333 /// and instruction names.
3334 ///
3335 /// \returns The CanonicalLoopInfo that represents the emitted loop.
3336 CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
3337 Function *F,
3338 BasicBlock *PreInsertBefore,
3339 BasicBlock *PostInsertBefore,
3340 const Twine &Name = {});
3341 /// OMP Offload Info Metadata name string
3342 const std::string ompOffloadInfoName = "omp_offload.info";
3343
3344 /// Loads all the offload entries information from the host IR
3345 /// metadata. This function is only meant to be used with device code
3346 /// generation.
3347 ///
3348 /// \param M Module to load metadata info from. The module passed may be
3349 /// loaded from a bitcode file, i.e., different from the OpenMPIRBuilder::M module.
3350 void loadOffloadInfoMetadata(Module &M);
3351
3352 /// Loads all the offload entries information from the host IR
3353 /// metadata read from the file passed in as the HostFilePath argument. This
3354 /// function is only meant to be used with device code generation.
3355 ///
3356 /// \param HostFilePath The path to the host IR file,
3357 /// used to load in offload metadata for the device, allowing host and device
3358 /// to maintain the same metadata mapping.
3359 void loadOffloadInfoMetadata(StringRef HostFilePath);
3360
3361 /// Gets (if a variable with the given name already exists) or creates an
3362 /// internal global variable with the specified Name. The created variable has
3363 /// CommonLinkage by default and is initialized to a null value.
3364 /// \param Ty Type of the global variable. If it already exists, the type
3365 /// must be the same.
3366 /// \param Name Name of the variable.
3367 GlobalVariable *getOrCreateInternalVariable(Type *Ty, const StringRef &Name,
3368 unsigned AddressSpace = 0);
3369};
3370
3371 /// Class to represent the control flow structure of an OpenMP canonical loop.
3372///
3373/// The control-flow structure is standardized for easy consumption by
3374/// directives associated with loops. For instance, the worksharing-loop
3375/// construct may change this control flow such that each loop iteration is
3376/// executed on only one thread. The constraints of a canonical loop in brief
3377/// are:
3378///
3379/// * The number of loop iterations must have been computed before entering the
3380/// loop.
3381///
3382/// * Has an (unsigned) logical induction variable that starts at zero and
3383/// increments by one.
3384///
3385/// * The loop's CFG itself has no side-effects. The OpenMP specification
3386/// itself allows side-effects, but the order in which they happen, including
3387/// how often or whether at all, is unspecified. We expect that the frontend
3388/// will emit those side-effect instructions somewhere (e.g. before the loop)
3389/// such that the CanonicalLoopInfo itself can be side-effect free.
3390///
3391/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
3392 /// execution of a loop body that satisfies these constraints. It does NOT
3393/// represent arbitrary SESE regions that happen to contain a loop. Do not use
3394/// CanonicalLoopInfo for such purposes.
3395///
3396/// The control flow can be described as follows:
3397///
3398 ///     Preheader
3399 ///          |
3400 ///   /-> Header
3401 ///   |      |
3402 ///   |    Cond---\
3403 ///   |      |    |
3404 ///   |    Body   |
3405 ///   |   | | |   |
3406 ///   |  <...>    |
3407 ///   |   | | |   |
3408 ///    \--Latch   |
3409 ///               |
3410 ///              Exit
3411 ///               |
3412 ///             After
3413///
3414/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
3415/// including) and end at AfterIP (at the After's first instruction, excluding).
3416/// That is, instructions in the Preheader and After blocks (except the
3417/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
3418/// side-effects. Typically, the Preheader is used to compute the loop's trip
3419/// count. The instructions from BodyIP (at the Body block's first instruction,
3420/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
3421/// control and thus can have side-effects. The body block is the single entry
3422/// point into the loop body, which may contain arbitrary control flow as long
3423/// as all control paths eventually branch to the Latch block.
3424///
3425/// TODO: Consider adding another standardized BasicBlock between Body CFG and
3426/// Latch to guarantee that there is only a single edge to the latch. It would
3427 /// make loop transformations easier by not needing to consider multiple
3428 /// predecessors of the latch (see redirectAllPredecessorsTo) and would give us
3429 /// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
3430/// executes after each body iteration.
3431///
3432/// There must be no loop-carried dependencies through llvm::Values. This is
3433 /// equivalent to the Latch having no PHINode and the Header's only PHINode
3434 /// being for the induction variable.
3435///
3436 /// All code in Header, Cond, Latch and Exit (plus the terminator of the
3437 /// Preheader) is CanonicalLoopInfo's responsibility and its build-up is checked
3438 /// by assertOK(). It is expected not to be modified unless explicitly
3439 /// modifying the CanonicalLoopInfo through a method that applies an OpenMP
3440 /// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
3441/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
3442/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
3443/// anymore as its underlying control flow may not exist anymore.
3444/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
3445/// may also return a new CanonicalLoopInfo that can be passed to other
3446/// loop-associated construct implementing methods. These loop-transforming
3447/// methods may either create a new CanonicalLoopInfo usually using
3448/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
3449 /// modify one of the input CanonicalLoopInfos and return it as representing the
3450 /// modified loop. What is done is an implementation detail of the
3451 /// transformation-implementing method and callers should always assume that the
3452 /// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
3453 /// Returned CanonicalLoopInfos have the same structure and guarantees as the one
3454 /// created by createCanonicalLoop, such that transforming methods do not have
3455 /// to special-case where the CanonicalLoopInfo originated from.
3456///
3457/// Generally, methods consuming CanonicalLoopInfo do not need an
3458/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
3459/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
3460/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
3461/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
3462 /// any InsertPoint in the Preheader, After, or body block can still be used after
3463/// calling such a method.
3464///
3465/// TODO: Provide mechanisms for exception handling and cancellation points.
3466///
3467/// Defined outside OpenMPIRBuilder because nested classes cannot be
3468/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
3469 class CanonicalLoopInfo {
3470 friend class OpenMPIRBuilder;
3471
3472private:
3473 BasicBlock *Header = nullptr;
3474 BasicBlock *Cond = nullptr;
3475 BasicBlock *Latch = nullptr;
3476 BasicBlock *Exit = nullptr;
3477
3478 /// Add the control blocks of this loop to \p BBs.
3479 ///
3480 /// This does not include any block from the body, including the one returned
3481 /// by getBody().
3482 ///
3483 /// FIXME: This currently includes the Preheader and After blocks even though
3484 /// their content is (mostly) not under CanonicalLoopInfo's control.
3485 /// Re-evaluate whether this makes sense.
3486 void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
3487
3488 /// Sets the number of loop iterations to the given value. This value must be
3489 /// valid in the condition block (i.e., defined in the preheader) and is
3490 /// interpreted as an unsigned integer.
3491 void setTripCount(Value *TripCount);
3492
3493 /// Replace all uses of the canonical induction variable in the loop body with
3494 /// a new one.
3495 ///
3496 /// The intended use case is to update the induction variable for an updated
3497 /// iteration space such that it can stay normalized in the 0...tripcount-1
3498 /// range.
3499 ///
3500 /// The \p Updater is called with the (presumably updated) current normalized
3501 /// induction variable and is expected to return the value that uses of the
3502 /// pre-updated induction values should use instead, typically dependent on
3503 /// the new induction variable. This is a lambda (instead of e.g. just passing
3504 /// the new value) to be able to distinguish the uses of the pre-updated
3505 /// induction variable and uses of the induction variable to compute the
3506 /// updated induction variable value.
3507 void mapIndVar(llvm::function_ref<Value *(Instruction *)> Updater);
3508
3509public:
3510 /// Returns whether this object currently represents the IR of a loop. If
3511 /// returning false, it may have been consumed by a loop transformation or not
3512 /// been initialized. Do not use it in this case.
3513 bool isValid() const { return Header; }
3514
3515 /// The preheader ensures that there is only a single edge entering the loop.
3516 /// Code that must be executed before any loop iteration can be emitted here,
3517 /// such as computing the loop trip count and begin lifetime markers. Code in
3518 /// the preheader is not considered part of the canonical loop.
3519 BasicBlock *getPreheader() const;
3520
3521 /// The header is the entry for each iteration. In the canonical control flow,
3522 /// it only contains the PHINode for the induction variable.
3523 BasicBlock *getHeader() const {
3524 assert(isValid() && "Requires a valid canonical loop");
3525 return Header;
3526 }
3527
3528 /// The condition block computes whether there is another loop iteration. If
3529 /// yes, branches to the body; otherwise to the exit block.
3530 BasicBlock *getCond() const {
3531 assert(isValid() && "Requires a valid canonical loop");
3532 return Cond;
3533 }
3534
3535 /// The body block is the single entry for a loop iteration and not controlled
3536 /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
3537 /// eventually branch to the \p Latch block.
3538 BasicBlock *getBody() const {
3539 assert(isValid() && "Requires a valid canonical loop");
3540 return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
3541 }
3542
3543 /// Reaching the latch indicates the end of the loop body code. In the
3544 /// canonical control flow, it only contains the increment of the induction
3545 /// variable.
3546 BasicBlock *getLatch() const {
3547 assert(isValid() && "Requires a valid canonical loop");
3548 return Latch;
3549 }
3550
3551 /// Reaching the exit indicates no more iterations are being executed.
3552 BasicBlock *getExit() const {
3553 assert(isValid() && "Requires a valid canonical loop");
3554 return Exit;
3555 }
3556
3557 /// The after block is intended for clean-up code such as lifetime end
3558 /// markers. It is separate from the exit block to ensure that, analogous to the
3559 /// preheader, it has just a single entry edge and is free from PHI
3560 /// nodes should there be multiple loop exits (such as from break
3561 /// statements/cancellations).
3562 BasicBlock *getAfter() const {
3563 assert(isValid() && "Requires a valid canonical loop");
3564 return Exit->getSingleSuccessor();
3565 }
3566
3567 /// Returns the llvm::Value containing the number of loop iterations. It must
3568 /// be valid in the preheader and always interpreted as an unsigned integer of
3569 /// any bit-width.
3570 Value *getTripCount() const {
3571 assert(isValid() && "Requires a valid canonical loop");
3572 Instruction *CmpI = &Cond->front();
3573 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
3574 return CmpI->getOperand(1);
3575 }
3576
3577 /// Returns the instruction representing the current logical induction
3578 /// variable. Always unsigned, always starting at 0 with an increment of one.
3579 Instruction *getIndVar() const {
3580 assert(isValid() && "Requires a valid canonical loop");
3581 Instruction *IndVarPHI = &Header->front();
3582 assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
3583 return IndVarPHI;
3584 }
3585
3586 /// Return the type of the induction variable (and the trip count).
3587 Type *getIndVarType() const {
3588 assert(isValid() && "Requires a valid canonical loop");
3589 return getIndVar()->getType();
3590 }
3591
3592 /// Return the insertion point for user code before the loop.
3593 OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
3594 assert(isValid() && "Requires a valid canonical loop");
3595 BasicBlock *Preheader = getPreheader();
3596 return {Preheader, std::prev(Preheader->end())};
3597 };
3598
3599 /// Return the insertion point for user code in the body.
3600 OpenMPIRBuilder::InsertPointTy getBodyIP() const {
3601 assert(isValid() && "Requires a valid canonical loop");
3602 BasicBlock *Body = getBody();
3603 return {Body, Body->begin()};
3604 };
3605
3606 /// Return the insertion point for user code after the loop.
3607 OpenMPIRBuilder::InsertPointTy getAfterIP() const {
3608 assert(isValid() && "Requires a valid canonical loop");
3609 BasicBlock *After = getAfter();
3610 return {After, After->begin()};
3611 };
3612
3613 Function *getFunction() const {
3614 assert(isValid() && "Requires a valid canonical loop");
3615 return Header->getParent();
3616 }
3617
3618 /// Consistency self-check.
3619 void assertOK() const;
3620
3621 /// Invalidate this loop. That is, the underlying IR does not fulfill the
3622 /// requirements of an OpenMP canonical loop anymore.
3623 void invalidate();
3624};
3625
3626} // end namespace llvm
3627
3628#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
arc branch finalize
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file defines the BumpPtrAllocator interface.
BlockVerifier::State From
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
DXIL Finalize Linkage
uint64_t Addr
std::string Name
uint32_t Index
uint64_t Size
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
Hexagon Hardware Loops
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
This file defines constans and helpers used when dealing with OpenMP.
Provides definitions for Target specific Grid Values.
const SmallVectorImpl< MachineOperand > & Cond
Basic Register Allocator
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Value * RHS
Value * LHS
an instruction to allocate memory on the stack
Definition: Instructions.h:63
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
Align AtomicAlign
Definition: Atomic.h:22
bool UseLibcall
Definition: Atomic.h:24
IRBuilderBase * Builder
Definition: Atomic.h:18
uint64_t AtomicSizeInBits
Definition: Atomic.h:20
uint64_t ValueSizeInBits
Definition: Atomic.h:21
Align ValueAlign
Definition: Atomic.h:23
Type * Ty
Definition: Atomic.h:19
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:716
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator end()
Definition: BasicBlock.h:461
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:448
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
Class to represented the control flow structure of an OpenMP canonical loop.
Value * getTripCount() const
Returns the llvm::Value containing the number of loop iterations.
BasicBlock * getHeader() const
The header is the entry for each iteration.
void assertOK() const
Consistency self-check.
Type * getIndVarType() const
Return the type of the induction variable (and the trip count).
BasicBlock * getBody() const
The body block is the single entry for a loop iteration and not controlled by CanonicalLoopInfo.
bool isValid() const
Returns whether this object currently represents the IR of a loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const
Return the insertion point for user code after the loop.
OpenMPIRBuilder::InsertPointTy getBodyIP() const
Return the insertion point for user code in the body.
BasicBlock * getAfter() const
The after block is intended for clean-up code such as lifetime end markers.
Function * getFunction() const
void invalidate()
Invalidate this loop.
BasicBlock * getLatch() const
Reaching the latch indicates the end of the loop body code.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const
Return the insertion point for user code before the loop.
BasicBlock * getCond() const
The condition block computes whether there is another loop iteration.
BasicBlock * getExit() const
Reaching the exit indicates no more iterations are being executed.
BasicBlock * getPreheader() const
The preheader ensures that there is only a single edge entering the loop.
Instruction * getIndVar() const
Returns the instruction representing the current logical induction variable.
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
Tagged union holding either a T or a Error.
Definition: Error.h:481
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition: GlobalValue.h:51
InsertPoint - A saved insertion point.
Definition: IRBuilder.h:254
BasicBlock * getBlock() const
Definition: IRBuilder.h:269
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:91
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition: IRBuilder.h:1796
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
Definition: IRBuilder.h:217
InsertPoint saveIP() const
Returns the current insert point.
Definition: IRBuilder.h:274
void restoreIP(InsertPoint IP)
Sets the current insert point to a previously-saved location.
Definition: IRBuilder.h:286
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2697
Class to represent integer types.
Definition: DerivedTypes.h:42
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:39
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
OffloadEntryInfoDeviceGlobalVar(unsigned Order, OMPTargetGlobalVarEntryKind Flags)
Definition: OMPIRBuilder.h:398
OffloadEntryInfoDeviceGlobalVar(unsigned Order, Constant *Addr, int64_t VarSize, OMPTargetGlobalVarEntryKind Flags, GlobalValue::LinkageTypes Linkage, const std::string &VarName)
Definition: OMPIRBuilder.h:401
static bool classof(const OffloadEntryInfo *Info)
Definition: OMPIRBuilder.h:416
static bool classof(const OffloadEntryInfo *Info)
Definition: OMPIRBuilder.h:323
OffloadEntryInfoTargetRegion(unsigned Order, Constant *Addr, Constant *ID, OMPTargetRegionEntryKind Flags)
Definition: OMPIRBuilder.h:310
@ OffloadingEntryInfoTargetRegion
Entry is a target region.
Definition: OMPIRBuilder.h:244
@ OffloadingEntryInfoDeviceGlobalVar
Entry is a declare target variable.
Definition: OMPIRBuilder.h:246
OffloadingEntryInfoKinds getKind() const
Definition: OMPIRBuilder.h:262
OffloadEntryInfo(OffloadingEntryInfoKinds Kind)
Definition: OMPIRBuilder.h:253
static bool classof(const OffloadEntryInfo *Info)
Definition: OMPIRBuilder.h:270
OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags)
Definition: OMPIRBuilder.h:254
Class that manages information about offload code regions and data.
Definition: OMPIRBuilder.h:232
function_ref< void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)> OffloadDeviceGlobalVarEntryInfoActTy
Applies action Action on all registered entries.
Definition: OMPIRBuilder.h:438
OMPTargetDeviceClauseKind
Kind of device clause for declare target variables and functions NOTE: Currently not used as a part o...
Definition: OMPIRBuilder.h:377
@ OMPTargetDeviceClauseNoHost
The target is marked for non-host devices.
Definition: OMPIRBuilder.h:381
@ OMPTargetDeviceClauseAny
The target is marked for all devices.
Definition: OMPIRBuilder.h:379
@ OMPTargetDeviceClauseNone
The target is marked as having no clause.
Definition: OMPIRBuilder.h:385
@ OMPTargetDeviceClauseHost
The target is marked for host devices.
Definition: OMPIRBuilder.h:383
void registerDeviceGlobalVarEntryInfo(StringRef VarName, Constant *Addr, int64_t VarSize, OMPTargetGlobalVarEntryKind Flags, GlobalValue::LinkageTypes Linkage)
Register device global variable entry.
void initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order)
Initialize device global variable entry.
void actOnDeviceGlobalVarEntriesInfo(const OffloadDeviceGlobalVarEntryInfoActTy &Action)
OMPTargetRegionEntryKind
Kind of the target registry entry.
Definition: OMPIRBuilder.h:297
@ OMPTargetRegionEntryTargetRegion
Mark the entry as target region.
Definition: OMPIRBuilder.h:299
OffloadEntriesInfoManager(OpenMPIRBuilder *builder)
Definition: OMPIRBuilder.h:290
void getTargetRegionEntryFnName(SmallVectorImpl< char > &Name, const TargetRegionEntryInfo &EntryInfo)
bool hasTargetRegionEntryInfo(TargetRegionEntryInfo EntryInfo, bool IgnoreAddressId=false) const
Return true if a target region entry with the provided information exists.
void registerTargetRegionEntryInfo(TargetRegionEntryInfo EntryInfo, Constant *Addr, Constant *ID, OMPTargetRegionEntryKind Flags)
Register target region entry.
void actOnTargetRegionEntriesInfo(const OffloadTargetRegionEntryInfoActTy &Action)
unsigned size() const
Return number of entries defined so far.
Definition: OMPIRBuilder.h:288
void initializeTargetRegionEntryInfo(const TargetRegionEntryInfo &EntryInfo, unsigned Order)
Initialize target region entry.
OMPTargetGlobalVarEntryKind
Kind of the global variable entry..
Definition: OMPIRBuilder.h:357
@ OMPTargetGlobalVarEntryEnter
Mark the entry as a declare target enter.
Definition: OMPIRBuilder.h:363
@ OMPTargetGlobalVarEntryNone
Mark the entry as having no declare target entry kind.
Definition: OMPIRBuilder.h:365
@ OMPTargetGlobalRegisterRequires
Mark the entry as a register requires global.
Definition: OMPIRBuilder.h:369
@ OMPTargetGlobalVarEntryIndirect
Mark the entry as a declare target indirect global.
Definition: OMPIRBuilder.h:367
@ OMPTargetGlobalVarEntryLink
Mark the entry as a to declare target link.
Definition: OMPIRBuilder.h:361
@ OMPTargetGlobalVarEntryTo
Mark the entry as a to declare target.
Definition: OMPIRBuilder.h:359
function_ref< void(const TargetRegionEntryInfo &EntryInfo, const OffloadEntryInfoTargetRegion &)> OffloadTargetRegionEntryInfoActTy
brief Applies action Action on all registered entries.
Definition: OMPIRBuilder.h:348
bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const
Checks if the variable with the given name has been registered already.
Definition: OMPIRBuilder.h:433
bool empty() const
Return true if a there are no entries defined.
Captures attributes that affect generating LLVM-IR using the OpenMPIRBuilder and related classes.
Definition: OMPIRBuilder.h:87
void setIsGPU(bool Value)
Definition: OMPIRBuilder.h:184
std::optional< bool > IsTargetDevice
Flag to define whether to generate code for the role of the OpenMP host (if set to false) or device (...
Definition: OMPIRBuilder.h:93
std::optional< bool > IsGPU
Flag for specifying if the compilation is done for an accelerator.
Definition: OMPIRBuilder.h:103
void setGridValue(omp::GV G)
Definition: OMPIRBuilder.h:189
std::optional< StringRef > FirstSeparator
First separator used between the initial two parts of a name.
Definition: OMPIRBuilder.h:112
StringRef separator() const
Definition: OMPIRBuilder.h:175
int64_t getRequiresFlags() const
Returns requires directive clauses as flags compatible with those expected by libomptarget.
void setFirstSeparator(StringRef FS)
Definition: OMPIRBuilder.h:187
StringRef firstSeparator() const
Definition: OMPIRBuilder.h:165
std::optional< bool > OpenMPOffloadMandatory
Flag for specifying if offloading is mandatory.
Definition: OMPIRBuilder.h:109
std::optional< bool > EmitLLVMUsedMetaInfo
Flag for specifying if LLVMUsed information should be emitted.
Definition: OMPIRBuilder.h:106
omp::GV getGridValue() const
Definition: OMPIRBuilder.h:148
SmallVector< Triple > TargetTriples
When compilation is being done for the OpenMP host (i.e.
Definition: OMPIRBuilder.h:121
void setHasRequiresReverseOffload(bool Value)
bool hasRequiresUnifiedSharedMemory() const
void setHasRequiresUnifiedSharedMemory(bool Value)
std::optional< StringRef > Separator
Separator used between all of the rest consecutive parts of s name.
Definition: OMPIRBuilder.h:114
bool hasRequiresDynamicAllocators() const
bool openMPOffloadMandatory() const
Definition: OMPIRBuilder.h:142
void setHasRequiresUnifiedAddress(bool Value)
void setOpenMPOffloadMandatory(bool Value)
Definition: OMPIRBuilder.h:186
void setIsTargetDevice(bool Value)
Definition: OMPIRBuilder.h:183
void setSeparator(StringRef S)
Definition: OMPIRBuilder.h:188
void setHasRequiresDynamicAllocators(bool Value)
void setEmitLLVMUsed(bool Value=true)
Definition: OMPIRBuilder.h:185
std::optional< omp::GV > GridValue
Definition: OMPIRBuilder.h:117
bool hasRequiresReverseOffload() const
bool hasRequiresUnifiedAddress() const
llvm::AllocaInst * CreateAlloca(llvm::Type *Ty, const llvm::Twine &Name) const override
Definition: OMPIRBuilder.h:496
AtomicInfo(IRBuilder<> *Builder, llvm::Type *Ty, uint64_t AtomicSizeInBits, uint64_t ValueSizeInBits, llvm::Align AtomicAlign, llvm::Align ValueAlign, bool UseLibcall, llvm::Value *AtomicVar)
Definition: OMPIRBuilder.h:487
void decorateWithTBAA(llvm::Instruction *I) override
Definition: OMPIRBuilder.h:495
llvm::Value * getAtomicPointer() const override
Definition: OMPIRBuilder.h:494
Struct that keeps the information that should be kept throughout a 'target data' region.
TargetDataInfo(bool RequiresDevicePointerInfo, bool SeparateBeginEndCalls)
SmallMapVector< const Value *, std::pair< Value *, Value * >, 4 > DevicePtrInfoMap
void clearArrayInfo()
Clear information about the data arrays.
unsigned NumberOfPtrs
The total number of pointers passed to the runtime library.
bool HasNoWait
Whether the target ... data directive has a nowait clause.
bool isValid()
Return true if the current target data information has valid arrays.
bool HasMapper
Indicate whether any user-defined mapper exists.
An interface to create LLVM-IR for OpenMP directives.
Definition: OMPIRBuilder.h:474
InsertPointOrErrorTy createOrderedThreadsSimd(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsThreads)
Generator for '#omp ordered [threads | simd]'.
Constant * getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize, omp::IdentFlag Flags=omp::IdentFlag(0), unsigned Reserve2Flags=0)
Return an ident_t* encoding the source location SrcLocStr and Flags.
FunctionCallee getOrCreateRuntimeFunction(Module &M, omp::RuntimeFunction FnID)
Return the function declaration for the runtime function with FnID.
InsertPointOrErrorTy createCancel(const LocationDescription &Loc, Value *IfCondition, omp::Directive CanceledDirective)
Generator for '#omp cancel'.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD, int32_t MinThreadsVal=0, int32_t MaxThreadsVal=0, int32_t MinTeamsVal=0, int32_t MaxTeamsVal=0)
The omp target interface.
ReductionGenCBKind
Enum class for the RedctionGen CallBack type to be used.
CanonicalLoopInfo * collapseLoops(DebugLoc DL, ArrayRef< CanonicalLoopInfo * > Loops, InsertPointTy ComputeIP)
Collapse a loop nest into a single loop.
void createTaskyield(const LocationDescription &Loc)
Generator for '#omp taskyield'.
std::function< Error(InsertPointTy CodeGenIP)> FinalizeCallbackTy
Callback type for variable finalization (think destructors).
Definition: OMPIRBuilder.h:543
void emitBranch(BasicBlock *Target)
InsertPointTy createAtomicWrite(const LocationDescription &Loc, AtomicOpValue &X, Value *Expr, AtomicOrdering AO)
Emit atomic write for : X = Expr — Only Scalar data types.
static void writeThreadBoundsForKernel(const Triple &T, Function &Kernel, int32_t LB, int32_t UB)
EvalKind
Enum class for reduction evaluation types scalar, complex and aggregate.
static TargetRegionEntryInfo getTargetEntryUniqueInfo(FileIdentifierInfoCallbackTy CallBack, StringRef ParentName="")
Creates a unique info for a target entry when provided a filename and line number from.
void emitTaskwaitImpl(const LocationDescription &Loc)
Generate a taskwait runtime call.
Constant * registerTargetRegionFunction(TargetRegionEntryInfo &EntryInfo, Function *OutlinedFunction, StringRef EntryFnName, StringRef EntryFnIDName)
Registers the given function and sets up the attribtues of the function Returns the FunctionID.
void initialize()
Initialize the internal state, this will put structures types and potentially other helpers into the ...
void createTargetDeinit(const LocationDescription &Loc, int32_t TeamsReductionDataSize=0, int32_t TeamsReductionBufferLength=1024)
Create a runtime call for kmpc_target_deinit.
InsertPointOrErrorTy createTaskgroup(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB)
Generator for the taskgroup construct.
void loadOffloadInfoMetadata(Module &M)
Loads all the offload entries information from the host IR metadata.
std::function< InsertPointOrErrorTy(InsertPointTy CodeGenIP, Value *LHS, Value *RHS, Value *&Res)> ReductionGenCBTy
ReductionGen CallBack for MLIR.
InsertPointOrErrorTy emitTargetTask(TargetTaskBodyCallbackTy TaskBodyCB, Value *DeviceID, Value *RTLoc, OpenMPIRBuilder::InsertPointTy AllocaIP, const SmallVector< llvm::OpenMPIRBuilder::DependData > &Dependencies, bool HasNoWait)
Generate a target-task for the target construct.
void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop)
Fully unroll a loop.
void emitFlush(const LocationDescription &Loc)
Generate a flush runtime call.
static std::pair< int32_t, int32_t > readThreadBoundsForKernel(const Triple &T, Function &Kernel)
}
OpenMPIRBuilderConfig Config
The OpenMPIRBuilder Configuration.
CallInst * createOMPInteropDestroy(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause)
Create a runtime call for __tgt_interop_destroy.
InsertPointTy createAtomicRead(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOrdering AO)
Emit atomic Read for : V = X — Only Scalar data types.
Error emitIfClause(Value *Cond, BodyGenCallbackTy ThenGen, BodyGenCallbackTy ElseGen, InsertPointTy AllocaIP={})
Emits code for OpenMP 'if' clause using specified BodyGenCallbackTy Here is the logic: if (Cond) { Th...
std::function< void(EmitMetadataErrorKind, TargetRegionEntryInfo)> EmitMetadataErrorReportFunctionTy
Callback function type.
void setConfig(OpenMPIRBuilderConfig C)
Definition: OMPIRBuilder.h:509
InsertPointOrErrorTy createSingle(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsNowait, ArrayRef< llvm::Value * > CPVars={}, ArrayRef< llvm::Function * > CPFuncs={})
Generator for '#omp single'.
InsertPointOrErrorTy createTeams(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, Value *NumTeamsLower=nullptr, Value *NumTeamsUpper=nullptr, Value *ThreadLimit=nullptr, Value *IfExpr=nullptr)
Generator for #omp teams
std::forward_list< CanonicalLoopInfo > LoopInfos
Collection of owned canonical loop objects that eventually need to be free'd.
void createTaskwait(const LocationDescription &Loc)
Generator for '#omp taskwait'.
CanonicalLoopInfo * createLoopSkeleton(DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore, BasicBlock *PostInsertBefore, const Twine &Name={})
Create the control flow structure of a canonical OpenMP loop.
std::string createPlatformSpecificName(ArrayRef< StringRef > Parts) const
Get the create a name using the platform specific separators.
FunctionCallee createDispatchNextFunction(unsigned IVSize, bool IVSigned)
Returns __kmpc_dispatch_next_* runtime function for the specified size IVSize and sign IVSigned.
static void getKernelArgsVector(TargetKernelArgs &KernelArgs, IRBuilderBase &Builder, SmallVector< Value * > &ArgsVector)
Create the kernel args vector used by emitTargetKernel.
void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop)
Fully or partially unroll a loop.
InsertPointOrErrorTy createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, omp::ProcBindKind ProcBind, bool IsCancellable)
Generator for '#omp parallel'.
omp::OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position)
Get OMP_MAP_MEMBER_OF flag with extra bits reserved based on the position given.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn)
Add attributes known for FnID to Fn.
Module & M
The underlying LLVM-IR module.
StringMap< Constant * > SrcLocStrMap
Map to remember source location strings.
void createMapperAllocas(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumOperands, struct MapperAllocas &MapperAllocas)
Create the allocas instruction used in call to mapper functions.
Constant * getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize)
Return the (LLVM-IR) string describing the source location LocStr.
void addOutlineInfo(OutlineInfo &&OI)
Add a new region that will be outlined later.
Error emitTargetRegionFunction(TargetRegionEntryInfo &EntryInfo, FunctionGenCallback &GenerateFunctionCallback, bool IsOffloadEntry, Function *&OutlinedFn, Constant *&OutlinedFnID)
Create a unique name for the entry function using the source location information of the current targ...
FunctionCallee createDispatchFiniFunction(unsigned IVSize, bool IVSigned)
Returns __kmpc_dispatch_fini_* runtime function for the specified size IVSize and sign IVSigned.
InsertPointOrErrorTy createTarget(const LocationDescription &Loc, bool IsOffloadEntry, OpenMPIRBuilder::InsertPointTy AllocaIP, OpenMPIRBuilder::InsertPointTy CodeGenIP, TargetRegionEntryInfo &EntryInfo, ArrayRef< int32_t > NumTeams, ArrayRef< int32_t > NumThreads, SmallVectorImpl< Value * > &Inputs, GenMapInfoCallbackTy GenMapInfoCB, TargetBodyGenCallbackTy BodyGenCB, TargetGenArgAccessorsCallbackTy ArgAccessorFuncCB, SmallVector< DependData > Dependencies={}, bool HasNowait=false)
Generator for '#omp target'.
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor, CanonicalLoopInfo **UnrolledCLI)
Partially unroll a loop.
void emitTaskyieldImpl(const LocationDescription &Loc)
Generate a taskyield runtime call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc, Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg, struct MapperAllocas &MapperAllocas, int64_t DeviceID, unsigned NumOperands)
Create the call for the target mapper function.
std::function< Error(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)> StorableBodyGenCallbackTy
Definition: OMPIRBuilder.h:606
InsertPointTy createAtomicCompare(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOpValue &R, Value *E, Value *D, AtomicOrdering AO, omp::OMPAtomicCompareOp Op, bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly)
Emit atomic compare for constructs (only scalar data types): cond-expr-stmt: x = x ordop expr ?...
InsertPointOrErrorTy createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, AtomicOpValue &V, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr)
Emit atomic capture for constructs (only scalar data types): V = X; X = X BinOp Expr,...
InsertPointTy createOrderedDepend(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumLoops, ArrayRef< llvm::Value * > StoreValues, const Twine &Name, bool IsDependSource)
Generator for '#omp ordered depend (source | sink)'.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, llvm::IntegerType *IntPtrTy, bool BranchtoEnd=true)
Generate conditional branch and relevant BasicBlocks through which private threads copy the 'copyin' ...
void emitOffloadingArrays(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo, TargetDataInfo &Info, bool IsNonContiguous=false, function_ref< void(unsigned int, Value *)> DeviceAddrCB=nullptr, function_ref< Value *(unsigned int)> CustomMapperCB=nullptr)
Emit the arrays used to pass the captures and map information to the offloading runtime library.
SmallVector< FinalizationInfo, 8 > FinalizationStack
The finalization stack made up of finalize callbacks currently in-flight, wrapped into FinalizationIn...
std::vector< CanonicalLoopInfo * > tileLoops(DebugLoc DL, ArrayRef< CanonicalLoopInfo * > Loops, ArrayRef< Value * > TileSizes)
Tile a loop nest.
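A minimal sketch of tiling a two-deep nest produced by createCanonicalLoop; the 32x32 tile sizes, and the assumption that i32 constants are acceptable tile-size values, are illustrative only.

#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include <vector>
using namespace llvm;

// Tiles a perfectly nested two-loop canonical nest with 32x32 tiles.
// The returned vector is expected to hold the floor loops followed by the
// tile loops.
static std::vector<CanonicalLoopInfo *>
tileBy32(OpenMPIRBuilder &OMPB, IRBuilder<> &B, DebugLoc DL,
         CanonicalLoopInfo *Outer, CanonicalLoopInfo *Inner) {
  Value *TileSize = B.getInt32(32);
  return OMPB.tileLoops(DL, {Outer, Inner}, {TileSize, TileSize});
}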
CallInst * createOMPInteropInit(const LocationDescription &Loc, Value *InteropVar, omp::OMPInteropType InteropType, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause)
Create a runtime call for __tgt_interop_init.
SmallVector< OutlineInfo, 16 > OutlineInfos
Collection of regions that need to be outlined during finalization.
Function * getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID)
std::function< InsertPointOrErrorTy(InsertPointTy, Type *, Value *, Value *)> ReductionGenAtomicCBTy
Functions used to generate atomic reductions.
const Triple T
The target triple of the underlying module.
DenseMap< std::pair< Constant *, uint64_t >, Constant * > IdentMap
Map to remember existing ident_t*.
CallInst * createOMPFree(const LocationDescription &Loc, Value *Addr, Value *Allocator, std::string Name="")
Create a runtime call for __kmpc_free.
FunctionCallee createForStaticInitFunction(unsigned IVSize, bool IVSigned, bool IsGPUDistribute)
Returns __kmpc_for_static_init_* runtime function for the specified size IVSize and sign IVSigned.
CallInst * createOMPAlloc(const LocationDescription &Loc, Value *Size, Value *Allocator, std::string Name="")
Create a runtime call for __kmpc_alloc.
void emitNonContiguousDescriptor(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo, TargetDataInfo &Info)
Emit an array of struct descriptors to be assigned to the offload args.
InsertPointOrErrorTy createSection(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB)
Generator for '#omp section'.
std::function< InsertPointTy(InsertPointTy CodeGenIP, unsigned Index, Value **LHS, Value **RHS, Function *CurFn)> ReductionGenClangCBTy
ReductionGen CallBack for Clang.
void emitBlock(BasicBlock *BB, Function *CurFn, bool IsFinished=false)
Value * getOrCreateThreadID(Value *Ident)
Return the current thread ID.
void emitOffloadingArraysAndArgs(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, TargetDataInfo &Info, TargetDataRTArgs &RTArgs, MapInfosTy &CombinedInfo, bool IsNonContiguous=false, bool ForEndCall=false, function_ref< void(unsigned int, Value *)> DeviceAddrCB=nullptr, function_ref< Value *(unsigned int)> CustomMapperCB=nullptr)
Allocates memory for and populates the arrays required for offloading (offload_{baseptrs|ptrs|mappers...
InsertPointOrErrorTy createMaster(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB)
Generator for '#omp master'.
void pushFinalizationCB(const FinalizationInfo &FI)
Push a finalization callback on the finalization stack.
Definition: OMPIRBuilder.h:561
Error emitCancelationCheckImpl(Value *CancelFlag, omp::Directive CanceledDirective, FinalizeCallbackTy ExitCB={})
Generate control flow and cleanup for cancellation.
InsertPointOrErrorTy emitKernelLaunch(const LocationDescription &Loc, Value *OutlinedFnID, EmitFallbackCallbackTy EmitTargetCallFallbackCB, TargetKernelArgs &Args, Value *DeviceID, Value *RTLoc, InsertPointTy AllocaIP)
Generate a target region entry call and host fallback call.
InsertPointTy getInsertionPoint()
StringMap< GlobalVariable *, BumpPtrAllocator > InternalVars
An ordered map of auto-generated variables to their unique names.
GlobalVariable * getOrCreateInternalVariable(Type *Ty, const StringRef &Name, unsigned AddressSpace=0)
Gets (if a variable with the given name already exists) or creates an internal global variable with the spe...
InsertPointOrErrorTy createReductionsGPU(const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef< ReductionInfo > ReductionInfos, bool IsNoWait=false, bool IsTeamsReduction=false, bool HasDistribute=false, ReductionGenCBKind ReductionGenCBKind=ReductionGenCBKind::MLIR, std::optional< omp::GV > GridValue={}, unsigned ReductionBufNum=1024, Value *SrcLocInfo=nullptr)
Design of OpenMP reductions on the GPU.
FunctionCallee createDispatchInitFunction(unsigned IVSize, bool IVSigned)
Returns __kmpc_dispatch_init_* runtime function for the specified size IVSize and sign IVSigned.
Function * emitUserDefinedMapper(function_ref< MapInfosTy &(InsertPointTy CodeGenIP, llvm::Value *PtrPHI, llvm::Value *BeginArg)> PrivAndGenMapInfoCB, llvm::Type *ElemTy, StringRef FuncName, function_ref< bool(unsigned int, Function **)> CustomMapperCB=nullptr)
Emit the user-defined mapper function.
CallInst * createOMPInteropUse(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause)
Create a runtime call for __tgt_interop_use.
IRBuilder<>::InsertPoint InsertPointTy
Type used throughout for insertion points.
Definition: OMPIRBuilder.h:520
InsertPointOrErrorTy createReductions(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< ReductionInfo > ReductionInfos, ArrayRef< bool > IsByRef, bool IsNoWait=false)
Generator for '#omp reduction'.
GlobalVariable * createOffloadMapnames(SmallVectorImpl< llvm::Constant * > &Names, std::string VarName)
Create the global variable holding the offload names information.
InsertPointOrErrorTy createTask(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, bool Tied=true, Value *Final=nullptr, Value *IfCondition=nullptr, SmallVector< DependData > Dependencies={}, bool Mergeable=false, Value *EventHandle=nullptr)
Generator for #omp task
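A minimal sketch of emitting a tied task with a placeholder body; the final/if clauses, dependencies, mergeable flag, and event handle keep their defaults from the signature above.

#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
using namespace llvm;

// Emits a tied '#omp task' whose body is a placeholder.
static Error emitTask(OpenMPIRBuilder &OMPB, IRBuilder<> &B,
                      OpenMPIRBuilder::InsertPointTy AllocaIP) {
  using InsertPointTy = OpenMPIRBuilder::InsertPointTy;
  auto BodyGenCB = [&](InsertPointTy, InsertPointTy CodeGenIP) -> Error {
    // Emit the task body at CodeGenIP (elided).
    return Error::success();
  };
  OpenMPIRBuilder::LocationDescription Loc(B);
  OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
      OMPB.createTask(Loc, AllocaIP, BodyGenCB, /*Tied=*/true);
  if (!AfterIP)
    return AfterIP.takeError();
  B.restoreIP(*AfterIP);
  return Error::success();
}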
std::function< Expected< Function * >(StringRef FunctionName)> FunctionGenCallback
Functions used to generate a function with the given name.
static void writeTeamsForKernel(const Triple &T, Function &Kernel, int32_t LB, int32_t UB)
InsertPointOrErrorTy createBarrier(const LocationDescription &Loc, omp::Directive Kind, bool ForceSimpleCall=false, bool CheckCancelFlag=true)
Emitter methods for OpenMP directives.
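A minimal sketch of emitting an explicit barrier, using OMPD_barrier from llvm::omp::Directive and consuming the returned Expected insertion point.

#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
using namespace llvm;

// Emits the runtime call(s) for '#omp barrier' at B's insertion point.
static Error emitBarrier(OpenMPIRBuilder &OMPB, IRBuilder<> &B) {
  OpenMPIRBuilder::LocationDescription Loc(B);
  OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
      OMPB.createBarrier(Loc, omp::Directive::OMPD_barrier);
  if (!AfterIP)
    return AfterIP.takeError();
  B.restoreIP(*AfterIP);
  return Error::success();
}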
void setCorrectMemberOfFlag(omp::OpenMPOffloadMappingFlags &Flags, omp::OpenMPOffloadMappingFlags MemberOfFlag)
Given an initial flag set, this function modifies it to contain the passed in MemberOfFlag generated ...
Constant * getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize)
Return the (LLVM-IR) string describing the default source location.
InsertPointOrErrorTy createCritical(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst)
Generator for '#omp critical'.
void createOffloadEntry(Constant *ID, Constant *Addr, uint64_t Size, int32_t Flags, GlobalValue::LinkageTypes, StringRef Name="")
Creates an offloading entry for the provided entry ID ID, address Addr, size Size, and flags Flags.
static unsigned getOpenMPDefaultSimdAlign(const Triple &TargetTriple, const StringMap< bool > &Features)
Get the default alignment value for given target.
unsigned getFlagMemberOffset()
Get the offset of the OMP_MAP_MEMBER_OF field.
void createOffloadEntriesAndInfoMetadata(EmitMetadataErrorReportFunctionTy &ErrorReportFunction)
void applySimd(CanonicalLoopInfo *Loop, MapVector< Value *, Value * > AlignedVars, Value *IfCond, omp::OrderKind Order, ConstantInt *Simdlen, ConstantInt *Safelen)
Add metadata to simd-ize a loop.
bool isLastFinalizationInfoCancellable(omp::Directive DK)
Return true if the last entry in the finalization stack is of kind DK and cancellable.
InsertPointTy emitTargetKernel(const LocationDescription &Loc, InsertPointTy AllocaIP, Value *&Return, Value *Ident, Value *DeviceID, Value *NumTeams, Value *NumThreads, Value *HostPtr, ArrayRef< Value * > KernelArgs)
Generate a target region entry call.
GlobalVariable * createOffloadMaptypes(SmallVectorImpl< uint64_t > &Mappings, std::string VarName)
Create the global variable holding the offload mappings information.
CallInst * createCachedThreadPrivate(const LocationDescription &Loc, llvm::Value *Pointer, llvm::ConstantInt *Size, const llvm::Twine &Name=Twine(""))
Create a runtime call for kmpc_threadprivate_cached.
IRBuilder Builder
The LLVM-IR Builder used to create IR.
GlobalValue * createGlobalFlag(unsigned Value, StringRef Name)
Create a hidden global flag Name in the module with initial value Value.
InsertPointOrErrorTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, bool NeedsBarrier, llvm::omp::ScheduleKind SchedKind=llvm::omp::OMP_SCHEDULE_Default, Value *ChunkSize=nullptr, bool HasSimdModifier=false, bool HasMonotonicModifier=false, bool HasNonmonotonicModifier=false, bool HasOrderedClause=false, omp::WorksharingLoopType LoopType=omp::WorksharingLoopType::ForStaticLoop)
Modifies the canonical loop to be a workshare loop.
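A minimal sketch of turning an existing CanonicalLoopInfo into a statically scheduled workshare loop with an implicit barrier; all other modifiers keep the defaults listed in the signature above.

#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
using namespace llvm;

// Lowers an existing canonical loop into a '#omp for' workshare loop with
// static scheduling and an implicit barrier.
static Error makeWorkshareLoop(OpenMPIRBuilder &OMPB, DebugLoc DL,
                               CanonicalLoopInfo *CLI,
                               OpenMPIRBuilder::InsertPointTy AllocaIP) {
  OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
      OMPB.applyWorkshareLoop(DL, CLI, AllocaIP, /*NeedsBarrier=*/true);
  if (!AfterIP)
    return AfterIP.takeError();
  return Error::success();
}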
void emitOffloadingArraysArgument(IRBuilderBase &Builder, OpenMPIRBuilder::TargetDataRTArgs &RTArgs, OpenMPIRBuilder::TargetDataInfo &Info, bool ForEndCall=false)
Emit the arguments to be passed to the runtime library based on the arrays of base pointers,...
InsertPointOrErrorTy createMasked(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, Value *Filter)
Generator for '#omp masked'.
Expected< CanonicalLoopInfo * > createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *TripCount, const Twine &Name="loop")
Generator for the control flow structure of an OpenMP canonical loop.
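A minimal sketch of building a canonical loop; the body callback signature (the code-gen insertion point plus the induction variable) is assumed from LoopBodyGenCallbackTy in OMPIRBuilder.h, which is not reproduced in this excerpt.

#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
using namespace llvm;

// Builds a canonical loop over TripCount iterations with a placeholder body.
static Expected<CanonicalLoopInfo *>
buildLoop(OpenMPIRBuilder &OMPB, IRBuilder<> &B, Value *TripCount) {
  auto BodyGenCB = [&](OpenMPIRBuilder::InsertPointTy CodeGenIP,
                       Value *IndVar) -> Error {
    // Emit one iteration of the loop body using IndVar (elided).
    return Error::success();
  };
  OpenMPIRBuilder::LocationDescription Loc(B);
  return OMPB.createCanonicalLoop(Loc, BodyGenCB, TripCount, "loop");
}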
Value * getSizeInBytes(Value *BasePtr)
Computes the size of type in bytes.
function_ref< InsertPointOrErrorTy(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original, Value &Inner, Value *&ReplVal)> PrivatizeCallbackTy
Callback type for variable privatization (think copy & default constructor).
Definition: OMPIRBuilder.h:641
OpenMPIRBuilder(Module &M)
Create a new OpenMPIRBuilder operating on the given module M.
Definition: OMPIRBuilder.h:478
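A minimal sketch of the typical lifecycle around this constructor; initialize() and finalize() are assumed from the OpenMPIRBuilder interface and are not listed in this excerpt.

#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("omp_demo", Ctx);
  OpenMPIRBuilder OMPB(M);
  OMPB.initialize(); // Set up internal types and state before emitting IR.
  // ... emit directives via the create*/apply* entry points above ...
  OMPB.finalize();   // Outline pending regions and emit offload metadata.
  return 0;
}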
FunctionCallee createDispatchDeinitFunction()
Returns __kmpc_dispatch_deinit runtime function.
void registerTargetGlobalVariable(OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind CaptureClause, OffloadEntriesInfoManager::OMPTargetDeviceClauseKind DeviceClause, bool IsDeclaration, bool IsExternallyVisible, TargetRegionEntryInfo EntryInfo, StringRef MangledName, std::vector< GlobalVariable * > &GeneratedRefs, bool OpenMPSIMD, std::vector< Triple > TargetTriple, std::function< Constant *()> GlobalInitializer, std::function< GlobalValue::LinkageTypes()> VariableLinkage, Type *LlvmPtrTy, Constant *Addr)
Registers a target variable for device or host.
InsertPointOrErrorTy createTargetData(const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value *DeviceID, Value *IfCond, TargetDataInfo &Info, GenMapInfoCallbackTy GenMapInfoCB, omp::RuntimeFunction *MapperFunc=nullptr, function_ref< InsertPointOrErrorTy(InsertPointTy CodeGenIP, BodyGenTy BodyGenType)> BodyGenCB=nullptr, function_ref< void(unsigned int, Value *)> DeviceAddrCB=nullptr, function_ref< Value *(unsigned int)> CustomMapperCB=nullptr, Value *SrcLocInfo=nullptr)
Generator for '#omp target data'.
BodyGenTy
Type of BodyGen to use for region codegen.
InsertPointOrErrorTy createAtomicUpdate(const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr)
Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X For complex Operations: X = ...
SmallVector< llvm::Function *, 16 > ConstantAllocaRaiseCandidates
A collection of candidate target functions whose constant allocas we will attempt to raise on a cal...
OffloadEntriesInfoManager OffloadInfoManager
Info manager to keep track of target regions.
static std::pair< int32_t, int32_t > readTeamBoundsForKernel(const Triple &T, Function &Kernel)
Read/write the bounds on teams for Kernel.
std::function< std::tuple< std::string, uint64_t >()> FileIdentifierInfoCallbackTy
const std::string ompOffloadInfoName
OMP Offload Info Metadata name string.
Expected< InsertPointTy > InsertPointOrErrorTy
Type used to represent an insertion point or an error value.
Definition: OMPIRBuilder.h:523
InsertPointTy createCopyPrivate(const LocationDescription &Loc, llvm::Value *BufSize, llvm::Value *CpyBuf, llvm::Value *CpyFn, llvm::Value *DidIt)
Generator for __kmpc_copyprivate.
void popFinalizationCB()
Pop the last finalization callback from the finalization stack.
Definition: OMPIRBuilder.h:568
InsertPointOrErrorTy createSections(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< StorableBodyGenCallbackTy > SectionCBs, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait)
Generator for '#omp sections'.
function_ref< Error(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)> BodyGenCallbackTy
Callback type for body (=inner region) code generation.
Definition: OMPIRBuilder.h:597
bool updateToLocation(const LocationDescription &Loc)
Update the internal location to Loc.
void createFlush(const LocationDescription &Loc)
Generator for '#omp flush'.
Constant * getAddrOfDeclareTargetVar(OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind CaptureClause, OffloadEntriesInfoManager::OMPTargetDeviceClauseKind DeviceClause, bool IsDeclaration, bool IsExternallyVisible, TargetRegionEntryInfo EntryInfo, StringRef MangledName, std::vector< GlobalVariable * > &GeneratedRefs, bool OpenMPSIMD, std::vector< Triple > TargetTriple, Type *LlvmPtrTy, std::function< Constant *()> GlobalInitializer, std::function< GlobalValue::LinkageTypes()> VariableLinkage)
Retrieve (or create if non-existent) the address of a declare target variable, used in conjunction wi...
EmitMetadataErrorKind
The kind of errors that can occur when emitting the offload entries and metadata.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:363
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
Definition: StringMap.h:128
size_type count(StringRef Key) const
count - Return 1 if the element is in the map, 0 otherwise.
Definition: StringMap.h:276
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Value * getOperand(unsigned i) const
Definition: User.h:228
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
Value handle that is nullable, but tries to track the Value.
Definition: ValueHandle.h:204
bool pointsToAliveValue() const
Definition: ValueHandle.h:224
An efficient, type-erasing, non-owning reference to a callable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition: ISDOpcodes.h:71
OpenMPOffloadMappingFlags
Values for bit flags used to specify the mapping type for offloading.
Definition: OMPConstants.h:195
IdentFlag
IDs for all omp runtime library ident_t flag encodings (see their definition in openmp/runtime/src/kmp...
Definition: OMPConstants.h:65
RTLDependenceKindTy
Dependence kind for RTL.
Definition: OMPConstants.h:273
RuntimeFunction
IDs for all omp runtime library (RTL) functions.
Definition: OMPConstants.h:45
WorksharingLoopType
A type of worksharing loop construct.
Definition: OMPConstants.h:283
OMPAtomicCompareOp
Atomic compare operations. Currently OpenMP only supports ==, >, and <.
Definition: OMPConstants.h:267
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
BasicBlock * splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch, llvm::Twine Suffix=".split")
Like splitBB, but reuses the current block's name for the new name.
@ Offset
Definition: DWP.cpp:480
void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New, bool CreateBranch)
Move the instruction after an InsertPoint to the beginning of another BasicBlock.
BasicBlock * splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch, llvm::Twine Name={})
Split a BasicBlock at an InsertPoint, even if the block is degenerate (missing the terminator).
AtomicOrdering
Atomic ordering for LLVM's memory model.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
A struct to pack relevant information while generating atomic ops.
A struct to pack the relevant information for an OpenMP depend clause.
DependData(omp::RTLDependenceKindTy DepKind, Type *DepValueType, Value *DepVal)
omp::RTLDependenceKindTy DepKind
bool IsCancellable
Flag to indicate if the directive is cancellable.
Definition: OMPIRBuilder.h:555
FinalizeCallbackTy FiniCB
The finalization callback provided by the last in-flight invocation of createXXXX for the directive o...
Definition: OMPIRBuilder.h:548
omp::Directive DK
The directive kind of the innermost directive that has an associated region which might require final...
Definition: OMPIRBuilder.h:552
Description of an LLVM-IR insertion point (IP) and a debug/source location (filename,...
Definition: OMPIRBuilder.h:645
LocationDescription(const InsertPointTy &IP)
Definition: OMPIRBuilder.h:648
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
Definition: OMPIRBuilder.h:649
LocationDescription(const IRBuilderBase &IRB)
Definition: OMPIRBuilder.h:646
This structure contains combined information generated for mappable clauses, including base pointers,...
void append(MapInfosTy &CurInfo)
Append arrays in CurInfo.
MapDeviceInfoArrayTy DevicePointers
StructNonContiguousInfo NonContigInfo
Helper that contains information about regions we need to outline during finalization.
void collectBlocks(SmallPtrSetImpl< BasicBlock * > &BlockSet, SmallVectorImpl< BasicBlock * > &BlockVector)
Collect all blocks in between EntryBB and ExitBB in both the given vector and set.
Function * getFunction() const
Return the function that contains the region to be outlined.
SmallVector< Value *, 2 > ExcludeArgsFromAggregate
std::function< void(Function &)> PostOutlineCBTy
Information about an OpenMP reduction.
EvalKind EvaluationKind
Reduction evaluation kind - scalar, complex or aggregate.
ReductionGenAtomicCBTy AtomicReductionGen
Callback for generating the atomic reduction body, may be null.
ReductionGenCBTy ReductionGen
Callback for generating the reduction body.
ReductionInfo(Value *PrivateVariable)
Value * Variable
Reduction variable of pointer type.
Value * PrivateVariable
Thread-private partial reduction variable.
ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable, EvalKind EvaluationKind, ReductionGenCBTy ReductionGen, ReductionGenClangCBTy ReductionGenClang, ReductionGenAtomicCBTy AtomicReductionGen)
ReductionGenClangCBTy ReductionGenClang
Clang callback for generating the reduction body.
Type * ElementType
Reduction element type, must match pointee type of variable.
Container for the arguments used to pass data to the runtime library.
Value * SizesArray
The array of sizes passed to the runtime library.
TargetDataRTArgs(Value *BasePointersArray, Value *PointersArray, Value *SizesArray, Value *MapTypesArray, Value *MapTypesArrayEnd, Value *MappersArray, Value *MapNamesArray)
Value * PointersArray
The array of section pointers passed to the runtime library.
Value * MappersArray
The array of user-defined mappers passed to the runtime library.
Value * MapTypesArrayEnd
The array of map types passed to the runtime library for the end of the region, or nullptr if there a...
Value * BasePointersArray
The array of base pointers passed to the runtime library.
Value * MapTypesArray
The array of map types passed to the runtime library for the beginning of the region or for the entir...
Value * MapNamesArray
The array of original declaration names of mapped pointers sent to the runtime library for debugging.
Data structure that contains the needed information to construct the kernel args vector.
TargetKernelArgs(unsigned NumTargetItems, TargetDataRTArgs RTArgs, Value *NumIterations, ArrayRef< Value * > NumTeams, ArrayRef< Value * > NumThreads, Value *DynCGGroupMem, bool HasNoWait)
Value * DynCGGroupMem
The size of the dynamic shared memory.
ArrayRef< Value * > NumThreads
The number of threads.
TargetDataRTArgs RTArgs
Arguments passed to the runtime library.
Value * NumIterations
The number of iterations.
unsigned NumTargetItems
Number of arguments passed to the runtime library.
bool HasNoWait
True if the kernel has 'no wait' clause.
ArrayRef< Value * > NumTeams
The number of teams.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254
Data structure to contain the information needed to uniquely identify a target entry.
Definition: OMPIRBuilder.h:203
static void getTargetRegionEntryFnName(SmallVectorImpl< char > &Name, StringRef ParentName, unsigned DeviceID, unsigned FileID, unsigned Line, unsigned Count)
static constexpr const char * KernelNamePrefix
The prefix used for kernel names.
Definition: OMPIRBuilder.h:205
bool operator<(const TargetRegionEntryInfo &RHS) const
Definition: OMPIRBuilder.h:224
TargetRegionEntryInfo(StringRef ParentName, unsigned DeviceID, unsigned FileID, unsigned Line, unsigned Count=0)
Definition: OMPIRBuilder.h:214
Defines various target-specific GPU grid values that must be consistent between host RTL (plugin),...
Definition: OMPGridValues.h:57