1//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the OpenMPIRBuilder class and helpers used as a convenient
10// way to create LLVM instructions for OpenMP directives.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
15#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
16
20#include "llvm/IR/DebugLoc.h"
21#include "llvm/IR/IRBuilder.h"
24#include <forward_list>
25#include <map>
26#include <optional>
27
28namespace llvm {
29class CanonicalLoopInfo;
30struct TargetRegionEntryInfo;
31class OffloadEntriesInfoManager;
32class OpenMPIRBuilder;
33
34/// Move the instructions after an InsertPoint to the beginning of another
35/// BasicBlock.
36///
37/// The instructions after \p IP are moved to the beginning of \p New which must
38/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
39/// \p New will be added such that there is no semantic change. Otherwise, the
40/// \p IP insert block remains degenerate and it is up to the caller to insert a
41/// terminator.
42void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New,
43 bool CreateBranch);
44
45/// Splice a BasicBlock at an IRBuilder's current insertion point. Its new
46/// insert location will stick to after the instruction before the insertion
47/// point (instead of moving with the instruction the InsertPoint stores
48/// internally).
49void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch);
50
51/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
52/// (missing the terminator).
53///
54/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
55/// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch
56/// is true, a branch to the new successor will be created such that
57/// semantically there is no change; otherwise the block of the insertion point
58/// remains degenerate and it is the caller's responsibility to insert a
59/// terminator. Returns the new successor block.
60BasicBlock *splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
61 llvm::Twine Name = {});
62
63/// Split a BasicBlock at \p Builder's insertion point, even if the block is
64/// degenerate (missing the terminator). Its new insert location will stick to
65/// after the instruction before the insertion point (instead of moving with the
66/// instruction the InsertPoint stores internally).
67BasicBlock *splitBB(IRBuilderBase &Builder, bool CreateBranch,
68 llvm::Twine Name = {});
69
70/// Split a BasicBlock at \p Builder's insertion point, even if the block is
71/// degenerate (missing the terminator). Its new insert location will stick to
72/// after the instruction before the insertion point (instead of moving with the
73/// instruction the InsertPoint stores internally).
74BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch, llvm::Twine Name);
75
76/// Like splitBB, but reuses the current block's name for the new name.
77BasicBlock *splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
78 llvm::Twine Suffix = ".split");
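// A minimal usage sketch of the split helpers (illustrative only; EntryBB is
// an assumed, pre-existing basic block):
//
//   IRBuilder<> Builder(EntryBB);
//   // Split at the current insertion point. With CreateBranch=true a
//   // fall-through branch is added so semantics are unchanged; the new block
//   // reuses EntryBB's name with a ".split" suffix.
//   BasicBlock *ContBB = splitBBWithSuffix(Builder, /*CreateBranch=*/true);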
79
80/// Captures attributes that affect generating LLVM-IR using the
81/// OpenMPIRBuilder and related classes. Note that not all attributes are
82/// required for all classes or functions. In some use cases the configuration
83/// is not necessary at all, because the only functions that are called
84/// are ones that are not dependent on the configuration.
85class OpenMPIRBuilderConfig {
86public:
87 /// Flag to define whether to generate code for the role of the OpenMP host
88 /// (if set to false) or device (if set to true) in an offloading context. It
89 /// is set when the -fopenmp-is-target-device compiler frontend option is
90 /// specified.
91 std::optional<bool> IsTargetDevice;
92
93 /// Flag for specifying if the compilation is done for an accelerator. It is
94 /// set according to the architecture of the target triple and currently only
95 /// true when targeting AMDGPU or NVPTX. Today, these targets can only perform
96 /// the role of an OpenMP target device, so `IsTargetDevice` must also be true
97 /// if `IsGPU` is true. This restriction might be lifted if an accelerator-
98 /// like target with the ability to work as the OpenMP host is added, or if
99 /// the capabilities of the currently supported GPU architectures are
100 /// expanded.
101 std::optional<bool> IsGPU;
102
103 /// Flag for specifying if LLVMUsed information should be emitted.
104 std::optional<bool> EmitLLVMUsedMetaInfo;
105
106 /// Flag for specifying if offloading is mandatory.
107 std::optional<bool> OpenMPOffloadMandatory;
108
109 /// First separator used between the initial two parts of a name.
110 std::optional<StringRef> FirstSeparator;
111 /// Separator used between all of the remaining consecutive parts of a name.
112 std::optional<StringRef> Separator;
113
114 // Grid Value for the GPU target
115 std::optional<omp::GV> GridValue;
116
120 bool HasRequiresReverseOffload,
121 bool HasRequiresUnifiedAddress,
122 bool HasRequiresUnifiedSharedMemory,
123 bool HasRequiresDynamicAllocators);
124
125 // Getter functions that assert if the required values are not present.
126 bool isTargetDevice() const {
127 assert(IsTargetDevice.has_value() && "IsTargetDevice is not set");
128 return *IsTargetDevice;
129 }
130
131 bool isGPU() const {
132 assert(IsGPU.has_value() && "IsGPU is not set");
133 return *IsGPU;
134 }
135
136 bool openMPOffloadMandatory() const {
137 assert(OpenMPOffloadMandatory.has_value() &&
138 "OpenMPOffloadMandatory is not set");
139 return *OpenMPOffloadMandatory;
140 }
141
142 omp::GV getGridValue() const {
143 assert(GridValue.has_value() && "GridValue is not set");
144 return *GridValue;
145 }
146
147 bool hasRequiresFlags() const { return RequiresFlags; }
148 bool hasRequiresReverseOffload() const;
149 bool hasRequiresUnifiedAddress() const;
150 bool hasRequiresUnifiedSharedMemory() const;
151 bool hasRequiresDynamicAllocators() const;
152
153 /// Returns requires directive clauses as flags compatible with those expected
154 /// by libomptarget.
155 int64_t getRequiresFlags() const;
156
157 // Returns the FirstSeparator if set, otherwise the default separator
158 // depending on isGPU.
159 StringRef firstSeparator() const {
160 if (FirstSeparator.has_value())
161 return *FirstSeparator;
162 if (isGPU())
163 return "_";
164 return ".";
165 }
166
167 // Returns the Separator if set, otherwise the default separator depending
168 // on isGPU.
169 StringRef separator() const {
170 if (Separator.has_value())
171 return *Separator;
172 if (isGPU())
173 return "$";
174 return ".";
175 }
176
178 void setIsGPU(bool Value) { IsGPU = Value; }
184
189
190private:
191 /// Flags for specifying which requires directive clauses are present.
192 int64_t RequiresFlags;
193};
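// A minimal configuration sketch (illustrative only; it assumes the config's
// default constructor and an OpenMPIRBuilder::setConfig(...) setter, neither
// of which is shown in this excerpt):
//
//   OpenMPIRBuilderConfig Config;
//   Config.setIsGPU(false);        // generate code for the host role
//   OMPBuilder.setConfig(Config);  // OMPBuilder is an OpenMPIRBuilder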
194
195/// Data structure to contain the information needed to uniquely identify
196/// a target entry.
197struct TargetRegionEntryInfo {
198 std::string ParentName;
199 unsigned DeviceID;
200 unsigned FileID;
201 unsigned Line;
202 unsigned Count;
203
206 unsigned FileID, unsigned Line, unsigned Count = 0)
208 Count(Count) {}
209
212 unsigned DeviceID, unsigned FileID,
213 unsigned Line, unsigned Count);
214
215 bool operator<(const TargetRegionEntryInfo &RHS) const {
216 return std::make_tuple(ParentName, DeviceID, FileID, Line, Count) <
217 std::make_tuple(RHS.ParentName, RHS.DeviceID, RHS.FileID, RHS.Line,
218 RHS.Count);
219 }
220};
221
222/// Class that manages information about offload code regions and data.
223class OffloadEntriesInfoManager {
224 /// Number of entries registered so far.
225 OpenMPIRBuilder *OMPBuilder;
226 unsigned OffloadingEntriesNum = 0;
227
228public:
229 /// Base class of the entries info.
230 class OffloadEntryInfo {
231 public:
232 /// Kind of a given entry.
233 enum OffloadingEntryInfoKinds : unsigned {
234 /// Entry is a target region.
235 OffloadingEntryInfoTargetRegion = 0,
236 /// Entry is a declare target variable.
237 OffloadingEntryInfoDeviceGlobalVar = 1,
238 /// Invalid entry info.
239 OffloadingEntryInfoInvalid = ~0u
240 };
241
242 protected:
244 explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
245 explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
246 uint32_t Flags)
247 : Flags(Flags), Order(Order), Kind(Kind) {}
248 ~OffloadEntryInfo() = default;
249
250 public:
251 bool isValid() const { return Order != ~0u; }
252 unsigned getOrder() const { return Order; }
253 OffloadingEntryInfoKinds getKind() const { return Kind; }
254 uint32_t getFlags() const { return Flags; }
255 void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
256 Constant *getAddress() const { return cast_or_null<Constant>(Addr); }
257 void setAddress(Constant *V) {
258 assert(!Addr.pointsToAliveValue() && "Address has been set before!");
259 Addr = V;
260 }
261 static bool classof(const OffloadEntryInfo *Info) { return true; }
262
263 private:
264 /// Address of the entity that has to be mapped for offloading.
265 WeakTrackingVH Addr;
266
267 /// Flags associated with the device global.
268 uint32_t Flags = 0u;
269
270 /// Order this entry was emitted.
271 unsigned Order = ~0u;
272
273 OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
274 };
275
276 /// Return true if there are no entries defined.
277 bool empty() const;
278 /// Return number of entries defined so far.
279 unsigned size() const { return OffloadingEntriesNum; }
280
281 OffloadEntriesInfoManager(OpenMPIRBuilder *builder) : OMPBuilder(builder) {}
282
283 //
284 // Target region entries related.
285 //
286
287 /// Kind of the target registry entry.
289 /// Mark the entry as target region.
291 };
292
293 /// Target region entries info.
295 /// Address that can be used as the ID of the entry.
296 Constant *ID = nullptr;
297
298 public:
301 explicit OffloadEntryInfoTargetRegion(unsigned Order, Constant *Addr,
302 Constant *ID,
305 ID(ID) {
307 }
308
309 Constant *getID() const { return ID; }
310 void setID(Constant *V) {
311 assert(!ID && "ID has been set before!");
312 ID = V;
313 }
314 static bool classof(const OffloadEntryInfo *Info) {
315 return Info->getKind() == OffloadingEntryInfoTargetRegion;
316 }
317 };
318
319 /// Initialize target region entry.
320 /// This is ONLY needed for DEVICE compilation.
322 unsigned Order);
323 /// Register target region entry.
327 /// Return true if a target region entry with the provided information
328 /// exists.
330 bool IgnoreAddressId = false) const;
331
332 // Return the Name based on \a EntryInfo using the next available Count.
334 const TargetRegionEntryInfo &EntryInfo);
335
336 /// Applies action \a Action on all registered entries.
337 typedef function_ref<void(const TargetRegionEntryInfo &EntryInfo,
338 const OffloadEntryInfoTargetRegion &)>
340 void
342
343 //
344 // Device global variable entries related.
345 //
346
347 /// Kind of the global variable entry.
349 /// Mark the entry as a 'declare target to'.
351 /// Mark the entry as a 'declare target link'.
353 /// Mark the entry as a declare target enter.
355 /// Mark the entry as having no declare target entry kind.
357 /// Mark the entry as a declare target indirect global.
359 /// Mark the entry as a register requires global.
361 };
362
363 /// Kind of device clause for declare target variables
364 /// and functions.
365 /// NOTE: Currently not used as part of a variable entry;
366 /// used by Flang and Clang to interface with the
367 /// variable-related registration functions.
369 /// The target is marked for all devices
371 /// The target is marked for non-host devices
373 /// The target is marked for host devices
375 /// The target is marked as having no clause
377 };
378
379 /// Device global variable entries info.
381 /// Size of the global variable.
382 int64_t VarSize;
383 GlobalValue::LinkageTypes Linkage;
384 const std::string VarName;
385
386 public:
389 explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
392 explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, Constant *Addr,
393 int64_t VarSize,
396 const std::string &VarName)
398 VarSize(VarSize), Linkage(Linkage), VarName(VarName) {
400 }
401
402 int64_t getVarSize() const { return VarSize; }
403 StringRef getVarName() const { return VarName; }
404 void setVarSize(int64_t Size) { VarSize = Size; }
405 GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
406 void setLinkage(GlobalValue::LinkageTypes LT) { Linkage = LT; }
407 static bool classof(const OffloadEntryInfo *Info) {
408 return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
409 }
410 };
411
412 /// Initialize device global variable entry.
413 /// This is ONLY used for DEVICE compilation.
416 unsigned Order);
417
418 /// Register device global variable entry.
420 int64_t VarSize,
423 /// Checks if the variable with the given name has been registered already.
424 bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
425 return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
426 }
427 /// Applies action \a Action on all registered entries.
428 typedef function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)>
432
433private:
434 /// Return the count of entries at a particular source location.
435 unsigned
436 getTargetRegionEntryInfoCount(const TargetRegionEntryInfo &EntryInfo) const;
437
438 /// Update the count of entries at a particular source location.
439 void
440 incrementTargetRegionEntryInfoCount(const TargetRegionEntryInfo &EntryInfo);
441
442 static TargetRegionEntryInfo
443 getTargetRegionEntryCountKey(const TargetRegionEntryInfo &EntryInfo) {
444 return TargetRegionEntryInfo(EntryInfo.ParentName, EntryInfo.DeviceID,
445 EntryInfo.FileID, EntryInfo.Line, 0);
446 }
447
448 // Count of entries at a location.
449 std::map<TargetRegionEntryInfo, unsigned> OffloadEntriesTargetRegionCount;
450
451 // Storage for target region entries kind.
452 typedef std::map<TargetRegionEntryInfo, OffloadEntryInfoTargetRegion>
453 OffloadEntriesTargetRegionTy;
454 OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
455 /// Storage for device global variable entries kind. The storage is to be
456 /// indexed by mangled name.
457 typedef StringMap<OffloadEntryInfoDeviceGlobalVar>
458 OffloadEntriesDeviceGlobalVarTy;
459 OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
460};
461
462/// An interface to create LLVM-IR for OpenMP directives.
463///
464/// Each OpenMP directive has a corresponding public generator method.
465class OpenMPIRBuilder {
466public:
467 /// Create a new OpenMPIRBuilder operating on the given module \p M. This will
468 /// not have an effect on \p M (see initialize).
469 OpenMPIRBuilder(Module &M)
470 : M(M), Builder(M.getContext()), OffloadInfoManager(this),
471 T(Triple(M.getTargetTriple())) {}
473
474 /// Initialize the internal state, this will put structures types and
475 /// potentially other helpers into the underlying module. Must be called
476 /// before any other method and only once! This internal state includes types
477 /// used in the OpenMPIRBuilder generated from OMPKinds.def.
478 void initialize();
479
481
482 /// Finalize the underlying module, e.g., by outlining regions.
483 /// \param Fn The function to be finalized. If not used,
484 /// all functions are finalized.
485 void finalize(Function *Fn = nullptr);
486
487 /// Add attributes known for \p FnID to \p Fn.
489
490 /// Type used throughout for insertion points.
491 using InsertPointTy = IRBuilder<>::InsertPoint;
492
493 /// Create a name using the platform specific separators.
494 /// \param Parts parts of the final name that needs separation
495 /// The created name has a first separator between the first and second part
496 /// and a second separator between all other parts.
497 /// E.g. with FirstSeparator "$" and Separator "." and
498 /// parts: "p1", "p2", "p3", "p4"
499 /// The resulting name is "p1$p2.p3.p4"
500 /// The separators are retrieved from the OpenMPIRBuilderConfig.
501 std::string createPlatformSpecificName(ArrayRef<StringRef> Parts) const;
502
503 /// Callback type for variable finalization (think destructors).
504 ///
505 /// \param CodeGenIP is the insertion point at which the finalization code
506 /// should be placed.
507 ///
508 /// A finalize callback knows about all objects that need finalization, e.g.
509 /// destruction, when the scope of the currently generated construct is left
510 /// at the time, and location, the callback is invoked.
511 using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
512
513 struct FinalizationInfo {
514 /// The finalization callback provided by the last in-flight invocation of
515 /// createXXXX for the directive of kind DK.
516 FinalizeCallbackTy FiniCB;
517
518 /// The directive kind of the innermost directive that has an associated
519 /// region which might require finalization when it is left.
520 omp::Directive DK;
521
522 /// Flag to indicate if the directive is cancellable.
523 bool IsCancellable;
524 };
525
526 /// Push a finalization callback on the finalization stack.
527 ///
528 /// NOTE: Temporary solution until Clang CG is gone.
529 void pushFinalizationCB(const FinalizationInfo &FI) {
530 FinalizationStack.push_back(FI);
531 }
532
533 /// Pop the last finalization callback from the finalization stack.
534 ///
535 /// NOTE: Temporary solution until Clang CG is gone.
536 void popFinalizationCB() { FinalizationStack.pop_back(); }
537
538 /// Callback type for body (=inner region) code generation
539 ///
540 /// The callback takes code locations as arguments, each describing a
541 /// location where additional instructions can be inserted.
542 ///
543 /// The CodeGenIP may be in the middle of a basic block or point to the end of
544 /// it. The basic block may have a terminator or be degenerate. The callback
545 /// function may just insert instructions at that position, but also split the
546 /// block (without the Before argument of BasicBlock::splitBasicBlock such
547 /// that the identity of the split predecessor block is preserved) and insert
548 /// additional control flow, including branches that do not lead back to what
549 /// follows the CodeGenIP. Note that since the callback is allowed to split
550 /// the block, callers must assume that InsertPoints to positions in the
551 /// BasicBlock after CodeGenIP including CodeGenIP itself are invalidated. If
552 /// such InsertPoints need to be preserved, it can split the block itself
553 /// before calling the callback.
554 ///
555 /// AllocaIP and CodeGenIP must not point to the same position.
556 ///
557 /// \param AllocaIP is the insertion point at which new alloca instructions
558 /// should be placed. The BasicBlock it is pointing to must
559 /// not be split.
560 /// \param CodeGenIP is the insertion point at which the body code should be
561 /// placed.
562 using BodyGenCallbackTy =
563 function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;
564
565 // This is created primarily for sections construct as llvm::function_ref
566 // (BodyGenCallbackTy) is not storable (as described in the comments of
567 // function_ref class - function_ref contains a non-owning reference
568 // to the callable.
569 using StorableBodyGenCallbackTy =
570 std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;
571
572 /// Callback type for loop body code generation.
573 ///
574 /// \param CodeGenIP is the insertion point where the loop's body code must be
575 /// placed. This will be a dedicated BasicBlock with a
576 /// conditional branch from the loop condition check and
577 /// terminated with an unconditional branch to the loop
578 /// latch.
579 /// \param IndVar is the induction variable usable at the insertion point.
580 using LoopBodyGenCallbackTy =
581 function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
582
583 /// Callback type for variable privatization (think copy & default
584 /// constructor).
585 ///
586 /// \param AllocaIP is the insertion point at which new alloca instructions
587 /// should be placed.
588 /// \param CodeGenIP is the insertion point at which the privatization code
589 /// should be placed.
590 /// \param Original The value being copied/created, should not be used in the
591 /// generated IR.
592 /// \param Inner The equivalent of \p Original that should be used in the
593 /// generated IR; this is equal to \p Original if the value is
594 /// a pointer and can thus be passed directly, otherwise it is
595 /// an equivalent but different value.
596 /// \param ReplVal The replacement value, thus a copy or new created version
597 /// of \p Inner.
598 ///
599 /// \returns The new insertion point where code generation continues and
600 /// \p ReplVal the replacement value.
601 using PrivatizeCallbackTy = function_ref<InsertPointTy(
602 InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
603 Value &Inner, Value *&ReplVal)>;
604
605 /// Description of a LLVM-IR insertion point (IP) and a debug/source location
606 /// (filename, line, column, ...).
607 struct LocationDescription {
608 template <typename T, typename U> LocationDescription(const IRBuilder<T, U> &IRB)
609 : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
610 LocationDescription(const InsertPointTy &IP) : IP(IP) {}
611 LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
612 : IP(IP), DL(DL) {}
613 InsertPointTy IP;
614 DebugLoc DL;
615 };
616
617 /// Emitter methods for OpenMP directives.
618 ///
619 ///{
620
621 /// Generator for '#omp barrier'
622 ///
623 /// \param Loc The location where the barrier directive was encountered.
624 /// \param Kind The kind of directive that caused the barrier.
625 /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
626 /// \param CheckCancelFlag Flag to indicate a cancel barrier return value
627 /// should be checked and acted upon.
628 /// \param ThreadID Optional parameter to pass in any existing ThreadID value.
629 ///
630 /// \returns The insertion point after the barrier.
631 InsertPointTy createBarrier(const LocationDescription &Loc,
632 omp::Directive Kind, bool ForceSimpleCall = false,
633 bool CheckCancelFlag = true);
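// A minimal usage sketch (illustrative only; M is the Module being built and
// InsertBB an existing, assumed basic block):
//
//   OpenMPIRBuilder OMPBuilder(M);
//   OMPBuilder.initialize();
//   IRBuilder<> Builder(InsertBB);
//   OpenMPIRBuilder::LocationDescription Loc(Builder);
//   // Emit an explicit '#pragma omp barrier' at the current position.
//   OMPBuilder.createBarrier(Loc, omp::OMPD_barrier);
//   OMPBuilder.finalize();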
634
635 /// Generator for '#omp cancel'
636 ///
637 /// \param Loc The location where the directive was encountered.
638 /// \param IfCondition The evaluated 'if' clause expression, if any.
639 /// \param CanceledDirective The kind of directive that is cancelled.
640 ///
641 /// \returns The insertion point after the cancel.
642 InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
643 omp::Directive CanceledDirective);
644
645 /// Generator for '#omp parallel'
646 ///
647 /// \param Loc The insert and source location description.
648 /// \param AllocaIP The insertion points to be used for alloca instructions.
649 /// \param BodyGenCB Callback that will generate the region code.
650 /// \param PrivCB Callback to copy a given variable (think copy constructor).
651 /// \param FiniCB Callback to finalize variable copies.
652 /// \param IfCondition The evaluated 'if' clause expression, if any.
653 /// \param NumThreads The evaluated 'num_threads' clause expression, if any.
654 /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
655 /// \param IsCancellable Flag to indicate a cancellable parallel region.
656 ///
657 /// \returns The insertion position *after* the parallel.
658 IRBuilder<>::InsertPoint
659 createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
660 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
661 FinalizeCallbackTy FiniCB, Value *IfCondition,
662 Value *NumThreads, omp::ProcBindKind ProcBind,
663 bool IsCancellable);
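// A minimal usage sketch (illustrative only; Builder, Loc and AllocaIP are
// assumed to be set up as in the createBarrier sketch above):
//
//   auto BodyGenCB = [&](OpenMPIRBuilder::InsertPointTy AllocaIP,
//                        OpenMPIRBuilder::InsertPointTy CodeGenIP) {
//     Builder.restoreIP(CodeGenIP);
//     // ... emit the parallel region body here ...
//   };
//   auto PrivCB = [&](OpenMPIRBuilder::InsertPointTy AllocaIP,
//                     OpenMPIRBuilder::InsertPointTy CodeGenIP, Value &Orig,
//                     Value &Inner, Value *&ReplVal) {
//     ReplVal = &Inner; // no privatization in this sketch
//     return CodeGenIP;
//   };
//   auto FiniCB = [&](OpenMPIRBuilder::InsertPointTy IP) {};
//   Builder.restoreIP(OMPBuilder.createParallel(
//       Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, /*IfCondition=*/nullptr,
//       /*NumThreads=*/nullptr, omp::ProcBindKind::OMP_PROC_BIND_default,
//       /*IsCancellable=*/false));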
664
665 /// Generator for the control flow structure of an OpenMP canonical loop.
666 ///
667 /// This generator operates on the logical iteration space of the loop, i.e.
668 /// the caller only has to provide a loop trip count of the loop as defined by
669 /// base language semantics. The trip count is interpreted as an unsigned
670 /// integer. The induction variable passed to \p BodyGenCB will be of the same
671 /// type and run from 0 to \p TripCount - 1. It is up to the callback to
672 /// convert the logical iteration variable to the loop counter variable in the
673 /// loop body.
674 ///
675 /// \param Loc The insert and source location description. The insert
676 /// location can be between two instructions or the end of a
677 /// degenerate block (e.g. a BB under construction).
678 /// \param BodyGenCB Callback that will generate the loop body code.
679 /// \param TripCount Number of iterations the loop body is executed.
680 /// \param Name Base name used to derive BB and instruction names.
681 ///
682 /// \returns An object representing the created control flow structure which
683 /// can be used for loop-associated directives.
684 CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
685 LoopBodyGenCallbackTy BodyGenCB,
686 Value *TripCount,
687 const Twine &Name = "loop");
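// A minimal usage sketch (illustrative only; Builder and Loc as in the
// sketches above):
//
//   Value *TripCount = Builder.getInt32(128);
//   auto LoopBodyGenCB = [&](OpenMPIRBuilder::InsertPointTy CodeGenIP,
//                            Value *IV) {
//     Builder.restoreIP(CodeGenIP);
//     // ... emit the loop body; IV runs from 0 to TripCount - 1 ...
//   };
//   CanonicalLoopInfo *CLI =
//       OMPBuilder.createCanonicalLoop(Loc, LoopBodyGenCB, TripCount);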
688
689 /// Generator for the control flow structure of an OpenMP canonical loop.
690 ///
691 /// Instead of a logical iteration space, this allows specifying user-defined
692 /// loop counter values using increment, upper- and lower bounds. To
693 /// disambiguate the terminology when counting downwards, instead of lower
694 /// bounds we use \p Start for the loop counter value in the first body
695 /// iteration.
696 ///
697 /// Consider the following limitations:
698 ///
699 /// * A loop counter space over all integer values of its bit-width cannot be
700 /// represented. E.g., using uint8_t, a loop trip count of 256 cannot be
701 /// stored into an 8-bit integer:
702 ///
703 /// DO I = 0, 255, 1
704 ///
705 /// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
706 /// effectively counting downwards:
707 ///
708 /// for (uint8_t i = 100u; i > 0; i += 127u)
709 ///
710 ///
711 /// TODO: May need to add additional parameters to represent:
712 ///
713 /// * Allow representing downcounting with unsigned integers.
714 ///
715 /// * Sign of the step and the comparison operator might disagree:
716 ///
717 /// for (int i = 0; i < 42; i -= 1u)
718 ///
719 //
720 /// \param Loc The insert and source location description.
721 /// \param BodyGenCB Callback that will generate the loop body code.
722 /// \param Start Value of the loop counter for the first iterations.
723 /// \param Stop Loop counter values past this will stop the loop.
724 /// \param Step Loop counter increment after each iteration; negative
725 /// means counting down.
726 /// \param IsSigned Whether Start, Stop and Step are signed integers.
727 /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
728 /// counter.
729 /// \param ComputeIP Insertion point for instructions computing the trip
730 /// count. Can be used to ensure the trip count is available
731 /// at the outermost loop of a loop nest. If not set,
732 /// defaults to the preheader of the generated loop.
733 /// \param Name Base name used to derive BB and instruction names.
734 ///
735 /// \returns An object representing the created control flow structure which
736 /// can be used for loop-associated directives.
737 CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
738 LoopBodyGenCallbackTy BodyGenCB,
739 Value *Start, Value *Stop, Value *Step,
740 bool IsSigned, bool InclusiveStop,
741 InsertPointTy ComputeIP = {},
742 const Twine &Name = "loop");
743
744 /// Collapse a loop nest into a single loop.
745 ///
746 /// Merges loops of a loop nest into a single CanonicalLoopNest representation
747 /// that has the same number of innermost loop iterations as the origin loop
748 /// nest. The induction variables of the input loops are derived from the
749 /// collapsed loop's induction variable. This is intended to be used to
750 /// implement OpenMP's collapse clause. Before applying a directive,
751 /// collapseLoops normalizes a loop nest to contain only a single loop and the
752 /// directive's implementation does not need to handle multiple loops itself.
753 /// This does not remove the need to handle all loop nest handling by
754 /// directives, such as the ordered(<n>) clause or the simd schedule-clause
755 /// modifier of the worksharing-loop directive.
756 ///
757 /// Example:
758 /// \code
759 /// for (int i = 0; i < 7; ++i) // Canonical loop "i"
760 /// for (int j = 0; j < 9; ++j) // Canonical loop "j"
761 /// body(i, j);
762 /// \endcode
763 ///
764 /// After collapsing with Loops={i,j}, the loop is changed to
765 /// \code
766 /// for (int ij = 0; ij < 63; ++ij) {
767 /// int i = ij / 9;
768 /// int j = ij % 9;
769 /// body(i, j);
770 /// }
771 /// \endcode
772 ///
773 /// In the current implementation, the following limitations apply:
774 ///
775 /// * All input loops have an induction variable of the same type.
776 ///
777 /// * The collapsed loop will have the same trip count integer type as the
778 /// input loops. Therefore it is possible that the collapsed loop cannot
779 /// represent all iterations of the input loops. For instance, assuming a
780 /// 32 bit integer type, and two input loops both iterating 2^16 times, the
781 /// theoretical trip count of the collapsed loop would be 2^32 iterations,
782 /// which cannot be represented in a 32-bit integer. Behavior is undefined
783 /// in this case.
784 ///
785 /// * The trip counts of every input loop must be available at \p ComputeIP.
786 /// Non-rectangular loops are not yet supported.
787 ///
788 /// * At each nest level, code between a surrounding loop and its nested loop
789 /// is hoisted into the loop body, and such code will be executed more
790 /// often than before collapsing (or not at all if any inner loop iteration
791 /// has a trip count of 0). This is permitted by the OpenMP specification.
792 ///
793 /// \param DL Debug location for instructions added for collapsing,
794 /// such as instructions to compute/derive the input loop's
795 /// induction variables.
796 /// \param Loops Loops in the loop nest to collapse. Loops are specified
797 /// from outermost-to-innermost and every control flow of a
798 /// loop's body must pass through its directly nested loop.
799 /// \param ComputeIP Where additional instructions that compute the collapsed
800 /// trip count are inserted. If not set, defaults to before
801 /// the generated loop.
802 ///
803 /// \returns The CanonicalLoopInfo object representing the collapsed loop.
804 CanonicalLoopInfo *collapseLoops(DebugLoc DL,
805 ArrayRef<CanonicalLoopInfo *> Loops,
806 InsertPointTy ComputeIP);
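// A minimal usage sketch (illustrative only; LoopI and LoopJ are the assumed
// CanonicalLoopInfo objects of the "i" and "j" loops from the example above):
//
//   CanonicalLoopInfo *Collapsed =
//       OMPBuilder.collapseLoops(DL, {LoopI, LoopJ}, /*ComputeIP=*/{});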
807
808 /// Get the default alignment value for given target
809 ///
810 /// \param TargetTriple Target triple
811 /// \param Features StringMap which describes extra CPU features
812 static unsigned getOpenMPDefaultSimdAlign(const Triple &TargetTriple,
813 const StringMap<bool> &Features);
814
815 /// Retrieve (or create if non-existent) the address of a declare
816 /// target variable, used in conjunction with registerTargetGlobalVariable
817 /// to create declare target global variables.
818 ///
819 /// \param CaptureClause - enumerator corresponding to the OpenMP capture
820 /// clause used in conjunction with the variable being registered (link,
821 /// to, enter).
822 /// \param DeviceClause - enumerator corresponding to the OpenMP capture
823 /// clause used in conjunction with the variable being registered (nohost,
824 /// host, any)
825 /// \param IsDeclaration - boolean stating if the variable being registered
826 /// is a declaration-only and not a definition
827 /// \param IsExternallyVisible - boolean stating if the variable is externally
828 /// visible
829 /// \param EntryInfo - Unique entry information for the value generated
830 /// using getTargetEntryUniqueInfo, used to name generated pointer references
831 /// to the declare target variable
832 /// \param MangledName - the mangled name of the variable being registered
833 /// \param GeneratedRefs - references generated by invocations of
834 /// registerTargetGlobalVariable invoked from getAddrOfDeclareTargetVar,
835 /// these are required by Clang for book keeping.
836 /// \param OpenMPSIMD - if OpenMP SIMD mode is currently enabled
837 /// \param TargetTriple - The OpenMP device target triple we are compiling
838 /// for
839 /// \param LlvmPtrTy - The type of the variable we are generating or
840 /// retrieving an address for
841 /// \param GlobalInitializer - a lambda function which creates a constant
842 /// used for initializing a pointer reference to the variable in certain
843 /// cases. If a nullptr is passed, it will default to utilising the original
844 /// variable to initialize the pointer reference.
845 /// \param VariableLinkage - a lambda function which returns the variables
846 /// linkage type, if unspecified and a nullptr is given, it will instead
847 /// utilise the linkage stored on the existing global variable in the
848 /// LLVMModule.
852 bool IsDeclaration, bool IsExternallyVisible,
853 TargetRegionEntryInfo EntryInfo, StringRef MangledName,
854 std::vector<GlobalVariable *> &GeneratedRefs, bool OpenMPSIMD,
855 std::vector<Triple> TargetTriple, Type *LlvmPtrTy,
856 std::function<Constant *()> GlobalInitializer,
857 std::function<GlobalValue::LinkageTypes()> VariableLinkage);
858
859 /// Registers a target variable for device or host.
860 ///
861 /// \param CaptureClause - enumerator corresponding to the OpenMP capture
862 /// clause used in conjunction with the variable being registered (link,
863 /// to, enter).
864 /// \param DeviceClause - enumerator corresponding to the OpenMP capture
865 /// clause used in conjunction with the variable being registered (nohost,
866 /// host, any)
867 /// \param IsDeclaration - boolean stating if the variable being registered
868 /// is a declaration-only and not a definition
869 /// \param IsExternallyVisible - boolean stating if the variable is externally
870 /// visible
871 /// \param EntryInfo - Unique entry information for the value generated
872 /// using getTargetEntryUniqueInfo, used to name generated pointer references
873 /// to the declare target variable
874 /// \param MangledName - the mangled name of the variable being registered
875 /// \param GeneratedRefs - references generated by invocations of
876 /// registerTargetGlobalVariable these are required by Clang for book
877 /// keeping.
878 /// \param OpenMPSIMD - if OpenMP SIMD mode is currently enabled
879 /// \param TargetTriple - The OpenMP device target triple we are compiling
880 /// for
881 /// \param GlobalInitializer - a lambda function which creates a constant
882 /// used for initializing a pointer reference to the variable in certain
883 /// cases. If a nullptr is passed, it will default to utilising the original
884 /// variable to initialize the pointer reference.
885 /// \param VariableLinkage - a lambda function which returns the variables
886 /// linkage type, if unspecified and a nullptr is given, it will instead
887 /// utilise the linkage stored on the existing global variable in the
888 /// LLVMModule.
889 /// \param LlvmPtrTy - The type of the variable we are generating or
890 /// retrieving an address for
891 /// \param Addr - the original llvm value (addr) of the variable to be
892 /// registered
896 bool IsDeclaration, bool IsExternallyVisible,
897 TargetRegionEntryInfo EntryInfo, StringRef MangledName,
898 std::vector<GlobalVariable *> &GeneratedRefs, bool OpenMPSIMD,
899 std::vector<Triple> TargetTriple,
900 std::function<Constant *()> GlobalInitializer,
901 std::function<GlobalValue::LinkageTypes()> VariableLinkage,
902 Type *LlvmPtrTy, Constant *Addr);
903
904 /// Get the offset of the OMP_MAP_MEMBER_OF field.
905 unsigned getFlagMemberOffset();
906
907 /// Get OMP_MAP_MEMBER_OF flag with extra bits reserved based on
908 /// the position given.
909 /// \param Position - A value indicating the position of the parent
910 /// of the member in the kernel argument structure, often retrieved
911 /// by the parents position in the combined information vectors used
912 /// to generate the structure itself. Multiple children (member's of)
913 /// with the same parent will use the same returned member flag.
914 omp::OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position);
915
916 /// Given an initial flag set, this function modifies it to contain
917 /// the passed in MemberOfFlag generated from the getMemberOfFlag
918 /// function. The results are dependent on the existing flag bits
919 /// set in the original flag set.
920 /// \param Flags - The original set of flags to be modified with the
921 /// passed in MemberOfFlag.
922 /// \param MemberOfFlag - A modified OMP_MAP_MEMBER_OF flag, adjusted
923 /// slightly based on the getMemberOfFlag which adjusts the flag bits
924 /// based on the members position in its parent.
925 void setCorrectMemberOfFlag(omp::OpenMPOffloadMappingFlags &Flags,
926 omp::OpenMPOffloadMappingFlags MemberOfFlag);
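// A minimal usage sketch (illustrative only):
//
//   // Member whose parent sits at position 0 in the combined info vectors.
//   omp::OpenMPOffloadMappingFlags MemberOf = OMPBuilder.getMemberOfFlag(0);
//   omp::OpenMPOffloadMappingFlags Flags =
//       omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
//   OMPBuilder.setCorrectMemberOfFlag(Flags, MemberOf);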
927
928private:
929 /// Modifies the canonical loop to be a statically-scheduled workshare loop
930 /// which is executed on the device
931 ///
932 /// This takes a \p CLI representing a canonical loop, such as the one
933 /// created by \see createCanonicalLoop and emits additional instructions to
934 /// turn it into a workshare loop. In particular, it calls to an OpenMP
935 /// runtime function in the preheader to call OpenMP device rtl function
936 /// which handles worksharing of loop body iterations.
937 ///
938 /// \param DL Debug location for instructions added for the
939 /// workshare-loop construct itself.
940 /// \param CLI A descriptor of the canonical loop to workshare.
941 /// \param AllocaIP An insertion point for Alloca instructions usable in the
942 /// preheader of the loop.
943 /// \param LoopType Information about type of loop worksharing.
944 /// It corresponds to type of loop workshare OpenMP pragma.
945 ///
946 /// \returns Point where to insert code after the workshare construct.
947 InsertPointTy applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI,
948 InsertPointTy AllocaIP,
949 omp::WorksharingLoopType LoopType);
950
951 /// Modifies the canonical loop to be a statically-scheduled workshare loop.
952 ///
953 /// This takes a \p LoopInfo representing a canonical loop, such as the one
954 /// created by \p createCanonicalLoop and emits additional instructions to
955 /// turn it into a workshare loop. In particular, it calls to an OpenMP
956 /// runtime function in the preheader to obtain the loop bounds to be used in
957 /// the current thread, updates the relevant instructions in the canonical
958 /// loop and calls to an OpenMP runtime finalization function after the loop.
959 ///
960 /// \param DL Debug location for instructions added for the
961 /// workshare-loop construct itself.
962 /// \param CLI A descriptor of the canonical loop to workshare.
963 /// \param AllocaIP An insertion point for Alloca instructions usable in the
964 /// preheader of the loop.
965 /// \param NeedsBarrier Indicates whether a barrier must be inserted after
966 /// the loop.
967 ///
968 /// \returns Point where to insert code after the workshare construct.
969 InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
970 InsertPointTy AllocaIP,
971 bool NeedsBarrier);
972
973 /// Modifies the canonical loop to be a statically-scheduled workshare loop with a
974 /// user-specified chunk size.
975 ///
976 /// \param DL Debug location for instructions added for the
977 /// workshare-loop construct itself.
978 /// \param CLI A descriptor of the canonical loop to workshare.
979 /// \param AllocaIP An insertion point for Alloca instructions usable in
980 /// the preheader of the loop.
981 /// \param NeedsBarrier Indicates whether a barrier must be inserted after the
982 /// loop.
983 /// \param ChunkSize The user-specified chunk size.
984 ///
985 /// \returns Point where to insert code after the workshare construct.
986 InsertPointTy applyStaticChunkedWorkshareLoop(DebugLoc DL,
987 CanonicalLoopInfo *CLI,
988 InsertPointTy AllocaIP,
989 bool NeedsBarrier,
990 Value *ChunkSize);
991
992 /// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
993 ///
994 /// This takes a \p LoopInfo representing a canonical loop, such as the one
995 /// created by \p createCanonicalLoop and emits additional instructions to
996 /// turn it into a workshare loop. In particular, it calls to an OpenMP
997 /// runtime function in the preheader to obtain, and then in each iteration
998 /// to update the loop counter.
999 ///
1000 /// \param DL Debug location for instructions added for the
1001 /// workshare-loop construct itself.
1002 /// \param CLI A descriptor of the canonical loop to workshare.
1003 /// \param AllocaIP An insertion point for Alloca instructions usable in the
1004 /// preheader of the loop.
1005 /// \param SchedType Type of scheduling to be passed to the init function.
1006 /// \param NeedsBarrier Indicates whether a barrier must be inserted after
1007 /// the loop.
1008 /// \param Chunk The size of loop chunk considered as a unit when
1009 /// scheduling. If \p nullptr, defaults to 1.
1010 ///
1011 /// \returns Point where to insert code after the workshare construct.
1012 InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
1013 InsertPointTy AllocaIP,
1014 omp::OMPScheduleType SchedType,
1015 bool NeedsBarrier,
1016 Value *Chunk = nullptr);
1017
1018 /// Create alternative version of the loop to support if clause
1019 ///
1020 /// OpenMP if clause can require to generate second loop. This loop
1021 /// will be executed when if clause condition is not met. createIfVersion
1022 /// adds branch instruction to the copied loop if \p ifCond is not met.
1023 ///
1024 /// \param Loop Original loop which should be versioned.
1025 /// \param IfCond Value which corresponds to if clause condition
1026 /// \param VMap Value to value map to define relation between
1027 /// original and copied loop values and loop blocks.
1028 /// \param NamePrefix Optional name prefix for if.then if.else blocks.
1029 void createIfVersion(CanonicalLoopInfo *Loop, Value *IfCond,
1030 ValueToValueMapTy &VMap, const Twine &NamePrefix = "");
1031
1032public:
1033 /// Modifies the canonical loop to be a workshare loop.
1034 ///
1035 /// This takes a \p LoopInfo representing a canonical loop, such as the one
1036 /// created by \p createCanonicalLoop and emits additional instructions to
1037 /// turn it into a workshare loop. In particular, it calls to an OpenMP
1038 /// runtime function in the preheader to obtain the loop bounds to be used in
1039 /// the current thread, updates the relevant instructions in the canonical
1040 /// loop and calls to an OpenMP runtime finalization function after the loop.
1041 ///
1042 /// The concrete transformation is done by applyStaticWorkshareLoop,
1043 /// applyStaticChunkedWorkshareLoop, or applyDynamicWorkshareLoop, depending
1044 /// on the value of \p SchedKind and \p ChunkSize.
1045 ///
1046 /// \param DL Debug location for instructions added for the
1047 /// workshare-loop construct itself.
1048 /// \param CLI A descriptor of the canonical loop to workshare.
1049 /// \param AllocaIP An insertion point for Alloca instructions usable in the
1050 /// preheader of the loop.
1051 /// \param NeedsBarrier Indicates whether a barrier must be inserted after
1052 /// the loop.
1053 /// \param SchedKind Scheduling algorithm to use.
1054 /// \param ChunkSize The chunk size for the inner loop.
1055 /// \param HasSimdModifier Whether the simd modifier is present in the
1056 /// schedule clause.
1057 /// \param HasMonotonicModifier Whether the monotonic modifier is present in
1058 /// the schedule clause.
1059 /// \param HasNonmonotonicModifier Whether the nonmonotonic modifier is
1060 /// present in the schedule clause.
1061 /// \param HasOrderedClause Whether the (parameterless) ordered clause is
1062 /// present.
1063 /// \param LoopType Information about type of loop worksharing.
1064 /// It corresponds to type of loop workshare OpenMP pragma.
1065 ///
1066 /// \returns Point where to insert code after the workshare construct.
1067 InsertPointTy applyWorkshareLoop(
1068 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
1069 bool NeedsBarrier,
1070 llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default,
1071 Value *ChunkSize = nullptr, bool HasSimdModifier = false,
1072 bool HasMonotonicModifier = false, bool HasNonmonotonicModifier = false,
1073 bool HasOrderedClause = false,
1074 omp::WorksharingLoopType LoopType =
1075 omp::WorksharingLoopType::ForStaticLoop);
1076
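// A minimal usage sketch (illustrative only; CLI comes from
// createCanonicalLoop and AllocaIP points into the function entry block):
//
//   OMPBuilder.applyWorkshareLoop(
//       DL, CLI, AllocaIP, /*NeedsBarrier=*/true,
//       llvm::omp::OMP_SCHEDULE_Static, /*ChunkSize=*/nullptr);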
1077 /// Tile a loop nest.
1078 ///
1079 /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
1080 /// \p Loops must be perfectly nested, from outermost to innermost loop
1081 /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
1082 /// of every loop and every tile sizes must be usable in the outermost
1083 /// loop's preheader. This implies that the loop nest is rectangular.
1084 ///
1085 /// Example:
1086 /// \code
1087 /// for (int i = 0; i < 15; ++i) // Canonical loop "i"
1088 /// for (int j = 0; j < 14; ++j) // Canonical loop "j"
1089 /// body(i, j);
1090 /// \endcode
1091 ///
1092 /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
1093 /// \code
1094 /// for (int i1 = 0; i1 < 3; ++i1)
1095 /// for (int j1 = 0; j1 < 2; ++j1)
1096 /// for (int i2 = 0; i2 < 5; ++i2)
1097 /// for (int j2 = 0; j2 < 7; ++j2)
1098 /// body(i1*5+i2, j1*7+j2);
1099 /// \endcode
1100 ///
1101 /// The returned vector contains the loops {i1,j1,i2,j2}. The loops i1 and j1
1102 /// are referred to as the floor loops, and i2 and j2 as the tiles. Tiling also
1103 /// handles non-constant trip counts, non-constant tile sizes and trip counts
1104 /// that are not multiples of the tile size. In the latter case the tile loop
1105 /// of the last floor-loop iteration will have fewer iterations than specified
1106 /// as its tile size.
1107 ///
1108 ///
1109 /// @param DL Debug location for instructions added by tiling, for
1110 /// instance the floor- and tile trip count computation.
1111 /// @param Loops Loops to tile. The CanonicalLoopInfo objects are
1112 /// invalidated by this method, i.e. should not be used after
1113 /// tiling.
1114 /// @param TileSizes For each loop in \p Loops, the tile size for that
1115 /// dimension.
1116 ///
1117 /// \returns A list of generated loops. Contains twice as many loops as the
1118 /// input loop nest; the first half are the floor loops and the
1119 /// second half are the tile loops.
1120 std::vector<CanonicalLoopInfo *>
1121 tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
1122 ArrayRef<Value *> TileSizes);
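// A minimal usage sketch (illustrative only; mirrors the i/j example above
// with LoopI and LoopJ as the assumed CanonicalLoopInfo objects):
//
//   std::vector<CanonicalLoopInfo *> Tiled =
//       OMPBuilder.tileLoops(DL, {LoopI, LoopJ},
//                            {Builder.getInt32(5), Builder.getInt32(7)});
//   // Tiled = {i1, j1, i2, j2}: floor loops first, then tile loops.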
1123
1124 /// Fully unroll a loop.
1125 ///
1126 /// Instead of unrolling the loop immediately (and duplicating its body
1127 /// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
1128 /// metadata.
1129 ///
1130 /// \param DL Debug location for instructions added by unrolling.
1131 /// \param Loop The loop to unroll. The loop will be invalidated.
1132 void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);
1133
1134 /// Fully or partially unroll a loop. How the loop is unrolled is determined
1135 /// using LLVM's LoopUnrollPass.
1136 ///
1137 /// \param DL Debug location for instructions added by unrolling.
1138 /// \param Loop The loop to unroll. The loop will be invalidated.
1139 void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);
1140
1141 /// Partially unroll a loop.
1142 ///
1143 /// The CanonicalLoopInfo of the unrolled loop for use with chained
1144 /// loop-associated directive can be requested using \p UnrolledCLI. Not
1145 /// needing the CanonicalLoopInfo allows more efficient code generation by
1146 /// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
1147 /// A loop-associated directive applied to the unrolled loop needs to know the
1148 /// new trip count which means that if using a heuristically determined unroll
1149 /// factor (\p Factor == 0), that factor must be computed immediately. We are
1150 /// using the same logic as the LoopUnrollPass to derive the unroll factor,
1151 /// but which assumes that some canonicalization has taken place (e.g.
1152 /// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
1153 /// better when the unrolled loop's CanonicalLoopInfo is not needed.
1154 ///
1155 /// \param DL Debug location for instructions added by unrolling.
1156 /// \param Loop The loop to unroll. The loop will be invalidated.
1157 /// \param Factor The factor to unroll the loop by. A factor of 0
1158 /// indicates that a heuristic should be used to determine
1159 /// the unroll-factor.
1160 /// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
1161 /// partially unrolled loop. Otherwise, uses loop metadata
1162 /// to defer unrolling to the LoopUnrollPass.
1163 void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
1164 CanonicalLoopInfo **UnrolledCLI);
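// A minimal usage sketch showing the two alternatives (illustrative only; the
// loop is invalidated by whichever call is used):
//
//   // Let the LoopUnrollPass pick the factor and do the unrolling later.
//   OMPBuilder.unrollLoopPartial(DL, CLI, /*Factor=*/0, /*UnrolledCLI=*/nullptr);
//
//   // Or: unroll by 4 and keep a CanonicalLoopInfo for a chained directive.
//   CanonicalLoopInfo *Unrolled = nullptr;
//   OMPBuilder.unrollLoopPartial(DL, CLI, /*Factor=*/4, &Unrolled);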
1165
1166 /// Add metadata to simd-ize a loop. If IfCond is not nullptr, the loop
1167 /// is cloned. The metadata which prevents vectorization is added to
1168 /// the cloned loop. The cloned loop is executed when \p IfCond evaluates
1169 /// to false.
1170 ///
1171 /// \param Loop The loop to simd-ize.
1172 /// \param AlignedVars The map which contains pairs of the pointer
1173 /// and its corresponding alignment.
1174 /// \param IfCond The value which corresponds to the if clause
1175 /// condition.
1176 /// \param Order The enum to map order clause.
1177 /// \param Simdlen The Simdlen length to apply to the simd loop.
1178 /// \param Safelen The Safelen length to apply to the simd loop.
1179 void applySimd(CanonicalLoopInfo *Loop,
1180 MapVector<Value *, Value *> AlignedVars, Value *IfCond,
1181 omp::OrderKind Order, ConstantInt *Simdlen,
1182 ConstantInt *Safelen);
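// A minimal usage sketch (illustrative only; the OMP_ORDER_unknown enumerator
// name is an assumption about the omp::OrderKind enum):
//
//   OMPBuilder.applySimd(CLI, /*AlignedVars=*/{}, /*IfCond=*/nullptr,
//                        omp::OrderKind::OMP_ORDER_unknown,
//                        /*Simdlen=*/nullptr, /*Safelen=*/nullptr);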
1183
1184 /// Generator for '#omp flush'
1185 ///
1186 /// \param Loc The location where the flush directive was encountered
1187 void createFlush(const LocationDescription &Loc);
1188
1189 /// Generator for '#omp taskwait'
1190 ///
1191 /// \param Loc The location where the taskwait directive was encountered.
1192 void createTaskwait(const LocationDescription &Loc);
1193
1194 /// Generator for '#omp taskyield'
1195 ///
1196 /// \param Loc The location where the taskyield directive was encountered.
1197 void createTaskyield(const LocationDescription &Loc);
1198
1199 /// A struct to pack the relevant information for an OpenMP depend clause.
1200 struct DependData {
1204 explicit DependData() = default;
1206 Value *DepVal)
1208 };
1209
1210 /// Generator for `#omp task`
1211 ///
1212 /// \param Loc The location where the task construct was encountered.
1213 /// \param AllocaIP The insertion point to be used for alloca instructions.
1214 /// \param BodyGenCB Callback that will generate the region code.
1215 /// \param Tied True if the task is tied, false if the task is untied.
1216 /// \param Final i1 value which is `true` if the task is final, `false` if the
1217 /// task is not final.
1218 /// \param IfCondition i1 value. If it evaluates to `false`, an undeferred
1219 /// task is generated, and the encountering thread must
1220 /// suspend the current task region, for which execution
1221 /// cannot be resumed until execution of the structured
1222 /// block that is associated with the generated task is
1223 /// completed.
1224 InsertPointTy createTask(const LocationDescription &Loc,
1225 InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB,
1226 bool Tied = true, Value *Final = nullptr,
1227 Value *IfCondition = nullptr,
1228 SmallVector<DependData> Dependencies = {});
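// A minimal usage sketch (illustrative only; Builder, Loc and AllocaIP as in
// the sketches above):
//
//   auto TaskBodyCB = [&](OpenMPIRBuilder::InsertPointTy AllocaIP,
//                         OpenMPIRBuilder::InsertPointTy CodeGenIP) {
//     Builder.restoreIP(CodeGenIP);
//     // ... emit the task body ...
//   };
//   Builder.restoreIP(
//       OMPBuilder.createTask(Loc, AllocaIP, TaskBodyCB, /*Tied=*/true));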
1229
1230 /// Generator for the taskgroup construct
1231 ///
1232 /// \param Loc The location where the taskgroup construct was encountered.
1233 /// \param AllocaIP The insertion point to be used for alloca instructions.
1234 /// \param BodyGenCB Callback that will generate the region code.
1235 InsertPointTy createTaskgroup(const LocationDescription &Loc,
1236 InsertPointTy AllocaIP,
1237 BodyGenCallbackTy BodyGenCB);
1238
1239 using FileIdentifierInfoCallbackTy =
1240 std::function<std::tuple<std::string, uint64_t>()>;
1241
1242 /// Creates a unique info for a target entry when provided a filename and
1243 /// line number.
1244 ///
1245 /// \param CallBack A callback function which should return filename the entry
1246 /// resides in as well as the line number for the target entry
1247 /// \param ParentName The name of the parent the target entry resides in, if
1248 /// any.
1249 static TargetRegionEntryInfo
1250 getTargetEntryUniqueInfo(FileIdentifierInfoCallbackTy CallBack,
1251 StringRef ParentName = "");
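// A minimal usage sketch (illustrative only; the file name, line number and
// parent name are made-up values):
//
//   auto FileInfoCB = []() {
//     // Return the file name and line number of the target construct.
//     return std::make_tuple(std::string("example.c"), uint64_t(42));
//   };
//   TargetRegionEntryInfo EntryInfo =
//       OpenMPIRBuilder::getTargetEntryUniqueInfo(FileInfoCB, "foo");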
1252
1253 /// Enum class for the ReductionGen CallBack type to be used.
1255
1256 /// ReductionGen CallBack for Clang
1257 ///
1258 /// \param CodeGenIP InsertPoint for CodeGen.
1259 /// \param Index Index of the ReductionInfo to generate code for.
1260 /// \param LHSPtr Optionally used by Clang to return the LHSPtr it used for
1261 /// codegen, used for fixup later.
1262 /// \param RHSPtr Optionally used by Clang to
1263 /// return the RHSPtr it used for codegen, used for fixup later.
1264 /// \param CurFn Optionally used by Clang to pass in the Current Function as
1265 /// Clang context may be old.
1266 using ReductionGenClangCBTy =
1267 std::function<InsertPointTy(InsertPointTy CodeGenIP, unsigned Index,
1268 Value **LHS, Value **RHS, Function *CurFn)>;
1269
1270 /// ReductionGen CallBack for MLIR
1271 ///
1272 /// \param CodeGenIP InsertPoint for CodeGen.
1273 /// \param LHS Pass in the LHS Value to be used for CodeGen.
1274 /// \param RHS Pass in the RHS Value to be used for CodeGen.
1275 using ReductionGenCBTy = std::function<InsertPointTy(
1276 InsertPointTy CodeGenIP, Value *LHS, Value *RHS, Value *&Res)>;
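// A minimal sketch of a ReductionGenCBTy callback implementing an integer sum
// reduction (illustrative only):
//
//   OpenMPIRBuilder::ReductionGenCBTy SumGen =
//       [&](OpenMPIRBuilder::InsertPointTy CodeGenIP, Value *LHS, Value *RHS,
//           Value *&Res) {
//         Builder.restoreIP(CodeGenIP);
//         Res = Builder.CreateAdd(LHS, RHS, "red.add");
//         return Builder.saveIP();
//       };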
1277
1278 /// Functions used to generate atomic reductions. Such functions take two
1279 /// Values representing pointers to LHS and RHS of the reduction, as well as
1280 /// the element type of these pointers. They are expected to atomically
1281 /// update the LHS to the reduced value.
1282 using ReductionGenAtomicCBTy =
1283 std::function<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>;
1284
1285 /// Enum class for reduction evaluation types scalar, complex and aggregate.
1287
1288 /// Information about an OpenMP reduction.
1299 : ElementType(nullptr), Variable(nullptr),
1302
1303 /// Reduction element type, must match pointee type of variable.
1304 Type *ElementType;
1305
1306 /// Reduction variable of pointer type.
1307 Value *Variable;
1308
1309 /// Thread-private partial reduction variable.
1310 Value *PrivateVariable;
1311
1312 /// Reduction evaluation kind - scalar, complex or aggregate.
1313 EvalKind EvaluationKind;
1314
1315 /// Callback for generating the reduction body. The IR produced by this will
1316 /// be used to combine two values in a thread-safe context, e.g., under
1317 /// lock or within the same thread, and therefore need not be atomic.
1318 ReductionGenCBTy ReductionGen;
1319
1320 /// Clang callback for generating the reduction body. The IR produced by
1321 /// this will be used to combine two values in a thread-safe context, e.g.,
1322 /// under lock or within the same thread, and therefore need not be atomic.
1323 ReductionGenClangCBTy ReductionGenClang;
1324
1325 /// Callback for generating the atomic reduction body, may be null. The IR
1326 /// produced by this will be used to atomically combine two values during
1327 /// reduction. If null, the implementation will use the non-atomic version
1328 /// along with the appropriate synchronization mechanisms.
1329 ReductionGenAtomicCBTy AtomicReductionGen;
1330 };
1331
1332 enum class CopyAction : unsigned {
1333 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
1334 // the warp using shuffle instructions.
1335 RemoteLaneToThread,
1336 // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
1337 ThreadCopy,
1338 };
1339
1344 };
1345
1346 /// Supporting functions for Reductions CodeGen.
1347private:
1348 /// Emit the llvm.used metadata.
1349 void emitUsed(StringRef Name, std::vector<llvm::WeakTrackingVH> &List);
1350
1351 /// Get the id of the current thread on the GPU.
1352 Value *getGPUThreadID();
1353
1354 /// Get the GPU warp size.
1355 Value *getGPUWarpSize();
1356
1357 /// Get the id of the warp in the block.
1358 /// We assume that the warp size is 32, which is always the case
1359 /// on the NVPTX device, to generate more efficient code.
1360 Value *getNVPTXWarpID();
1361
1362 /// Get the id of the current lane in the Warp.
1363 /// We assume that the warp size is 32, which is always the case
1364 /// on the NVPTX device, to generate more efficient code.
1365 Value *getNVPTXLaneID();
1366
1367 /// Cast value to the specified type.
1368 Value *castValueToType(InsertPointTy AllocaIP, Value *From, Type *ToType);
1369
1370 /// This function creates calls to one of two shuffle functions to copy
1371 /// variables between lanes in a warp.
1372 Value *createRuntimeShuffleFunction(InsertPointTy AllocaIP, Value *Element,
1373 Type *ElementType, Value *Offset);
1374
1375 /// Function to shuffle over the value from the remote lane.
1376 void shuffleAndStore(InsertPointTy AllocaIP, Value *SrcAddr, Value *DstAddr,
1377 Type *ElementType, Value *Offset,
1378 Type *ReductionArrayTy);
1379
1380 /// Emit instructions to copy a Reduce list, which contains partially
1381 /// aggregated values, in the specified direction.
1382 void emitReductionListCopy(
1383 InsertPointTy AllocaIP, CopyAction Action, Type *ReductionArrayTy,
1384 ArrayRef<ReductionInfo> ReductionInfos, Value *SrcBase, Value *DestBase,
1385 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr});
1386
1387 /// Emit a helper that reduces data across two OpenMP threads (lanes)
1388 /// in the same warp. It uses shuffle instructions to copy over data from
1389 /// a remote lane's stack. The reduction algorithm performed is specified
1390 /// by the fourth parameter.
1391 ///
1392 /// Algorithm Versions.
1393 /// Full Warp Reduce (argument value 0):
1394 /// This algorithm assumes that all 32 lanes are active and gathers
1395 /// data from these 32 lanes, producing a single resultant value.
1396 /// Contiguous Partial Warp Reduce (argument value 1):
1397 /// This algorithm assumes that only a *contiguous* subset of lanes
1398 /// are active. This happens for the last warp in a parallel region
1399 /// when the user specified num_threads is not an integer multiple of
1400 /// 32. This contiguous subset always starts with the zeroth lane.
1401 /// Partial Warp Reduce (argument value 2):
1402 /// This algorithm gathers data from any number of lanes at any position.
1403 /// All reduced values are stored in the lowest possible lane. The set
1404 /// of problems every algorithm addresses is a super set of those
1405 /// addressable by algorithms with a lower version number. Overhead
1406 /// increases as algorithm version increases.
1407 ///
1408 /// Terminology
1409 /// Reduce element:
1410 /// Reduce element refers to the individual data field with primitive
1411 /// data types to be combined and reduced across threads.
1412 /// Reduce list:
1413 /// Reduce list refers to a collection of local, thread-private
1414 /// reduce elements.
1415 /// Remote Reduce list:
1416 /// Remote Reduce list refers to a collection of remote (relative to
1417 /// the current thread) reduce elements.
1418 ///
1419 /// We distinguish between three states of threads that are important to
1420 /// the implementation of this function.
1421 /// Alive threads:
1422 /// Threads in a warp executing the SIMT instruction, as distinguished from
1423 /// threads that are inactive due to divergent control flow.
1424 /// Active threads:
1425 /// The minimal set of threads that has to be alive upon entry to this
1426 /// function. The computation is correct iff active threads are alive.
1427 /// Some threads are alive but they are not active because they do not
1428 /// contribute to the computation in any useful manner. Turning them off
1429 /// may introduce control flow overheads without any tangible benefits.
1430 /// Effective threads:
1431 /// In order to comply with the argument requirements of the shuffle
1432 /// function, we must keep all lanes holding data alive. But at most
1433 /// half of them perform value aggregation; we refer to this half of
1434 /// threads as effective. The other half simply hands off its
1435 /// data.
1436 ///
1437 /// Procedure
1438 /// Value shuffle:
1439 /// In this step active threads transfer data from higher lane positions
1440 /// in the warp to lower lane positions, creating Remote Reduce list.
1441 /// Value aggregation:
1442 /// In this step, effective threads combine their thread local Reduce list
1443 /// with Remote Reduce list and store the result in the thread local
1444 /// Reduce list.
1445 /// Value copy:
1446 /// In this step, we deal with the assumption made by algorithm 2
1447 /// (i.e. contiguity assumption). When we have an odd number of lanes
1448 /// active, say 2k+1, only k threads will be effective and therefore k
1449 /// new values will be produced. However, the Reduce list owned by the
1450 /// (2k+1)th thread is ignored in the value aggregation. Therefore
1451 /// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
1452 /// that the contiguity assumption still holds.
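///
/// A condensed sketch of the emitted helper (pseudocode only; the generated
/// IR also performs the value copy step described above):
///
/// \code
///   void shuffle_and_reduce(void *reduce_list, int16_t lane_id,
///                           int16_t offset, int16_t algo_version) {
///     // Gather the remote lane's Reduce list via shuffle instructions.
///     remote_list = shuffle_from_remote_lane(reduce_list, offset);
///     // Only effective threads aggregate; the condition depends on
///     // algo_version as described above.
///     if (should_reduce(lane_id, offset, algo_version))
///       ReduceFn(reduce_list, remote_list);
///   }
/// \endcode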
1453 ///
1454 /// \param ReductionInfos Array type containing the ReductionOps.
1455 /// \param ReduceFn The reduction function.
1456 /// \param FuncAttrs Optional param to specify any function attributes that
1457 /// need to be copied to the new function.
1458 ///
1459 /// \return The ShuffleAndReduce function.
1460 Function *emitShuffleAndReduceFunction(
1462 Function *ReduceFn, AttributeList FuncAttrs);
1463
1464 /// This function emits a helper that gathers Reduce lists from the first
1465 /// lane of every active warp to lanes in the first warp.
1466 ///
1467 /// void inter_warp_copy_func(void* reduce_data, num_warps)
1468 /// shared smem[warp_size];
1469 /// For all data entries D in reduce_data:
1470 /// sync
1471 /// If (I am the first lane in each warp)
1472 /// Copy my local D to smem[warp_id]
1473 /// sync
1474 /// if (I am the first warp)
1475 /// Copy smem[thread_id] to my local D
1476 ///
1477 /// \param Loc The insert and source location description.
1478 /// \param ReductionInfos Array type containing the ReductionOps.
1479 /// \param FuncAttrs Optional param to specify any function attributes that
1480 /// need to be copied to the new function.
1481 ///
1482 /// \return The InterWarpCopy function.
1483 Function *emitInterWarpCopyFunction(const LocationDescription &Loc,
1484 ArrayRef<ReductionInfo> ReductionInfos,
1485 AttributeList FuncAttrs);
1486
1487 /// This function emits a helper that copies all the reduction variables from
1488 /// the team into the provided global buffer for the reduction variables.
1489 ///
1490 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
1491 /// For all data entries D in reduce_data:
1492 /// Copy local D to buffer.D[Idx]
1493 ///
1494 /// \param ReductionInfos Array type containing the ReductionOps.
1495 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1496 /// \param FuncAttrs Optional param to specify any function attributes that
1497 /// need to be copied to the new function.
1498 ///
1499 /// \return The ListToGlobalCopy function.
1500 Function *emitListToGlobalCopyFunction(ArrayRef<ReductionInfo> ReductionInfos,
1501 Type *ReductionsBufferTy,
1502 AttributeList FuncAttrs);
1503
1504 /// This function emits a helper that copies all the reduction variables from
1505 /// the provided global buffer back into the team's reduction variables.
1506 ///
1507 /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
1508 /// For all data entries D in reduce_data:
1509 /// Copy buffer.D[Idx] to local D;
1510 ///
1511 /// \param ReductionInfos Array type containing the ReductionOps.
1512 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1513 /// \param FuncAttrs Optional param to specify any function attributes that
1514 /// need to be copied to the new function.
1515 ///
1516 /// \return The GlobalToList function.
1517 Function *emitGlobalToListCopyFunction(ArrayRef<ReductionInfo> ReductionInfos,
1518 Type *ReductionsBufferTy,
1519 AttributeList FuncAttrs);
1520
1521 /// This function emits a helper that reduces all the reduction variables from
1522 /// the team into the provided global buffer for the reduction variables.
1523 ///
1524 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
1525 /// void *GlobPtrs[];
1526 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
1527 /// ...
1528 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
1529 /// reduce_function(GlobPtrs, reduce_data);
1530 ///
1531 /// \param ReductionInfos Array type containing the ReductionOps.
1532 /// \param ReduceFn The reduction function.
1533 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1534 /// \param FuncAttrs Optional param to specify any function attributes that
1535 /// need to be copied to the new function.
1536 ///
1537 /// \return The ListToGlobalReduce function.
1538 Function *
1539 emitListToGlobalReduceFunction(ArrayRef<ReductionInfo> ReductionInfos,
1540 Function *ReduceFn, Type *ReductionsBufferTy,
1541 AttributeList FuncAttrs);
1542
1543 /// This function emits a helper that reduces all the reduction variables in
1544 /// the provided global buffer into the team's local reduction variables.
1545 ///
1546 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
1547 /// void *GlobPtrs[];
1548 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
1549 /// ...
1550 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
1551 /// reduce_function(reduce_data, GlobPtrs);
1552 ///
1553 /// \param ReductionInfos Array type containing the ReductionOps.
1554 /// \param ReduceFn The reduction function.
1555 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1556 /// \param FuncAttrs Optional param to specify any function attributes that
1557 /// need to be copied to the new function.
1558 ///
1559 /// \return The GlobalToListReduce function.
1560 Function *
1561 emitGlobalToListReduceFunction(ArrayRef<ReductionInfo> ReductionInfos,
1562 Function *ReduceFn, Type *ReductionsBufferTy,
1563 AttributeList FuncAttrs);
1564
1565 /// Get the function name of a reduction function.
1566 std::string getReductionFuncName(StringRef Name) const;
1567
1568 /// Emits reduction function.
1569 /// \param ReducerName Name of the function calling the reduction.
1570 /// \param ReductionInfos Array type containing the ReductionOps.
1571 /// \param ReductionGenCBKind Optional param to specify Clang or MLIR
1572 /// CodeGenCB kind.
1573 /// \param FuncAttrs Optional param to specify any function attributes that
1574 /// need to be copied to the new function.
1575 ///
1576 /// \return The reduction function.
1577 Function *createReductionFunction(
1578 StringRef ReducerName, ArrayRef<ReductionInfo> ReductionInfos,
1580 AttributeList FuncAttrs = {});
1581
1582public:
1583 ///
1584 /// Design of OpenMP reductions on the GPU
1585 ///
1586 /// Consider a typical OpenMP program with one or more reduction
1587 /// clauses:
1588 ///
1589 /// float foo;
1590 /// double bar;
1591 /// #pragma omp target teams distribute parallel for \
1592 /// reduction(+:foo) reduction(*:bar)
1593 /// for (int i = 0; i < N; i++) {
1594 /// foo += A[i]; bar *= B[i];
1595 /// }
1596 ///
1597 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
1598 /// all teams. In our OpenMP implementation on the NVPTX device an
1599 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
1600 /// within a team are mapped to CUDA threads within a threadblock.
1601 /// Our goal is to efficiently aggregate values across all OpenMP
1602 /// threads such that:
1603 ///
1604 /// - the compiler and runtime are logically concise, and
1605 /// - the reduction is performed efficiently in a hierarchical
1606 /// manner as follows: within OpenMP threads in the same warp,
1607 /// across warps in a threadblock, and finally across teams on
1608 /// the NVPTX device.
1609 ///
1610 /// Introduction to Decoupling
1611 ///
1612 /// We would like to decouple the compiler and the runtime so that the
1613 /// latter is ignorant of the reduction variables (number, data types)
1614 /// and the reduction operators. This allows a simpler interface
1615 /// and implementation while still attaining good performance.
1616 ///
1617 /// Pseudocode for the aforementioned OpenMP program generated by the
1618 /// compiler is as follows:
1619 ///
1620 /// 1. Create private copies of reduction variables on each OpenMP
1621 /// thread: 'foo_private', 'bar_private'
1622 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
1623 /// to it and writes the result in 'foo_private' and 'bar_private'
1624 /// respectively.
1625 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
1626 /// and store the result on the team master:
1627 ///
1628 /// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
1629 /// reduceData, shuffleReduceFn, interWarpCpyFn)
1630 ///
1631 /// where:
1632 /// struct ReduceData {
1633 /// float *foo;
1634 /// double *bar;
1635 /// } reduceData
1636 /// reduceData.foo = &foo_private
1637 /// reduceData.bar = &bar_private
1638 ///
1639 /// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
1640 /// auxiliary functions generated by the compiler that operate on
1641 /// variables of type 'ReduceData'. They aid the runtime in performing
1642 /// algorithmic steps in a data-agnostic manner.
1643 ///
1644 /// 'shuffleReduceFn' is a pointer to a function that reduces data
1645 /// of type 'ReduceData' across two OpenMP threads (lanes) in the
1646 /// same warp. It takes the following arguments as input:
1647 ///
1648 /// a. variable of type 'ReduceData' on the calling lane,
1649 /// b. its lane_id,
1650 /// c. an offset relative to the current lane_id to generate a
1651 /// remote_lane_id. The remote lane contains the second
1652 /// variable of type 'ReduceData' that is to be reduced.
1653 /// d. an algorithm version parameter determining which reduction
1654 /// algorithm to use.
1655 ///
1656 /// 'shuffleReduceFn' retrieves data from the remote lane using
1657 /// efficient GPU shuffle intrinsics and reduces, using the
1658 /// algorithm specified by the 4th parameter, the two operands
1659 /// element-wise. The result is written to the first operand.
1660 ///
1661 /// Different reduction algorithms are implemented in different
1662 /// runtime functions, all calling 'shuffleReduceFn' to perform
1663 /// the essential reduction step. Therefore, based on the 4th
1664 /// parameter, this function behaves slightly differently to
1665 /// cooperate with the runtime to ensure correctness under
1666 /// different circumstances.
1667 ///
1668 /// 'InterWarpCpyFn' is a pointer to a function that transfers
1669 /// reduced variables across warps. It tunnels, through CUDA
1670 /// shared memory, the thread-private data of type 'ReduceData'
1671 /// from lane 0 of each warp to a lane in the first warp.
1672 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
1673 /// The last team writes the global reduced value to memory.
1674 ///
1675 /// ret = __kmpc_nvptx_teams_reduce_nowait(...,
1676 /// reduceData, shuffleReduceFn, interWarpCpyFn,
1677 /// scratchpadCopyFn, loadAndReduceFn)
1678 ///
1679 /// 'scratchpadCopyFn' is a helper that stores reduced
1680 /// data from the team master to a scratchpad array in
1681 /// global memory.
1682 ///
1683 /// 'loadAndReduceFn' is a helper that loads data from
1684 /// the scratchpad array and reduces it with the input
1685 /// operand.
1686 ///
1687 /// These compiler generated functions hide address
1688 /// calculation and alignment information from the runtime.
1689 /// 5. if ret == 1:
1690 /// The team master of the last team stores the reduced
1691 /// result to the globals in memory.
1692 /// foo += reduceData.foo; bar *= reduceData.bar
1693 ///
1694 ///
1695 /// Warp Reduction Algorithms
1696 ///
1697 /// On the warp level, we have three algorithms implemented in the
1698 /// OpenMP runtime depending on the number of active lanes:
1699 ///
1700 /// Full Warp Reduction
1701 ///
1702 /// The reduce algorithm within a warp where all lanes are active
1703 /// is implemented in the runtime as follows:
1704 ///
1705 /// full_warp_reduce(void *reduce_data,
1706 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
1707 /// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
1708 /// ShuffleReduceFn(reduce_data, 0, offset, 0);
1709 /// }
1710 ///
1711 /// The algorithm completes in log(2, WARPSIZE) steps.
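/// For example, with WARPSIZE == 32 the loop runs with offsets 16, 8, 4, 2
/// and 1, i.e. five shuffle/reduce steps.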
1712 ///
1713 /// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
1714 /// not used; therefore we save instructions by not retrieving lane_id
1715 /// from the corresponding special registers. The 4th parameter, which
1716 /// represents the version of the algorithm being used, is set to 0 to
1717 /// signify full warp reduction.
1718 ///
1719 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
1720 ///
1721 /// #reduce_elem refers to an element in the local lane's data structure
1722 /// #remote_elem is retrieved from a remote lane
1723 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
1724 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
1725 ///
1726 /// Contiguous Partial Warp Reduction
1727 ///
1728 /// This reduce algorithm is used within a warp where only the first
1729 /// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
1730 /// number of OpenMP threads in a parallel region is not a multiple of
1731 /// WARPSIZE. The algorithm is implemented in the runtime as follows:
1732 ///
1733 /// void
1734 /// contiguous_partial_reduce(void *reduce_data,
1735 /// kmp_ShuffleReductFctPtr ShuffleReduceFn,
1736 /// int size, int lane_id) {
1737 /// int curr_size;
1738 /// int offset;
1739 /// curr_size = size;
1740 /// offset = curr_size/2;
1741 /// while (offset>0) {
1742 /// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
1743 /// curr_size = (curr_size+1)/2;
1744 /// offset = curr_size/2;
1745 /// }
1746 /// }
1747 ///
1748 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
1749 ///
1750 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
1751 /// if (lane_id < offset)
1752 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
1753 /// else
1754 /// reduce_elem = remote_elem
1755 ///
1756 /// This algorithm assumes that the data to be reduced are located in a
1757 /// contiguous subset of lanes starting from the first. When there is
1758 /// an odd number of active lanes, the data in the last lane is not
1759 /// aggregated with any other lane's data but is instead copied over.
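///
/// For example, with size == 5 the loop above (with offset initialized to
/// curr_size/2) performs shuffles with offsets 2, 1 and 1, after which lane 0
/// holds the combination of all five lanes' values.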
1760 ///
1761 /// Dispersed Partial Warp Reduction
1762 ///
1763 /// This algorithm is used within a warp when any discontiguous subset of
1764 /// lanes are active. It is used to implement the reduction operation
1765 /// across lanes in an OpenMP simd region or in a nested parallel region.
1766 ///
1767 /// void
1768 /// dispersed_partial_reduce(void *reduce_data,
1769 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
1770 /// int size, remote_id;
1771 /// int logical_lane_id = number_of_active_lanes_before_me() * 2;
1772 /// do {
1773 /// remote_id = next_active_lane_id_right_after_me();
1774 /// # the above function returns 0 if no active lane
1775 /// # is present right after the current lane.
1776 /// size = number_of_active_lanes_in_this_warp();
1777 /// logical_lane_id /= 2;
1778 /// ShuffleReduceFn(reduce_data, logical_lane_id,
1779 /// remote_id-1-threadIdx.x, 2);
1780 /// } while (logical_lane_id % 2 == 0 && size > 1);
1781 /// }
1782 ///
1783 /// There is no assumption made about the initial state of the reduction.
1784 /// Any number of lanes (>=1) could be active at any position. The reduction
1785 /// result is returned in the first active lane.
1786 ///
1787 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
1788 ///
1789 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
1790 /// if (lane_id % 2 == 0 && offset > 0)
1791 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
1792 /// else
1793 /// reduce_elem = remote_elem
1794 ///
1795 ///
1796 /// Intra-Team Reduction
1797 ///
1798 /// This function, as implemented in the runtime call
1799 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
1800 /// threads in a team. It first reduces within a warp using the
1801 /// aforementioned algorithms. We then proceed to gather all such
1802 /// reduced values at the first warp.
1803 ///
1804 /// The runtime makes use of the function 'InterWarpCpyFn', which copies
1805 /// data from each of the "warp masters" (zeroth lane of each warp, where
1806 /// warp-reduced data is held) to the zeroth warp. This step reduces (in
1807 /// a mathematical sense) the problem of reduction across warp masters in
1808 /// a block to the problem of warp reduction.
1809 ///
1810 ///
1811 /// Inter-Team Reduction
1812 ///
1813 /// Once a team has reduced its data to a single value, it is stored in
1814 /// a global scratchpad array. Since each team has a distinct slot, this
1815 /// can be done without locking.
1816 ///
1817 /// The last team to write to the scratchpad array proceeds to reduce the
1818 /// scratchpad array. One or more workers in the last team use the helper
1819 /// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
1820 /// the k'th worker reduces every k'th element.
1821 ///
1822 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
1823 /// reduce across workers and compute a globally reduced value.
1824 ///
1825 /// \param Loc The location where the reduction was
1826 /// encountered. Must be within the associated
1827 /// directive and after the last local access to the
1828 /// reduction variables.
1829 /// \param AllocaIP An insertion point suitable for allocas usable
1830 /// in reductions.
1831 /// \param CodeGenIP An insertion point suitable for code
1832 /// generation.
1833 /// \param ReductionInfos A list of info on each reduction variable.
1834 /// \param IsNoWait Optional flag set if the reduction is
1835 /// marked as nowait.
1836 /// \param IsTeamsReduction Optional flag set if it is a teams
1837 /// reduction.
1838 /// \param HasDistribute Optional flag set if it is a
1839 /// distribute reduction.
1840 /// \param GridValue Optional GPU grid value.
1841 /// \param ReductionBufNum Optional OpenMPCUDAReductionBufNumValue to be
1842 /// used for teams reduction.
1843 /// \param SrcLocInfo Source location information global.
1845 const LocationDescription &Loc, InsertPointTy AllocaIP,
1846 InsertPointTy CodeGenIP, ArrayRef<ReductionInfo> ReductionInfos,
1847 bool IsNoWait = false, bool IsTeamsReduction = false,
1848 bool HasDistribute = false,
1850 std::optional<omp::GV> GridValue = {}, unsigned ReductionBufNum = 1024,
1851 Value *SrcLocInfo = nullptr);
1852
1853 // TODO: provide atomic and non-atomic reduction generators for reduction
1854 // operators defined by the OpenMP specification.
1855
1856 /// Generator for '#omp reduction'.
1857 ///
1858 /// Emits the IR instructing the runtime to perform the specific kind of
1859 /// reductions. Expects reduction variables to have been privatized and
1860 /// initialized to reduction-neutral values separately. Emits the calls to
1861 /// runtime functions as well as the reduction function and the basic blocks
1862 /// performing the reduction atomically and non-atomically.
1863 ///
1864 /// The code emitted for the following:
1865 ///
1866 /// \code
1867 /// type var_1;
1868 /// type var_2;
1869 /// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
1870 /// /* body */;
1871 /// \endcode
1872 ///
1873 /// corresponds to the following sketch.
1874 ///
1875 /// \code
1876 /// void _outlined_par() {
1877 /// // N is the number of different reductions.
1878 /// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
1879 /// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
1880 /// _omp_reduction_func,
1881 /// _gomp_critical_user.reduction.var)) {
1882 /// case 1: {
1883 /// var_1 = var_1 <reduction-op> privatized_var_1;
1884 /// var_2 = var_2 <reduction-op> privatized_var_2;
1885 /// // ...
1886 /// __kmpc_end_reduce(...);
1887 /// break;
1888 /// }
1889 /// case 2: {
1890 /// _Atomic<ReductionOp>(var_1, privatized_var_1);
1891 /// _Atomic<ReductionOp>(var_2, privatized_var_2);
1892 /// // ...
1893 /// break;
1894 /// }
1895 /// default: break;
1896 /// }
1897 /// }
1898 ///
1899 /// void _omp_reduction_func(void **lhs, void **rhs) {
1900 /// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
1901 /// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
1902 /// // ...
1903 /// }
1904 /// \endcode
1905 ///
1906 /// \param Loc The location where the reduction was
1907 /// encountered. Must be within the associated
1908 /// directive and after the last local access to the
1909 /// reduction variables.
1910 /// \param AllocaIP An insertion point suitable for allocas usable
1911 /// in reductions.
1912 /// \param ReductionInfos A list of info on each reduction variable.
1913 /// \param IsNoWait A flag set if the reduction is marked as nowait.
1914 /// \param IsByRef Flags indicating, for each reduction, whether it is
1915 /// performed by reference or by value.
1916 InsertPointTy createReductions(const LocationDescription &Loc,
1917 InsertPointTy AllocaIP,
1918 ArrayRef<ReductionInfo> ReductionInfos,
1919 ArrayRef<bool> IsByRef, bool IsNoWait = false);
1920
1921 ///}
1922
1923 /// Return the insertion point used by the underlying IRBuilder.
1925
1926 /// Update the internal location to \p Loc.
1928 Builder.restoreIP(Loc.IP);
1930 return Loc.IP.getBlock() != nullptr;
1931 }
1932
1933 /// Return the function declaration for the runtime function with \p FnID.
1936
1938
1939 /// Return the (LLVM-IR) string describing the source location \p LocStr.
1940 Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize);
1941
1942 /// Return the (LLVM-IR) string describing the default source location.
1944
1945 /// Return the (LLVM-IR) string describing the source location identified by
1946 /// the arguments.
1947 Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
1948 unsigned Line, unsigned Column,
1949 uint32_t &SrcLocStrSize);
1950
1951 /// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
1952 /// fallback if \p DL does not specify the function name.
1954 Function *F = nullptr);
1955
1956 /// Return the (LLVM-IR) string describing the source location \p Loc.
1957 Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
1958 uint32_t &SrcLocStrSize);
1959
1960 /// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
1961 /// TODO: Create an enum class for the Reserve2Flags
1962 Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
1963 omp::IdentFlag Flags = omp::IdentFlag(0),
1964 unsigned Reserve2Flags = 0);
1965
1966 /// Create a hidden global flag \p Name in the module with initial value \p
1967 /// Value.
1969
1970 /// Generate control flow and cleanup for cancellation.
1971 ///
1972 /// \param CancelFlag Flag indicating if the cancellation is performed.
1973 /// \param CanceledDirective The kind of directive that is cancelled.
1974 /// \param ExitCB Extra code to be generated in the exit block.
1975 void emitCancelationCheckImpl(Value *CancelFlag,
1976 omp::Directive CanceledDirective,
1977 FinalizeCallbackTy ExitCB = {});
1978
1979 /// Generate a target region entry call.
1980 ///
1981 /// \param Loc The location at which the request originated and is fulfilled.
1982 /// \param AllocaIP The insertion point to be used for alloca instructions.
1983 /// \param Return Return value of the created function returned by reference.
1984 /// \param DeviceID Identifier for the device via the 'device' clause.
1985 /// \param NumTeams Number of teams for the region via the 'num_teams' clause
1986 /// or 0 if unspecified and -1 if there is no 'teams' clause.
1987 /// \param NumThreads Number of threads via the 'thread_limit' clause.
1988 /// \param HostPtr Pointer to the host-side pointer of the target kernel.
1989 /// \param KernelArgs Array of arguments to the kernel.
1990 InsertPointTy emitTargetKernel(const LocationDescription &Loc,
1991 InsertPointTy AllocaIP, Value *&Return,
1992 Value *Ident, Value *DeviceID, Value *NumTeams,
1993 Value *NumThreads, Value *HostPtr,
1994 ArrayRef<Value *> KernelArgs);
1995
1996 /// Generate a flush runtime call.
1997 ///
1998 /// \param Loc The location at which the request originated and is fulfilled.
1999 void emitFlush(const LocationDescription &Loc);
2000
2001 /// The finalization stack made up of finalize callbacks currently in-flight,
2002 /// wrapped into FinalizationInfo objects that also reference the finalization
2003 /// target block and the kind of cancellable directive.
2005
2006 /// Return true if the last entry in the finalization stack is of kind \p DK
2007 /// and cancellable.
2008 bool isLastFinalizationInfoCancellable(omp::Directive DK) {
2009 return !FinalizationStack.empty() &&
2010 FinalizationStack.back().IsCancellable &&
2011 FinalizationStack.back().DK == DK;
2012 }
2013
2014 /// Generate a taskwait runtime call.
2015 ///
2016 /// \param Loc The location at which the request originated and is fulfilled.
2017 void emitTaskwaitImpl(const LocationDescription &Loc);
2018
2019 /// Generate a taskyield runtime call.
2020 ///
2021 /// \param Loc The location at which the request originated and is fulfilled.
2022 void emitTaskyieldImpl(const LocationDescription &Loc);
2023
2024 /// Return the current thread ID.
2025 ///
2026 /// \param Ident The ident (ident_t*) describing the query origin.
2028
2029 /// The OpenMPIRBuilder Configuration
2031
2032 /// The underlying LLVM-IR module
2034
2035 /// The LLVM-IR Builder used to create IR.
2037
2038 /// Map to remember source location strings
2040
2041 /// Map to remember existing ident_t*.
2043
2044 /// Info manager to keep track of target regions.
2046
2047 /// The target triple of the underlying module.
2048 const Triple T;
2049
2050 /// Helper that contains information about regions we need to outline
2051 /// during finalization.
2053 using PostOutlineCBTy = std::function<void(Function &)>;
2057
2058 /// Collect all blocks in between EntryBB and ExitBB in both the given
2059 /// vector and set.
2061 SmallVectorImpl<BasicBlock *> &BlockVector);
2062
2063 /// Return the function that contains the region to be outlined.
2064 Function *getFunction() const { return EntryBB->getParent(); }
2065 };
2066
2067 /// Collection of regions that need to be outlined during finalization.
2069
2070 /// A collection of candidate target functions whose constant allocas we will
2071 /// attempt to raise on a call of finalize, after all currently enqueued
2072 /// outline infos have been processed.
2074
2075 /// Collection of owned canonical loop objects that eventually need to be
2076 /// freed.
2077 std::forward_list<CanonicalLoopInfo> LoopInfos;
2078
2079 /// Add a new region that will be outlined later.
2080 void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }
2081
2082 /// An ordered map of auto-generated variables to their unique names.
2083 /// It stores variables with the following names: 1) ".gomp_critical_user_" +
2084 /// <critical_section_name> + ".var" for "omp critical" directives; 2)
2085 /// <mangled_name_for_global_var> + ".cache." for the cache of threadprivate
2086 /// variables.
2088
2089 /// Computes the size of a type in bytes.
2090 Value *getSizeInBytes(Value *BasePtr);
2091
2092 // Emit a branch from the current block to the Target block only if
2093 // the current block does not already have a terminator.
2095
2096 // If BB has no use then delete it and return. Else place BB after the current
2097 // block, if possible, or else at the end of the function. Also add a branch
2098 // from current block to BB if current block does not have a terminator.
2099 void emitBlock(BasicBlock *BB, Function *CurFn, bool IsFinished = false);
2100
2101 /// Emits code for OpenMP 'if' clause using specified \a BodyGenCallbackTy
2102 /// Here is the logic:
2103 /// if (Cond) {
2104 /// ThenGen();
2105 /// } else {
2106 /// ElseGen();
2107 /// }
2109 BodyGenCallbackTy ElseGen, InsertPointTy AllocaIP = {});
2110
2111 /// Create the global variable holding the offload mappings information.
2113 std::string VarName);
2114
2115 /// Create the global variable holding the offload names information.
2118 std::string VarName);
2119
2122 AllocaInst *Args = nullptr;
2124 };
2125
2126 /// Create the allocas instruction used in call to mapper functions.
2128 InsertPointTy AllocaIP, unsigned NumOperands,
2130
2131 /// Create the call for the target mapper function.
2132 /// \param Loc The source location description.
2133 /// \param MapperFunc Function to be called.
2134 /// \param SrcLocInfo Source location information global.
2135 /// \param MaptypesArg The array of map types.
2136 /// \param MapnamesArg The array of map names.
2137 /// \param MapperAllocas The AllocaInst used for the call.
2138 /// \param DeviceID Device ID for the call.
2139 /// \param NumOperands Number of operands in the call.
2140 void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
2141 Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
2142 struct MapperAllocas &MapperAllocas, int64_t DeviceID,
2143 unsigned NumOperands);
2144
2145 /// Container for the arguments used to pass data to the runtime library.
2147 /// The array of base pointer passed to the runtime library.
2149 /// The array of section pointers passed to the runtime library.
2151 /// The array of sizes passed to the runtime library.
2152 Value *SizesArray = nullptr;
2153 /// The array of map types passed to the runtime library for the beginning
2154 /// of the region or for the entire region if there are no separate map
2155 /// types for the region end.
2157 /// The array of map types passed to the runtime library for the end of the
2158 /// region, or nullptr if there are no separate map types for the region
2159 /// end.
2161 /// The array of user-defined mappers passed to the runtime library.
2163 /// The array of original declaration names of mapped pointers sent to the
2164 /// runtime library for debugging
2166
2167 explicit TargetDataRTArgs() {}
2176 };
2177
2178 /// Data structure that contains the needed information to construct the
2179 /// kernel args vector.
2181 /// Number of arguments passed to the runtime library.
2183 /// Arguments passed to the runtime library
2185 /// The number of iterations
2187 /// The number of teams.
2189 /// The number of threads.
2191 /// The size of the dynamic shared memory.
2193 /// True if the kernel has 'no wait' clause.
2195
2196 /// Constructor for TargetKernelArgs
2204 };
2205
2206 /// Create the kernel args vector used by emitTargetKernel. This function
2207 /// creates various constant values that are used in the resulting args
2208 /// vector.
2209 static void getKernelArgsVector(TargetKernelArgs &KernelArgs,
2211 SmallVector<Value *> &ArgsVector);
2212
2213 /// Struct that keeps the information that should be kept throughout
2214 /// a 'target data' region.
2217 /// Set to true if device pointer information has to be obtained.
2217 bool RequiresDevicePointerInfo = false;
2218 /// Set to true if Clang emits separate runtime calls for the beginning and
2219 /// end of the region. These calls might have separate map type arrays.
2220 bool SeparateBeginEndCalls = false;
2221
2222 public:
2224
2227
2228 /// Indicate whether any user-defined mapper exists.
2229 bool HasMapper = false;
2230 /// The total number of pointers passed to the runtime library.
2231 unsigned NumberOfPtrs = 0u;
2232
2233 explicit TargetDataInfo() {}
2234 explicit TargetDataInfo(bool RequiresDevicePointerInfo,
2235 bool SeparateBeginEndCalls)
2236 : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
2237 SeparateBeginEndCalls(SeparateBeginEndCalls) {}
2238 /// Clear information about the data arrays.
2241 HasMapper = false;
2242 NumberOfPtrs = 0u;
2243 }
2244 /// Return true if the current target data information has valid arrays.
2245 bool isValid() {
2249 }
2250 bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
2251 bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
2252 };
2253
2261
2262 /// This structure contains combined information generated for mappable
2263 /// clauses, including base pointers, pointers, sizes, map types, user-defined
2264 /// mappers, and non-contiguous information.
2265 struct MapInfosTy {
2267 bool IsNonContiguous = false;
2272 };
2280
2281 /// Append arrays in \a CurInfo.
2282 void append(MapInfosTy &CurInfo) {
2284 CurInfo.BasePointers.end());
2285 Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
2287 CurInfo.DevicePointers.end());
2288 Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
2289 Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
2290 Names.append(CurInfo.Names.begin(), CurInfo.Names.end());
2292 CurInfo.NonContigInfo.Dims.end());
2294 CurInfo.NonContigInfo.Offsets.end());
2296 CurInfo.NonContigInfo.Counts.end());
2298 CurInfo.NonContigInfo.Strides.end());
2299 }
2300 };
2301
2302 /// Callback function type for functions emitting the host fallback code that
2303 /// is executed when the kernel launch fails. It takes an insertion point as
2304 /// parameter where the code should be emitted. It returns an insertion point
2305 /// that points right after the emitted code.
2307
2308 /// Generate a target region entry call and host fallback call.
2309 ///
2310 /// \param Loc The location at which the request originated and is fulfilled.
2311 /// \param OutlinedFn The outlined kernel function.
2312 /// \param OutlinedFnID The outlined function ID.
2313 /// \param EmitTargetCallFallbackCB Call back function to generate host
2314 /// fallback code.
2315 /// \param Args Data structure holding information about the kernel arguments.
2316 /// \param DeviceID Identifier for the device via the 'device' clause.
2317 /// \param RTLoc Source location identifier
2318 /// \param AllocaIP The insertion point to be used for alloca instructions.
2320 const LocationDescription &Loc, Function *OutlinedFn, Value *OutlinedFnID,
2321 EmitFallbackCallbackTy EmitTargetCallFallbackCB, TargetKernelArgs &Args,
2322 Value *DeviceID, Value *RTLoc, InsertPointTy AllocaIP);
2323
2324 /// Emit the arguments to be passed to the runtime library based on the
2325 /// arrays of base pointers, pointers, sizes, map types, and mappers. If
2326 /// ForEndCall, emit map types to be passed for the end of the region instead
2327 /// of the beginning.
2331 bool EmitDebug = false,
2332 bool ForEndCall = false);
2333
2334 /// Emit an array of struct descriptors to be assigned to the offload args.
2336 InsertPointTy CodeGenIP,
2337 MapInfosTy &CombinedInfo,
2339
2340 /// Emit the arrays used to pass the captures and map information to the
2341 /// offloading runtime library. If there is no map or capture information,
2342 /// return nullptr by reference.
2344 InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo,
2345 TargetDataInfo &Info, bool IsNonContiguous = false,
2346 function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr,
2347 function_ref<Value *(unsigned int)> CustomMapperCB = nullptr);
2348
2349 /// Creates offloading entry for the provided entry ID \a ID, address \a
2350 /// Addr, size \a Size, and flags \a Flags.
2352 int32_t Flags, GlobalValue::LinkageTypes,
2353 StringRef Name = "");
2354
2355 /// The kind of errors that can occur when emitting the offload entries and
2356 /// metadata.
2362
2363 /// Callback function type
2365 std::function<void(EmitMetadataErrorKind, TargetRegionEntryInfo)>;
2366
2367 // Emit the offloading entries and metadata so that the device codegen side
2368 // can easily figure out what to emit. The produced metadata looks like
2369 // this:
2370 //
2371 // !omp_offload.info = !{!1, ...}
2372 //
2373 // We only generate metadata for functions that contain target regions.
2375 EmitMetadataErrorReportFunctionTy &ErrorReportFunction);
2376
2377public:
2378 /// Generator for __kmpc_copyprivate
2379 ///
2380 /// \param Loc The source location description.
2381 /// \param BufSize Number of elements in the buffer.
2382 /// \param CpyBuf List of pointers to data to be copied.
2383 /// \param CpyFn function to call for copying data.
2384 /// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
2385 ///
2386 /// \return The insertion position *after* the CopyPrivate call.
2387
2389 llvm::Value *BufSize, llvm::Value *CpyBuf,
2390 llvm::Value *CpyFn, llvm::Value *DidIt);
2391
2392 /// Generator for '#omp single'
2393 ///
2394 /// \param Loc The source location description.
2395 /// \param BodyGenCB Callback that will generate the region code.
2396 /// \param FiniCB Callback to finalize variable copies.
2397 /// \param IsNowait If false, a barrier is emitted.
2398 /// \param CPVars copyprivate variables.
2399 /// \param CPFuncs copy functions to use for each copyprivate variable.
2400 ///
2401 /// \returns The insertion position *after* the single call.
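///
/// A hypothetical invocation from a frontend might look as follows (a sketch
/// only; the callback and variable names are illustrative):
///
/// \code
///   auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) {
///     Builder.restoreIP(CodeGenIP);
///     // ... emit the body of the single region ...
///   };
///   auto FiniCB = [&](InsertPointTy IP) { /* finalize copies, if any */ };
///   Builder.restoreIP(
///       OMPBuilder.createSingle(Loc, BodyGenCB, FiniCB, /*IsNowait=*/false));
/// \endcode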
2403 BodyGenCallbackTy BodyGenCB,
2404 FinalizeCallbackTy FiniCB, bool IsNowait,
2405 ArrayRef<llvm::Value *> CPVars = {},
2406 ArrayRef<llvm::Function *> CPFuncs = {});
2407
2408 /// Generator for '#omp master'
2409 ///
2410 /// \param Loc The insert and source location description.
2411 /// \param BodyGenCB Callback that will generate the region code.
2412 /// \param FiniCB Callback to finalize variable copies.
2413 ///
2414 /// \returns The insertion position *after* the master.
2415 InsertPointTy createMaster(const LocationDescription &Loc,
2416 BodyGenCallbackTy BodyGenCB,
2417 FinalizeCallbackTy FiniCB);
2418
2419 /// Generator for '#omp masked'
2420 ///
2421 /// \param Loc The insert and source location description.
2422 /// \param BodyGenCB Callback that will generate the region code.
2423 /// \param FiniCB Callback to finalize variable copies.
2424 ///
2425 /// \returns The insertion position *after* the masked.
2426 InsertPointTy createMasked(const LocationDescription &Loc,
2427 BodyGenCallbackTy BodyGenCB,
2428 FinalizeCallbackTy FiniCB, Value *Filter);
2429
2430 /// Generator for '#omp critical'
2431 ///
2432 /// \param Loc The insert and source location description.
2433 /// \param BodyGenCB Callback that will generate the region body code.
2434 /// \param FiniCB Callback to finalize variable copies.
2435 /// \param CriticalName name of the lock used by the critical directive
2436 /// \param HintInst Hint Instruction for hint clause associated with critical
2437 ///
2438 /// \returns The insertion position *after* the critical.
2439 InsertPointTy createCritical(const LocationDescription &Loc,
2440 BodyGenCallbackTy BodyGenCB,
2441 FinalizeCallbackTy FiniCB,
2442 StringRef CriticalName, Value *HintInst);
2443
2444 /// Generator for '#omp ordered depend (source | sink)'
2445 ///
2446 /// \param Loc The insert and source location description.
2447 /// \param AllocaIP The insertion point to be used for alloca instructions.
2448 /// \param NumLoops The number of loops in depend clause.
2449 /// \param StoreValues The values to be stored at the vector address.
2450 /// \param Name The name of alloca instruction.
2451 /// \param IsDependSource If true, depend source; otherwise, depend sink.
2452 ///
2453 /// \return The insertion position *after* the ordered.
2454 InsertPointTy createOrderedDepend(const LocationDescription &Loc,
2455 InsertPointTy AllocaIP, unsigned NumLoops,
2456 ArrayRef<llvm::Value *> StoreValues,
2457 const Twine &Name, bool IsDependSource);
2458
2459 /// Generator for '#omp ordered [threads | simd]'
2460 ///
2461 /// \param Loc The insert and source location description.
2462 /// \param BodyGenCB Callback that will generate the region code.
2463 /// \param FiniCB Callback to finalize variable copies.
2464 /// \param IsThreads If true, the 'threads' clause (or no clause) was given;
2465 /// otherwise, the 'simd' clause was given.
2466 ///
2467 /// \returns The insertion position *after* the ordered.
2468 InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
2469 BodyGenCallbackTy BodyGenCB,
2470 FinalizeCallbackTy FiniCB,
2471 bool IsThreads);
2472
2473 /// Generator for '#omp sections'
2474 ///
2475 /// \param Loc The insert and source location description.
2476 /// \param AllocaIP The insertion points to be used for alloca instructions.
2477 /// \param SectionCBs Callbacks that will generate body of each section.
2478 /// \param PrivCB Callback to copy a given variable (think copy constructor).
2479 /// \param FiniCB Callback to finalize variable copies.
2480 /// \param IsCancellable Flag to indicate a cancellable parallel region.
2481 /// \param IsNowait If true, the barrier that ensures all sections are executed
2482 /// before moving forward will not be generated.
2483 /// \returns The insertion position *after* the sections.
2484 InsertPointTy createSections(const LocationDescription &Loc,
2485 InsertPointTy AllocaIP,
2486 ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
2487 PrivatizeCallbackTy PrivCB,
2488 FinalizeCallbackTy FiniCB, bool IsCancellable,
2489 bool IsNowait);
2490
2491 /// Generator for '#omp section'
2492 ///
2493 /// \param Loc The insert and source location description.
2494 /// \param BodyGenCB Callback that will generate the region body code.
2495 /// \param FiniCB Callback to finalize variable copies.
2496 /// \returns The insertion position *after* the section.
2497 InsertPointTy createSection(const LocationDescription &Loc,
2498 BodyGenCallbackTy BodyGenCB,
2499 FinalizeCallbackTy FiniCB);
2500
2501 /// Generator for `#omp teams`
2502 ///
2503 /// \param Loc The location where the teams construct was encountered.
2504 /// \param BodyGenCB Callback that will generate the region code.
2505 /// \param NumTeamsLower Lower bound on the number of teams. If this is nullptr,
2506 /// it is as if the lower bound were specified as equal to the upper bound. If
2507 /// this is non-null, then the upper bound must also be non-null.
2508 /// \param NumTeamsUpper Upper bound on the number of teams.
2509 /// \param ThreadLimit Limit on the number of threads that may participate in a
2510 /// contention group created by each team.
2511 /// \param IfExpr The integer argument value of the if condition on the
2512 /// teams clause.
2514 createTeams(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
2515 Value *NumTeamsLower = nullptr, Value *NumTeamsUpper = nullptr,
2516 Value *ThreadLimit = nullptr, Value *IfExpr = nullptr);
2517
2518 /// Generate conditional branch and relevant BasicBlocks through which private
2519 /// threads copy the 'copyin' variables from Master copy to threadprivate
2520 /// copies.
2521 ///
2522 /// \param IP insertion block for copyin conditional
2523 /// \param MasterVarPtr a pointer to the master variable
2524 /// \param PrivateVarPtr a pointer to the threadprivate variable
2525 /// \param IntPtrTy Pointer size type
2526 /// \param BranchtoEnd Create a branch between the copyin.not.master blocks
2527 /// and copy.in.end block.
2528 ///
2529 /// \returns The insertion point where the copying operation is to be emitted.
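///
/// A rough sketch of the emitted logic (the actual IR, including the
/// copyin.not.master / copy.in.end blocks, is produced by this helper;
/// names below are illustrative):
///
/// \code
///   if ((intptr_t)MasterVarPtr != (intptr_t)PrivateVarPtr)
///     *PrivateVarPtr = *MasterVarPtr; // copy master value to the threadprivate copy
/// \endcode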
2531 Value *PrivateAddr,
2532 llvm::IntegerType *IntPtrTy,
2533 bool BranchtoEnd = true);
2534
2535 /// Create a runtime call for kmpc_Alloc
2536 ///
2537 /// \param Loc The insert and source location description.
2538 /// \param Size Size of allocated memory space
2539 /// \param Allocator Allocator information instruction
2540 /// \param Name Name of call Instruction for OMP_alloc
2541 ///
2542 /// \returns CallInst to the OMP_Alloc call
2543 CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
2544 Value *Allocator, std::string Name = "");
2545
2546 /// Create a runtime call for kmpc_free
2547 ///
2548 /// \param Loc The insert and source location description.
2549 /// \param Addr Address of memory space to be freed
2550 /// \param Allocator Allocator information instruction
2551 /// \param Name Name of call Instruction for OMP_Free
2552 ///
2553 /// \returns CallInst to the OMP_Free call
2554 CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
2555 Value *Allocator, std::string Name = "");
2556
2557 /// Create a runtime call for kmpc_threadprivate_cached
2558 ///
2559 /// \param Loc The insert and source location description.
2560 /// \param Pointer pointer to data to be cached
2561 /// \param Size size of data to be cached
2562 /// \param Name Name of call Instruction for callinst
2563 ///
2564 /// \returns CallInst to the thread private cache call.
2565 CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
2568 const llvm::Twine &Name = Twine(""));
2569
2570 /// Create a runtime call for __tgt_interop_init
2571 ///
2572 /// \param Loc The insert and source location description.
2573 /// \param InteropVar variable to be allocated
2574 /// \param InteropType type of interop operation
2575 /// \param Device device to which offloading will occur
2576 /// \param NumDependences number of dependence variables
2577 /// \param DependenceAddress pointer to dependence variables
2578 /// \param HaveNowaitClause does nowait clause exist
2579 ///
2580 /// \returns CallInst to the __tgt_interop_init call
2581 CallInst *createOMPInteropInit(const LocationDescription &Loc,
2582 Value *InteropVar,
2583 omp::OMPInteropType InteropType, Value *Device,
2584 Value *NumDependences,
2585 Value *DependenceAddress,
2586 bool HaveNowaitClause);
2587
2588 /// Create a runtime call for __tgt_interop_destroy
2589 ///
2590 /// \param Loc The insert and source location description.
2591 /// \param InteropVar variable to be allocated
2592 /// \param Device device to which offloading will occur
2593 /// \param NumDependences number of dependence variables
2594 /// \param DependenceAddress pointer to dependence variables
2595 /// \param HaveNowaitClause does nowait clause exist
2596 ///
2597 /// \returns CallInst to the __tgt_interop_destroy call
2598 CallInst *createOMPInteropDestroy(const LocationDescription &Loc,
2599 Value *InteropVar, Value *Device,
2600 Value *NumDependences,
2601 Value *DependenceAddress,
2602 bool HaveNowaitClause);
2603
2604 /// Create a runtime call for __tgt_interop_use
2605 ///
2606 /// \param Loc The insert and source location description.
2607 /// \param InteropVar variable to be allocated
2608 /// \param Device device to which offloading will occur
2609 /// \param NumDependences number of dependence variables
2610 /// \param DependenceAddress pointer to dependence variables
2611 /// \param HaveNowaitClause does nowait clause exist
2612 ///
2613 /// \returns CallInst to the __tgt_interop_use call
2614 CallInst *createOMPInteropUse(const LocationDescription &Loc,
2615 Value *InteropVar, Value *Device,
2616 Value *NumDependences, Value *DependenceAddress,
2617 bool HaveNowaitClause);
2618
2619 /// The `omp target` interface
2620 ///
2621 /// For more information about the usage of this interface,
2622 /// \see openmp/libomptarget/deviceRTLs/common/include/target.h
2623 ///
2624 ///{
2625
2626 /// Create a runtime call for kmpc_target_init
2627 ///
2628 /// \param Loc The insert and source location description.
2629 /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
2630 /// \param MinThreads Minimal number of threads, or 0.
2631 /// \param MaxThreads Maximal number of threads, or 0.
2632 /// \param MinTeams Minimal number of teams, or 0.
2633 /// \param MaxTeams Maximal number of teams, or 0.
2634 InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
2635 int32_t MinThreadsVal = 0,
2636 int32_t MaxThreadsVal = 0,
2637 int32_t MinTeamsVal = 0,
2638 int32_t MaxTeamsVal = 0);
2639
2640 /// Create a runtime call for kmpc_target_deinit
2641 ///
2642 /// \param Loc The insert and source location description.
2643 /// \param TeamsReductionDataSize The maximal size of all the reduction data
2644 /// for teams reduction.
2645 /// \param TeamsReductionBufferLength The number of elements (each of up to
2646 /// \p TeamsReductionDataSize size), in the teams reduction buffer.
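///
/// On the device, a kernel typically brackets its body with the init/deinit
/// pair (a sketch only; real callers also thread through the insertion points
/// returned by the builder):
///
/// \code
///   Builder.restoreIP(OMPBuilder.createTargetInit(Loc, /*IsSPMD=*/true));
///   // ... emit the kernel body ...
///   OMPBuilder.createTargetDeinit(Loc);
/// \endcode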
2647 void createTargetDeinit(const LocationDescription &Loc,
2648 int32_t TeamsReductionDataSize = 0,
2649 int32_t TeamsReductionBufferLength = 1024);
2650
2651 ///}
2652
2653 /// Helpers to read/write kernel annotations from the IR.
2654 ///
2655 ///{
2656
2657 /// Read/write bounds on threads for \p Kernel. Read will return 0 if none
2658 /// is set.
2659 static std::pair<int32_t, int32_t>
2660 readThreadBoundsForKernel(const Triple &T, Function &Kernel);
2661 static void writeThreadBoundsForKernel(const Triple &T, Function &Kernel,
2662 int32_t LB, int32_t UB);
2663
2664 /// Read/write bounds on teams for \p Kernel. Read will return 0 if none
2665 /// is set.
2666 static std::pair<int32_t, int32_t> readTeamBoundsForKernel(const Triple &T,
2667 Function &Kernel);
2668 static void writeTeamsForKernel(const Triple &T, Function &Kernel, int32_t LB,
2669 int32_t UB);
2670 ///}
2671
2672private:
2673 // Sets the function attributes expected for the outlined function
2674 void setOutlinedTargetRegionFunctionAttributes(Function *OutlinedFn);
2675
2676 // Creates the function ID/Address for the given outlined function.
2677 // In the case of an embedded device function the address of the function is
2678 // used, in the case of a non-offload function a constant is created.
2679 Constant *createOutlinedFunctionID(Function *OutlinedFn,
2680 StringRef EntryFnIDName);
2681
2682 // Creates the region entry address for the outlined function
2683 Constant *createTargetRegionEntryAddr(Function *OutlinedFunction,
2684 StringRef EntryFnName);
2685
2686public:
2687 /// Functions used to generate a function with the given name.
2688 using FunctionGenCallback = std::function<Function *(StringRef FunctionName)>;
2689
2690 /// Create a unique name for the entry function using the source location
2691 /// information of the current target region. The name will be something like:
2692 ///
2693 /// __omp_offloading_DD_FFFF_PP_lBB[_CC]
2694 ///
2695 /// where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
2696 /// mangled name of the function that encloses the target region and BB is the
2697 /// line number of the target region. CC is a count added when more than one
2698 /// region is located at the same location.
2699 ///
2700 /// If this target outline function is not an offload entry, we don't need to
2701 /// register it. This may happen if it is guarded by an if clause that is
2702 /// false at compile time, or no target archs have been specified.
2703 ///
2704 /// The created target region ID is used by the runtime library to identify
2705 /// the current target region, so it only has to be unique and not
2706 /// necessarily point to anything. It could be the pointer to the outlined
2707 /// function that implements the target region, but we aren't using that so
2708 /// that the compiler doesn't need to keep it around, and could therefore inline
2709 /// the host function if proven worthwhile during optimization. On the other
2710 /// hand, if emitting code for the device, the ID has to be the function
2711 /// address so that it can be retrieved from the offloading entry and launched
2712 /// by the runtime library. We also mark the outlined function to have
2713 /// external linkage in case we are emitting code for the device, because
2714 /// these functions will be entry points to the device.
2715 ///
2716 /// \param InfoManager The info manager keeping track of the offload entries
2717 /// \param EntryInfo The entry information about the function
2718 /// \param GenerateFunctionCallback The callback function to generate the code
2719 /// \param OutlinedFunction Pointer to the outlined function
2720 /// \param EntryFnIDName Name of the ID to be created
2722 FunctionGenCallback &GenerateFunctionCallback,
2723 bool IsOffloadEntry, Function *&OutlinedFn,
2724 Constant *&OutlinedFnID);
2725
2726 /// Registers the given function and sets up the attributes of the function
2727 /// Returns the FunctionID.
2728 ///
2729 /// \param InfoManager The info manager keeping track of the offload entries
2730 /// \param EntryInfo The entry information about the function
2731 /// \param OutlinedFunction Pointer to the outlined function
2732 /// \param EntryFnName Name of the outlined function
2733 /// \param EntryFnIDName Name of the ID to be created
2735 Function *OutlinedFunction,
2736 StringRef EntryFnName,
2737 StringRef EntryFnIDName);
2738
2739 /// Type of BodyGen to use for region codegen
2740 ///
2741 /// Priv: If device pointer privatization is required, emit the body of the
2742 /// region here. It will have to be duplicated: with and without
2743 /// privatization.
2744 /// DupNoPriv: If we need device pointer privatization, we need
2745 /// to emit the body of the region with no privatization in the 'else' branch
2746 /// of the conditional.
2747 /// NoPriv: If we don't require privatization of device
2748 /// pointers, we emit the body in between the runtime calls. This avoids
2749 /// duplicating the body code.
2751
2752 /// Callback type for creating the map infos for the kernel parameters.
2753 /// \param CodeGenIP is the insertion point where code should be generated,
2754 /// if any.
2757
2758 /// Generator for '#omp target data'
2759 ///
2760 /// \param Loc The location where the target data construct was encountered.
2761 /// \param AllocaIP The insertion points to be used for alloca instructions.
2762 /// \param CodeGenIP The insertion point at which the target directive code
2763 /// should be placed.
2764 /// \param IsBegin If true, emits the begin mapper call; otherwise emits the
2765 /// end mapper call.
2766 /// \param DeviceID Stores the DeviceID from the device clause.
2767 /// \param IfCond Value which corresponds to the if clause condition.
2768 /// \param Info Stores all information related to the Target Data directive.
2769 /// \param GenMapInfoCB Callback that populates and returns the MapInfos.
2770 /// \param BodyGenCB Optional Callback to generate the region code.
2771 /// \param DeviceAddrCB Optional callback to generate code related to
2772 /// use_device_ptr and use_device_addr.
2773 /// \param CustomMapperCB Optional callback to generate code related to
2774 /// custom mappers.
2776 const LocationDescription &Loc, InsertPointTy AllocaIP,
2777 InsertPointTy CodeGenIP, Value *DeviceID, Value *IfCond,
2779 omp::RuntimeFunction *MapperFunc = nullptr,
2781 BodyGenTy BodyGenType)>
2782 BodyGenCB = nullptr,
2783 function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr,
2784 function_ref<Value *(unsigned int)> CustomMapperCB = nullptr,
2785 Value *SrcLocInfo = nullptr);
2786
2788 InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;
2789
2791 Argument &Arg, Value *Input, Value *&RetVal, InsertPointTy AllocaIP,
2792 InsertPointTy CodeGenIP)>;
2793
2794 /// Generator for '#omp target'
2795 ///
2796 /// \param Loc where the target data construct was encountered.
2797 /// \param CodeGenIP The insertion point where the call to the outlined
2798 /// function should be emitted.
2799 /// \param EntryInfo The entry information about the function.
2800 /// \param NumTeams Number of teams specified in the num_teams clause.
2801 /// \param NumThreads Number of threads specified in the thread_limit clause.
2802 /// \param Inputs The input values to the region that will be passed
2803 /// as arguments to the outlined function.
2804 /// \param BodyGenCB Callback that will generate the region code.
2805 /// \param ArgAccessorFuncCB Callback that will generate accessor
2806 /// instructions for passed-in target arguments where necessary.
2807 InsertPointTy createTarget(const LocationDescription &Loc,
2808                            OpenMPIRBuilder::InsertPointTy AllocaIP,
2809                            OpenMPIRBuilder::InsertPointTy CodeGenIP,
2810                            TargetRegionEntryInfo &EntryInfo, int32_t NumTeams,
2811                            int32_t NumThreads,
2812                            SmallVectorImpl<Value *> &Inputs,
2813                            GenMapInfoCallbackTy GenMapInfoCB,
2814                            TargetBodyGenCallbackTy BodyGenCB,
2815                            TargetGenArgAccessorsCallbackTy ArgAccessorFuncCB);
2816
2817 /// Returns __kmpc_for_static_init_* runtime function for the specified
2818 /// size \a IVSize and sign \a IVSigned. Will create a distribute call
2819 /// __kmpc_distribute_static_init* if \a IsGPUDistribute is set.
2820 FunctionCallee createForStaticInitFunction(unsigned IVSize, bool IVSigned,
2821 bool IsGPUDistribute);
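  // For reference (informal): e.g. createForStaticInitFunction(/*IVSize=*/4,
  // /*IVSigned=*/true, /*IsGPUDistribute=*/false) resolves to
  // __kmpc_for_static_init_4, while IVSize=8 with IVSigned=false resolves to
  // __kmpc_for_static_init_8u; the distribute variants follow the same
  // size/sign suffix convention.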
2822
2823 /// Returns __kmpc_dispatch_init_* runtime function for the specified
2824 /// size \a IVSize and sign \a IVSigned.
2825 FunctionCallee createDispatchInitFunction(unsigned IVSize, bool IVSigned);
2826
2827 /// Returns __kmpc_dispatch_next_* runtime function for the specified
2828 /// size \a IVSize and sign \a IVSigned.
2829 FunctionCallee createDispatchNextFunction(unsigned IVSize, bool IVSigned);
2830
2831 /// Returns __kmpc_dispatch_fini_* runtime function for the specified
2832 /// size \a IVSize and sign \a IVSigned.
2833 FunctionCallee createDispatchFiniFunction(unsigned IVSize, bool IVSigned);
2834
2835 /// Declarations for LLVM-IR types (simple, array, function and structure) are
2836 /// generated below. Their names are defined and used in OpenMPKinds.def. Here
2837 /// we provide the declarations, the initializeTypes function will provide the
2838 /// values.
2839 ///
2840 ///{
2841#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
2842#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
2843 ArrayType *VarName##Ty = nullptr; \
2844 PointerType *VarName##PtrTy = nullptr;
2845#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
2846 FunctionType *VarName = nullptr; \
2847 PointerType *VarName##Ptr = nullptr;
2848#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
2849 StructType *VarName = nullptr; \
2850 PointerType *VarName##Ptr = nullptr;
2851#include "llvm/Frontend/OpenMP/OMPKinds.def"
2852
2853 ///}
2854
2855private:
2856 /// Create all simple and struct types exposed by the runtime and remember
2857 /// the llvm::PointerTypes of them for easy access later.
2858 void initializeTypes(Module &M);
2859
2860 /// Common interface for generating entry calls for OMP Directives.
2861 /// If the directive has a region/body, it will set the insertion
2862 /// point to the body.
2863 ///
2864 /// \param OMPD Directive to generate entry blocks for
2865 /// \param EntryCall Call to the entry OMP Runtime Function
2866 /// \param ExitBB block where the region ends.
2867 /// \param Conditional Indicates whether the entry call result will be used
2868 /// to decide whether a thread will execute the
2869 /// body code or not.
2870 ///
2871 /// \return The insertion position in the exit block.
2872 InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
2873 BasicBlock *ExitBB,
2874 bool Conditional = false);
2875
2876 /// Common interface to finalize the region
2877 ///
2878 /// \param OMPD Directive to generate exiting code for
2879 /// \param FinIP Insertion point for emitting Finalization code and exit call
2880 /// \param ExitCall Call to the ending OMP Runtime Function
2881 /// \param HasFinalize Indicates whether the directive will require finalization
2882 /// and has a finalization callback on the stack that
2883 /// should be called.
2884 ///
2885 /// \return The insertion position in the exit block.
2886 InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
2887 InsertPointTy FinIP,
2888 Instruction *ExitCall,
2889 bool HasFinalize = true);
2890
2891 /// Common Interface to generate OMP inlined regions
2892 ///
2893 /// \param OMPD Directive to generate inlined region for
2894 /// \param EntryCall Call to the entry OMP Runtime Function
2895 /// \param ExitCall Call to the ending OMP Runtime Function
2896 /// \param BodyGenCB Body code generation callback.
2897 /// \param FiniCB Finalization Callback. Will be called when finalizing region
2898 /// \param Conditional Indicates whether the entry call result will be used
2899 /// to decide whether a thread will execute the
2900 /// body code or not.
2901 /// \param HasFinalize Indicates whether the directive will require finalization
2902 /// and has a finalization callback on the stack that
2903 /// should be called.
2904 /// \param IsCancellable If HasFinalize is set to true, indicates whether
2905 /// the directive should be cancellable.
2906 /// \return The insertion point after the region.
2907
2908 InsertPointTy
2909 EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
2910 Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
2911 FinalizeCallbackTy FiniCB, bool Conditional = false,
2912 bool HasFinalize = true, bool IsCancellable = false);
2913
2914 /// Get the platform-specific name separator.
2915 /// \param Parts different parts of the final name that need separation
2916 /// \param FirstSeparator First separator used between the initial two
2917 /// parts of the name.
2918 /// \param Separator separator used between all of the remaining consecutive
2919 /// parts of the name
2920 static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
2921 StringRef FirstSeparator,
2922 StringRef Separator);
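  // For example (illustrative): getNameWithSeparators({"omp", "outlined", "1"},
  // "$", ".") would produce "omp$outlined.1", with FirstSeparator joining the
  // first two parts and Separator joining the remaining ones as documented
  // above.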
2923
2924 /// Returns corresponding lock object for the specified critical region
2925 /// name. If the lock object does not exist it is created, otherwise the
2926 /// reference to the existing copy is returned.
2927 /// \param CriticalName Name of the critical region.
2928 ///
2929 Value *getOMPCriticalRegionLock(StringRef CriticalName);
2930
2931 /// Callback type for Atomic Expression update
2932 /// ex:
2933 /// \code{.cpp}
2934 /// unsigned x = 0;
2935 /// #pragma omp atomic update
2936 /// x = Expr(x_old); //Expr() is any legal operation
2937 /// \endcode
2938 ///
2939 /// \param XOld the value of the atomic memory address to use for update
2940 /// \param IRB reference to the IRBuilder to use
2941 ///
2942 /// \returns Value to update X to.
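  ///
  /// For illustration, a minimal sketch: the update `x = x + expr` could be
  /// expressed with a callback such as the following, where `Expr` is a
  /// Value* assumed to be provided by the caller:
  /// \code{.cpp}
  /// auto UpdateOp = [&](Value *XOld, IRBuilder<> &IRB) -> Value * {
  ///   return IRB.CreateAdd(XOld, Expr); // the new value to store into X
  /// };
  /// \endcode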
2943 using AtomicUpdateCallbackTy =
2944 const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
2945
2946private:
2947 enum AtomicKind { Read, Write, Update, Capture, Compare };
2948
2949 /// Determine whether to emit flush or not
2950 ///
2951 /// \param Loc The insert and source location description.
2952 /// \param AO The required atomic ordering
2953 /// \param AK The OpenMP atomic operation kind used.
2954 ///
2955 /// \returns whether a flush was emitted or not
2956 bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
2957 AtomicOrdering AO, AtomicKind AK);
2958
2959 /// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X
2960 /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
2961 /// Only Scalar data types.
2962 ///
2963 /// \param AllocaIP The insertion point to be used for alloca
2964 /// instructions.
2965 /// \param X The target atomic pointer to be updated
2966 /// \param XElemTy The element type of the atomic pointer.
2967 /// \param Expr The value to update X with.
2968 /// \param AO Atomic ordering of the generated atomic
2969 /// instructions.
2970 /// \param RMWOp The binary operation used for update. If the
2971 /// operation is not supported by atomicRMW,
2972 /// or belongs to {FADD, FSUB, BAD_BINOP},
2973 /// then a `cmpExch` based atomic will be generated.
2974 /// \param UpdateOp Code generator for complex expressions that cannot be
2975 /// expressed through atomicrmw instruction.
2976 /// \param VolatileX true if \a X is volatile.
2977 /// \param IsXBinopExpr true if \a X is the LHS in the RHS part of the
2978 /// update expression, false otherwise.
2979 /// (e.g. true for X = X BinOp Expr)
2980 ///
2981 /// \returns A pair of the old value of X before the update, and the value
2982 /// used for the update.
2983 std::pair<Value *, Value *>
2984 emitAtomicUpdate(InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
2985                  AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
2986                  AtomicUpdateCallbackTy &UpdateOp, bool VolatileX,
2987 bool IsXBinopExpr);
2988
2989 /// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2.
2990 ///
2991 /// \return The instruction.
2992 Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
2993 AtomicRMWInst::BinOp RMWOp);
2994
2995public:
2996 /// A struct to pack relevant information while generating atomic Ops
2997 struct AtomicOpValue {
2998 Value *Var = nullptr;
2999 Type *ElemTy = nullptr;
3000 bool IsSigned = false;
3001 bool IsVolatile = false;
3002 };
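  // Illustrative sketch: describing a signed, non-volatile 32-bit location
  // `XPtr` (a hypothetical Value*) for the atomic helpers below.
  //
  //   AtomicOpValue XVal{/*Var=*/XPtr, /*ElemTy=*/Builder.getInt32Ty(),
  //                      /*IsSigned=*/true, /*IsVolatile=*/false};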
3003
3004 /// Emit atomic Read for : V = X --- Only Scalar data types.
3005 ///
3006 /// \param Loc The insert and source location description.
3007 /// \param X The target pointer to be atomically read
3008 /// \param V Memory address where to store atomically read
3009 /// value
3010 /// \param AO Atomic ordering of the generated atomic
3011 /// instructions.
3012 ///
3013 /// \return Insertion point after generated atomic read IR.
3014 InsertPointTy createAtomicRead(const LocationDescription &Loc,
3015                                AtomicOpValue &X, AtomicOpValue &V,
3016                                AtomicOrdering AO);
3017
3018 /// Emit atomic write for : X = Expr --- Only Scalar data types.
3019 ///
3020 /// \param Loc The insert and source location description.
3021 /// \param X The target pointer to be atomically written to
3022 /// \param Expr The value to store.
3023 /// \param AO Atomic ordering of the generated atomic
3024 /// instructions.
3025 ///
3026 /// \return Insertion point after generated atomic Write IR.
3027 InsertPointTy createAtomicWrite(const LocationDescription &Loc,
3028                                 AtomicOpValue &X, Value *Expr,
3029                                 AtomicOrdering AO);
3030
3031 /// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X
3032 /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
3033 /// Only Scalar data types.
3034 ///
3035 /// \param Loc The insert and source location description.
3036 /// \param AllocaIP The insertion point to be used for alloca instructions.
3037 /// \param X The target atomic pointer to be updated
3038 /// \param Expr The value to update X with.
3039 /// \param AO Atomic ordering of the generated atomic instructions.
3040 /// \param RMWOp The binary operation used for update. If the operation
3041 /// is not supported by atomicRMW, or belongs to
3042 /// {FADD, FSUB, BAD_BINOP}, then a `cmpExch` based
3043 /// atomic will be generated.
3044 /// \param UpdateOp Code generator for complex expressions that cannot be
3045 /// expressed through atomicrmw instruction.
3046 /// \param IsXBinopExpr true if \a X is the LHS in the RHS part of the
3047 /// update expression, false otherwise
3048 /// (e.g. true for X = X BinOp Expr).
3049 ///
3050 /// \return Insertion point after generated atomic update IR.
3051 InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
3052                                  InsertPointTy AllocaIP, AtomicOpValue &X,
3053                                  Value *Expr, AtomicOrdering AO,
3054                                  AtomicRMWInst::BinOp RMWOp,
3055                                  AtomicUpdateCallbackTy &UpdateOp,
3056                                  bool IsXBinopExpr);
3057
3058 /// Emit atomic update for constructs: --- Only Scalar data types
3059 /// V = X; X = X BinOp Expr ,
3060 /// X = X BinOp Expr; V = X,
3061 /// V = X; X = Expr BinOp X,
3062 /// X = Expr BinOp X; V = X,
3063 /// V = X; X = UpdateOp(X),
3064 /// X = UpdateOp(X); V = X,
3065 ///
3066 /// \param Loc The insert and source location description.
3067 /// \param AllocaIP The insertion point to be used for alloca instructions.
3068 /// \param X The target atomic pointer to be updated
3069 /// \param V Memory address where to store captured value
3070 /// \param Expr The value to update X with.
3071 /// \param AO Atomic ordering of the generated atomic instructions
3072 /// \param RMWOp The binary operation used for update. If the
3073 /// operation is not supported by atomicRMW, or belongs to
3074 /// {FADD, FSUB, BAD_BINOP}, then a cmpExch based
3075 /// atomic will be generated.
3076 /// \param UpdateOp Code generator for complex expressions that cannot be
3077 /// expressed through atomicrmw instruction.
3078 /// \param UpdateExpr true if X is an in-place update of the form
3079 /// X = X BinOp Expr or X = Expr BinOp X
3080 /// \param IsXBinopExpr true if X is the LHS in the RHS part of the
3081 /// update expression, false otherwise.
3082 /// (e.g. true for X = X BinOp Expr)
3083 /// \param IsPostfixUpdate true if original value of 'x' must be stored in
3084 /// 'v', not an updated one.
3085 ///
3086 /// \return Insertion point after generated atomic capture IR.
3087 InsertPointTy createAtomicCapture(const LocationDescription &Loc,
3088                                   InsertPointTy AllocaIP,
3089                                   AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
3090                                   AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
3091                                   AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
3092                                   bool IsPostfixUpdate, bool IsXBinopExpr);
3093
3094 /// Emit atomic compare for constructs: --- Only scalar data types
3095 /// cond-expr-stmt:
3096 /// x = x ordop expr ? expr : x;
3097 /// x = expr ordop x ? expr : x;
3098 /// x = x == e ? d : x;
3099 /// x = e == x ? d : x; (this one is not in the spec)
3100 /// cond-update-stmt:
3101 /// if (x ordop expr) { x = expr; }
3102 /// if (expr ordop x) { x = expr; }
3103 /// if (x == e) { x = d; }
3104 /// if (e == x) { x = d; } (this one is not in the spec)
3105 /// conditional-update-capture-atomic:
3106 /// v = x; cond-update-stmt; (IsPostfixUpdate=true, IsFailOnly=false)
3107 /// cond-update-stmt; v = x; (IsPostfixUpdate=false, IsFailOnly=false)
3108 /// if (x == e) { x = d; } else { v = x; } (IsPostfixUpdate=false,
3109 /// IsFailOnly=true)
3110 /// r = x == e; if (r) { x = d; } (IsPostfixUpdate=false, IsFailOnly=false)
3111 /// r = x == e; if (r) { x = d; } else { v = x; } (IsPostfixUpdate=false,
3112 /// IsFailOnly=true)
3113 ///
3114 /// \param Loc The insert and source location description.
3115 /// \param X The target atomic pointer to be updated.
3116 /// \param V Memory address where to store captured value (for
3117 /// compare capture only).
3118 /// \param R Memory address where to store comparison result
3119 /// (for compare capture with '==' only).
3120 /// \param E The expected value ('e') for forms that use an
3121 /// equality comparison or an expression ('expr') for
3122 /// forms that use 'ordop' (logically an atomic maximum or
3123 /// minimum).
3124 /// \param D The desired value for forms that use an equality
3125 /// comparison. For forms that use 'ordop', it should be
3126 /// \p nullptr.
3127 /// \param AO Atomic ordering of the generated atomic instructions.
3128 /// \param Op Atomic compare operation. It can only be ==, <, or >.
3129 /// \param IsXBinopExpr True if the conditional statement is in the form where
3130 /// x is on LHS. It only matters for < or >.
3131 /// \param IsPostfixUpdate True if original value of 'x' must be stored in
3132 /// 'v', not an updated one (for compare capture
3133 /// only).
3134 /// \param IsFailOnly True if the original value of 'x' is stored to 'v'
3135 /// only when the comparison fails. This is only valid for
3136 /// the case where the comparison is '=='.
3137 ///
3138 /// \return Insertion point after generated atomic capture IR.
3139 InsertPointTy createAtomicCompare(const LocationDescription &Loc,
3140                                   AtomicOpValue &X, AtomicOpValue &V,
3141                                   AtomicOpValue &R, Value *E, Value *D,
3142                                   AtomicOrdering AO, omp::OMPAtomicCompareOp Op,
3143                                   bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly);
3144 InsertPointTy createAtomicCompare(const LocationDescription &Loc,
3145                                   AtomicOpValue &X, AtomicOpValue &V,
3146                                   AtomicOpValue &R, Value *E, Value *D,
3147                                   AtomicOrdering AO,
3148                                   omp::OMPAtomicCompareOp Op,
3149                                   bool IsXBinopExpr, bool IsPostfixUpdate,
3150                                   bool IsFailOnly, AtomicOrdering Failure);
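  // Illustrative mapping (a sketch, assuming omp::OMPAtomicCompareOp provides
  // an equality enumerator EQ): the C construct
  //
  //   #pragma omp atomic compare
  //   if (x == e) { x = d; }
  //
  // would be lowered by passing X for 'x', E for 'e', D for 'd' and
  // Op = omp::OMPAtomicCompareOp::EQ, with V.Var and R.Var left null for this
  // non-capturing form and IsPostfixUpdate/IsFailOnly both false.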
3151
3152 /// Create the control flow structure of a canonical OpenMP loop.
3153 ///
3154 /// The emitted loop will be disconnected, i.e. no edge to the loop's
3155 /// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
3156 /// IRBuilder location is not preserved.
3157 ///
3158 /// \param DL DebugLoc used for the instructions in the skeleton.
3159 /// \param TripCount Value to be used for the trip count.
3160 /// \param F Function in which to insert the BasicBlocks.
3161 /// \param PreInsertBefore Where to insert BBs that execute before the body,
3162 /// typically the body itself.
3163 /// \param PostInsertBefore Where to insert BBs that execute after the body.
3164 /// \param Name Base name used to derive BB
3165 /// and instruction names.
3166 ///
3167 /// \returns The CanonicalLoopInfo that represents the emitted loop.
3168 CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
3169                                       Function *F,
3170 BasicBlock *PreInsertBefore,
3171 BasicBlock *PostInsertBefore,
3172 const Twine &Name = {});
3173 /// OMP Offload Info Metadata name string
3174 const std::string ompOffloadInfoName = "omp_offload.info";
3175
3176 /// Loads all the offload entries information from the host IR
3177 /// metadata. This function is only meant to be used with device code
3178 /// generation.
3179 ///
3180 /// \param M Module to load Metadata info from. The module passed may be
3181 /// loaded from a bitcode file, i.e., different from the OpenMPIRBuilder::M module.
3182 void loadOffloadInfoMetadata(Module &M);
3183
3184 /// Loads all the offload entries information from the host IR
3185 /// metadata read from the file passed in as the HostFilePath argument. This
3186 /// function is only meant to be used with device code generation.
3187 ///
3188 /// \param HostFilePath The path to the host IR file,
3189 /// used to load in offload metadata for the device, allowing host and device
3190 /// to maintain the same metadata mapping.
3191 void loadOffloadInfoMetadata(StringRef HostFilePath);
3192
3193 /// Gets (if a variable with the given name already exists) or creates an
3194 /// internal global variable with the specified Name. The created variable has
3195 /// linkage CommonLinkage by default and is initialized with a null value.
3196 /// \param Ty Type of the global variable. If it already exists, the type
3197 /// must be the same.
3198 /// \param Name Name of the variable.
3199 GlobalVariable *getOrCreateInternalVariable(Type *Ty, const StringRef &Name,
3200                                             unsigned AddressSpace = 0);
3201};
3202
3203/// Class to represent the control flow structure of an OpenMP canonical loop.
3204///
3205/// The control-flow structure is standardized for easy consumption by
3206/// directives associated with loops. For instance, the worksharing-loop
3207/// construct may change this control flow such that each loop iteration is
3208/// executed on only one thread. The constraints of a canonical loop in brief
3209/// are:
3210///
3211/// * The number of loop iterations must have been computed before entering the
3212/// loop.
3213///
3214/// * Has an (unsigned) logical induction variable that starts at zero and
3215/// increments by one.
3216///
3217/// * The loop's CFG itself has no side-effects. The OpenMP specification
3218/// itself allows side-effects, but the order in which they happen, including
3219/// how often or whether at all, is unspecified. We expect that the frontend
3220/// will emit those side-effect instructions somewhere (e.g. before the loop)
3221/// such that the CanonicalLoopInfo itself can be side-effect free.
3222///
3223/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
3224/// execution of a loop body that satifies these constraints. It does NOT
3225/// represent arbitrary SESE regions that happen to contain a loop. Do not use
3226/// CanonicalLoopInfo for such purposes.
3227///
3228/// The control flow can be described as follows:
3229///
3230/// Preheader
3231/// |
3232/// /-> Header
3233/// | |
3234/// | Cond---\
3235/// | | |
3236/// | Body |
3237/// | | | |
3238/// | <...> |
3239/// | | | |
3240/// \--Latch |
3241/// |
3242/// Exit
3243/// |
3244/// After
3245///
3246/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
3247/// including) and end at AfterIP (at the After's first instruction, excluding).
3248/// That is, instructions in the Preheader and After blocks (except the
3249/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
3250/// side-effects. Typically, the Preheader is used to compute the loop's trip
3251/// count. The instructions from BodyIP (at the Body block's first instruction,
3252/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
3253/// control and thus can have side-effects. The body block is the single entry
3254/// point into the loop body, which may contain arbitrary control flow as long
3255/// as all control paths eventually branch to the Latch block.
3256///
3257/// TODO: Consider adding another standardized BasicBlock between Body CFG and
3258/// Latch to guarantee that there is only a single edge to the latch. It would
3259/// make loop transformations easier to not needing to consider multiple
3260/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
3261/// an equivalant to PreheaderIP, AfterIP and BodyIP for inserting code that
3262/// executes after each body iteration.
3263///
3264/// There must be no loop-carried dependencies through llvm::Values. This is
3265/// equivalant to that the Latch has no PHINode and the Header's only PHINode is
3266/// for the induction variable.
3267///
3268/// All code in Header, Cond, Latch and Exit (plus the terminator of the
3269/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
3270/// by assertOK(). They are expected to not be modified unless explicitly
3271/// modifying the CanonicalLoopInfo through a methods that applies a OpenMP
3272/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
3273/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
3274/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
3275/// anymore as its underlying control flow may not exist anymore.
3276/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
3277/// may also return a new CanonicalLoopInfo that can be passed to other
3278/// loop-associated construct implementing methods. These loop-transforming
3279/// methods may either create a new CanonicalLoopInfo usually using
3280/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
3281/// modify one of the input CanonicalLoopInfo and return it as representing the
3282/// modified loop. What is done is an implementation detail of
3283/// transformation-implementing method and callers should always assume that the
3284/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
3285/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
3286/// created by createCanonicalLoop, such that transforming methods do not have
3287/// to special case where the CanonicalLoopInfo originated from.
3288///
3289/// Generally, methods consuming CanonicalLoopInfo do not need an
3290/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
3291/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
3292/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
3293/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
3294/// any InsertPoint in the Preheader, After or Block can still be used after
3295/// calling such a method.
3296///
3297/// TODO: Provide mechanisms for exception handling and cancellation points.
3298///
3299/// Defined outside OpenMPIRBuilder because nested classes cannot be
3300/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
3301class CanonicalLoopInfo {
3302 friend class OpenMPIRBuilder;
3303
3304private:
3305 BasicBlock *Header = nullptr;
3306 BasicBlock *Cond = nullptr;
3307 BasicBlock *Latch = nullptr;
3308 BasicBlock *Exit = nullptr;
3309
3310 /// Add the control blocks of this loop to \p BBs.
3311 ///
3312 /// This does not include any block from the body, including the one returned
3313 /// by getBody().
3314 ///
3315 /// FIXME: This currently includes the Preheader and After blocks even though
3316 /// their content is (mostly) not under CanonicalLoopInfo's control.
3317 /// Re-evaluate whether this makes sense.
3318 void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
3319
3320 /// Sets the number of loop iterations to the given value. This value must be
3321 /// valid in the condition block (i.e., defined in the preheader) and is
3322 /// interpreted as an unsigned integer.
3323 void setTripCount(Value *TripCount);
3324
3325 /// Replace all uses of the canonical induction variable in the loop body with
3326 /// a new one.
3327 ///
3328 /// The intended use case is to update the induction variable for an updated
3329 /// iteration space such that it can stay normalized in the 0...tripcount-1
3330 /// range.
3331 ///
3332 /// The \p Updater is called with the (presumably updated) current normalized
3333 /// induction variable and is expected to return the value that uses of the
3334 /// pre-updated induction values should use instead, typically dependent on
3335 /// the new induction variable. This is a lambda (instead of e.g. just passing
3336 /// the new value) to be able to distinguish the uses of the pre-updated
3337 /// induction variable and uses of the induction variable to compute the
3338 /// updated induction variable value.
3339 void mapIndVar(llvm::function_ref<Value *(Instruction *)> Updater);
3340
3341public:
3342 /// Returns whether this object currently represents the IR of a loop. If
3343 /// returning false, it may have been consumed by a loop transformation or not
3344 /// been initialized. Do not use it in this case.
3345 bool isValid() const { return Header; }
3346
3347 /// The preheader ensures that there is only a single edge entering the loop.
3348 /// Code that must be executed before any loop iteration can be emitted here,
3349 /// such as computing the loop trip count and begin lifetime markers. Code in
3350 /// the preheader is not considered part of the canonical loop.
3351 BasicBlock *getPreheader() const;
3352
3353 /// The header is the entry for each iteration. In the canonical control flow,
3354 /// it only contains the PHINode for the induction variable.
3355 BasicBlock *getHeader() const {
3356 assert(isValid() && "Requires a valid canonical loop");
3357 return Header;
3358 }
3359
3360 /// The condition block computes whether there is another loop iteration. If
3361 /// yes, branches to the body; otherwise to the exit block.
3362 BasicBlock *getCond() const {
3363 assert(isValid() && "Requires a valid canonical loop");
3364 return Cond;
3365 }
3366
3367 /// The body block is the single entry for a loop iteration and not controlled
3368 /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
3369 /// eventually branch to the \p Latch block.
3370 BasicBlock *getBody() const {
3371 assert(isValid() && "Requires a valid canonical loop");
3372 return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
3373 }
3374
3375 /// Reaching the latch indicates the end of the loop body code. In the
3376 /// canonical control flow, it only contains the increment of the induction
3377 /// variable.
3378 BasicBlock *getLatch() const {
3379 assert(isValid() && "Requires a valid canonical loop");
3380 return Latch;
3381 }
3382
3383 /// Reaching the exit indicates no more iterations are being executed.
3384 BasicBlock *getExit() const {
3385 assert(isValid() && "Requires a valid canonical loop");
3386 return Exit;
3387 }
3388
3389 /// The after block is intended for clean-up code such as lifetime end
3390 /// markers. It is separate from the exit block to ensure that, analogous to the
3391 /// preheader, it has just a single entry edge and is free from PHI
3392 /// nodes should there be multiple loop exits (such as from break
3393 /// statements/cancellations).
3394 BasicBlock *getAfter() const {
3395 assert(isValid() && "Requires a valid canonical loop");
3396 return Exit->getSingleSuccessor();
3397 }
3398
3399 /// Returns the llvm::Value containing the number of loop iterations. It must
3400 /// be valid in the preheader and always interpreted as an unsigned integer of
3401 /// any bit-width.
3402 Value *getTripCount() const {
3403 assert(isValid() && "Requires a valid canonical loop");
3404 Instruction *CmpI = &Cond->front();
3405 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
3406 return CmpI->getOperand(1);
3407 }
3408
3409 /// Returns the instruction representing the current logical induction
3410 /// variable. Always unsigned, always starting at 0 with an increment of one.
3411 Instruction *getIndVar() const {
3412 assert(isValid() && "Requires a valid canonical loop");
3413 Instruction *IndVarPHI = &Header->front();
3414 assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
3415 return IndVarPHI;
3416 }
3417
3418 /// Return the type of the induction variable (and the trip count).
3419 Type *getIndVarType() const {
3420 assert(isValid() && "Requires a valid canonical loop");
3421 return getIndVar()->getType();
3422 }
3423
3424 /// Return the insertion point for user code before the loop.
3425 OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
3426 assert(isValid() && "Requires a valid canonical loop");
3427 BasicBlock *Preheader = getPreheader();
3428 return {Preheader, std::prev(Preheader->end())};
3429 };
3430
3431 /// Return the insertion point for user code in the body.
3432 OpenMPIRBuilder::InsertPointTy getBodyIP() const {
3433 assert(isValid() && "Requires a valid canonical loop");
3434 BasicBlock *Body = getBody();
3435 return {Body, Body->begin()};
3436 };
3437
3438 /// Return the insertion point for user code after the loop.
3439 OpenMPIRBuilder::InsertPointTy getAfterIP() const {
3440 assert(isValid() && "Requires a valid canonical loop");
3441 BasicBlock *After = getAfter();
3442 return {After, After->begin()};
3443 };
3444
3445 Function *getFunction() const {
3446 assert(isValid() && "Requires a valid canonical loop");
3447 return Header->getParent();
3448 }
3449
3450 /// Consistency self-check.
3451 void assertOK() const;
3452
3453 /// Invalidate this loop. That is, the underlying IR does not fulfill the
3454 /// requirements of an OpenMP canonical loop anymore.
3455 void invalidate();
3456};
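// Illustrative usage sketch (assumptions: `OMPBuilder`, `Builder`, `Loc` and
// `TripCount` exist in the caller; the body callback shape follows
// OpenMPIRBuilder::LoopBodyGenCallbackTy):
//
//   CanonicalLoopInfo *CLI = OMPBuilder.createCanonicalLoop(
//       Loc,
//       [&](OpenMPIRBuilder::InsertPointTy CodeGenIP, Value *IV) {
//         Builder.restoreIP(CodeGenIP);
//         // ... emit one iteration of the loop body using the normalized IV ...
//       },
//       TripCount);
//   assert(CLI->isValid());
//   // Code before/after the loop goes at the standardized insertion points.
//   OpenMPIRBuilder::InsertPointTy PreIP = CLI->getPreheaderIP();
//   OpenMPIRBuilder::InsertPointTy PostIP = CLI->getAfterIP();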
3457
3458} // end namespace llvm
3459
3460#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
arc branch finalize
This file defines the BumpPtrAllocator interface.
BlockVerifier::State From
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
uint64_t Addr
std::string Name
uint64_t Size
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
Hexagon Hardware Loops
#define F(x, y, z)
Definition: MD5.cpp:55
#define G(x, y, z)
Definition: MD5.cpp:56
This file defines constans and helpers used when dealing with OpenMP.
Provides definitions for Target specific Grid Values.
const SmallVectorImpl< MachineOperand > & Cond
Basic Register Allocator
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Value * RHS
Value * LHS
an instruction to allocate memory on the stack
Definition: Instructions.h:60
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:707
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator end()
Definition: BasicBlock.h:451
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:438
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:209
Class to represented the control flow structure of an OpenMP canonical loop.
Value * getTripCount() const
Returns the llvm::Value containing the number of loop iterations.
BasicBlock * getHeader() const
The header is the entry for each iteration.
void assertOK() const
Consistency self-check.
Type * getIndVarType() const
Return the type of the induction variable (and the trip count).
BasicBlock * getBody() const
The body block is the single entry for a loop iteration and not controlled by CanonicalLoopInfo.
bool isValid() const
Returns whether this object currently represents the IR of a loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const
Return the insertion point for user code after the loop.
OpenMPIRBuilder::InsertPointTy getBodyIP() const
Return the insertion point for user code in the body.
BasicBlock * getAfter() const
The after block is intended for clean-up code such as lifetime end markers.
Function * getFunction() const
void invalidate()
Invalidate this loop.
BasicBlock * getLatch() const
Reaching the latch indicates the end of the loop body code.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const
Return the insertion point for user code before the loop.
BasicBlock * getCond() const
The condition block computes whether there is another loop iteration.
BasicBlock * getExit() const
Reaching the exit indicates no more iterations are being executed.
BasicBlock * getPreheader() const
The preheader ensures that there is only a single edge entering the loop.
Instruction * getIndVar() const
Returns the instruction representing the current logical induction variable.
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:168
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition: GlobalValue.h:51
InsertPoint - A saved insertion point.
Definition: IRBuilder.h:255
BasicBlock * getBlock() const
Definition: IRBuilder.h:270
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:92
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
Definition: IRBuilder.h:218
InsertPoint saveIP() const
Returns the current insert point.
Definition: IRBuilder.h:275
void restoreIP(InsertPoint IP)
Sets the current insert point to a previously-saved location.
Definition: IRBuilder.h:287
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2664
Class to represent integer types.
Definition: DerivedTypes.h:40
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
OffloadEntryInfoDeviceGlobalVar(unsigned Order, OMPTargetGlobalVarEntryKind Flags)
Definition: OMPIRBuilder.h:389
OffloadEntryInfoDeviceGlobalVar(unsigned Order, Constant *Addr, int64_t VarSize, OMPTargetGlobalVarEntryKind Flags, GlobalValue::LinkageTypes Linkage, const std::string &VarName)
Definition: OMPIRBuilder.h:392
static bool classof(const OffloadEntryInfo *Info)
Definition: OMPIRBuilder.h:407
static bool classof(const OffloadEntryInfo *Info)
Definition: OMPIRBuilder.h:314
OffloadEntryInfoTargetRegion(unsigned Order, Constant *Addr, Constant *ID, OMPTargetRegionEntryKind Flags)
Definition: OMPIRBuilder.h:301
@ OffloadingEntryInfoTargetRegion
Entry is a target region.
Definition: OMPIRBuilder.h:235
@ OffloadingEntryInfoDeviceGlobalVar
Entry is a declare target variable.
Definition: OMPIRBuilder.h:237
OffloadingEntryInfoKinds getKind() const
Definition: OMPIRBuilder.h:253
OffloadEntryInfo(OffloadingEntryInfoKinds Kind)
Definition: OMPIRBuilder.h:244
static bool classof(const OffloadEntryInfo *Info)
Definition: OMPIRBuilder.h:261
OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags)
Definition: OMPIRBuilder.h:245
Class that manages information about offload code regions and data.
Definition: OMPIRBuilder.h:223
function_ref< void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)> OffloadDeviceGlobalVarEntryInfoActTy
Applies action Action on all registered entries.
Definition: OMPIRBuilder.h:429
OMPTargetDeviceClauseKind
Kind of device clause for declare target variables and functions NOTE: Currently not used as a part o...
Definition: OMPIRBuilder.h:368
@ OMPTargetDeviceClauseNoHost
The target is marked for non-host devices.
Definition: OMPIRBuilder.h:372
@ OMPTargetDeviceClauseAny
The target is marked for all devices.
Definition: OMPIRBuilder.h:370
@ OMPTargetDeviceClauseNone
The target is marked as having no clause.
Definition: OMPIRBuilder.h:376
@ OMPTargetDeviceClauseHost
The target is marked for host devices.
Definition: OMPIRBuilder.h:374
void registerDeviceGlobalVarEntryInfo(StringRef VarName, Constant *Addr, int64_t VarSize, OMPTargetGlobalVarEntryKind Flags, GlobalValue::LinkageTypes Linkage)
Register device global variable entry.
void initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order)
Initialize device global variable entry.
void actOnDeviceGlobalVarEntriesInfo(const OffloadDeviceGlobalVarEntryInfoActTy &Action)
OMPTargetRegionEntryKind
Kind of the target registry entry.
Definition: OMPIRBuilder.h:288
@ OMPTargetRegionEntryTargetRegion
Mark the entry as target region.
Definition: OMPIRBuilder.h:290
OffloadEntriesInfoManager(OpenMPIRBuilder *builder)
Definition: OMPIRBuilder.h:281
void getTargetRegionEntryFnName(SmallVectorImpl< char > &Name, const TargetRegionEntryInfo &EntryInfo)
bool hasTargetRegionEntryInfo(TargetRegionEntryInfo EntryInfo, bool IgnoreAddressId=false) const
Return true if a target region entry with the provided information exists.
void registerTargetRegionEntryInfo(TargetRegionEntryInfo EntryInfo, Constant *Addr, Constant *ID, OMPTargetRegionEntryKind Flags)
Register target region entry.
void actOnTargetRegionEntriesInfo(const OffloadTargetRegionEntryInfoActTy &Action)
unsigned size() const
Return number of entries defined so far.
Definition: OMPIRBuilder.h:279
void initializeTargetRegionEntryInfo(const TargetRegionEntryInfo &EntryInfo, unsigned Order)
Initialize target region entry.
OMPTargetGlobalVarEntryKind
Kind of the global variable entry..
Definition: OMPIRBuilder.h:348
@ OMPTargetGlobalVarEntryEnter
Mark the entry as a declare target enter.
Definition: OMPIRBuilder.h:354
@ OMPTargetGlobalVarEntryNone
Mark the entry as having no declare target entry kind.
Definition: OMPIRBuilder.h:356
@ OMPTargetGlobalRegisterRequires
Mark the entry as a register requires global.
Definition: OMPIRBuilder.h:360
@ OMPTargetGlobalVarEntryIndirect
Mark the entry as a declare target indirect global.
Definition: OMPIRBuilder.h:358
@ OMPTargetGlobalVarEntryLink
Mark the entry as a to declare target link.
Definition: OMPIRBuilder.h:352
@ OMPTargetGlobalVarEntryTo
Mark the entry as a to declare target.
Definition: OMPIRBuilder.h:350
function_ref< void(const TargetRegionEntryInfo &EntryInfo, const OffloadEntryInfoTargetRegion &)> OffloadTargetRegionEntryInfoActTy
brief Applies action Action on all registered entries.
Definition: OMPIRBuilder.h:339
bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const
Checks if the variable with the given name has been registered already.
Definition: OMPIRBuilder.h:424
bool empty() const
Return true if a there are no entries defined.
Captures attributes that affect generating LLVM-IR using the OpenMPIRBuilder and related classes.
Definition: OMPIRBuilder.h:85
void setIsGPU(bool Value)
Definition: OMPIRBuilder.h:178
std::optional< bool > IsTargetDevice
Flag to define whether to generate code for the role of the OpenMP host (if set to false) or device (...
Definition: OMPIRBuilder.h:91
std::optional< bool > IsGPU
Flag for specifying if the compilation is done for an accelerator.
Definition: OMPIRBuilder.h:101
void setGridValue(omp::GV G)
Definition: OMPIRBuilder.h:183
std::optional< StringRef > FirstSeparator
First separator used between the initial two parts of a name.
Definition: OMPIRBuilder.h:110
StringRef separator() const
Definition: OMPIRBuilder.h:169
int64_t getRequiresFlags() const
Returns requires directive clauses as flags compatible with those expected by libomptarget.
void setFirstSeparator(StringRef FS)
Definition: OMPIRBuilder.h:181
StringRef firstSeparator() const
Definition: OMPIRBuilder.h:159
std::optional< bool > OpenMPOffloadMandatory
Flag for specifying if offloading is mandatory.
Definition: OMPIRBuilder.h:107
std::optional< bool > EmitLLVMUsedMetaInfo
Flag for specifying if LLVMUsed information should be emitted.
Definition: OMPIRBuilder.h:104
omp::GV getGridValue() const
Definition: OMPIRBuilder.h:142
void setHasRequiresReverseOffload(bool Value)
bool hasRequiresUnifiedSharedMemory() const
void setHasRequiresUnifiedSharedMemory(bool Value)
std::optional< StringRef > Separator
Separator used between all of the rest consecutive parts of s name.
Definition: OMPIRBuilder.h:112
bool hasRequiresDynamicAllocators() const
bool openMPOffloadMandatory() const
Definition: OMPIRBuilder.h:136
void setHasRequiresUnifiedAddress(bool Value)
void setOpenMPOffloadMandatory(bool Value)
Definition: OMPIRBuilder.h:180
void setIsTargetDevice(bool Value)
Definition: OMPIRBuilder.h:177
void setSeparator(StringRef S)
Definition: OMPIRBuilder.h:182
void setHasRequiresDynamicAllocators(bool Value)
void setEmitLLVMUsed(bool Value=true)
Definition: OMPIRBuilder.h:179
std::optional< omp::GV > GridValue
Definition: OMPIRBuilder.h:115
bool hasRequiresReverseOffload() const
bool hasRequiresUnifiedAddress() const
Struct that keeps the information that should be kept throughout a 'target data' region.
TargetDataInfo(bool RequiresDevicePointerInfo, bool SeparateBeginEndCalls)
SmallMapVector< const Value *, std::pair< Value *, Value * >, 4 > DevicePtrInfoMap
void clearArrayInfo()
Clear information about the data arrays.
unsigned NumberOfPtrs
The total number of pointers passed to the runtime library.
bool isValid()
Return true if the current target data information has valid arrays.
bool HasMapper
Indicate whether any user-defined mapper exists.
An interface to create LLVM-IR for OpenMP directives.
Definition: OMPIRBuilder.h:465
Constant * getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize, omp::IdentFlag Flags=omp::IdentFlag(0), unsigned Reserve2Flags=0)
Return an ident_t* encoding the source location SrcLocStr and Flags.
FunctionCallee getOrCreateRuntimeFunction(Module &M, omp::RuntimeFunction FnID)
Return the function declaration for the runtime function with FnID.
std::function< void(InsertPointTy CodeGenIP)> FinalizeCallbackTy
Callback type for variable finalization (think destructors).
Definition: OMPIRBuilder.h:511
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD, int32_t MinThreadsVal=0, int32_t MaxThreadsVal=0, int32_t MinTeamsVal=0, int32_t MaxTeamsVal=0)
The omp target interface.
void emitIfClause(Value *Cond, BodyGenCallbackTy ThenGen, BodyGenCallbackTy ElseGen, InsertPointTy AllocaIP={})
Emits code for OpenMP 'if' clause using specified BodyGenCallbackTy Here is the logic: if (Cond) { Th...
ReductionGenCBKind
Enum class for the RedctionGen CallBack type to be used.
CanonicalLoopInfo * collapseLoops(DebugLoc DL, ArrayRef< CanonicalLoopInfo * > Loops, InsertPointTy ComputeIP)
Collapse a loop nest into a single loop.
function_ref< void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)> BodyGenCallbackTy
Callback type for body (=inner region) code generation.
Definition: OMPIRBuilder.h:563
void createTaskyield(const LocationDescription &Loc)
Generator for '#omp taskyield'.
void emitBranch(BasicBlock *Target)
InsertPointTy createAtomicWrite(const LocationDescription &Loc, AtomicOpValue &X, Value *Expr, AtomicOrdering AO)
Emit atomic write for : X = Expr — Only Scalar data types.
static void writeThreadBoundsForKernel(const Triple &T, Function &Kernel, int32_t LB, int32_t UB)
EvalKind
Enum class for reduction evaluation types scalar, complex and aggregate.
InsertPointTy createCritical(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst)
Generator for '#omp critical'.
static TargetRegionEntryInfo getTargetEntryUniqueInfo(FileIdentifierInfoCallbackTy CallBack, StringRef ParentName="")
Creates a unique info for a target entry when provided a filename and line number from.
void emitTaskwaitImpl(const LocationDescription &Loc)
Generate a taskwait runtime call.
Constant * registerTargetRegionFunction(TargetRegionEntryInfo &EntryInfo, Function *OutlinedFunction, StringRef EntryFnName, StringRef EntryFnIDName)
Registers the given function and sets up the attribtues of the function Returns the FunctionID.
InsertPointTy createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, AtomicOpValue &V, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr)
Emit atomic update for constructs: — Only Scalar data types V = X; X = X BinOp Expr ,...
void initialize()
Initialize the internal state, this will put structures types and potentially other helpers into the ...
void createTargetDeinit(const LocationDescription &Loc, int32_t TeamsReductionDataSize=0, int32_t TeamsReductionBufferLength=1024)
Create a runtime call for kmpc_target_deinit.
std::function< void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)> StorableBodyGenCallbackTy
Definition: OMPIRBuilder.h:570
CanonicalLoopInfo * createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *TripCount, const Twine &Name="loop")
Generator for the control flow structure of an OpenMP canonical loop.
function_ref< InsertPointTy(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original, Value &Inner, Value *&ReplVal)> PrivatizeCallbackTy
Callback type for variable privatization (think copy & default constructor).
Definition: OMPIRBuilder.h:603
void loadOffloadInfoMetadata(Module &M)
Loads all the offload entries information from the host IR metadata.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr)
Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X For complex Operations: X = ...
void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop)
Fully unroll a loop.
void emitFlush(const LocationDescription &Loc)
Generate a flush runtime call.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive Kind, bool ForceSimpleCall=false, bool CheckCancelFlag=true)
Emitter methods for OpenMP directives.
InsertPointTy emitKernelLaunch(const LocationDescription &Loc, Function *OutlinedFn, Value *OutlinedFnID, EmitFallbackCallbackTy EmitTargetCallFallbackCB, TargetKernelArgs &Args, Value *DeviceID, Value *RTLoc, InsertPointTy AllocaIP)
Generate a target region entry call and host fallback call.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition, omp::Directive CanceledDirective)
Generator for '#omp cancel'.
static std::pair< int32_t, int32_t > readThreadBoundsForKernel(const Triple &T, Function &Kernel)
}
OpenMPIRBuilderConfig Config
The OpenMPIRBuilder Configuration.
CallInst * createOMPInteropDestroy(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause)
Create a runtime call for __tgt_interop_destroy.
InsertPointTy createAtomicRead(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOrdering AO)
Emit atomic Read for : V = X — Only Scalar data types.
std::function< void(EmitMetadataErrorKind, TargetRegionEntryInfo)> EmitMetadataErrorReportFunctionTy
Callback function type.
void setConfig(OpenMPIRBuilderConfig C)
Definition: OMPIRBuilder.h:480
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsThreads)
Generator for '#omp ordered [threads | simd]'.
OpenMPIRBuilder::InsertPointTy createTargetData(const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value *DeviceID, Value *IfCond, TargetDataInfo &Info, GenMapInfoCallbackTy GenMapInfoCB, omp::RuntimeFunction *MapperFunc=nullptr, function_ref< InsertPointTy(InsertPointTy CodeGenIP, BodyGenTy BodyGenType)> BodyGenCB=nullptr, function_ref< void(unsigned int, Value *)> DeviceAddrCB=nullptr, function_ref< Value *(unsigned int)> CustomMapperCB=nullptr, Value *SrcLocInfo=nullptr)
Generator for '#omp target data'.
std::forward_list< CanonicalLoopInfo > LoopInfos
Collection of owned canonical loop objects that eventually need to be free'd.
void createTaskwait(const LocationDescription &Loc)
Generator for '#omp taskwait'.
CanonicalLoopInfo * createLoopSkeleton(DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore, BasicBlock *PostInsertBefore, const Twine &Name={})
Create the control flow structure of a canonical OpenMP loop.
std::string createPlatformSpecificName(ArrayRef< StringRef > Parts) const
Get the create a name using the platform specific separators.
FunctionCallee createDispatchNextFunction(unsigned IVSize, bool IVSigned)
Returns __kmpc_dispatch_next_* runtime function for the specified size IVSize and sign IVSigned.
static void getKernelArgsVector(TargetKernelArgs &KernelArgs, IRBuilderBase &Builder, SmallVector< Value * > &ArgsVector)
Create the kernel args vector used by emitTargetKernel.
void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop)
Fully or partially unroll a loop.
omp::OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position)
Get OMP_MAP_MEMBER_OF flag with extra bits reserved based on the position given.
InsertPointTy createReductionsGPU(const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef< ReductionInfo > ReductionInfos, bool IsNoWait=false, bool IsTeamsReduction=false, bool HasDistribute=false, ReductionGenCBKind ReductionGenCBKind=ReductionGenCBKind::MLIR, std::optional< omp::GV > GridValue={}, unsigned ReductionBufNum=1024, Value *SrcLocInfo=nullptr)
Design of OpenMP reductions on the GPU.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn)
Add attributes known for FnID to Fn.
Module & M
The underlying LLVM-IR module.
StringMap< Constant * > SrcLocStrMap
Map to remember source location strings.
void createMapperAllocas(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumOperands, struct MapperAllocas &MapperAllocas)
Create the allocas instruction used in call to mapper functions.
Constant * getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize)
Return the (LLVM-IR) string describing the source location LocStr.
void addOutlineInfo(OutlineInfo &&OI)
Add a new region that will be outlined later.
FunctionCallee createDispatchFiniFunction(unsigned IVSize, bool IVSigned)
Returns __kmpc_dispatch_fini_* runtime function for the specified size IVSize and sign IVSigned.
void emitOffloadingArraysArgument(IRBuilderBase &Builder, OpenMPIRBuilder::TargetDataRTArgs &RTArgs, OpenMPIRBuilder::TargetDataInfo &Info, bool EmitDebug=false, bool ForEndCall=false)
Emit the arguments to be passed to the runtime library based on the arrays of base pointers,...
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor, CanonicalLoopInfo **UnrolledCLI)
Partially unroll a loop.
InsertPointTy createSections(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< StorableBodyGenCallbackTy > SectionCBs, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait)
Generator for '#omp sections'.
InsertPointTy createTask(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, bool Tied=true, Value *Final=nullptr, Value *IfCondition=nullptr, SmallVector< DependData > Dependencies={})
Generator for #omp task
void emitTaskyieldImpl(const LocationDescription &Loc)
Generate a taskyield runtime call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc, Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg, struct MapperAllocas &MapperAllocas, int64_t DeviceID, unsigned NumOperands)
Create the call for the target mapper function.
InsertPointTy createAtomicCompare(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOpValue &R, Value *E, Value *D, AtomicOrdering AO, omp::OMPAtomicCompareOp Op, bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly)
Emit atomic compare for constructs: — Only scalar data types cond-expr-stmt: x = x ordop expr ?...
InsertPointTy createOrderedDepend(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumLoops, ArrayRef< llvm::Value * > StoreValues, const Twine &Name, bool IsDependSource)
Generator for '#omp ordered depend (source | sink)'.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, llvm::IntegerType *IntPtrTy, bool BranchtoEnd=true)
Generate conditional branch and relevant BasicBlocks through which private threads copy the 'copyin' ...
void emitOffloadingArrays(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo, TargetDataInfo &Info, bool IsNonContiguous=false, function_ref< void(unsigned int, Value *)> DeviceAddrCB=nullptr, function_ref< Value *(unsigned int)> CustomMapperCB=nullptr)
Emit the arrays used to pass the captures and map information to the offloading runtime library.
SmallVector< FinalizationInfo, 8 > FinalizationStack
The finalization stack made up of finalize callbacks currently in-flight, wrapped into FinalizationIn...
std::vector< CanonicalLoopInfo * > tileLoops(DebugLoc DL, ArrayRef< CanonicalLoopInfo * > Loops, ArrayRef< Value * > TileSizes)
Tile a loop nest.
CallInst * createOMPInteropInit(const LocationDescription &Loc, Value *InteropVar, omp::OMPInteropType InteropType, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause)
Create a runtime call for __tgt_interop_init.
SmallVector< OutlineInfo, 16 > OutlineInfos
Collection of regions that need to be outlined during finalization.
Function * getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID)
const Triple T
The target triple of the underlying module.
DenseMap< std::pair< Constant *, uint64_t >, Constant * > IdentMap
Map to remember existing ident_t*.
CallInst * createOMPFree(const LocationDescription &Loc, Value *Addr, Value *Allocator, std::string Name="")
Create a runtime call for kmpc_free.
FunctionCallee createForStaticInitFunction(unsigned IVSize, bool IVSigned, bool IsGPUDistribute)
Returns __kmpc_for_static_init_* runtime function for the specified size IVSize and sign IVSigned.
CallInst * createOMPAlloc(const LocationDescription &Loc, Value *Size, Value *Allocator, std::string Name="")
Create a runtime call for kmpc_Alloc.
void emitNonContiguousDescriptor(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo, TargetDataInfo &Info)
Emit an array of struct descriptors to be assigned to the offload args.
InsertPointTy createSection(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB)
Generator for '#omp section'.
InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, bool NeedsBarrier, llvm::omp::ScheduleKind SchedKind=llvm::omp::OMP_SCHEDULE_Default, Value *ChunkSize=nullptr, bool HasSimdModifier=false, bool HasMonotonicModifier=false, bool HasNonmonotonicModifier=false, bool HasOrderedClause=false, omp::WorksharingLoopType LoopType=omp::WorksharingLoopType::ForStaticLoop)
Modifies the canonical loop to be a workshare loop.
std::function< InsertPointTy(InsertPointTy CodeGenIP, unsigned Index, Value **LHS, Value **RHS, Function *CurFn)> ReductionGenClangCBTy
ReductionGen CallBack for Clang.
void emitBlock(BasicBlock *BB, Function *CurFn, bool IsFinished=false)
Value * getOrCreateThreadID(Value *Ident)
Return the current thread ID.
InsertPointTy createMaster(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB)
Generator for '#omp master'.
void pushFinalizationCB(const FinalizationInfo &FI)
Push a finalization callback on the finalization stack.
Definition: OMPIRBuilder.h:529
InsertPointTy getInsertionPoint()
}
IRBuilder ::InsertPoint createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, omp::ProcBindKind ProcBind, bool IsCancellable)
Generator for '#omp parallel'.
StringMap< GlobalVariable *, BumpPtrAllocator > InternalVars
An ordered map of auto-generated variables to their unique names.
GlobalVariable * getOrCreateInternalVariable(Type *Ty, const StringRef &Name, unsigned AddressSpace=0)
Gets (if variable with the given name already exist) or creates internal global variable with the spe...
FunctionCallee createDispatchInitFunction(unsigned IVSize, bool IVSigned)
Returns __kmpc_dispatch_init_* runtime function for the specified size IVSize and sign IVSigned.
InsertPointTy createSingle(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsNowait, ArrayRef< llvm::Value * > CPVars={}, ArrayRef< llvm::Function * > CPFuncs={})
Generator for '#omp single'.
CallInst * createOMPInteropUse(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause)
Create a runtime call for __tgt_interop_use.
IRBuilder<>::InsertPoint InsertPointTy
Type used throughout for insertion points.
Definition: OMPIRBuilder.h:491
GlobalVariable * createOffloadMapnames(SmallVectorImpl< llvm::Constant * > &Names, std::string VarName)
Create the global variable holding the offload names information.
static void writeTeamsForKernel(const Triple &T, Function &Kernel, int32_t LB, int32_t UB)
std::function< Function *(StringRef FunctionName)> FunctionGenCallback
Callback type used to generate a function with the given name.
std::function< InsertPointTy(InsertPointTy CodeGenIP, Value *LHS, Value *RHS, Value *&Res)> ReductionGenCBTy
ReductionGen callback for MLIR.
void setCorrectMemberOfFlag(omp::OpenMPOffloadMappingFlags &Flags, omp::OpenMPOffloadMappingFlags MemberOfFlag)
Given an initial flag set, this function modifies it to contain the passed in MemberOfFlag generated ...
void emitCancelationCheckImpl(Value *CancelFlag, omp::Directive CanceledDirective, FinalizeCallbackTy ExitCB={})
Generate control flow and cleanup for cancellation.
std::function< InsertPointTy(InsertPointTy, Type *, Value *, Value *)> ReductionGenAtomicCBTy
Callback type used to generate atomic reductions.
Constant * getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize)
Return the (LLVM-IR) string describing the default source location.
InsertPointTy createMasked(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, Value *Filter)
Generator for '#omp masked'.
void createOffloadEntry(Constant *ID, Constant *Addr, uint64_t Size, int32_t Flags, GlobalValue::LinkageTypes, StringRef Name="")
Creates offloading entry for the provided entry ID ID, address Addr, size Size, and flags Flags.
static unsigned getOpenMPDefaultSimdAlign(const Triple &TargetTriple, const StringMap< bool > &Features)
Get the default alignment value for given target.
unsigned getFlagMemberOffset()
Get the offset of the OMP_MAP_MEMBER_OF field.
InsertPointTy createTaskgroup(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB)
Generator for the taskgroup construct.
void createOffloadEntriesAndInfoMetadata(EmitMetadataErrorReportFunctionTy &ErrorReportFunction)
void applySimd(CanonicalLoopInfo *Loop, MapVector< Value *, Value * > AlignedVars, Value *IfCond, omp::OrderKind Order, ConstantInt *Simdlen, ConstantInt *Safelen)
Add metadata to simd-ize a loop.
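A sketch of annotating an existing canonical loop with simdlen(8), no aligned variables and no if-condition; the OrderKind enumerator used here is an assumption:

// Sketch: attach simd metadata to a canonical loop.
void markSimd(llvm::OpenMPIRBuilder &OMPBuilder, llvm::IRBuilder<> &Builder,
              llvm::CanonicalLoopInfo *Loop) {
  llvm::MapVector<llvm::Value *, llvm::Value *> AlignedVars; // no aligned clause
  llvm::ConstantInt *Simdlen = Builder.getInt32(8);          // simdlen(8)
  OMPBuilder.applySimd(Loop, AlignedVars, /*IfCond=*/nullptr,
                       llvm::omp::OrderKind::OMP_ORDER_unknown, Simdlen,
                       /*Safelen=*/nullptr);
}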
void emitTargetRegionFunction(TargetRegionEntryInfo &EntryInfo, FunctionGenCallback &GenerateFunctionCallback, bool IsOffloadEntry, Function *&OutlinedFn, Constant *&OutlinedFnID)
Create a unique name for the entry function using the source location information of the current target region.
InsertPointTy createTarget(const LocationDescription &Loc, OpenMPIRBuilder::InsertPointTy AllocaIP, OpenMPIRBuilder::InsertPointTy CodeGenIP, TargetRegionEntryInfo &EntryInfo, int32_t NumTeams, int32_t NumThreads, SmallVectorImpl< Value * > &Inputs, GenMapInfoCallbackTy GenMapInfoCB, TargetBodyGenCallbackTy BodyGenCB, TargetGenArgAccessorsCallbackTy ArgAccessorFuncCB)
Generator for '#omp target'.
bool isLastFinalizationInfoCancellable(omp::Directive DK)
Return true if the last entry in the finalization stack is of kind DK and cancellable.
InsertPointTy emitTargetKernel(const LocationDescription &Loc, InsertPointTy AllocaIP, Value *&Return, Value *Ident, Value *DeviceID, Value *NumTeams, Value *NumThreads, Value *HostPtr, ArrayRef< Value * > KernelArgs)
Generate a target region entry call.
GlobalVariable * createOffloadMaptypes(SmallVectorImpl< uint64_t > &Mappings, std::string VarName)
Create the global variable holding the offload mappings information.
CallInst * createCachedThreadPrivate(const LocationDescription &Loc, llvm::Value *Pointer, llvm::ConstantInt *Size, const llvm::Twine &Name=Twine(""))
Create a runtime call for __kmpc_threadprivate_cached.
IRBuilder<> Builder
The LLVM-IR Builder used to create IR.
GlobalValue * createGlobalFlag(unsigned Value, StringRef Name)
Create a hidden global flag Name in the module with initial value Value.
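For example (the flag name and value are illustrative only; OMPBuilder is assumed to be in scope):

// Sketch: record a module-level marker that later passes or tools can query.
llvm::GlobalValue *Flag =
    OMPBuilder.createGlobalFlag(/*Value=*/1, ".omp.example.flag");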
Value * getSizeInBytes(Value *BasePtr)
Computes the size of type in bytes.
OpenMPIRBuilder(Module &M)
Create a new OpenMPIRBuilder operating on the given module M.
Definition: OMPIRBuilder.h:469
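A sketch of the usual lifecycle around this constructor; initialize() and finalize() exist on this class but are outside this listing, so their exact signatures should be treated as assumptions:

// Sketch: construct once per module, emit constructs, then run finalization.
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

void buildOpenMPInto(llvm::Module &M, llvm::IRBuilder<> &Builder) {
  llvm::OpenMPIRBuilder OMPBuilder(M);
  OMPBuilder.initialize();                         // set up internal state
  llvm::OpenMPIRBuilder::LocationDescription Loc(Builder);
  if (OMPBuilder.updateToLocation(Loc)) {
    // ... createParallel / createTeams / ... at the captured insertion point
  }
  OMPBuilder.finalize();                           // outline pending regions
}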
void registerTargetGlobalVariable(OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind CaptureClause, OffloadEntriesInfoManager::OMPTargetDeviceClauseKind DeviceClause, bool IsDeclaration, bool IsExternallyVisible, TargetRegionEntryInfo EntryInfo, StringRef MangledName, std::vector< GlobalVariable * > &GeneratedRefs, bool OpenMPSIMD, std::vector< Triple > TargetTriple, std::function< Constant *()> GlobalInitializer, std::function< GlobalValue::LinkageTypes()> VariableLinkage, Type *LlvmPtrTy, Constant *Addr)
Registers a target variable for device or host.
InsertPointTy createTeams(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, Value *NumTeamsLower=nullptr, Value *NumTeamsUpper=nullptr, Value *ThreadLimit=nullptr, Value *IfExpr=nullptr)
Generator for '#omp teams'.
BodyGenTy
Type of BodyGen to use for region codegen.
SmallVector< llvm::Function *, 16 > ConstantAllocaRaiseCandidates
A collection of candidate target functions whose constant allocas will attempt to be raised on a cal...
OffloadEntriesInfoManager OffloadInfoManager
Info manager to keep track of target regions.
static std::pair< int32_t, int32_t > readTeamBoundsForKernel(const Triple &T, Function &Kernel)
Read/write the bounds on teams for Kernel.
std::function< std::tuple< std::string, uint64_t >()> FileIdentifierInfoCallbackTy
const std::string ompOffloadInfoName
OMP Offload Info Metadata name string.
InsertPointTy createCopyPrivate(const LocationDescription &Loc, llvm::Value *BufSize, llvm::Value *CpyBuf, llvm::Value *CpyFn, llvm::Value *DidIt)
Generator for __kmpc_copyprivate.
void popFinalizationCB()
Pop the last finalization callback from the finalization stack.
Definition: OMPIRBuilder.h:536
InsertPointTy createReductions(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< ReductionInfo > ReductionInfos, ArrayRef< bool > IsByRef, bool IsNoWait=false)
Generator for '#omp reduction'.
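A sketch of a single scalar '+' reduction; the EvalKind::Scalar enumerator and the idea of passing null Clang/atomic callbacks are assumptions based on the ReductionInfo members listed further below:

// Sketch: reduce a thread-private float partial sum into the shared variable.
void emitSumReduction(llvm::OpenMPIRBuilder &OMPBuilder,
                      llvm::IRBuilder<> &Builder,
                      llvm::OpenMPIRBuilder::InsertPointTy AllocaIP,
                      llvm::Value *Var, llvm::Value *PrivVar) {
  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
  auto SumGen = [&](InsertPointTy IP, llvm::Value *LHS, llvm::Value *RHS,
                    llvm::Value *&Res) -> InsertPointTy {
    Builder.restoreIP(IP);
    Res = Builder.CreateFAdd(LHS, RHS, "red.add");
    return Builder.saveIP();
  };
  llvm::OpenMPIRBuilder::ReductionInfo RI(
      Builder.getFloatTy(), Var, PrivVar,
      llvm::OpenMPIRBuilder::EvalKind::Scalar, SumGen,
      /*ReductionGenClang=*/nullptr, /*AtomicReductionGen=*/nullptr);
  llvm::OpenMPIRBuilder::LocationDescription Loc(Builder);
  Builder.restoreIP(OMPBuilder.createReductions(Loc, AllocaIP, {RI},
                                                /*IsByRef=*/{false},
                                                /*IsNoWait=*/false));
}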
bool updateToLocation(const LocationDescription &Loc)
Update the internal location to Loc.
void createFlush(const LocationDescription &Loc)
Generator for '#omp flush'.
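A small sketch showing the common guard-then-emit pattern with updateToLocation:

// Sketch: emit '#pragma omp flush' at the builder's current position.
void emitFlush(llvm::OpenMPIRBuilder &OMPBuilder, llvm::IRBuilder<> &Builder) {
  llvm::OpenMPIRBuilder::LocationDescription Loc(Builder);
  if (OMPBuilder.updateToLocation(Loc))
    OMPBuilder.createFlush(Loc);
}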
Constant * getAddrOfDeclareTargetVar(OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind CaptureClause, OffloadEntriesInfoManager::OMPTargetDeviceClauseKind DeviceClause, bool IsDeclaration, bool IsExternallyVisible, TargetRegionEntryInfo EntryInfo, StringRef MangledName, std::vector< GlobalVariable * > &GeneratedRefs, bool OpenMPSIMD, std::vector< Triple > TargetTriple, Type *LlvmPtrTy, std::function< Constant *()> GlobalInitializer, std::function< GlobalValue::LinkageTypes()> VariableLinkage)
Retrieve (or create if non-existent) the address of a declare target variable, used in conjunction wi...
EmitMetadataErrorKind
The kind of errors that can occur when emitting the offload entries and metadata.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:323
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
Definition: StringMap.h:128
size_type count(StringRef Key) const
count - Return 1 if the element is in the map, 0 otherwise.
Definition: StringMap.h:276
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
Value handle that is nullable, but tries to track the Value.
Definition: ValueHandle.h:204
bool pointsToAliveValue() const
Definition: ValueHandle.h:224
An efficient, type-erasing, non-owning reference to a callable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition: ISDOpcodes.h:71
OpenMPOffloadMappingFlags
Values for bit flags used to specify the mapping type for offloading.
Definition: OMPConstants.h:195
IdentFlag
IDs for all omp runtime library ident_t flag encodings (see their definition in openmp/runtime/src/kmp...
Definition: OMPConstants.h:65
RTLDependenceKindTy
Dependence kind for RTL.
Definition: OMPConstants.h:273
RuntimeFunction
IDs for all omp runtime library (RTL) functions.
Definition: OMPConstants.h:45
WorksharingLoopType
A type of worksharing loop construct.
Definition: OMPConstants.h:283
OMPAtomicCompareOp
Atomic compare operations. Currently OpenMP only supports ==, >, and <.
Definition: OMPConstants.h:267
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
BasicBlock * splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch, llvm::Twine Suffix=".split")
Like splitBB, but reuses the current block's name for the new name.
@ Offset
Definition: DWP.cpp:480
AddressSpace
Definition: NVPTXBaseInfo.h:21
void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New, bool CreateBranch)
Move the instruction after an InsertPoint to the beginning of another BasicBlock.
BasicBlock * splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch, llvm::Twine Name={})
Split a BasicBlock at an InsertPoint, even if the block is degenerate (missing the terminator).
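A sketch of using the splitting helper while a region is only partially built (the block name is illustrative):

// Sketch: split the current, possibly terminator-less block and keep emitting
// into the new continuation block.
void splitAtCurrentPoint(llvm::IRBuilder<> &Builder) {
  llvm::BasicBlock *Cont =
      llvm::splitBB(Builder, /*CreateBranch=*/true, "omp.region.cont");
  Builder.SetInsertPoint(Cont, Cont->begin());
  // ... continue emitting into Cont ...
}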
AtomicOrdering
Atomic ordering for LLVM's memory model.
A struct to pack relevant information while generating atomic Ops.
A struct to pack the relevant information for an OpenMP depend clause.
DependData(omp::RTLDependenceKindTy DepKind, Type *DepValueType, Value *DepVal)
omp::RTLDependenceKindTy DepKind
bool IsCancellable
Flag to indicate if the directive is cancellable.
Definition: OMPIRBuilder.h:523
FinalizeCallbackTy FiniCB
The finalization callback provided by the last in-flight invocation of createXXXX for the directive o...
Definition: OMPIRBuilder.h:516
omp::Directive DK
The directive kind of the innermost directive that has an associated region which might require final...
Definition: OMPIRBuilder.h:520
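A sketch of how a FinalizationInfo might be pushed around body emission; the brace-initialization order follows the member order shown above (FiniCB, DK, IsCancellable) and is otherwise an assumption:

// Sketch: register cleanup so cancellation points inside the body run FiniCB.
void withFinalization(llvm::OpenMPIRBuilder &OMPBuilder,
                      llvm::OpenMPIRBuilder::FinalizeCallbackTy FiniCB,
                      llvm::function_ref<void()> EmitBody) {
  OMPBuilder.pushFinalizationCB(
      {FiniCB, llvm::omp::Directive::OMPD_parallel, /*IsCancellable=*/true});
  EmitBody();
  OMPBuilder.popFinalizationCB();
}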
Description of an LLVM-IR insertion point (IP) and a debug/source location (filename,...
Definition: OMPIRBuilder.h:607
LocationDescription(const InsertPointTy &IP)
Definition: OMPIRBuilder.h:610
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
Definition: OMPIRBuilder.h:611
LocationDescription(const IRBuilderBase &IRB)
Definition: OMPIRBuilder.h:608
This structure contains combined information generated for mappable clauses, including base pointers,...
void append(MapInfosTy &CurInfo)
Append arrays in CurInfo.
MapDeviceInfoArrayTy DevicePointers
StructNonContiguousInfo NonContigInfo
Helper that contains information about regions we need to outline during finalization.
void collectBlocks(SmallPtrSetImpl< BasicBlock * > &BlockSet, SmallVectorImpl< BasicBlock * > &BlockVector)
Collect all blocks in between EntryBB and ExitBB in both the given vector and set.
Function * getFunction() const
Return the function that contains the region to be outlined.
SmallVector< Value *, 2 > ExcludeArgsFromAggregate
std::function< void(Function &)> PostOutlineCBTy
Information about an OpenMP reduction.
EvalKind EvaluationKind
Reduction evaluation kind - scalar, complex or aggregate.
ReductionGenAtomicCBTy AtomicReductionGen
Callback for generating the atomic reduction body, may be null.
ReductionGenCBTy ReductionGen
Callback for generating the reduction body.
ReductionInfo(Value *PrivateVariable)
Value * Variable
Reduction variable of pointer type.
Value * PrivateVariable
Thread-private partial reduction variable.
ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable, EvalKind EvaluationKind, ReductionGenCBTy ReductionGen, ReductionGenClangCBTy ReductionGenClang, ReductionGenAtomicCBTy AtomicReductionGen)
ReductionGenClangCBTy ReductionGenClang
Clang callback for generating the reduction body.
Type * ElementType
Reduction element type, must match pointee type of variable.
Container for the arguments used to pass data to the runtime library.
Value * SizesArray
The array of sizes passed to the runtime library.
TargetDataRTArgs(Value *BasePointersArray, Value *PointersArray, Value *SizesArray, Value *MapTypesArray, Value *MapTypesArrayEnd, Value *MappersArray, Value *MapNamesArray)
Value * PointersArray
The array of section pointers passed to the runtime library.
Value * MappersArray
The array of user-defined mappers passed to the runtime library.
Value * MapTypesArrayEnd
The array of map types passed to the runtime library for the end of the region, or nullptr if there are none.
Value * BasePointersArray
The array of base pointers passed to the runtime library.
Value * MapTypesArray
The array of map types passed to the runtime library for the beginning of the region or for the entire region.
Value * MapNamesArray
The array of original declaration names of mapped pointers sent to the runtime library for debugging.
Data structure that contains the needed information to construct the kernel args vector.
Value * NumTeams
The number of teams.
TargetKernelArgs(unsigned NumTargetItems, TargetDataRTArgs RTArgs, Value *NumIterations, Value *NumTeams, Value *NumThreads, Value *DynCGGroupMem, bool HasNoWait)
Constructor for TargetKernelArgs.
Value * DynCGGroupMem
The size of the dynamic shared memory.
TargetDataRTArgs RTArgs
Arguments passed to the runtime library.
Value * NumIterations
The number of iterations.
unsigned NumTargetItems
Number of arguments passed to the runtime library.
bool HasNoWait
True if the kernel has 'no wait' clause.
Value * NumThreads
The number of threads.
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254
Data structure to contain the information needed to uniquely identify a target entry.
Definition: OMPIRBuilder.h:197
static void getTargetRegionEntryFnName(SmallVectorImpl< char > &Name, StringRef ParentName, unsigned DeviceID, unsigned FileID, unsigned Line, unsigned Count)
bool operator<(const TargetRegionEntryInfo &RHS) const
Definition: OMPIRBuilder.h:215
TargetRegionEntryInfo(StringRef ParentName, unsigned DeviceID, unsigned FileID, unsigned Line, unsigned Count=0)
Definition: OMPIRBuilder.h:205
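A sketch of building an entry-info key and deriving the corresponding outlined function name; the parent name, IDs, and line number are placeholder values, and the usual ADT headers (e.g. SmallString) are assumed to be included:

// Sketch: identify a target region in foo() at line 42 and compute its entry name.
llvm::TargetRegionEntryInfo EntryInfo("foo", /*DeviceID=*/0, /*FileID=*/1,
                                      /*Line=*/42);
llvm::SmallString<128> EntryFnName;
llvm::TargetRegionEntryInfo::getTargetRegionEntryFnName(
    EntryFnName, "foo", /*DeviceID=*/0, /*FileID=*/1, /*Line=*/42, /*Count=*/0);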
Defines various target-specific GPU grid values that must be consistent between host RTL (plugin),...
Definition: OMPGridValues.h:57