LLVM 23.0.0git
OMPIRBuilder.h
Go to the documentation of this file.
1//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the OpenMPIRBuilder class and helpers used as a convenient
10// way to create LLVM instructions for OpenMP directives.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
15#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
16
17#include "llvm/ADT/APSInt.h"
18#include "llvm/ADT/SetVector.h"
22#include "llvm/IR/CallingConv.h"
23#include "llvm/IR/DebugLoc.h"
24#include "llvm/IR/IRBuilder.h"
25#include "llvm/IR/Module.h"
26#include "llvm/IR/ValueMap.h"
29#include "llvm/Support/Error.h"
31#include <forward_list>
32#include <map>
33#include <optional>
34
35namespace llvm {
37class CodeExtractor;
38class ScanInfo;
41class OpenMPIRBuilder;
42class Loop;
43class LoopAnalysis;
44class LoopInfo;
45
46namespace vfs {
47class FileSystem;
48} // namespace vfs
49
50/// Move the instruction after an InsertPoint to the beginning of another
51/// BasicBlock.
52///
53/// The instructions after \p IP are moved to the beginning of \p New which must
54/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
55/// \p New will be added such that there is no semantic change. Otherwise, the
56/// \p IP insert block remains degenerate and it is up to the caller to insert a
57/// terminator. \p DL is used as the debug location for the branch instruction
58/// if one is created.
60 bool CreateBranch, DebugLoc DL);
61
62/// Splice a BasicBlock at an IRBuilder's current insertion point. Its new
63/// insert location will stick to after the instruction before the insertion
64/// point (instead of moving with the instruction the InsertPoint stores
65/// internally).
66LLVM_ABI void spliceBB(IRBuilder<> &Builder, BasicBlock *New,
67 bool CreateBranch);
68
69/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
70/// (missing the terminator).
71///
72/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
73/// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch
74/// is true, a branch to the new successor will be created such that
75/// semantically there is no change; otherwise the block of the insertion point
76/// remains degenerate and it is the caller's responsibility to insert a
77/// terminator. \p DL is used as the debug location for the branch instruction
78/// if one is created. Returns the new successor block.
80 DebugLoc DL, llvm::Twine Name = {});
81
82/// Split a BasicBlock at \p Builder's insertion point, even if the block is
83/// degenerate (missing the terminator). Its new insert location will stick to
84/// after the instruction before the insertion point (instead of moving with the
85/// instruction the InsertPoint stores internally).
86LLVM_ABI BasicBlock *splitBB(IRBuilderBase &Builder, bool CreateBranch,
87 llvm::Twine Name = {});
88
89/// Split a BasicBlock at \p Builder's insertion point, even if the block is
90/// degenerate (missing the terminator). Its new insert location will stick to
91/// after the instruction before the insertion point (instead of moving with the
92/// instruction the InsertPoint stores internally).
93LLVM_ABI BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch,
94 llvm::Twine Name);
95
96/// Like splitBB, but reuses the current block's name for the new name.
98 bool CreateBranch,
99 llvm::Twine Suffix = ".split");
100
101/// Captures attributes that affect generating LLVM-IR using the
102/// OpenMPIRBuilder and related classes. Note that not all attributes are
103/// required for all classes or functions. In some use cases the configuration
104/// is not necessary at all, because the only functions that are called
105/// are ones that are not dependent on the configuration.
107public:
108 /// Flag to define whether to generate code for the role of the OpenMP host
109 /// (if set to false) or device (if set to true) in an offloading context. It
110 /// is set when the -fopenmp-is-target-device compiler frontend option is
111 /// specified.
112 std::optional<bool> IsTargetDevice;
113
114 /// Flag for specifying if the compilation is done for an accelerator. It is
115 /// set according to the architecture of the target triple and currently only
116 /// true when targeting AMDGPU or NVPTX. Today, these targets can only perform
117 /// the role of an OpenMP target device, so `IsTargetDevice` must also be true
118 /// if `IsGPU` is true. This restriction might be lifted if an accelerator-
119 /// like target with the ability to work as the OpenMP host is added, or if
120 /// the capabilities of the currently supported GPU architectures are
121 /// expanded.
122 std::optional<bool> IsGPU;
123
124 /// Flag for specifying if LLVMUsed information should be emitted.
125 std::optional<bool> EmitLLVMUsedMetaInfo;
126
127 /// Flag for specifying if offloading is mandatory.
128 std::optional<bool> OpenMPOffloadMandatory;
129
130 /// First separator used between the initial two parts of a name.
131 std::optional<StringRef> FirstSeparator;
133 /// Separator used between all of the rest consecutive parts of a name.
133 std::optional<StringRef> Separator;
134
135 // Grid Value for the GPU target.
136 std::optional<omp::GV> GridValue;
137
138 /// When compilation is being done for the OpenMP host (i.e. `IsTargetDevice =
139 /// false`), this contains the list of offloading triples associated, if any.
141
142 // Default address space for the target.
143 unsigned DefaultTargetAS = 0;
144
146
150 bool HasRequiresReverseOffload,
151 bool HasRequiresUnifiedAddress,
152 bool HasRequiresUnifiedSharedMemory,
153 bool HasRequiresDynamicAllocators);
154
155 // Getters functions that assert if the required values are not present.
/// Returns whether code is generated for the device role; asserts that
/// IsTargetDevice has been explicitly set before use.
156 bool isTargetDevice() const {
157 assert(IsTargetDevice.has_value() && "IsTargetDevice is not set");
158 return *IsTargetDevice;
159 }
160
/// Returns whether compilation targets an accelerator (AMDGPU/NVPTX);
/// asserts that IsGPU has been explicitly set before use.
161 bool isGPU() const {
162 assert(IsGPU.has_value() && "IsGPU is not set");
163 return *IsGPU;
164 }
165
167 assert(OpenMPOffloadMandatory.has_value() &&
168 "OpenMPOffloadMandatory is not set");
170 }
171
173 assert(GridValue.has_value() && "GridValue is not set");
174 return *GridValue;
175 }
176
177 unsigned getDefaultTargetAS() const { return DefaultTargetAS; }
178
180
181 bool hasRequiresFlags() const { return RequiresFlags; }
186
187 /// Returns requires directive clauses as flags compatible with those expected
188 /// by libomptarget.
189 LLVM_ABI int64_t getRequiresFlags() const;
190
191 // Returns the FirstSeparator if set, otherwise use the default separator
192 // depending on isGPU
194 if (FirstSeparator.has_value())
195 return *FirstSeparator;
196 if (isGPU())
197 return "_";
198 return ".";
199 }
200
201 // Returns the Separator if set, otherwise use the default separator depending
202 // on isGPU
204 if (Separator.has_value())
205 return *Separator;
206 if (isGPU())
207 return "$";
208 return ".";
209 }
210
212 void setIsGPU(bool Value) { IsGPU = Value; }
218 void setDefaultTargetAS(unsigned AS) { DefaultTargetAS = AS; }
220
225
226private:
227 /// Flags for specifying which requires directive clauses are present.
228 int64_t RequiresFlags;
229};
230
231/// Data structure to contain the information needed to uniquely identify
232/// a target entry.
234 /// The prefix used for kernel names.
235 static constexpr const char *KernelNamePrefix = "__omp_offloading_";
236
237 std::string ParentName;
238 unsigned DeviceID;
239 unsigned FileID;
240 unsigned Line;
241 unsigned Count;
242
245 unsigned FileID, unsigned Line, unsigned Count = 0)
247 Count(Count) {}
248
249 LLVM_ABI static void
251 unsigned DeviceID, unsigned FileID, unsigned Line,
252 unsigned Count);
253
255 return std::make_tuple(ParentName, DeviceID, FileID, Line, Count) <
256 std::make_tuple(RHS.ParentName, RHS.DeviceID, RHS.FileID, RHS.Line,
257 RHS.Count);
258 }
259};
260
261/// Class that manages information about offload code regions and data
263 /// Number of entries registered so far.
264 OpenMPIRBuilder *OMPBuilder;
265 unsigned OffloadingEntriesNum = 0;
266
267public:
268 /// Base class of the entries info.
270 public:
271 /// Kind of a given entry.
272 enum OffloadingEntryInfoKinds : unsigned {
273 /// Entry is a target region.
275 /// Entry is a declare target variable.
277 /// Invalid entry info.
279 };
280
281 protected:
283 explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
284 explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
285 uint32_t Flags)
286 : Flags(Flags), Order(Order), Kind(Kind) {}
287 ~OffloadEntryInfo() = default;
288
289 public:
/// An entry is valid once an Order has been assigned (~0u marks "unset").
290 bool isValid() const { return Order != ~0u; }
/// Returns the position at which this entry was emitted.
291 unsigned getOrder() const { return Order; }
/// Returns the kind tag used by classof-style dispatch.
292 OffloadingEntryInfoKinds getKind() const { return Kind; }
293 uint32_t getFlags() const { return Flags; }
294 void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
/// Returns the mapped address, or null if the tracked value has been
/// deleted (Addr is a WeakTrackingVH).
295 Constant *getAddress() const { return cast_or_null<Constant>(Addr); }
297 assert(!Addr.pointsToAliveValue() && "Address has been set before!");
298 Addr = V;
299 }
300 static bool classof(const OffloadEntryInfo *Info) { return true; }
301
302 private:
303 /// Address of the entity that has to be mapped for offloading.
304 WeakTrackingVH Addr;
305
306 /// Flags associated with the device global.
307 uint32_t Flags = 0u;
308
309 /// Order this entry was emitted.
310 unsigned Order = ~0u;
311
312 OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
313 };
314
316 /// Return true if there are no entries defined.
316 LLVM_ABI bool empty() const;
317 /// Return number of entries defined so far.
318 unsigned size() const { return OffloadingEntriesNum; }
319
320 OffloadEntriesInfoManager(OpenMPIRBuilder *builder) : OMPBuilder(builder) {}
321
322 //
323 // Target region entries related.
324 //
325
326 /// Kind of the target registry entry.
328 /// Mark the entry as target region.
330 };
331
332 /// Target region entries info.
334 /// Address that can be used as the ID of the entry.
335 Constant *ID = nullptr;
336
337 public:
340 explicit OffloadEntryInfoTargetRegion(unsigned Order, Constant *Addr,
341 Constant *ID,
344 ID(ID) {
345 setAddress(Addr);
346 }
347
/// Returns the address usable as the entry's ID; null until set.
348 Constant *getID() const { return ID; }
/// Sets the entry's ID; asserts it has not been set before.
349 void setID(Constant *V) {
350 assert(!ID && "ID has been set before!");
351 ID = V;
352 }
353 static bool classof(const OffloadEntryInfo *Info) {
354 return Info->getKind() == OffloadingEntryInfoTargetRegion;
355 }
356 };
357
358 /// Initialize target region entry.
359 /// This is ONLY needed for DEVICE compilation.
360 LLVM_ABI void
362 unsigned Order);
363 /// Register target region entry.
365 Constant *Addr, Constant *ID,
367 /// Return true if a target region entry with the provided information
368 /// exists.
370 bool IgnoreAddressId = false) const;
371
372 // Return the Name based on \a EntryInfo using the next available Count.
373 LLVM_ABI void
375 const TargetRegionEntryInfo &EntryInfo);
376
377 /// \brief Applies action \a Action on all registered entries.
378 typedef function_ref<void(const TargetRegionEntryInfo &EntryInfo,
379 const OffloadEntryInfoTargetRegion &)>
381 LLVM_ABI void
383
384 //
385 // Device global variable entries related.
386 //
387
388 /// Kind of the global variable entry.
390 /// Mark the entry as a to declare target.
392 /// Mark the entry as a to declare target link.
394 /// Mark the entry as a declare target enter.
396 /// Mark the entry as having no declare target entry kind.
398 /// Mark the entry as a declare target indirect global.
400 /// Mark the entry as a register requires global.
402 /// Mark the entry as a declare target indirect vtable.
404 };
405
406 /// Kind of device clause for declare target variables
407 /// and functions
408 /// NOTE: Currently not used as a part of a variable entry
409 /// used for Flang and Clang to interface with the variable
410 /// related registration functions
412 /// The target is marked for all devices
414 /// The target is marked for non-host devices
416 /// The target is marked for host devices
418 /// The target is marked as having no clause
420 };
421
422 /// Device global variable entries info.
424 /// Type of the global variable.
425 int64_t VarSize;
427 const std::string VarName;
428
429 public:
435 explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, Constant *Addr,
436 int64_t VarSize,
439 const std::string &VarName)
441 VarSize(VarSize), Linkage(Linkage), VarName(VarName) {
442 setAddress(Addr);
443 }
444
/// Size of the variable in bytes.
445 int64_t getVarSize() const { return VarSize; }
/// Mangled name used to index the device-global-variable storage.
446 StringRef getVarName() const { return VarName; }
447 void setVarSize(int64_t Size) { VarSize = Size; }
448 GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
449 void setLinkage(GlobalValue::LinkageTypes LT) { Linkage = LT; }
450 static bool classof(const OffloadEntryInfo *Info) {
451 return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
452 }
453 };
454
455 /// Initialize device global variable entry.
456 /// This is ONLY used for DEVICE compilation.
458 StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order);
459
460 /// Register device global variable entry.
462 StringRef VarName, Constant *Addr, int64_t VarSize,
464 /// Checks if the variable with the given name has been registered already.
466 return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
467 }
468 /// Applies action \a Action on all registered entries.
469 typedef function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)>
473
474private:
475 /// Return the count of entries at a particular source location.
476 unsigned
477 getTargetRegionEntryInfoCount(const TargetRegionEntryInfo &EntryInfo) const;
478
479 /// Update the count of entries at a particular source location.
480 void
481 incrementTargetRegionEntryInfoCount(const TargetRegionEntryInfo &EntryInfo);
482
484 getTargetRegionEntryCountKey(const TargetRegionEntryInfo &EntryInfo) {
485 return TargetRegionEntryInfo(EntryInfo.ParentName, EntryInfo.DeviceID,
486 EntryInfo.FileID, EntryInfo.Line, 0);
487 }
488
489 // Count of entries at a location.
490 std::map<TargetRegionEntryInfo, unsigned> OffloadEntriesTargetRegionCount;
491
492 // Storage for target region entries kind.
493 typedef std::map<TargetRegionEntryInfo, OffloadEntryInfoTargetRegion>
494 OffloadEntriesTargetRegionTy;
495 OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
496 /// Storage for device global variable entries kind. The storage is to be
497 /// indexed by mangled name.
499 OffloadEntriesDeviceGlobalVarTy;
500 OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
501};
502
503/// An interface to create LLVM-IR for OpenMP directives.
504///
505/// Each OpenMP directive has a corresponding public generator method.
507public:
508 /// Create a new OpenMPIRBuilder operating on the given module \p M. This will
509 /// not have an effect on \p M (see initialize)
511 : M(M), Builder(M.getContext()), OffloadInfoManager(this),
512 T(M.getTargetTriple()), IsFinalized(false) {}
514
516 llvm::Value *AtomicVar;
517
518 public:
526
527 llvm::Value *getAtomicPointer() const override { return AtomicVar; }
530 const llvm::Twine &Name) const override {
531 llvm::AllocaInst *allocaInst = Builder->CreateAlloca(Ty);
532 allocaInst->setName(Name);
533 return allocaInst;
534 }
535 };
536 /// Initialize the internal state, this will put structures types and
537 /// potentially other helpers into the underlying module. Must be called
538 /// before any other method and only once! This internal state includes types
539 /// used in the OpenMPIRBuilder generated from OMPKinds.def.
540 LLVM_ABI void initialize();
541
543
544 /// Finalize the underlying module, e.g., by outlining regions.
545 /// \param Fn The function to be finalized. If not used,
546 /// all functions are finalized.
547 LLVM_ABI void finalize(Function *Fn = nullptr);
548
549 /// Check whether the finalize function has already run
550 /// \return true if the finalize function has already run
551 LLVM_ABI bool isFinalized();
552
553 /// Add attributes known for \p FnID to \p Fn.
555
556 /// Type used throughout for insertion points.
558
559 /// Type used to represent an insertion point or an error value.
561
562 /// Create a name using the platform specific separators.
563 /// \param Parts parts of the final name that needs separation
564 /// The created name has a first separator between the first and second part
565 /// and a second separator between all other parts.
566 /// E.g. with FirstSeparator "$" and Separator "." and
567 /// parts: "p1", "p2", "p3", "p4"
568 /// The resulting name is "p1$p2.p3.p4"
569 /// The separators are retrieved from the OpenMPIRBuilderConfig.
570 LLVM_ABI std::string
572
573 /// Callback type for variable finalization (think destructors).
574 ///
575 /// \param CodeGenIP is the insertion point at which the finalization code
576 /// should be placed.
577 ///
578 /// A finalize callback knows about all objects that need finalization, e.g.
579 /// destruction, when the scope of the currently generated construct is left
580 /// at the time, and location, the callback is invoked.
581 using FinalizeCallbackTy = std::function<Error(InsertPointTy CodeGenIP)>;
582
/// Construct finalization info for the region of directive kind \p DK,
/// taking ownership of the finalization callback \p FiniCB.
584 FinalizationInfo(FinalizeCallbackTy FiniCB, omp::Directive DK,
585 bool IsCancellable)
586 : DK(DK), IsCancellable(IsCancellable), FiniCB(std::move(FiniCB)) {}
587 /// The directive kind of the innermost directive that has an associated
588 /// region which might require finalization when it is left.
589 const omp::Directive DK;
590
591 /// Flag to indicate if the directive is cancellable.
592 const bool IsCancellable;
593
594 /// The basic block to which control should be transferred to
595 /// implement the FiniCB. Memoized to avoid generating finalization
596 /// multiple times.
598
599 /// For cases where there is an unavoidable existing finalization block
600 /// (e.g. loop finalization after omp sections). The existing finalization
601 /// block must not contain any non-finalization code.
603
604 private:
605 /// Access via getFiniBB.
606 BasicBlock *FiniBB = nullptr;
607
608 /// The finalization callback provided by the last in-flight invocation of
609 /// createXXXX for the directive of kind DK.
610 FinalizeCallbackTy FiniCB;
611 };
612
613 /// Push a finalization callback on the finalization stack.
614 ///
615 /// NOTE: Temporary solution until Clang CG is gone.
617 FinalizationStack.push_back(FI);
618 }
619
620 /// Pop the last finalization callback from the finalization stack.
621 ///
622 /// NOTE: Temporary solution until Clang CG is gone.
624
625 /// Callback type for body (=inner region) code generation
626 ///
627 /// The callback takes code locations as arguments, each describing a
628 /// location where additional instructions can be inserted.
629 ///
630 /// The CodeGenIP may be in the middle of a basic block or point to the end of
631 /// it. The basic block may have a terminator or be degenerate. The callback
632 /// function may just insert instructions at that position, but also split the
633 /// block (without the Before argument of BasicBlock::splitBasicBlock such
634 /// that the identity of the split predecessor block is preserved) and insert
635 /// additional control flow, including branches that do not lead back to what
636 /// follows the CodeGenIP. Note that since the callback is allowed to split
637 /// the block, callers must assume that InsertPoints to positions in the
638 /// BasicBlock after CodeGenIP including CodeGenIP itself are invalidated. If
639 /// such InsertPoints need to be preserved, it can split the block itself
640 /// before calling the callback.
641 ///
642 /// AllocaIP and CodeGenIP must not point to the same position.
643 ///
644 /// \param AllocaIP is the insertion point at which new allocations should
645 /// be placed. The BasicBlock it is pointing to must not be
646 /// split.
647 /// \param CodeGenIP is the insertion point at which the body code should be
648 /// placed.
649 /// \param DeallocBlocks is the list of insertion blocks where explicit
650 /// deallocations, if needed, should be placed.
651 /// \return an error, if any were triggered during execution.
653 function_ref<Error(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
654 ArrayRef<BasicBlock *> DeallocBlocks)>;
655
656 /// Callback type for task duplication function code generation. This is the
657 /// task duplication function passed to __kmpc_taskloop. It is expected that
658 /// this function will set up (first)private variables in the duplicated task
659 /// which have non-trivial (copy-)constructors. Insertion points are handled
660 /// the same way as for BodyGenCallbackTy.
661 ///
662 /// \ref createTaskloop lays out the task's auxiliary data structure as:
663 /// `{ lower bound, upper bound, step, data... }`. DestPtr and SrcPtr point
664 /// to this data.
665 ///
666 /// It is acceptable for the callback to be set to nullptr. In that case no
667 /// function will be generated and nullptr will be passed as the task
668 /// duplication function to __kmpc_taskloop.
669 ///
670 /// \param AllocaIP is the insertion point at which new alloca instructions
671 /// should be placed. The BasicBlock it is pointing to must
672 /// not be split.
673 /// \param CodeGenIP is the insertion point at which the body code should be
674 /// placed.
675 /// \param DestPtr This is a pointer to data inside the newly duplicated
676 /// task's auxiliary data structure (allocated after the task
677 /// descriptor.)
678 /// \param SrcPtr This is a pointer to data inside the original task's
679 /// auxiliary data structure (allocated after the task
680 /// descriptor.)
681 ///
682 /// \return The insertion point immediately after the generated code, or an
683 /// error if any occurred.
685 InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value *DestPtr,
686 Value *SrcPtr)>;
687
688 // This is created primarily for sections construct as llvm::function_ref
689 // (BodyGenCallbackTy) is not storable (as described in the comments of
690 // function_ref class - function_ref contains non-ownable reference
691 // to the callable.
692 ///
693 /// \return an error, if any were triggered during execution.
695 std::function<Error(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
696 ArrayRef<BasicBlock *> DeallocBlocks)>;
697
698 /// Callback type for loop body code generation.
699 ///
700 /// \param CodeGenIP is the insertion point where the loop's body code must be
701 /// placed. This will be a dedicated BasicBlock with a
702 /// conditional branch from the loop condition check and
703 /// terminated with an unconditional branch to the loop
704 /// latch.
705 /// \param IndVar is the induction variable usable at the insertion point.
706 ///
707 /// \return an error, if any were triggered during execution.
709 function_ref<Error(InsertPointTy CodeGenIP, Value *IndVar)>;
710
711 /// Callback type for variable privatization (think copy & default
712 /// constructor).
713 ///
714 /// \param AllocaIP is the insertion point at which new alloca instructions
715 /// should be placed.
716 /// \param CodeGenIP is the insertion point at which the privatization code
717 /// should be placed.
718 /// \param Original The value being copied/created, should not be used in the
719 /// generated IR.
720 /// \param Inner The equivalent of \p Original that should be used in the
721 /// generated IR; this is equal to \p Original if the value is
722 /// a pointer and can thus be passed directly, otherwise it is
723 /// an equivalent but different value.
724 /// \param ReplVal The replacement value, thus a copy or new created version
725 /// of \p Inner.
726 ///
727 /// \returns The new insertion point where code generation continues and
728 /// \p ReplVal the replacement value.
730 InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
731 Value &Inner, Value *&ReplVal)>;
732
733 /// Description of a LLVM-IR insertion point (IP) and a debug/source location
734 /// (filename, line, column, ...).
737 : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
740 : IP(IP), DL(DL) {}
743 };
744
745 /// Emitter methods for OpenMP directives.
746 ///
747 ///{
748
749 /// Generator for '#omp barrier'
750 ///
751 /// \param Loc The location where the barrier directive was encountered.
752 /// \param Kind The kind of directive that caused the barrier.
753 /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
754 /// \param CheckCancelFlag Flag to indicate a cancel barrier return value
755 /// should be checked and acted upon.
756 /// \param ThreadID Optional parameter to pass in any existing ThreadID value.
757 ///
758 /// \returns The insertion point after the barrier.
760 omp::Directive Kind,
761 bool ForceSimpleCall = false,
762 bool CheckCancelFlag = true);
763
764 /// Generator for '#omp cancel'
765 ///
766 /// \param Loc The location where the directive was encountered.
767 /// \param IfCondition The evaluated 'if' clause expression, if any.
768 /// \param CanceledDirective The kind of directive that is canceled.
769 ///
770 /// \returns The insertion point after the barrier.
772 Value *IfCondition,
773 omp::Directive CanceledDirective);
774
775 /// Generator for '#omp cancellation point'
776 ///
777 /// \param Loc The location where the directive was encountered.
778 /// \param CanceledDirective The kind of directive that is canceled.
779 ///
780 /// \returns The insertion point after the barrier.
782 const LocationDescription &Loc, omp::Directive CanceledDirective);
783
784 /// Creates a ScanInfo object, allocates and returns the pointer.
786
787 /// Generator for '#omp parallel'
788 ///
789 /// \param Loc The insert and source location description.
790 /// \param AllocaIP The insertion point to be used for allocations.
791 /// \param DeallocBlocks The insertion blocks to be used for explicit
792 /// deallocations, if needed.
793 /// \param BodyGenCB Callback that will generate the region code.
794 /// \param PrivCB Callback to copy a given variable (think copy constructor).
795 /// \param FiniCB Callback to finalize variable copies.
796 /// \param IfCondition The evaluated 'if' clause expression, if any.
797 /// \param NumThreads The evaluated 'num_threads' clause expression, if any.
798 /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
799 /// \param IsCancellable Flag to indicate a cancellable parallel region.
800 ///
801 /// \returns The insertion position *after* the parallel.
803 const LocationDescription &Loc, InsertPointTy AllocaIP,
804 ArrayRef<BasicBlock *> DeallocBlocks, BodyGenCallbackTy BodyGenCB,
805 PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, Value *IfCondition,
806 Value *NumThreads, omp::ProcBindKind ProcBind, bool IsCancellable);
807
808 /// Generator for the control flow structure of an OpenMP canonical loop.
809 ///
810 /// This generator operates on the logical iteration space of the loop, i.e.
811 /// the caller only has to provide a loop trip count of the loop as defined by
812 /// base language semantics. The trip count is interpreted as an unsigned
813 /// integer. The induction variable passed to \p BodyGenCB will be of the same
814 /// type and run from 0 to \p TripCount - 1. It is up to the callback to
815 /// convert the logical iteration variable to the loop counter variable in the
816 /// loop body.
817 ///
818 /// \param Loc The insert and source location description. The insert
819 /// location can be between two instructions or the end of a
820 /// degenerate block (e.g. a BB under construction).
821 /// \param BodyGenCB Callback that will generate the loop body code.
822 /// \param TripCount Number of iterations the loop body is executed.
823 /// \param Name Base name used to derive BB and instruction names.
824 ///
825 /// \returns An object representing the created control flow structure which
826 /// can be used for loop-associated directives.
829 LoopBodyGenCallbackTy BodyGenCB, Value *TripCount,
830 const Twine &Name = "loop");
831
832 /// Generator for the control flow structure of an OpenMP canonical loops if
833 /// the parent directive has an `inscan` modifier specified.
834 /// If the `inscan` modifier is specified, the region of the parent is
835 /// expected to have a `scan` directive. Based on the clauses in
836 /// scan directive, the body of the loop is split into two loops: Input loop
837 /// and Scan Loop. Input loop contains the code generated for input phase of
838 /// scan and Scan loop contains the code generated for scan phase of scan.
839 /// From the bodyGen callback of these loops, `createScan` would be called
840 /// when a scan directive is encountered from the loop body. `createScan`
841 /// based on whether 1. inclusive or exclusive scan is specified and, 2. input
842 /// loop or scan loop is generated, lowers the body of the for loop
843 /// accordingly.
844 ///
845 /// \param Loc The insert and source location description.
846 /// \param BodyGenCB Callback that will generate the loop body code.
847 /// \param Start Value of the loop counter for the first iterations.
848 /// \param Stop Loop counter values past this will stop the loop.
849 /// \param Step Loop counter increment after each iteration; negative
850 /// means counting down.
851 /// \param IsSigned Whether Start, Stop and Step are signed integers.
852 /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
853 /// counter.
854 /// \param ComputeIP Insertion point for instructions computing the trip
855 /// count. Can be used to ensure the trip count is available
856 /// at the outermost loop of a loop nest. If not set,
857 /// defaults to the preheader of the generated loop.
858 /// \param Name Base name used to derive BB and instruction names.
859 /// \param ScanRedInfo Pointer to the ScanInfo objected created using
860 /// `ScanInfoInitialize`.
861 ///
862 /// \returns A vector containing Loop Info of Input Loop and Scan Loop.
865 LoopBodyGenCallbackTy BodyGenCB, Value *Start,
866 Value *Stop, Value *Step, bool IsSigned,
867 bool InclusiveStop, InsertPointTy ComputeIP,
868 const Twine &Name, ScanInfo *ScanRedInfo);
869
870 /// Calculate the trip count of a canonical loop.
871 ///
872 /// This allows specifying user-defined loop counter values using increment,
873 /// upper- and lower bounds. To disambiguate the terminology when counting
874 /// downwards, instead of lower bounds we use \p Start for the loop counter
875 /// value in the first body iteration.
876 ///
877 /// Consider the following limitations:
878 ///
879 /// * A loop counter space over all integer values of its bit-width cannot be
880 /// represented. E.g using uint8_t, its loop trip count of 256 cannot be
881 /// stored into an 8 bit integer):
882 ///
883 /// DO I = 0, 255, 1
884 ///
885 /// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
886 /// effectively counting downwards:
887 ///
888 /// for (uint8_t i = 100u; i > 0; i += 127u)
889 ///
890 ///
891 /// TODO: May need to add additional parameters to represent:
892 ///
893 /// * Allow representing downcounting with unsigned integers.
894 ///
895 /// * Sign of the step and the comparison operator might disagree:
896 ///
897 /// for (int i = 0; i < 42; i -= 1u)
898 ///
899 /// \param Loc The insert and source location description.
900 /// \param Start Value of the loop counter for the first iterations.
901 /// \param Stop Loop counter values past this will stop the loop.
902 /// \param Step Loop counter increment after each iteration; negative
903 /// means counting down.
904 /// \param IsSigned Whether Start, Stop and Step are signed integers.
905 /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
906 /// counter.
907 /// \param Name Base name used to derive instruction names.
908 ///
909 /// \returns The value holding the calculated trip count.
911 const LocationDescription &Loc, Value *Start, Value *Stop, Value *Step,
912 bool IsSigned, bool InclusiveStop, const Twine &Name = "loop");
913
914 /// Generator for the control flow structure of an OpenMP canonical loop.
915 ///
916 /// Instead of a logical iteration space, this allows specifying user-defined
917 /// loop counter values using increment, upper- and lower bounds. To
918 /// disambiguate the terminology when counting downwards, instead of lower
919 /// bounds we use \p Start for the loop counter value in the first body
920 /// iteration.
921 /// It calls \see calculateCanonicalLoopTripCount for trip count calculations,
922 /// so limitations of that method apply here as well.
923 ///
924 /// \param Loc The insert and source location description.
925 /// \param BodyGenCB Callback that will generate the loop body code.
926 /// \param Start Value of the loop counter for the first iterations.
927 /// \param Stop Loop counter values past this will stop the loop.
928 /// \param Step Loop counter increment after each iteration; negative
929 /// means counting down.
930 /// \param IsSigned Whether Start, Stop and Step are signed integers.
931 /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
932 /// counter.
933 /// \param ComputeIP Insertion point for instructions computing the trip
934 /// count. Can be used to ensure the trip count is available
935 /// at the outermost loop of a loop nest. If not set,
936 /// defaults to the preheader of the generated loop.
937 /// \param Name Base name used to derive BB and instruction names.
938 /// \param InScan Whether loop has a scan reduction specified.
939 /// \param ScanRedInfo Pointer to the ScanInfo object created using
940 /// `ScanInfoInitialize`.
941 ///
942 /// \returns An object representing the created control flow structure which
943 /// can be used for loop-associated directives.
946 Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
947 InsertPointTy ComputeIP = {}, const Twine &Name = "loop",
948 bool InScan = false, ScanInfo *ScanRedInfo = nullptr);
949
950 /// Collapse a loop nest into a single loop.
951 ///
952 /// Merges loops of a loop nest into a single CanonicalLoopNest representation
953 /// that has the same number of innermost loop iterations as the origin loop
954 /// nest. The induction variables of the input loops are derived from the
955 /// collapsed loop's induction variable. This is intended to be used to
956 /// implement OpenMP's collapse clause. Before applying a directive,
957 /// collapseLoops normalizes a loop nest to contain only a single loop and the
958 /// directive's implementation does not need to handle multiple loops itself.
959 /// This does not remove the need to handle all loop nest handling by
960 /// directives, such as the ordered(<n>) clause or the simd schedule-clause
961 /// modifier of the worksharing-loop directive.
962 ///
963 /// Example:
964 /// \code
965 /// for (int i = 0; i < 7; ++i) // Canonical loop "i"
966 /// for (int j = 0; j < 9; ++j) // Canonical loop "j"
967 /// body(i, j);
968 /// \endcode
969 ///
970 /// After collapsing with Loops={i,j}, the loop is changed to
971 /// \code
972 /// for (int ij = 0; ij < 63; ++ij) {
973 /// int i = ij / 9;
974 /// int j = ij % 9;
975 /// body(i, j);
976 /// }
977 /// \endcode
978 ///
979 /// In the current implementation, the following limitations apply:
980 ///
981 /// * All input loops have an induction variable of the same type.
982 ///
983 /// * The collapsed loop will have the same trip count integer type as the
984 /// input loops. Therefore it is possible that the collapsed loop cannot
985 /// represent all iterations of the input loops. For instance, assuming a
986 /// 32 bit integer type, and two input loops both iterating 2^16 times, the
987 /// theoretical trip count of the collapsed loop would be 2^32 iterations,
988 /// which cannot be represented in a 32-bit integer. Behavior is undefined
989 /// in this case.
990 ///
991 /// * The trip counts of every input loop must be available at \p ComputeIP.
992 /// Non-rectangular loops are not yet supported.
993 ///
994 /// * At each nest level, code between a surrounding loop and its nested loop
995 /// is hoisted into the loop body, and such code will be executed more
996 /// often than before collapsing (or not at all if any inner loop iteration
997 /// has a trip count of 0). This is permitted by the OpenMP specification.
998 ///
999 /// \param DL Debug location for instructions added for collapsing,
1000 /// such as instructions to compute/derive the input loop's
1001 /// induction variables.
1002 /// \param Loops Loops in the loop nest to collapse. Loops are specified
1003 /// from outermost-to-innermost and every control flow of a
1004 /// loop's body must pass through its directly nested loop.
1005 /// \param ComputeIP Where additional instruction that compute the collapsed
1006 /// trip count. If not set, defaults to before the generated
1007 /// loop.
1008 ///
1009 /// \returns The CanonicalLoopInfo object representing the collapsed loop.
1012 InsertPointTy ComputeIP);
1013
1014 /// Get the default alignment value for given target
1015 ///
1016 /// \param TargetTriple Target triple
1017 /// \param Features StringMap which describes extra CPU features
1018 LLVM_ABI static unsigned
1019 getOpenMPDefaultSimdAlign(const Triple &TargetTriple,
1020 const StringMap<bool> &Features);
1021
1022 /// Retrieve (or create if non-existent) the address of a declare
1023 /// target variable, used in conjunction with registerTargetGlobalVariable
1024 /// to create declare target global variables.
1025 ///
1026 /// \param CaptureClause - enumerator corresponding to the OpenMP capture
1027 /// clause used in conjunction with the variable being registered (link,
1028 /// to, enter).
1029 /// \param DeviceClause - enumerator corresponding to the OpenMP capture
1030 /// clause used in conjunction with the variable being registered (nohost,
1031 /// host, any)
1032 /// \param IsDeclaration - boolean stating if the variable being registered
1033 /// is a declaration-only and not a definition
1034 /// \param IsExternallyVisible - boolean stating if the variable is externally
1035 /// visible
1036 /// \param EntryInfo - Unique entry information for the value generated
1037 /// using getTargetEntryUniqueInfo, used to name generated pointer references
1038 /// to the declare target variable
1039 /// \param MangledName - the mangled name of the variable being registered
1040 /// \param GeneratedRefs - references generated by invocations of
1041 /// registerTargetGlobalVariable invoked from getAddrOfDeclareTargetVar,
1042 /// these are required by Clang for bookkeeping.
1043 /// \param OpenMPSIMD - if OpenMP SIMD mode is currently enabled
1044 /// \param TargetTriple - The OpenMP device target triple we are compiling
1045 /// for
1046 /// \param LlvmPtrTy - The type of the variable we are generating or
1047 /// retrieving an address for
1048 /// \param GlobalInitializer - a lambda function which creates a constant
1049 /// used for initializing a pointer reference to the variable in certain
1050 /// cases. If a nullptr is passed, it will default to utilising the original
1051 /// variable to initialize the pointer reference.
1052 /// \param VariableLinkage - a lambda function which returns the variables
1053 /// linkage type, if unspecified and a nullptr is given, it will instead
1054 /// utilise the linkage stored on the existing global variable in the
1055 /// LLVMModule.
1059 bool IsDeclaration, bool IsExternallyVisible,
1060 TargetRegionEntryInfo EntryInfo, StringRef MangledName,
1061 std::vector<GlobalVariable *> &GeneratedRefs, bool OpenMPSIMD,
1062 std::vector<Triple> TargetTriple, Type *LlvmPtrTy,
1063 std::function<Constant *()> GlobalInitializer,
1064 std::function<GlobalValue::LinkageTypes()> VariableLinkage);
1065
1066 /// Registers a target variable for device or host.
1067 ///
1068 /// \param CaptureClause - enumerator corresponding to the OpenMP capture
1069 /// clause used in conjunction with the variable being registered (link,
1070 /// to, enter).
1071 /// \param DeviceClause - enumerator corresponding to the OpenMP capture
1072 /// clause used in conjunction with the variable being registered (nohost,
1073 /// host, any)
1074 /// \param IsDeclaration - boolean stating if the variable being registered
1075 /// is a declaration-only and not a definition
1076 /// \param IsExternallyVisible - boolean stating if the variable is externally
1077 /// visible
1078 /// \param EntryInfo - Unique entry information for the value generated
1079 /// using getTargetEntryUniqueInfo, used to name generated pointer references
1080 /// to the declare target variable
1081 /// \param MangledName - the mangled name of the variable being registered
1082 /// \param GeneratedRefs - references generated by invocations of
1083 /// registerTargetGlobalVariable, these are required by Clang for
1084 /// bookkeeping.
1085 /// \param OpenMPSIMD - if OpenMP SIMD mode is currently enabled
1086 /// \param TargetTriple - The OpenMP device target triple we are compiling
1087 /// for
1088 /// \param GlobalInitializer - a lambda function which creates a constant
1089 /// used for initializing a pointer reference to the variable in certain
1090 /// cases. If a nullptr is passed, it will default to utilising the original
1091 /// variable to initialize the pointer reference.
1092 /// \param VariableLinkage - a lambda function which returns the variables
1093 /// linkage type, if unspecified and a nullptr is given, it will instead
1094 /// utilise the linkage stored on the existing global variable in the
1095 /// LLVMModule.
1096 /// \param LlvmPtrTy - The type of the variable we are generating or
1097 /// retrieving an address for
1098 /// \param Addr - the original llvm value (addr) of the variable to be
1099 /// registered
1103 bool IsDeclaration, bool IsExternallyVisible,
1104 TargetRegionEntryInfo EntryInfo, StringRef MangledName,
1105 std::vector<GlobalVariable *> &GeneratedRefs, bool OpenMPSIMD,
1106 std::vector<Triple> TargetTriple,
1107 std::function<Constant *()> GlobalInitializer,
1108 std::function<GlobalValue::LinkageTypes()> VariableLinkage,
1109 Type *LlvmPtrTy, Constant *Addr);
1110
1111 /// Get the offset of the OMP_MAP_MEMBER_OF field.
1112 LLVM_ABI unsigned getFlagMemberOffset();
1113
1114 /// Get OMP_MAP_MEMBER_OF flag with extra bits reserved based on
1115 /// the position given.
1116 /// \param Position - A value indicating the position of the parent
1117 /// of the member in the kernel argument structure, often retrieved
1118 /// by the parents position in the combined information vectors used
1119 /// to generate the structure itself. Multiple children (member's of)
1120 /// with the same parent will use the same returned member flag.
1122
1123 /// Given an initial flag set, this function modifies it to contain
1124 /// the passed in MemberOfFlag generated from the getMemberOfFlag
1125 /// function. The results are dependent on the existing flag bits
1126 /// set in the original flag set.
1127 /// \param Flags - The original set of flags to be modified with the
1128 /// passed in MemberOfFlag.
1129 /// \param MemberOfFlag - A modified OMP_MAP_MEMBER_OF flag, adjusted
1130 /// slightly based on the getMemberOfFlag which adjusts the flag bits
1131 /// based on the members position in its parent.
1132 LLVM_ABI void
1134 omp::OpenMPOffloadMappingFlags MemberOfFlag);
1135
1136private:
1137 /// Modifies the canonical loop to be a statically-scheduled workshare loop
1138 /// which is executed on the device
1139 ///
1140 /// This takes a \p CLI representing a canonical loop, such as the one
1141 /// created by \see createCanonicalLoop and emits additional instructions to
1142 /// turn it into a workshare loop. In particular, it calls to an OpenMP
1143 /// runtime function in the preheader to call OpenMP device rtl function
1144 /// which handles worksharing of loop body iterations.
1145 ///
1146 /// \param DL Debug location for instructions added for the
1147 /// workshare-loop construct itself.
1148 /// \param CLI A descriptor of the canonical loop to workshare.
1149 /// \param AllocaIP An insertion point for Alloca instructions usable in the
1150 /// preheader of the loop.
1151 /// \param LoopType Information about type of loop worksharing.
1152 /// It corresponds to type of loop workshare OpenMP pragma.
1153 /// \param NoLoop If true, no-loop code is generated.
1154 ///
1155 /// \returns Point where to insert code after the workshare construct.
1156 InsertPointTy applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI,
1157 InsertPointTy AllocaIP,
1158 omp::WorksharingLoopType LoopType,
1159 bool NoLoop);
1160
1161 /// Modifies the canonical loop to be a statically-scheduled workshare loop.
1162 ///
1163 /// This takes a \p LoopInfo representing a canonical loop, such as the one
1164 /// created by \p createCanonicalLoop and emits additional instructions to
1165 /// turn it into a workshare loop. In particular, it calls to an OpenMP
1166 /// runtime function in the preheader to obtain the loop bounds to be used in
1167 /// the current thread, updates the relevant instructions in the canonical
1168 /// loop and calls to an OpenMP runtime finalization function after the loop.
1169 ///
1170 /// \param DL Debug location for instructions added for the
1171 /// workshare-loop construct itself.
1172 /// \param CLI A descriptor of the canonical loop to workshare.
1173 /// \param AllocaIP An insertion point for Alloca instructions usable in the
1174 /// preheader of the loop.
1175 /// \param NeedsBarrier Indicates whether a barrier must be inserted after
1176 /// the loop.
1177 /// \param LoopType Type of workshare loop.
1178 /// \param HasDistSchedule Defines if the clause being lowered is
1179 /// dist_schedule as this is handled slightly differently
1180 /// \param DistScheduleSchedType Defines the Schedule Type for the Distribute
1181 /// loop. Defaults to None if no Distribute loop is present.
1182 ///
1183 /// \returns Point where to insert code after the workshare construct.
1184 InsertPointOrErrorTy applyStaticWorkshareLoop(
1186 omp::WorksharingLoopType LoopType, bool NeedsBarrier,
1187 bool HasDistSchedule = false,
1188 omp::OMPScheduleType DistScheduleSchedType = omp::OMPScheduleType::None);
1189
1190 /// Modifies the canonical loop to be a statically-scheduled workshare loop with a
1191 /// user-specified chunk size.
1192 ///
1193 /// \param DL Debug location for instructions added for the
1194 /// workshare-loop construct itself.
1195 /// \param CLI A descriptor of the canonical loop to workshare.
1196 /// \param AllocaIP An insertion point for Alloca instructions usable in
1197 /// the preheader of the loop.
1198 /// \param NeedsBarrier Indicates whether a barrier must be inserted after the
1199 /// loop.
1200 /// \param ChunkSize The user-specified chunk size.
1201 /// \param SchedType Optional type of scheduling to be passed to the init
1202 /// function.
1203 /// \param DistScheduleChunkSize The size of dist_schedule chunk considered
1204 /// as a unit when scheduling.
1205 /// If \p nullptr, defaults to 1.
1206 /// \param DistScheduleSchedType Defines the Schedule Type for the Distribute
1207 /// loop. Defaults to None if no Distribute loop is present.
1208 ///
1209 /// \returns Point where to insert code after the workshare construct.
1210 InsertPointOrErrorTy applyStaticChunkedWorkshareLoop(
1212 bool NeedsBarrier, Value *ChunkSize,
1213 omp::OMPScheduleType SchedType =
1215 Value *DistScheduleChunkSize = nullptr,
1216 omp::OMPScheduleType DistScheduleSchedType = omp::OMPScheduleType::None);
1217
1218 /// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
1219 ///
1220 /// This takes a \p LoopInfo representing a canonical loop, such as the one
1221 /// created by \p createCanonicalLoop and emits additional instructions to
1222 /// turn it into a workshare loop. In particular, it calls to an OpenMP
1223 /// runtime function in the preheader to obtain, and then in each iteration
1224 /// to update the loop counter.
1225 ///
1226 /// \param DL Debug location for instructions added for the
1227 /// workshare-loop construct itself.
1228 /// \param CLI A descriptor of the canonical loop to workshare.
1229 /// \param AllocaIP An insertion point for Alloca instructions usable in the
1230 /// preheader of the loop.
1231 /// \param SchedType Type of scheduling to be passed to the init function.
1232 /// \param NeedsBarrier Indicates whether a barrier must be inserted after
1233 /// the loop.
1234 /// \param Chunk The size of loop chunk considered as a unit when
1235 /// scheduling. If \p nullptr, defaults to 1.
1236 ///
1237 /// \returns Point where to insert code after the workshare construct.
1238 InsertPointOrErrorTy applyDynamicWorkshareLoop(DebugLoc DL,
1239 CanonicalLoopInfo *CLI,
1240 InsertPointTy AllocaIP,
1241 omp::OMPScheduleType SchedType,
1242 bool NeedsBarrier,
1243 Value *Chunk = nullptr);
1244
1245 /// Create alternative version of the loop to support if clause
1246 ///
1247 /// OpenMP if clause can require to generate second loop. This loop
1248 /// will be executed when if clause condition is not met. createIfVersion
1249 /// adds branch instruction to the copied loop if \p ifCond is not met.
1250 ///
1251 /// \param Loop Original loop which should be versioned.
1252 /// \param IfCond Value which corresponds to if clause condition
1253 /// \param VMap Value to value map to define relation between
1254 /// original and copied loop values and loop blocks.
1255 /// \param NamePrefix Optional name prefix for if.then if.else blocks.
1256 void createIfVersion(CanonicalLoopInfo *Loop, Value *IfCond,
1258 LoopAnalysis &LIA, LoopInfo &LI, llvm::Loop *L,
1259 const Twine &NamePrefix = "");
1260
1261 /// Creates a task duplication function to be passed to kmpc_taskloop.
1262 ///
1263 /// The OpenMP runtime defines this function as taking the destination
1264 /// kmp_task_t, source kmp_task_t, and a lastprivate flag. This function is
1265 /// called on the source and destination tasks after the source task has been
1266 /// duplicated to create the destination task. At this point the destination
1267 /// task has been otherwise set up from the runtime's perspective, but this
1268 /// function is needed to fix up any data for the duplicated task e.g. private
1269 /// variables with non-trivial constructors.
1270 ///
1271 /// \param PrivatesTy The type of the privates structure for the task.
1272 /// \param PrivatesIndex The index inside the privates structure containing
1273 /// the data for the callback.
1274 /// \param DupCB The callback to generate the duplication code. See
1275 /// documentation for \ref TaskDupCallbackTy. This can be
1276 /// nullptr.
1277 Expected<Value *> createTaskDuplicationFunction(Type *PrivatesTy,
1278 int32_t PrivatesIndex,
1279 TaskDupCallbackTy DupCB);
1280
1281public:
1282 /// Modifies the canonical loop to be a workshare loop.
1283 ///
1284 /// This takes a \p LoopInfo representing a canonical loop, such as the one
1285 /// created by \p createCanonicalLoop and emits additional instructions to
1286 /// turn it into a workshare loop. In particular, it calls to an OpenMP
1287 /// runtime function in the preheader to obtain the loop bounds to be used in
1288 /// the current thread, updates the relevant instructions in the canonical
1289 /// loop and calls to an OpenMP runtime finalization function after the loop.
1290 ///
1291 /// The concrete transformation is done by applyStaticWorkshareLoop,
1292 /// applyStaticChunkedWorkshareLoop, or applyDynamicWorkshareLoop, depending
1293 /// on the value of \p SchedKind and \p ChunkSize.
1294 ///
1295 /// \param DL Debug location for instructions added for the
1296 /// workshare-loop construct itself.
1297 /// \param CLI A descriptor of the canonical loop to workshare.
1298 /// \param AllocaIP An insertion point for Alloca instructions usable in the
1299 /// preheader of the loop.
1300 /// \param NeedsBarrier Indicates whether a barrier must be inserted after
1301 /// the loop.
1302 /// \param SchedKind Scheduling algorithm to use.
1303 /// \param ChunkSize The chunk size for the inner loop.
1304 /// \param HasSimdModifier Whether the simd modifier is present in the
1305 /// schedule clause.
1306 /// \param HasMonotonicModifier Whether the monotonic modifier is present in
1307 /// the schedule clause.
1308 /// \param HasNonmonotonicModifier Whether the nonmonotonic modifier is
1309 /// present in the schedule clause.
1310 /// \param HasOrderedClause Whether the (parameterless) ordered clause is
1311 /// present.
1312 /// \param LoopType Information about type of loop worksharing.
1313 /// It corresponds to type of loop workshare OpenMP pragma.
1314 /// \param NoLoop If true, no-loop code is generated.
1315 /// \param HasDistSchedule Defines if the clause being lowered is
1316 /// dist_schedule as this is handled slightly differently
1317 ///
1318 /// \param DistScheduleChunkSize The chunk size for dist_schedule loop
1319 ///
1320 /// \returns Point where to insert code after the workshare construct.
1323 bool NeedsBarrier,
1324 llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default,
1325 Value *ChunkSize = nullptr, bool HasSimdModifier = false,
1326 bool HasMonotonicModifier = false, bool HasNonmonotonicModifier = false,
1327 bool HasOrderedClause = false,
1328 omp::WorksharingLoopType LoopType =
1330 bool NoLoop = false, bool HasDistSchedule = false,
1331 Value *DistScheduleChunkSize = nullptr);
1332
1333 /// Tile a loop nest.
1334 ///
1335 /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
1336 /// \p Loops must be perfectly nested, from outermost to innermost loop
1337 /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
1338 /// of every loop and every tile sizes must be usable in the outermost
1339 /// loop's preheader. This implies that the loop nest is rectangular.
1340 ///
1341 /// Example:
1342 /// \code
1343 /// for (int i = 0; i < 15; ++i) // Canonical loop "i"
1344 /// for (int j = 0; j < 14; ++j) // Canonical loop "j"
1345 /// body(i, j);
1346 /// \endcode
1347 ///
1348 /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
1349 /// \code
1350 /// for (int i1 = 0; i1 < 3; ++i1)
1351 /// for (int j1 = 0; j1 < 2; ++j1)
1352 /// for (int i2 = 0; i2 < 5; ++i2)
1353 /// for (int j2 = 0; j2 < 7; ++j2)
1354 /// body(i1*5+i2, j1*7+j2);
1355 /// \endcode
1356 ///
1357 /// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are
1358 /// referred to as the floor, and the loops i2 and j2 are the tiles. Tiling also
1359 /// handles non-constant trip counts, non-constant tile sizes and trip counts
1360 /// that are not multiples of the tile size. In the latter case the tile loop
1361 /// of the last floor-loop iteration will have fewer iterations than specified
1362 /// as its tile size.
1363 ///
1364 ///
1365 /// @param DL Debug location for instructions added by tiling, for
1366 /// instance the floor- and tile trip count computation.
1367 /// @param Loops Loops to tile. The CanonicalLoopInfo objects are
1368 /// invalidated by this method, i.e. should not used after
1369 /// tiling.
1370 /// @param TileSizes For each loop in \p Loops, the tile size for that
1371 /// dimensions.
1372 ///
1373 /// \returns A list of generated loops. Contains twice as many loops as the
1374 /// input loop nest; the first half are the floor loops and the
1375 /// second half are the tile loops.
1376 LLVM_ABI std::vector<CanonicalLoopInfo *>
1378 ArrayRef<Value *> TileSizes);
1379
1380 /// Fuse a sequence of loops.
1381 ///
1382 /// Fuses the loops of \p Loops.
1383 /// The merging of the loops is done in the following structure:
1384 ///
1385 /// Example:
1386 /// \code
1387 /// for (int i = lb0; i < ub0; i += st0) // trip count is calculated as:
1388 /// body(i) // tc0 = (ub0 - lb0 + st0) / st0
1389 /// for (int j = lb1; j < ub1; j += st1)
1390 /// body(j);
1391 ///
1392 /// ...
1393 ///
1394 /// for (int k = lbk; k < ubk; k += stk)
1395 /// body(k);
1396 /// \endcode
1397 ///
1398 /// After fusing the loops a single loop is left:
1399 /// \code
1400 /// for (fuse.index = 0; fuse.index < max(tc0, tc1, ... tck); ++fuse.index) {
1401 /// if (fuse.index < tc0){
1402 /// iv0 = lb0 + st0 * fuse.index;
1403 /// original.index0 = iv0
1404 /// body(0);
1405 /// }
1406 /// if (fuse.index < tc1){
1407 /// iv1 = lb1 + st1 * fuse.index;
1408 /// original.index1 = iv1
1409 /// body(1);
1410 /// }
1411 ///
1412 /// ...
1413 ///
1414 /// if (fuse.index < tck){
1415 /// ivk = lbk + stk * fuse.index;
1416 /// original.indexk = ivk
1417 /// body(k);
1418 /// }
1419 /// }
1420 /// \endcode
1421 ///
1422 ///
1423 /// @param DL Debug location for instructions added by fusion.
1424 ///
1425 /// @param Loops Loops to fuse. The CanonicalLoopInfo objects are
1426 /// invalidated by this method, i.e. should not used after
1427 /// fusion.
1428 ///
1429 /// \returns A single loop generated by the loop fusion
1432
1433 /// Fully unroll a loop.
1434 ///
1435 /// Instead of unrolling the loop immediately (and duplicating its body
1436 /// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
1437 /// metadata.
1438 ///
1439 /// \param DL Debug location for instructions added by unrolling.
1440 /// \param Loop The loop to unroll. The loop will be invalidated.
1442
1443 /// Fully or partially unroll a loop. How the loop is unrolled is determined
1444 /// using LLVM's LoopUnrollPass.
1445 ///
1446 /// \param DL Debug location for instructions added by unrolling.
1447 /// \param Loop The loop to unroll. The loop will be invalidated.
1449
1450 /// Partially unroll a loop.
1451 ///
1452 /// The CanonicalLoopInfo of the unrolled loop for use with chained
1453 /// loop-associated directive can be requested using \p UnrolledCLI. Not
1454 /// needing the CanonicalLoopInfo allows more efficient code generation by
1455 /// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
1456 /// A loop-associated directive applied to the unrolled loop needs to know the
1457 /// new trip count which means that if using a heuristically determined unroll
1458 /// factor (\p Factor == 0), that factor must be computed immediately. We are
1459 /// using the same logic as the LoopUnrollPass to derive the unroll factor,
1460 /// but which assumes that some canonicalization has taken place (e.g.
1461 /// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
1462 /// better when the unrolled loop's CanonicalLoopInfo is not needed.
1463 ///
1464 /// \param DL Debug location for instructions added by unrolling.
1465 /// \param Loop The loop to unroll. The loop will be invalidated.
1466 /// \param Factor The factor to unroll the loop by. A factor of 0
1467 /// indicates that a heuristic should be used to determine
1468 /// the unroll-factor.
1469 /// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
1470 /// partially unrolled loop. Otherwise, uses loop metadata
1471 /// to defer unrolling to the LoopUnrollPass.
1473 int32_t Factor,
1474 CanonicalLoopInfo **UnrolledCLI);
1475
1476 /// Add metadata to simd-ize a loop. If IfCond is not nullptr, the loop
1477 /// is cloned. The metadata which prevents vectorization is added
1478 /// to the cloned loop. The cloned loop is executed when \p IfCond is evaluated
1479 /// to false.
1480 ///
1481 /// \param Loop The loop to simd-ize.
1482 /// \param AlignedVars The map which contains pairs of the pointer
1483 /// and its corresponding alignment.
1484 /// \param IfCond The value which corresponds to the if clause
1485 /// condition.
1486 /// \param Order The enum to map order clause.
1487 /// \param Simdlen The Simdlen length to apply to the simd loop.
1488 /// \param Safelen The Safelen length to apply to the simd loop.
1490 MapVector<Value *, Value *> AlignedVars,
1491 Value *IfCond, omp::OrderKind Order,
1492 ConstantInt *Simdlen, ConstantInt *Safelen);
1493
1494 /// Generator for '#omp flush'
1495 ///
1496 /// \param Loc The location where the flush directive was encountered
1497 LLVM_ABI void createFlush(const LocationDescription &Loc);
1498
1499 /// Generator for '#omp taskwait'
1500 ///
1501 /// \param Loc The location where the taskwait directive was encountered.
1502 LLVM_ABI void createTaskwait(const LocationDescription &Loc);
1503
1504 /// Generator for '#omp taskyield'
1505 ///
1506 /// \param Loc The location where the taskyield directive was encountered.
1507 LLVM_ABI void createTaskyield(const LocationDescription &Loc);
1508
1509 /// A struct to pack the relevant information for an OpenMP depend clause.
1519
1520 /// A struct to pack static and dynamic dependency information for a task.
1521 ///
1522 /// For fixed-count (non-iterator) dependencies, callers populate \p Deps
1523 /// and the builder allocates and fills the kmp_depend_info array internally.
1524 /// For iterator-based dependencies, the caller pre-builds the array and
1525 /// sets \p NumDeps and \p DepArray directly.
1527 SmallVector<DependData> Deps; // vector of dependencies
1528 Value *NumDeps; // number of kmp_depend_info entries (used by iterator path)
1529 Value *DepArray; // kmp_depend_info array (used by iterator path)
1530
1531 DependenciesInfo() : Deps(), NumDeps(nullptr), DepArray(nullptr) {}
1534
1535 bool empty() const { return Deps.empty() && DepArray == nullptr; }
1536 };
1537
1538 /// Store one kmp_depend_info entry at the given \p Entry pointer.
1539 LLVM_ABI void emitTaskDependency(IRBuilderBase &Builder, Value *Entry,
1540 const DependData &Dep);
1541
1542 /// Return the LLVM struct type matching runtime `kmp_task_affinity_info_t`.
1543 /// `{ kmp_intptr_t base_addr; size_t len; flags (bitfield storage as i32) }`
1545
1546 /// A struct to pack the relevant information for an OpenMP affinity clause.
1548 Value *Count; // number of kmp_task_affinity_info_t entries
1549 Value *Info; // kmp_task_affinity_info_t
1550 };
1551
1552 /// Generator for `#omp taskloop`
1553 ///
1554 /// \param Loc The location where the taskloop construct was encountered.
1555 /// \param AllocaIP The insertion point to be used for alloca instructions.
1556 /// \param DeallocBlocks The list of insertion blocks where explicit
1557 /// deallocations, if needed, should be placed.
1558 /// \param BodyGenCB Callback that will generate the region code.
1559 /// \param LoopInfo Callback that returns the CLI
1560 /// \param LBVal Lowerbound value of loop
1561 /// \param UBVal Upperbound value of loop
1562 /// \param StepVal Step value of loop
1563 /// \param Untied True if the task is untied, false if the task is tied.
1564 /// \param IfCond i1 value. If it evaluates to `false`, an undeferred
1565 /// task is generated, and the encountering thread must
1566 /// suspend the current task region, for which execution
1567 /// cannot be resumed until execution of the structured
1568 /// block that is associated with the generated task is
1569 /// completed.
1570 /// \param GrainSize Value of the GrainSize/Num of Tasks if present
1571 /// \param NoGroup False if NoGroup is defined, true if not
1572 /// \param Sched If GrainSize is defined, Sched is 1; if Num Tasks, Sched is 2.
1573 /// Otherwise Sched is 0
1574 /// \param Final i1 value which is `true` if the task is final, `false` if the
1575 /// task is not final.
1576 /// \param Mergeable If the given task is `mergeable`
1577 /// \param Priority `priority-value` specifies the execution order of the
1578 /// tasks that is generated by the construct
1579 /// \param NumOfCollapseLoops Defines the number of loops that are being
1580 /// collapsed. The default value is 1, as that's the value when collapse is not
1581 /// used.
1582 /// \param DupCB The callback to generate the duplication code. See
1583 /// documentation for \ref TaskDupCallbackTy. This can be nullptr.
1584 /// \param TaskContextStructPtrVal If non-null, a pointer to be placed
1585 /// immediately after the {lower bound, upper
1586 /// bound, step} values in the task data.
1587 LLVM_ABI InsertPointOrErrorTy createTaskloop(
1588 const LocationDescription &Loc, InsertPointTy AllocaIP,
1589 ArrayRef<BasicBlock *> DeallocBlocks, BodyGenCallbackTy BodyGenCB,
1591 Value *LBVal, Value *UBVal, Value *StepVal, bool Untied = false,
1592 Value *IfCond = nullptr, Value *GrainSize = nullptr, bool NoGroup = false,
1593 int Sched = 0, Value *Final = nullptr, bool Mergeable = false,
1594 Value *Priority = nullptr, uint64_t NumOfCollapseLoops = 1,
1595 TaskDupCallbackTy DupCB = nullptr,
1596 Value *TaskContextStructPtrVal = nullptr);
1597
1598 /// Generator for `#omp task`
1599 ///
1600 /// \param Loc The location where the task construct was encountered.
1601 /// \param AllocaIP The insertion point to be used for allocations.
1602 /// \param DeallocBlocks The insertion blocks to be used for explicit
1603 /// deallocations, if needed.
1604 /// \param BodyGenCB Callback that will generate the region code.
1605 /// \param Tied True if the task is tied, false if the task is untied.
1606 /// \param Final i1 value which is `true` if the task is final, `false` if the
1607 /// task is not final.
1608 /// \param IfCondition i1 value. If it evaluates to `false`, an undeferred
1609 /// task is generated, and the encountering thread must
1610 /// suspend the current task region, for which execution
1611 /// cannot be resumed until execution of the structured
1612 /// block that is associated with the generated task is
1613 /// completed.
1614 /// \param Dependencies Dependencies info holding either a vector of
1615 /// DependData objects or a pre-built dependency array.
1616 /// \param Affinities AffinityData object holding information of accumulated
1617 /// affinities as specified by the 'affinity' clause.
1618 /// \param EventHandle If present, signifies the event handle as part of
1619 /// the detach clause
1620 /// \param Mergeable If the given task is `mergeable`
1621 /// \param Priority `priority-value` specifies the execution order of the
1622 /// tasks that are generated by the construct
1624 const LocationDescription &Loc, InsertPointTy AllocaIP,
1625 ArrayRef<BasicBlock *> DeallocBlocks, BodyGenCallbackTy BodyGenCB,
1626 bool Tied = true, Value *Final = nullptr, Value *IfCondition = nullptr,
1627 const DependenciesInfo &Dependencies = {},
1628 const AffinityData &Affinities = {}, bool Mergeable = false,
1629 Value *EventHandle = nullptr, Value *Priority = nullptr);
1630
1631 /// Generator for the taskgroup construct
1632 ///
1633 /// \param Loc The location where the taskgroup construct was encountered.
1634 /// \param AllocaIP The insertion point to be used for allocations.
1635 /// \param DeallocBlocks The insertion blocks to be used for explicit
1636 /// deallocation instructions, if needed.
1637 /// \param BodyGenCB Callback that will generate the region code.
1639 const LocationDescription &Loc, InsertPointTy AllocaIP,
1640 ArrayRef<BasicBlock *> DeallocBlocks, BodyGenCallbackTy BodyGenCB);
1641
1643 std::function<std::tuple<std::string, uint64_t>()>;
1644
1645 /// Creates a unique info for a target entry when provided a filename and
1646 /// line number.
1647 ///
1648 /// \param CallBack A callback function which should return filename the entry
1649 /// resides in as well as the line number for the target entry
1650 /// \param ParentName The name of the parent the target entry resides in, if
1651 /// any.
1654 vfs::FileSystem &VFS, StringRef ParentName = "");
1655
1656 /// Enum class for the ReductionGen CallBack type to be used.
1658
1659 /// ReductionGen CallBack for Clang
1660 ///
1661 /// \param CodeGenIP InsertPoint for CodeGen.
1662 /// \param Index Index of the ReductionInfo to generate code for.
1663 /// \param LHSPtr Optionally used by Clang to return the LHSPtr it used for
1664 /// codegen, used for fixup later.
1665 /// \param RHSPtr Optionally used by Clang to
1666 /// return the RHSPtr it used for codegen, used for fixup later.
1667 /// \param CurFn Optionally used by Clang to pass in the Current Function as
1668 /// Clang context may be old.
1670 std::function<InsertPointTy(InsertPointTy CodeGenIP, unsigned Index,
1671 Value **LHS, Value **RHS, Function *CurFn)>;
1672
1673 /// ReductionGen CallBack for MLIR
1674 ///
1675 /// \param CodeGenIP InsertPoint for CodeGen.
1676 /// \param LHS Pass in the LHS Value to be used for CodeGen.
1677 /// \param RHS Pass in the RHS Value to be used for CodeGen.
1679 InsertPointTy CodeGenIP, Value *LHS, Value *RHS, Value *&Res)>;
1680
1681 /// Functions used to generate atomic reductions. Such functions take two
1682 /// Values representing pointers to LHS and RHS of the reduction, as well as
1683 /// the element type of these pointers. They are expected to atomically
1684 /// update the LHS to the reduced value.
1686 InsertPointTy, Type *, Value *, Value *)>;
1687
1689 InsertPointTy, Value *ByRefVal, Value *&Res)>;
1690
1691 /// Enum class for reduction evaluation types scalar, complex and aggregate.
1693
1694 /// Information about an OpenMP reduction.
1709
1715
1716 /// Reduction element type, must match pointee type of variable. For by-ref
1717 /// reductions, this would be just an opaque `ptr`.
1719
1720 /// Reduction variable of pointer type.
1722
1723 /// Thread-private partial reduction variable.
1725
1726 /// Reduction evaluation kind - scalar, complex or aggregate.
1728
1729 /// Callback for generating the reduction body. The IR produced by this will
1730 /// be used to combine two values in a thread-safe context, e.g., under
1731 /// lock or within the same thread, and therefore need not be atomic.
1733
1734 /// Clang callback for generating the reduction body. The IR produced by
1735 /// this will be used to combine two values in a thread-safe context, e.g.,
1736 /// under lock or within the same thread, and therefore need not be atomic.
1738
1739 /// Callback for generating the atomic reduction body, may be null. The IR
1740 /// produced by this will be used to atomically combine two values during
1741 /// reduction. If null, the implementation will use the non-atomic version
1742 /// along with the appropriate synchronization mechanisms.
1744
1746
1747 /// For by-ref reductions, we need to keep track of 2 extra types that are
1748 /// potentially different:
1749 /// * The allocated type is the type of the storage allocated by the
1750 /// reduction op's `alloc` region. For example, for allocatables and arrays,
1751 /// this type would be the descriptor/box struct.
1753
1754 /// * The by-ref element type is the type of the actual storage needed for
1755 /// the data of the allocatable or array. For example, a float allocatable
1756 /// would need some float storage to store intermediate reduction
1757 /// results.
1759 };
1760
1761 enum class CopyAction : unsigned {
1762 // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
1763 // the warp using shuffle instructions.
1765 // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
1767 };
1768
1774
1775 /// Supporting functions for Reductions CodeGen.
1776private:
1777 /// Get the id of the current thread on the GPU.
1778 Value *getGPUThreadID();
1779
1780 /// Get the GPU warp size.
1781 Value *getGPUWarpSize();
1782
1783 /// Get the id of the warp in the block.
1784 /// We assume that the warp size is 32, which is always the case
1785 /// on the NVPTX device, to generate more efficient code.
1786 Value *getNVPTXWarpID();
1787
1788 /// Get the id of the current lane in the Warp.
1789 /// We assume that the warp size is 32, which is always the case
1790 /// on the NVPTX device, to generate more efficient code.
1791 Value *getNVPTXLaneID();
1792
1793 /// Cast value to the specified type.
1794 Value *castValueToType(InsertPointTy AllocaIP, Value *From, Type *ToType);
1795
1796 /// This function creates calls to one of two shuffle functions to copy
1797 /// variables between lanes in a warp.
1798 Value *createRuntimeShuffleFunction(InsertPointTy AllocaIP, Value *Element,
1799 Type *ElementType, Value *Offset);
1800
1801 /// Function to shuffle over the value from the remote lane.
1802 void shuffleAndStore(InsertPointTy AllocaIP, Value *SrcAddr, Value *DstAddr,
1803 Type *ElementType, Value *Offset, Type *ReductionArrayTy,
1804 bool IsByRefElem);
1805
1806 /// Emit instructions to copy a Reduce list, which contains partially
1807 /// aggregated values, in the specified direction.
1808 Error emitReductionListCopy(
1809 InsertPointTy AllocaIP, CopyAction Action, Type *ReductionArrayTy,
1810 ArrayRef<ReductionInfo> ReductionInfos, Value *SrcBase, Value *DestBase,
1811 ArrayRef<bool> IsByRef,
1812 CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr});
1813
1814 /// Emit a helper that reduces data across two OpenMP threads (lanes)
1815 /// in the same warp. It uses shuffle instructions to copy over data from
1816 /// a remote lane's stack. The reduction algorithm performed is specified
1817 /// by the fourth parameter.
1818 ///
1819 /// Algorithm Versions.
1820 /// Full Warp Reduce (argument value 0):
1821 /// This algorithm assumes that all 32 lanes are active and gathers
1822 /// data from these 32 lanes, producing a single resultant value.
1823 /// Contiguous Partial Warp Reduce (argument value 1):
1824 /// This algorithm assumes that only a *contiguous* subset of lanes
1825 /// are active. This happens for the last warp in a parallel region
1826 /// when the user specified num_threads is not an integer multiple of
1827 /// 32. This contiguous subset always starts with the zeroth lane.
1828 /// Partial Warp Reduce (argument value 2):
1829 /// This algorithm gathers data from any number of lanes at any position.
1830 /// All reduced values are stored in the lowest possible lane. The set
1831 /// of problems every algorithm addresses is a super set of those
1832 /// addressable by algorithms with a lower version number. Overhead
1833 /// increases as algorithm version increases.
1834 ///
1835 /// Terminology
1836 /// Reduce element:
1837 /// Reduce element refers to the individual data field with primitive
1838 /// data types to be combined and reduced across threads.
1839 /// Reduce list:
1840 /// Reduce list refers to a collection of local, thread-private
1841 /// reduce elements.
1842 /// Remote Reduce list:
1843 /// Remote Reduce list refers to a collection of remote (relative to
1844 /// the current thread) reduce elements.
1845 ///
1846 /// We distinguish between three states of threads that are important to
1847 /// the implementation of this function.
1848 /// Alive threads:
1849 /// Threads in a warp executing the SIMT instruction, as distinguished from
1850 /// threads that are inactive due to divergent control flow.
1851 /// Active threads:
1852 /// The minimal set of threads that has to be alive upon entry to this
1853 /// function. The computation is correct iff active threads are alive.
1854 /// Some threads are alive but they are not active because they do not
1855 /// contribute to the computation in any useful manner. Turning them off
1856 /// may introduce control flow overheads without any tangible benefits.
1857 /// Effective threads:
1858 /// In order to comply with the argument requirements of the shuffle
1859 /// function, we must keep all lanes holding data alive. But at most
1860 /// half of them perform value aggregation; we refer to this half of
1861 /// threads as effective. The other half is simply handing off their
1862 /// data.
1863 ///
1864 /// Procedure
1865 /// Value shuffle:
1866 /// In this step active threads transfer data from higher lane positions
1867 /// in the warp to lower lane positions, creating Remote Reduce list.
1868 /// Value aggregation:
1869 /// In this step, effective threads combine their thread local Reduce list
1870 /// with Remote Reduce list and store the result in the thread local
1871 /// Reduce list.
1872 /// Value copy:
1873 /// In this step, we deal with the assumption made by algorithm 2
1874 /// (i.e. contiguity assumption). When we have an odd number of lanes
1875 /// active, say 2k+1, only k threads will be effective and therefore k
1876 /// new values will be produced. However, the Reduce list owned by the
1877 /// (2k+1)th thread is ignored in the value aggregation. Therefore
1878 /// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
1879 /// that the contiguity assumption still holds.
1880 ///
1881 /// \param ReductionInfos Array type containing the ReductionOps.
1882 /// \param ReduceFn The reduction function.
1883 /// \param FuncAttrs Optional param to specify any function attributes that
1884 /// need to be copied to the new function.
1885 /// \param IsByRef For each reduction clause, whether the reduction is by-ref
1886 /// or not.
1887 ///
1888 /// \return The ShuffleAndReduce function.
1889 Expected<Function *> emitShuffleAndReduceFunction(
1891 Function *ReduceFn, AttributeList FuncAttrs, ArrayRef<bool> IsByRef);
1892
1893 /// Helper function for CreateCanonicalScanLoops to create InputLoop
1894 /// in the firstGen and Scan Loop in the SecondGen
1895 /// \param InputLoopGen Callback for generating the loop for input phase
1896 /// \param ScanLoopGen Callback for generating the loop for scan phase
1897 /// \param ScanRedInfo Pointer to the ScanInfo object created using
1898 /// `ScanInfoInitialize`.
1899 ///
1900 /// \return error if any produced, else return success.
1901 Error emitScanBasedDirectiveIR(
1902 llvm::function_ref<Error()> InputLoopGen,
1903 llvm::function_ref<Error(LocationDescription Loc)> ScanLoopGen,
1904 ScanInfo *ScanRedInfo);
1905
1906 /// Creates the basic blocks required for scan reduction.
1907 /// \param ScanRedInfo Pointer to the ScanInfo object created using
1908 /// `ScanInfoInitialize`.
1909 void createScanBBs(ScanInfo *ScanRedInfo);
1910
1911 /// Dynamically allocates the buffer needed for scan reduction.
1912 /// \param AllocaIP The IP where possibly-shared pointer of buffer needs to
1913 /// be declared.
1914 /// \param ScanVars Scan Variables.
1915 /// \param ScanRedInfo Pointer to the ScanInfo object created using
1916 /// `ScanInfoInitialize`.
1917 ///
1918 /// \return error if any produced, else return success.
1919 Error emitScanBasedDirectiveDeclsIR(InsertPointTy AllocaIP,
1920 ArrayRef<llvm::Value *> ScanVars,
1921 ArrayRef<llvm::Type *> ScanVarsType,
1922 ScanInfo *ScanRedInfo);
1923
1924 /// Copies the result back to the reduction variable.
1925 /// \param ReductionInfos Array type containing the ReductionOps.
1926 /// \param ScanRedInfo Pointer to the ScanInfo object created using
1927 /// `ScanInfoInitialize`.
1928 ///
1929 /// \return error if any produced, else return success.
1930 Error emitScanBasedDirectiveFinalsIR(
1933
1934 /// This function emits a helper that gathers Reduce lists from the first
1935 /// lane of every active warp to lanes in the first warp.
1936 ///
1937 /// void inter_warp_copy_func(void* reduce_data, num_warps)
1938 /// shared smem[warp_size];
1939 /// For all data entries D in reduce_data:
1940 /// sync
1941 /// If (I am the first lane in each warp)
1942 /// Copy my local D to smem[warp_id]
1943 /// sync
1944 /// if (I am the first warp)
1945 /// Copy smem[thread_id] to my local D
1946 ///
1947 /// \param Loc The insert and source location description.
1948 /// \param ReductionInfos Array type containing the ReductionOps.
1949 /// \param FuncAttrs Optional param to specify any function attributes that
1950 /// need to be copied to the new function.
1951 /// \param IsByRef For each reduction clause, whether the reduction is by-ref
1952 /// or not.
1953 ///
1954 /// \return The InterWarpCopy function.
1956 emitInterWarpCopyFunction(const LocationDescription &Loc,
1957 ArrayRef<ReductionInfo> ReductionInfos,
1958 AttributeList FuncAttrs, ArrayRef<bool> IsByRef);
1959
1960 /// This function emits a helper that copies all the reduction variables from
1961 /// the team into the provided global buffer for the reduction variables.
1962 ///
1963 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
1964 /// For all data entries D in reduce_data:
1965 /// Copy local D to buffer.D[Idx]
1966 ///
1967 /// \param ReductionInfos Array type containing the ReductionOps.
1968 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1969 /// \param FuncAttrs Optional param to specify any function attributes that
1970 /// need to be copied to the new function.
1971 ///
1972 /// \return The ListToGlobalCopy function.
1974 emitListToGlobalCopyFunction(ArrayRef<ReductionInfo> ReductionInfos,
1975 Type *ReductionsBufferTy,
1976 AttributeList FuncAttrs, ArrayRef<bool> IsByRef);
1977
1978 /// This function emits a helper that copies all the reduction variables from
1979 /// the provided global buffer back into the team's reduction variables.
1980 ///
1981 /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
1982 /// For all data entries D in reduce_data:
1983 /// Copy buffer.D[Idx] to local D;
1984 ///
1985 /// \param ReductionInfos Array type containing the ReductionOps.
1986 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
1987 /// \param FuncAttrs Optional param to specify any function attributes that
1988 /// need to be copied to the new function.
1989 ///
1990 /// \return The GlobalToList function.
1992 emitGlobalToListCopyFunction(ArrayRef<ReductionInfo> ReductionInfos,
1993 Type *ReductionsBufferTy,
1994 AttributeList FuncAttrs, ArrayRef<bool> IsByRef);
1995
1996 /// This function emits a helper that reduces all the reduction variables from
1997 /// the team into the provided global buffer for the reduction variables.
1998 ///
1999 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
2000 /// void *GlobPtrs[];
2001 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
2002 /// ...
2003 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
2004 /// reduce_function(GlobPtrs, reduce_data);
2005 ///
2006 /// \param ReductionInfos Array type containing the ReductionOps.
2007 /// \param ReduceFn The reduction function.
2008 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
2009 /// \param FuncAttrs Optional param to specify any function attributes that
2010 /// need to be copied to the new function.
2011 ///
2012 /// \return The ListToGlobalReduce function.
2014 emitListToGlobalReduceFunction(ArrayRef<ReductionInfo> ReductionInfos,
2015 Function *ReduceFn, Type *ReductionsBufferTy,
2016 AttributeList FuncAttrs,
2017 ArrayRef<bool> IsByRef);
2018
2019 /// This function emits a helper that reduces all the reduction variables from
2020 /// the team into the provided global buffer for the reduction variables.
2021 ///
2022 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
2023 /// void *GlobPtrs[];
2024 /// GlobPtrs[0] = (void*)&buffer.D0[Idx];
2025 /// ...
2026 /// GlobPtrs[N] = (void*)&buffer.DN[Idx];
2027 /// reduce_function(reduce_data, GlobPtrs);
2028 ///
2029 /// \param ReductionInfos Array type containing the ReductionOps.
2030 /// \param ReduceFn The reduction function.
2031 /// \param ReductionsBufferTy The StructTy for the reductions buffer.
2032 /// \param FuncAttrs Optional param to specify any function attributes that
2033 /// need to be copied to the new function.
2034 ///
2035 /// \return The GlobalToListReduce function.
2037 emitGlobalToListReduceFunction(ArrayRef<ReductionInfo> ReductionInfos,
2038 Function *ReduceFn, Type *ReductionsBufferTy,
2039 AttributeList FuncAttrs,
2040 ArrayRef<bool> IsByRef);
2041
2042 /// Get the function name of a reduction function.
2043 std::string getReductionFuncName(StringRef Name) const;
2044
2045 /// Generate a Fortran descriptor for array reductions
2046 ///
2047 /// \param DescriptorAddr Address of the descriptor to initialize
2048 /// \param DataPtr Pointer to the actual data the descriptor should reference
2049 /// \param ElemType Type of elements in the array (may be array type)
2050 /// \param DescriptorType Type of the descriptor structure
2051 /// \param DataPtrPtrGen Callback to get the base_ptr field in the descriptor
2052 ///
2053 /// \return Error if DataPtrPtrGen fails, otherwise success.
2054 InsertPointOrErrorTy generateReductionDescriptor(
2055 Value *DescriptorAddr, Value *DataPtr, Value *SrcDescriptorAddr,
2056 Type *DescriptorType,
2058 DataPtrPtrGen);
2059
2060 /// Emits reduction function.
2061 /// \param ReducerName Name of the function calling the reduction.
2062 /// \param ReductionInfos Array type containing the ReductionOps.
2063 /// \param ReductionGenCBKind Optional param to specify Clang or MLIR
2064 /// CodeGenCB kind.
2065 /// \param FuncAttrs Optional param to specify any function attributes that
2066 /// need to be copied to the new function.
2067 ///
2068 /// \return The reduction function.
2069 Expected<Function *> createReductionFunction(
2070 StringRef ReducerName, ArrayRef<ReductionInfo> ReductionInfos,
2071 ArrayRef<bool> IsByRef,
2073 AttributeList FuncAttrs = {});
2074
2075public:
2076 ///
2077 /// Design of OpenMP reductions on the GPU
2078 ///
2079 /// Consider a typical OpenMP program with one or more reduction
2080 /// clauses:
2081 ///
2082 /// float foo;
2083 /// double bar;
2084 /// #pragma omp target teams distribute parallel for \
2085 /// reduction(+:foo) reduction(*:bar)
2086 /// for (int i = 0; i < N; i++) {
2087 /// foo += A[i]; bar *= B[i];
2088 /// }
2089 ///
2090 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
2091 /// all teams. In our OpenMP implementation on the NVPTX device an
2092 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
2093 /// within a team are mapped to CUDA threads within a threadblock.
2094 /// Our goal is to efficiently aggregate values across all OpenMP
2095 /// threads such that:
2096 ///
2097 /// - the compiler and runtime are logically concise, and
2098 /// - the reduction is performed efficiently in a hierarchical
2099 /// manner as follows: within OpenMP threads in the same warp,
2100 /// across warps in a threadblock, and finally across teams on
2101 /// the NVPTX device.
2102 ///
2103 /// Introduction to Decoupling
2104 ///
2105 /// We would like to decouple the compiler and the runtime so that the
2106 /// latter is ignorant of the reduction variables (number, data types)
2107 /// and the reduction operators. This allows a simpler interface
2108 /// and implementation while still attaining good performance.
2109 ///
2110 /// Pseudocode for the aforementioned OpenMP program generated by the
2111 /// compiler is as follows:
2112 ///
2113 /// 1. Create private copies of reduction variables on each OpenMP
2114 /// thread: 'foo_private', 'bar_private'
2115 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
2116 /// to it and writes the result in 'foo_private' and 'bar_private'
2117 /// respectively.
2118 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
2119 /// and store the result on the team master:
2120 ///
2121 /// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
2122 /// reduceData, shuffleReduceFn, interWarpCpyFn)
2123 ///
2124 /// where:
2125 /// struct ReduceData {
2126 /// double *foo;
2127 /// double *bar;
2128 /// } reduceData
2129 /// reduceData.foo = &foo_private
2130 /// reduceData.bar = &bar_private
2131 ///
2132 /// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
2133 /// auxiliary functions generated by the compiler that operate on
2134 /// variables of type 'ReduceData'. They aid the runtime perform
2135 /// algorithmic steps in a data agnostic manner.
2136 ///
2137 /// 'shuffleReduceFn' is a pointer to a function that reduces data
2138 /// of type 'ReduceData' across two OpenMP threads (lanes) in the
2139 /// same warp. It takes the following arguments as input:
2140 ///
2141 /// a. variable of type 'ReduceData' on the calling lane,
2142 /// b. its lane_id,
2143 /// c. an offset relative to the current lane_id to generate a
2144 /// remote_lane_id. The remote lane contains the second
2145 /// variable of type 'ReduceData' that is to be reduced.
2146 /// d. an algorithm version parameter determining which reduction
2147 /// algorithm to use.
2148 ///
2149 /// 'shuffleReduceFn' retrieves data from the remote lane using
2150 /// efficient GPU shuffle intrinsics and reduces, using the
2151 /// algorithm specified by the 4th parameter, the two operands
2152 /// element-wise. The result is written to the first operand.
2153 ///
2154 /// Different reduction algorithms are implemented in different
2155 /// runtime functions, all calling 'shuffleReduceFn' to perform
2156 /// the essential reduction step. Therefore, based on the 4th
2157 /// parameter, this function behaves slightly differently to
2158 /// cooperate with the runtime to ensure correctness under
2159 /// different circumstances.
2160 ///
2161 /// 'InterWarpCpyFn' is a pointer to a function that transfers
2162 /// reduced variables across warps. It tunnels, through CUDA
2163 /// shared memory, the thread-private data of type 'ReduceData'
2164 /// from lane 0 of each warp to a lane in the first warp.
2165 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
2166 /// The last team writes the global reduced value to memory.
2167 ///
2168 /// ret = __kmpc_nvptx_teams_reduce_nowait(...,
2169 /// reduceData, shuffleReduceFn, interWarpCpyFn,
2170 /// scratchpadCopyFn, loadAndReduceFn)
2171 ///
2172 /// 'scratchpadCopyFn' is a helper that stores reduced
2173 /// data from the team master to a scratchpad array in
2174 /// global memory.
2175 ///
2176 /// 'loadAndReduceFn' is a helper that loads data from
2177 /// the scratchpad array and reduces it with the input
2178 /// operand.
2179 ///
2180 /// These compiler generated functions hide address
2181 /// calculation and alignment information from the runtime.
2182 /// 5. if ret == 1:
2183 /// The team master of the last team stores the reduced
2184 /// result to the globals in memory.
2185 /// foo += reduceData.foo; bar *= reduceData.bar
2186 ///
2187 ///
2188 /// Warp Reduction Algorithms
2189 ///
2190 /// On the warp level, we have three algorithms implemented in the
2191 /// OpenMP runtime depending on the number of active lanes:
2192 ///
2193 /// Full Warp Reduction
2194 ///
2195 /// The reduce algorithm within a warp where all lanes are active
2196 /// is implemented in the runtime as follows:
2197 ///
2198 /// full_warp_reduce(void *reduce_data,
2199 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
2200 /// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
2201 /// ShuffleReduceFn(reduce_data, 0, offset, 0);
2202 /// }
2203 ///
2204 /// The algorithm completes in log(2, WARPSIZE) steps.
2205 ///
2206 /// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
2207 /// not used therefore we save instructions by not retrieving lane_id
2208 /// from the corresponding special registers. The 4th parameter, which
2209 /// represents the version of the algorithm being used, is set to 0 to
2210 /// signify full warp reduction.
2211 ///
2212 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
2213 ///
2214 /// #reduce_elem refers to an element in the local lane's data structure
2215 /// #remote_elem is retrieved from a remote lane
2216 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
2217 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
2218 ///
2219 /// Contiguous Partial Warp Reduction
2220 ///
2221 /// This reduce algorithm is used within a warp where only the first
2222 /// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
2223 /// number of OpenMP threads in a parallel region is not a multiple of
2224 /// WARPSIZE. The algorithm is implemented in the runtime as follows:
2225 ///
2226 /// void
2227 /// contiguous_partial_reduce(void *reduce_data,
2228 /// kmp_ShuffleReductFctPtr ShuffleReduceFn,
2229 /// int size, int lane_id) {
2230 /// int curr_size;
2231 /// int offset;
2232 /// curr_size = size;
2233 /// mask = curr_size/2;
2234 /// while (offset>0) {
2235 /// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
2236 /// curr_size = (curr_size+1)/2;
2237 /// offset = curr_size/2;
2238 /// }
2239 /// }
2240 ///
2241 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
2242 ///
2243 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
2244 /// if (lane_id < offset)
2245 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
2246 /// else
2247 /// reduce_elem = remote_elem
2248 ///
2249 /// This algorithm assumes that the data to be reduced are located in a
2250 /// contiguous subset of lanes starting from the first. When there is
2251 /// an odd number of active lanes, the data in the last lane is not
2252 /// aggregated with any other lane's data but is instead copied over.
2253 ///
2254 /// Dispersed Partial Warp Reduction
2255 ///
2256 /// This algorithm is used within a warp when any discontiguous subset of
2257 /// lanes are active. It is used to implement the reduction operation
2258 /// across lanes in an OpenMP simd region or in a nested parallel region.
2259 ///
2260 /// void
2261 /// dispersed_partial_reduce(void *reduce_data,
2262 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
2263 /// int size, remote_id;
2264 /// int logical_lane_id = number_of_active_lanes_before_me() * 2;
2265 /// do {
2266 /// remote_id = next_active_lane_id_right_after_me();
2267 /// # the above function returns 0 if no active lane
2268 /// # is present right after the current lane.
2269 /// size = number_of_active_lanes_in_this_warp();
2270 /// logical_lane_id /= 2;
2271 /// ShuffleReduceFn(reduce_data, logical_lane_id,
2272 /// remote_id-1-threadIdx.x, 2);
2273 /// } while (logical_lane_id % 2 == 0 && size > 1);
2274 /// }
2275 ///
2276 /// There is no assumption made about the initial state of the reduction.
2277 /// Any number of lanes (>=1) could be active at any position. The reduction
2278 /// result is returned in the first active lane.
2279 ///
2280 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
2281 ///
2282 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
2283 /// if (lane_id % 2 == 0 && offset > 0)
2284 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
2285 /// else
2286 /// reduce_elem = remote_elem
2287 ///
2288 ///
2289 /// Intra-Team Reduction
2290 ///
2291 /// This function, as implemented in the runtime call
2292 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
2293 /// threads in a team. It first reduces within a warp using the
2294 /// aforementioned algorithms. We then proceed to gather all such
2295 /// reduced values at the first warp.
2296 ///
2297 /// The runtime makes use of the function 'InterWarpCpyFn', which copies
2298 /// data from each of the "warp master" (zeroth lane of each warp, where
2299 /// warp-reduced data is held) to the zeroth warp. This step reduces (in
2300 /// a mathematical sense) the problem of reduction across warp masters in
2301 /// a block to the problem of warp reduction.
2302 ///
2303 ///
2304 /// Inter-Team Reduction
2305 ///
2306 /// Once a team has reduced its data to a single value, it is stored in
2307 /// a global scratchpad array. Since each team has a distinct slot, this
2308 /// can be done without locking.
2309 ///
2310 /// The last team to write to the scratchpad array proceeds to reduce the
2311 /// scratchpad array. One or more workers in the last team use the helper
2312 /// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
2313 /// the k'th worker reduces every k'th element.
2314 ///
2315 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
2316 /// reduce across workers and compute a globally reduced value.
2317 ///
2318 /// \param Loc The location where the reduction was
2319 /// encountered. Must be within the associate
2320 /// directive and after the last local access to the
2321 /// reduction variables.
2322 /// \param AllocaIP An insertion point suitable for allocas usable
2323 /// in reductions.
2324 /// \param CodeGenIP An insertion point suitable for code
2325 /// generation.
2326 /// \param ReductionInfos A list of info on each reduction
2327 /// variable.
2328 /// \param IsNoWait Optional flag set if the reduction is
2329 /// marked as nowait.
2330 /// \param IsByRef For each reduction clause, whether the reduction is by-ref.
2331 /// \param IsTeamsReduction Optional flag set if it is a teams
2332 /// reduction.
2333 /// \param GridValue Optional GPU grid value.
2334 /// \param ReductionBufNum Optional OpenMPCUDAReductionBufNumValue to be
2335 /// used for teams reduction.
2336 /// \param SrcLocInfo Source location information global.
2338 const LocationDescription &Loc, InsertPointTy AllocaIP,
2339 InsertPointTy CodeGenIP, ArrayRef<ReductionInfo> ReductionInfos,
2340 ArrayRef<bool> IsByRef, bool IsNoWait = false,
2341 bool IsTeamsReduction = false,
2343 std::optional<omp::GV> GridValue = {}, unsigned ReductionBufNum = 1024,
2344 Value *SrcLocInfo = nullptr);
2345
2346 // TODO: provide atomic and non-atomic reduction generators for reduction
2347 // operators defined by the OpenMP specification.
2348
2349 /// Generator for '#omp reduction'.
2350 ///
2351 /// Emits the IR instructing the runtime to perform the specific kind of
2352 /// reductions. Expects reduction variables to have been privatized and
2353 /// initialized to reduction-neutral values separately. Emits the calls to
2354 /// runtime functions as well as the reduction function and the basic blocks
2355 /// performing the reduction atomically and non-atomically.
2356 ///
2357 /// The code emitted for the following:
2358 ///
2359 /// \code
2360 /// type var_1;
2361 /// type var_2;
2362 /// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
2363 /// /* body */;
2364 /// \endcode
2365 ///
2366 /// corresponds to the following sketch.
2367 ///
2368 /// \code
2369 /// void _outlined_par() {
2370 /// // N is the number of different reductions.
2371 /// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
2372 /// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
2373 /// _omp_reduction_func,
2374 /// _gomp_critical_user.reduction.var)) {
2375 /// case 1: {
2376 /// var_1 = var_1 <reduction-op> privatized_var_1;
2377 /// var_2 = var_2 <reduction-op> privatized_var_2;
2378 /// // ...
2379 /// __kmpc_end_reduce(...);
2380 /// break;
2381 /// }
2382 /// case 2: {
2383 /// _Atomic<ReductionOp>(var_1, privatized_var_1);
2384 /// _Atomic<ReductionOp>(var_2, privatized_var_2);
2385 /// // ...
2386 /// break;
2387 /// }
2388 /// default: break;
2389 /// }
2390 /// }
2391 ///
2392 /// void _omp_reduction_func(void **lhs, void **rhs) {
2393 /// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
2394 /// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
2395 /// // ...
2396 /// }
2397 /// \endcode
2398 ///
2399 /// \param Loc The location where the reduction was
2400 /// encountered. Must be within the associate
2401 /// directive and after the last local access to the
2402 /// reduction variables.
2403 /// \param AllocaIP An insertion point suitable for allocas usable
2404 /// in reductions.
2405 /// \param ReductionInfos A list of info on each reduction variable.
2406 /// \param IsNoWait A flag set if the reduction is marked as nowait.
2407 /// \param IsByRef A flag set if the reduction is using reference
2408 /// or direct value.
2409 /// \param IsTeamsReduction Optional flag set if it is a teams
2410 /// reduction.
2412 const LocationDescription &Loc, InsertPointTy AllocaIP,
2413 ArrayRef<ReductionInfo> ReductionInfos, ArrayRef<bool> IsByRef,
2414 bool IsNoWait = false, bool IsTeamsReduction = false);
2415
2416 ///}
2417
2418 /// Return the insertion point used by the underlying IRBuilder.
2420
2421 /// Update the internal location to \p Loc.
2423 Builder.restoreIP(Loc.IP);
2424 Builder.SetCurrentDebugLocation(Loc.DL);
2425 return Loc.IP.getBlock() != nullptr;
2426 }
2427
2428 /// Return the function declaration for the runtime function with \p FnID.
2431
2433
2435 ArrayRef<Value *> Args,
2436 StringRef Name = "");
2437
2438 /// Return the (LLVM-IR) string describing the source location \p LocStr.
2440 uint32_t &SrcLocStrSize);
2441
2442 /// Return the (LLVM-IR) string describing the default source location.
2444
2445 /// Return the (LLVM-IR) string describing the source location identified by
2446 /// the arguments.
2448 StringRef FileName, unsigned Line,
2449 unsigned Column,
2450 uint32_t &SrcLocStrSize);
2451
2452 /// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
2453 /// fallback if \p DL does not specify the function name.
2455 Function *F = nullptr);
2456
2457 /// Return the (LLVM-IR) string describing the source location \p Loc.
2458 LLVM_ABI Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
2459 uint32_t &SrcLocStrSize);
2460
2461 /// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
2462 /// TODO: Create a enum class for the Reserve2Flags
2464 uint32_t SrcLocStrSize,
2465 omp::IdentFlag Flags = omp::IdentFlag(0),
2466 unsigned Reserve2Flags = 0);
2467
2468 /// Create a hidden global flag \p Name in the module with initial value \p
2469 /// Value.
2471
2472 /// Emit the llvm.used metadata.
2474
2475 /// Emit the kernel execution mode.
2478
2479 /// Generate control flow and cleanup for cancellation.
2480 ///
2481 /// \param CancelFlag Flag indicating if the cancellation is performed.
2482 /// \param CanceledDirective The kind of directive that is canceled.
2483 /// \param ExitCB Extra code to be generated in the exit block.
2484 ///
2485 /// \return an error, if any were triggered during execution.
2487 omp::Directive CanceledDirective);
2488
2489 /// Generate a target region entry call.
2490 ///
2491 /// \param Loc The location at which the request originated and is fulfilled.
2492 /// \param AllocaIP The insertion point to be used for alloca instructions.
2493 /// \param Return Return value of the created function returned by reference.
2494 /// \param DeviceID Identifier for the device via the 'device' clause.
2495 /// \param NumTeams Number of teams for the region via the 'num_teams' clause
2496 /// or 0 if unspecified and -1 if there is no 'teams' clause.
2497 /// \param NumThreads Number of threads via the 'thread_limit' clause.
2498 /// \param HostPtr Pointer to the host-side pointer of the target kernel.
2499 /// \param KernelArgs Array of arguments to the kernel.
2500 LLVM_ABI InsertPointTy emitTargetKernel(const LocationDescription &Loc,
2501 InsertPointTy AllocaIP,
2502 Value *&Return, Value *Ident,
2503 Value *DeviceID, Value *NumTeams,
2504 Value *NumThreads, Value *HostPtr,
2505 ArrayRef<Value *> KernelArgs);
2506
2507 /// Generate a flush runtime call.
2508 ///
2509 /// \param Loc The location at which the request originated and is fulfilled.
2510 LLVM_ABI void emitFlush(const LocationDescription &Loc);
2511
2512 /// The finalization stack made up of finalize callbacks currently in-flight,
2513 /// wrapped into FinalizationInfo objects that reference also the finalization
2514 /// target block and the kind of cancellable directive.
2516
2517 /// Return true if the last entry in the finalization stack is of kind \p DK
2518 /// and cancellable.
2519 bool isLastFinalizationInfoCancellable(omp::Directive DK) {
 // Cancellation is only relevant for the innermost in-flight region, so
 // inspect just the top of the stack: it must exist, be marked
 // cancellable, and match the queried directive kind.
2520 return !FinalizationStack.empty() &&
2521 FinalizationStack.back().IsCancellable &&
2522 FinalizationStack.back().DK == DK;
2523 }
2524
2525 /// Generate a taskwait runtime call.
2526 ///
2527 /// \param Loc The location at which the request originated and is fulfilled.
2528 LLVM_ABI void emitTaskwaitImpl(const LocationDescription &Loc);
2529
2530 /// Generate a taskyield runtime call.
2531 ///
2532 /// \param Loc The location at which the request originated and is fulfilled.
2533 LLVM_ABI void emitTaskyieldImpl(const LocationDescription &Loc);
2534
2535 /// Return the current thread ID.
2536 ///
2537 /// \param Ident The ident (ident_t*) describing the query origin.
2539
2540 /// The OpenMPIRBuilder Configuration
2542
2543 /// The underlying LLVM-IR module
2545
2546 /// The LLVM-IR Builder used to create IR.
2548
2549 /// Map to remember source location strings
2551
2552 /// Map to remember existing ident_t*.
2554
2555 /// Info manager to keep track of target regions.
2557
2558 /// The target triple of the underlying module.
2559 const Triple T;
2560
2561 /// Helper that contains information about regions we need to outline
2562 /// during finalization.
2564 using PostOutlineCBTy = std::function<void(Function &)>;
2570 // TODO: this should be safe to enable by default
2572
2573 LLVM_ABI virtual ~OutlineInfo() = default;
2574
2575 /// Collect all blocks in between EntryBB and ExitBB in both the given
2576 /// vector and set.
2578 SmallVectorImpl<BasicBlock *> &BlockVector);
2579
2580 /// Create a CodeExtractor instance based on the information stored in this
2581 /// structure, the list of collected blocks from a previous call to
2582 /// \c collectBlocks and a flag stating whether arguments must be passed in
2583 /// address space 0.
2584 LLVM_ABI virtual std::unique_ptr<CodeExtractor>
2586 bool ArgsInZeroAddressSpace, Twine Suffix = Twine(""));
2587
2588 /// Return the function that contains the region to be outlined.
 /// Derived from \c EntryBB; assumes \c EntryBB is non-null and already
 /// inserted into a function — TODO(review): confirm callers guarantee this.
2589 Function *getFunction() const { return EntryBB->getParent(); }
2590 };
2591
2592 /// Collection of regions that need to be outlined during finalization.
2594
2595 /// A collection of candidate target functions that's constant allocas will
2596 /// attempt to be raised on a call of finalize after all currently enqueued
2597 /// outline info's have been processed.
2599
2600 /// Collection of owned canonical loop objects that eventually need to be
2601 /// free'd.
2602 std::forward_list<CanonicalLoopInfo> LoopInfos;
2603
2604 /// Collection of owned ScanInfo objects that eventually need to be free'd.
2605 std::forward_list<ScanInfo> ScanInfos;
2606
2607 /// Add a new region that will be outlined later.
 /// Takes ownership of \p OI; the stored entries are processed during
 /// finalization (see \c OutlineInfos).
2608 void addOutlineInfo(std::unique_ptr<OutlineInfo> &&OI) {
 // Move the unique_ptr into the collection; the builder now owns it.
2609 OutlineInfos.emplace_back(std::move(OI));
2610 }
2611
2612 /// An ordered map of auto-generated variables to their unique names.
2613 /// It stores variables with the following names: 1) ".gomp_critical_user_" +
2614 /// <critical_section_name> + ".var" for "omp critical" directives; 2)
2615 /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
2616 /// variables.
2618
2619 /// Computes the size of type in bytes.
2621
2622 // Emit a branch from the current block to the Target block only if
2623 // the current block has a terminator.
2625
2626 // If BB has no use then delete it and return. Else place BB after the current
2627 // block, if possible, or else at the end of the function. Also add a branch
2628 // from current block to BB if current block does not have a terminator.
2629 LLVM_ABI void emitBlock(BasicBlock *BB, Function *CurFn,
2630 bool IsFinished = false);
2631
2632 /// Emits code for OpenMP 'if' clause using specified \a BodyGenCallbackTy
2633 /// Here is the logic:
2634 /// if (Cond) {
2635 /// ThenGen();
2636 /// } else {
2637 /// ElseGen();
2638 /// }
2639 ///
2640 /// \return an error, if any were triggered during execution.
2642 BodyGenCallbackTy ElseGen,
2643 InsertPointTy AllocaIP = {},
2644 ArrayRef<BasicBlock *> DeallocBlocks = {});
2645
2646 /// Create the global variable holding the offload mappings information.
2647 LLVM_ABI GlobalVariable *
2648 createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
2649 std::string VarName);
2650
2651 /// Create the global variable holding the offload names information.
2652 LLVM_ABI GlobalVariable *
2653 createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
2654 std::string VarName);
2655
2658 AllocaInst *Args = nullptr;
2660 };
2661
2662 /// Create the allocas instruction used in call to mapper functions.
2664 InsertPointTy AllocaIP,
2665 unsigned NumOperands,
2667
2668 /// Create the call for the target mapper function.
2669 /// \param Loc The source location description.
2670 /// \param MapperFunc Function to be called.
2671 /// \param SrcLocInfo Source location information global.
2672 /// \param MaptypesArg The argument types.
2673 /// \param MapnamesArg The argument names.
2674 /// \param MapperAllocas The AllocaInst used for the call.
2675 /// \param DeviceID Device ID for the call.
2676 /// \param NumOperands Number of operands in the call.
2678 Function *MapperFunc, Value *SrcLocInfo,
2679 Value *MaptypesArg, Value *MapnamesArg,
2681 int64_t DeviceID, unsigned NumOperands);
2682
2683 /// Container for the arguments used to pass data to the runtime library.
2685 /// The array of base pointer passed to the runtime library.
2687 /// The array of section pointers passed to the runtime library.
2689 /// The array of sizes passed to the runtime library.
2690 Value *SizesArray = nullptr;
2691 /// The array of map types passed to the runtime library for the beginning
2692 /// of the region or for the entire region if there are no separate map
2693 /// types for the region end.
2695 /// The array of map types passed to the runtime library for the end of the
2696 /// region, or nullptr if there are no separate map types for the region
2697 /// end.
2699 /// The array of user-defined mappers passed to the runtime library.
2701 /// The array of original declaration names of mapped pointers sent to the
2702 /// runtime library for debugging
2704
2705 explicit TargetDataRTArgs() = default;
2714 };
2715
2716 /// Container to pass the default attributes with which a kernel must be
2717 /// launched, used to set kernel attributes and populate associated static
2718 /// structures.
2719 ///
2720 /// For max values, < 0 means unset, == 0 means set but unknown at compile
2721 /// time. The number of max values will be 1 except for the case where
2722 /// ompx_bare is set.
2733
2734 /// Container to pass LLVM IR runtime values or constants related to the
2735 /// number of teams and threads with which the kernel must be launched, as
2736 /// well as the trip count of the loop, if it is an SPMD or Generic-SPMD
2737 /// kernel. These must be defined in the host prior to the call to the kernel
2738 /// launch OpenMP RTL function.
2741 Value *MinTeams = nullptr;
2744
2745 /// 'parallel' construct 'num_threads' clause value, if present and it is an
2746 /// SPMD kernel.
2747 Value *MaxThreads = nullptr;
2748
2749 /// Total number of iterations of the SPMD or Generic-SPMD kernel or null if
2750 /// it is a generic kernel.
2752
2753 /// Device ID value used in the kernel launch.
2754 Value *DeviceID = nullptr;
2755 };
2756
2757 /// Data structure that contains the needed information to construct the
2758 /// kernel args vector.
2760 /// Number of arguments passed to the runtime library.
2761 unsigned NumTargetItems = 0;
2762 /// Arguments passed to the runtime library
2764 /// The number of iterations
2766 /// The number of teams.
2768 /// The number of threads.
2770 /// The size of the dynamic shared memory.
2772 /// True if the kernel has 'no wait' clause.
2773 bool HasNoWait = false;
2774 /// The fallback mechanism for the shared memory.
2777
2778 // Constructors for TargetKernelArgs.
2779 TargetKernelArgs() = default;
2789 };
2790
2791 /// Create the kernel args vector used by emitTargetKernel. This function
2792 /// creates various constant values that are used in the resulting args
2793 /// vector.
2794 LLVM_ABI static void getKernelArgsVector(TargetKernelArgs &KernelArgs,
2795 IRBuilderBase &Builder,
2796 SmallVector<Value *> &ArgsVector);
2797
2798 /// Struct that keeps the information that should be kept throughout
2799 /// a 'target data' region.
2801 /// Set to true if device pointer information have to be obtained.
2802 bool RequiresDevicePointerInfo = false;
2803 /// Set to true if Clang emits separate runtime calls for the beginning and
2804 /// end of the region. These calls might have separate map type arrays.
2805 bool SeparateBeginEndCalls = false;
2806
2807 public:
2809
2812
2813 /// Indicate whether any user-defined mapper exists.
2814 bool HasMapper = false;
2815 /// The total number of pointers passed to the runtime library.
2816 unsigned NumberOfPtrs = 0u;
2817
2818 bool EmitDebug = false;
2819
2820 /// Whether the `target ... data` directive has a `nowait` clause.
2821 bool HasNoWait = false;
2822
 /// Default-construct with both flags false (per the in-class initializers).
2823 explicit TargetDataInfo() = default;
 /// Construct with explicit settings for whether device pointer
 /// information must be obtained and whether separate begin/end runtime
 /// calls (with possibly distinct map type arrays) are emitted.
2824 explicit TargetDataInfo(bool RequiresDevicePointerInfo,
2825 bool SeparateBeginEndCalls)
2826 : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
2827 SeparateBeginEndCalls(SeparateBeginEndCalls) {}
2828 /// Clear information about the data arrays.
2831 HasMapper = false;
2832 NumberOfPtrs = 0u;
2833 }
2834 /// Return true if the current target data information has valid arrays.
 /// All mandatory runtime arrays (base pointers, pointers, sizes, map
 /// types) must be non-null, the mappers array is required only when a
 /// user-defined mapper is present, and at least one pointer must have
 /// been registered.
2835 bool isValid() {
2836 return RTArgs.BasePointersArray && RTArgs.PointersArray &&
2837 RTArgs.SizesArray && RTArgs.MapTypesArray &&
2838 (!HasMapper || RTArgs.MappersArray) && NumberOfPtrs;
2839 }
 /// Return whether device pointer information has to be obtained.
2840 bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
 /// Return whether separate begin/end runtime calls are emitted.
2841 bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
2842 };
2843
2851
2852 /// This structure contains combined information generated for mappable
2853 /// clauses, including base pointers, pointers, sizes, map types, user-defined
2854 /// mappers, and non-contiguous information.
2855 struct MapInfosTy {
2870
2871 /// Append arrays in \a CurInfo.
 /// Concatenates every parallel array of \p CurInfo onto this object,
 /// including the non-contiguous dimension/offset/count/stride arrays, so
 /// the element-wise correspondence across arrays is preserved.
2872 void append(MapInfosTy &CurInfo) {
2873 BasePointers.append(CurInfo.BasePointers.begin(),
2874 CurInfo.BasePointers.end());
2875 Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
2876 DevicePointers.append(CurInfo.DevicePointers.begin(),
2877 CurInfo.DevicePointers.end());
2878 Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
2879 Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
2880 Names.append(CurInfo.Names.begin(), CurInfo.Names.end());
 // Non-contiguous map information is kept in its own nested arrays.
2881 NonContigInfo.Dims.append(CurInfo.NonContigInfo.Dims.begin(),
2882 CurInfo.NonContigInfo.Dims.end());
2883 NonContigInfo.Offsets.append(CurInfo.NonContigInfo.Offsets.begin(),
2884 CurInfo.NonContigInfo.Offsets.end());
2885 NonContigInfo.Counts.append(CurInfo.NonContigInfo.Counts.begin(),
2886 CurInfo.NonContigInfo.Counts.end());
2887 NonContigInfo.Strides.append(CurInfo.NonContigInfo.Strides.begin(),
2888 CurInfo.NonContigInfo.Strides.end());
2889 }
2890 };
2892
2893 /// Callback function type for functions emitting the host fallback code that
2894 /// is executed when the kernel launch fails. It takes an insertion point as
2895 /// parameter where the code should be emitted. It returns an insertion point
2896 /// that points right after the emitted code.
2899
2900 // Callback function type for emitting and fetching user defined custom
2901 // mappers.
2903 function_ref<Expected<Function *>(unsigned int)>;
2904
2905 /// Generate a target region entry call and host fallback call.
2906 ///
2907 /// \param Loc The location at which the request originated and is fulfilled.
2908 /// \param OutlinedFnID The outlined function ID.
2909 /// \param EmitTargetCallFallbackCB Call back function to generate host
2910 /// fallback code.
2911 /// \param Args Data structure holding information about the kernel arguments.
2912 /// \param DeviceID Identifier for the device via the 'device' clause.
2913 /// \param RTLoc Source location identifier
2914 /// \param AllocaIP The insertion point to be used for alloca instructions.
2916 const LocationDescription &Loc, Value *OutlinedFnID,
2917 EmitFallbackCallbackTy EmitTargetCallFallbackCB, TargetKernelArgs &Args,
2918 Value *DeviceID, Value *RTLoc, InsertPointTy AllocaIP);
2919
2920 /// Callback type for generating the bodies of device directives that require
2921 /// outer target tasks (e.g. in case of having `nowait` or `depend` clauses).
2922 ///
2923 /// \param DeviceID The ID of the device on which the target region will
2924 /// execute.
2925 /// \param RTLoc Source location identifier
2926 /// \param TargetTaskAllocaIP Insertion point for the alloca block of the
2927 /// generated task.
2928 ///
2929 /// \return an error, if any were triggered during execution.
2931 function_ref<Error(Value *DeviceID, Value *RTLoc,
2932 IRBuilderBase::InsertPoint TargetTaskAllocaIP)>;
2933
2934 /// Generate a target-task for the target construct
2935 ///
2936 /// \param TaskBodyCB Callback to generate the actual body of the target task.
2937 /// \param DeviceID Identifier for the device via the 'device' clause.
2938 /// \param RTLoc Source location identifier
2939 /// \param AllocaIP The insertion point to be used for alloca instructions.
2940 /// \param Dependencies Dependencies info as specified by the 'depend' clause.
2941 /// \param HasNoWait True if the target construct had 'nowait' on it, false
2942 /// otherwise
2944 emitTargetTask(TargetTaskBodyCallbackTy TaskBodyCB, Value *DeviceID,
2945 Value *RTLoc, OpenMPIRBuilder::InsertPointTy AllocaIP,
2946 const DependenciesInfo &Dependencies,
2947 const TargetDataRTArgs &RTArgs, bool HasNoWait);
2948
2949 /// Emit the arguments to be passed to the runtime library based on the
2950 /// arrays of base pointers, pointers, sizes, map types, and mappers. If
2951 /// ForEndCall, emit map types to be passed for the end of the region instead
2952 /// of the beginning.
2955 OpenMPIRBuilder::TargetDataInfo &Info, bool ForEndCall = false);
2956
2957 /// Emit an array of struct descriptors to be assigned to the offload args.
2959 InsertPointTy CodeGenIP,
2960 MapInfosTy &CombinedInfo,
2961 TargetDataInfo &Info);
2962
2963 /// Emit the arrays used to pass the captures and map information to the
2964 /// offloading runtime library. If there is no map or capture information,
2965 /// return nullptr by reference. Accepts a reference to a MapInfosTy object
2966 /// that contains information generated for mappable clauses,
2967 /// including base pointers, pointers, sizes, map types, user-defined mappers.
2969 InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo,
2970 TargetDataInfo &Info, CustomMapperCallbackTy CustomMapperCB,
2971 bool IsNonContiguous = false,
2972 function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr);
2973
2974 /// Allocates memory for and populates the arrays required for offloading
2975 /// (offload_{baseptrs|ptrs|mappers|sizes|maptypes|mapnames}). Then, it
2976 /// emits their base addresses as arguments to be passed to the runtime
2977 /// library. In essence, this function is a combination of
2978 /// emitOffloadingArrays and emitOffloadingArraysArgument and should arguably
2979 /// be preferred by clients of OpenMPIRBuilder.
2981 InsertPointTy AllocaIP, InsertPointTy CodeGenIP, TargetDataInfo &Info,
2982 TargetDataRTArgs &RTArgs, MapInfosTy &CombinedInfo,
2983 CustomMapperCallbackTy CustomMapperCB, bool IsNonContiguous = false,
2984 bool ForEndCall = false,
2985 function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr);
2986
2987 /// Creates offloading entry for the provided entry ID \a ID, address \a
2988 /// Addr, size \a Size, and flags \a Flags.
2990 int32_t Flags, GlobalValue::LinkageTypes,
2991 StringRef Name = "");
2992
2993 /// The kind of errors that can occur when emitting the offload entries and
2994 /// metadata.
3001
3002 /// Callback function type
3004 std::function<void(EmitMetadataErrorKind, TargetRegionEntryInfo)>;
3005
3006 // Emit the offloading entries and metadata so that the device codegen side
3007 // can easily figure out what to emit. The produced metadata looks like
3008 // this:
3009 //
3010 // !omp_offload.info = !{!1, ...}
3011 //
3012 // We only generate metadata for function that contain target regions.
3014 EmitMetadataErrorReportFunctionTy &ErrorReportFunction);
3015
3016public:
3017 /// Generator for __kmpc_copyprivate
3018 ///
3019 /// \param Loc The source location description.
3020 /// \param BufSize Number of elements in the buffer.
3021 /// \param CpyBuf List of pointers to data to be copied.
3022 /// \param CpyFn function to call for copying data.
3023 /// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
3024 ///
3025 /// \return The insertion position *after* the CopyPrivate call.
3026
3028 llvm::Value *BufSize,
3029 llvm::Value *CpyBuf,
3030 llvm::Value *CpyFn,
3031 llvm::Value *DidIt);
3032
3033 /// Generator for '#omp single'
3034 ///
3035 /// \param Loc The source location description.
3036 /// \param BodyGenCB Callback that will generate the region code.
3037 /// \param FiniCB Callback to finalize variable copies.
3038 /// \param IsNowait If false, a barrier is emitted.
3039 /// \param CPVars copyprivate variables.
3040 /// \param CPFuncs copy functions to use for each copyprivate variable.
3041 ///
3042 /// \returns The insertion position *after* the single call.
3045 FinalizeCallbackTy FiniCB, bool IsNowait,
3046 ArrayRef<llvm::Value *> CPVars = {},
3047 ArrayRef<llvm::Function *> CPFuncs = {});
3048
3049 /// Generator for '#omp scope'
3050 ///
3051 /// \param Loc The source location description.
3052 /// \param BodyGenCB Callback that will generate the region code.
3053 /// \param FiniCB Callback to finalize variable copies.
3054 /// \param IsNowait If false, a barrier is emitted.
3055 ///
3056 /// \returns The insertion position *after* the scope.
3057 LLVM_ABI InsertPointOrErrorTy createScope(const LocationDescription &Loc,
3058 BodyGenCallbackTy BodyGenCB,
3059 FinalizeCallbackTy FiniCB,
3060 bool IsNowait);
3061
3062 /// Generator for '#omp master'
3063 ///
3064 /// \param Loc The insert and source location description.
3065 /// \param BodyGenCB Callback that will generate the region code.
3066 /// \param FiniCB Callback to finalize variable copies.
3067 ///
3068 /// \returns The insertion position *after* the master.
3069 LLVM_ABI InsertPointOrErrorTy createMaster(const LocationDescription &Loc,
3070 BodyGenCallbackTy BodyGenCB,
3071 FinalizeCallbackTy FiniCB);
3072
3073 /// Generator for '#omp masked'
3074 ///
3075 /// \param Loc The insert and source location description.
3076 /// \param BodyGenCB Callback that will generate the region code.
3077 /// \param FiniCB Callback to finalize variable copies.
3078 ///
3079 /// \returns The insertion position *after* the masked.
3080 LLVM_ABI InsertPointOrErrorTy createMasked(const LocationDescription &Loc,
3081 BodyGenCallbackTy BodyGenCB,
3082 FinalizeCallbackTy FiniCB,
3083 Value *Filter);
3084
3085 /// This function performs the scan reduction of the values updated in
3086 /// the input phase. The reduction logic needs to be emitted between input
3087 /// and scan loop returned by `CreateCanonicalScanLoops`. The following
3088 /// is the code that is generated, `buffer` and `span` are expected to be
3089 /// populated before executing the generated code.
3090 /// \code{c}
3091 /// for (int k = 0; k != ceil(log2(span)); ++k) {
3092 /// i=pow(2,k)
3093 /// for (size cnt = last_iter; cnt >= i; --cnt)
3094 /// buffer[cnt] op= buffer[cnt-i];
3095 /// }
3096 /// \endcode
3097 /// \param Loc The insert and source location description.
3098 /// \param ReductionInfos Array type containing the ReductionOps.
3099 /// \param ScanRedInfo Pointer to the ScanInfo objected created using
3100 /// `ScanInfoInitialize`.
3101 ///
3102 /// \returns The insertion position *after* the scan reduction.
3104 const LocationDescription &Loc,
3106 ScanInfo *ScanRedInfo);
3107
3108 /// This directive split and directs the control flow to input phase
3109 /// blocks or scan phase blocks based on 1. whether input loop or scan loop
3110 /// is executed, 2. whether exclusive or inclusive scan is used.
3111 ///
3112 /// \param Loc The insert and source location description.
3113 /// \param AllocaIP The IP where the temporary buffer for scan reduction
3114 // needs to be allocated.
3115 /// \param ScanVars Scan Variables.
3116 /// \param IsInclusive Whether it is an inclusive or exclusive scan.
3117 /// \param ScanRedInfo Pointer to the ScanInfo objected created using
3118 /// `ScanInfoInitialize`.
3119 ///
3120 /// \returns The insertion position *after* the scan.
3121 LLVM_ABI InsertPointOrErrorTy createScan(const LocationDescription &Loc,
3122 InsertPointTy AllocaIP,
3123 ArrayRef<llvm::Value *> ScanVars,
3124 ArrayRef<llvm::Type *> ScanVarsType,
3125 bool IsInclusive,
3126 ScanInfo *ScanRedInfo);
3127
3128 /// Generator for '#omp critical'
3129 ///
3130 /// \param Loc The insert and source location description.
3131 /// \param BodyGenCB Callback that will generate the region body code.
3132 /// \param FiniCB Callback to finalize variable copies.
3133 /// \param CriticalName name of the lock used by the critical directive
3134 /// \param HintInst Hint Instruction for hint clause associated with critical
3135 ///
3136 /// \returns The insertion position *after* the critical.
3137 LLVM_ABI InsertPointOrErrorTy createCritical(const LocationDescription &Loc,
3138 BodyGenCallbackTy BodyGenCB,
3139 FinalizeCallbackTy FiniCB,
3140 StringRef CriticalName,
3141 Value *HintInst);
3142
3143 /// Generator for '#omp ordered depend (source | sink)'
3144 ///
3145 /// \param Loc The insert and source location description.
3146 /// \param AllocaIP The insertion point to be used for alloca instructions.
3147 /// \param NumLoops The number of loops in depend clause.
3148 /// \param StoreValues The value will be stored in vector address.
3149 /// \param Name The name of alloca instruction.
3150 /// \param IsDependSource If true, depend source; otherwise, depend sink.
3151 ///
3152 /// \return The insertion position *after* the ordered.
3154 createOrderedDepend(const LocationDescription &Loc, InsertPointTy AllocaIP,
3155 unsigned NumLoops, ArrayRef<llvm::Value *> StoreValues,
3156 const Twine &Name, bool IsDependSource);
3157
3158 /// Generator for '#omp ordered [threads | simd]'
3159 ///
3160 /// \param Loc The insert and source location description.
3161 /// \param BodyGenCB Callback that will generate the region code.
3162 /// \param FiniCB Callback to finalize variable copies.
3163 /// \param IsThreads If true, with threads clause or without clause;
3164 /// otherwise, with simd clause;
3165 ///
3166 /// \returns The insertion position *after* the ordered.
3168 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
3169 FinalizeCallbackTy FiniCB, bool IsThreads);
3170
3171 /// Generator for '#omp sections'
3172 ///
3173 /// \param Loc The insert and source location description.
3174 /// \param AllocaIP The insertion points to be used for alloca instructions.
3175 /// \param SectionCBs Callbacks that will generate body of each section.
3176 /// \param PrivCB Callback to copy a given variable (think copy constructor).
3177 /// \param FiniCB Callback to finalize variable copies.
3178 /// \param IsCancellable Flag to indicate a cancellable parallel region.
3179 /// \param IsNowait If true, barrier - to ensure all sections are executed
3180 /// before moving forward will not be generated.
3181 /// \returns The insertion position *after* the sections.
3183 createSections(const LocationDescription &Loc, InsertPointTy AllocaIP,
3186 bool IsCancellable, bool IsNowait);
3187
3188 /// Generator for '#omp section'
3189 ///
3190 /// \param Loc The insert and source location description.
3191 /// \param BodyGenCB Callback that will generate the region body code.
3192 /// \param FiniCB Callback to finalize variable copies.
3193 /// \returns The insertion position *after* the section.
3194 LLVM_ABI InsertPointOrErrorTy createSection(const LocationDescription &Loc,
3195 BodyGenCallbackTy BodyGenCB,
3196 FinalizeCallbackTy FiniCB);
3197
3198 /// Generator for `#omp teams`
3199 ///
3200 /// \param Loc The location where the teams construct was encountered.
3201 /// \param BodyGenCB Callback that will generate the region code.
3202 /// \param NumTeamsLower Lower bound on number of teams. If this is nullptr,
3203 /// it is as if the lower bound is specified as equal to the upper bound.
3204 /// If this is non-null, then the upper bound must also be non-null.
3205 /// \param NumTeamsUpper Upper bound on the number of teams.
3206 /// \param ThreadLimit on the number of threads that may participate in a
3207 /// contention group created by each team.
3208 /// \param IfExpr is the integer argument value of the if condition on the
3209 /// teams clause.
3210 LLVM_ABI InsertPointOrErrorTy createTeams(const LocationDescription &Loc,
3211 BodyGenCallbackTy BodyGenCB,
3212 Value *NumTeamsLower = nullptr,
3213 Value *NumTeamsUpper = nullptr,
3214 Value *ThreadLimit = nullptr,
3215 Value *IfExpr = nullptr);
3216
3217 /// Generator for `#omp distribute`
3218 ///
3219 /// \param Loc The location where the distribute construct was encountered.
3220 /// \param AllocaIP The insertion point to be used for allocations.
3221 /// \param DeallocBlocks The insertion blocks to be used for explicit
3222 /// deallocations, if needed.
3223 /// \param BodyGenCB Callback that will generate the region code.
3225 const LocationDescription &Loc, InsertPointTy AllocaIP,
3226 ArrayRef<BasicBlock *> DeallocBlocks, BodyGenCallbackTy BodyGenCB);
3227
3228 /// Generate conditional branch and relevant BasicBlocks through which private
3229 /// threads copy the 'copyin' variables from Master copy to threadprivate
3230 /// copies.
3231 ///
3232 /// \param IP insertion block for copyin conditional
3233 /// \param MasterVarPtr a pointer to the master variable
3234 /// \param PrivateVarPtr a pointer to the threadprivate variable
3235 /// \param IntPtrTy Pointer size type
3236 /// \param BranchtoEnd Create a branch between the copyin.not.master blocks
3237 /// and copy.in.end block
3238 ///
3239 /// \returns The insertion point where copying operation to be emitted.
3241 Value *MasterAddr,
3242 Value *PrivateAddr,
3243 llvm::IntegerType *IntPtrTy,
3244 bool BranchtoEnd = true);
3245
3246 /// Create a runtime call for kmpc_alloc
3247 ///
3248 /// \param Loc The insert and source location description.
3249 /// \param Size Size of allocated memory space
3250 /// \param Allocator Allocator information instruction
3251 /// \param Name Name of call Instruction for OMP_alloc
3252 ///
3253 /// \returns CallInst to the OMP_Alloc call
3254 LLVM_ABI CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
3255 Value *Allocator, std::string Name = "");
3256
3257 /// Create a runtime call for kmpc_align_alloc
3258 ///
3259 /// \param Loc The insert and source location description.
3260 /// \param Align Align value
3261 /// \param Size Size of allocated memory space
3262 /// \param Allocator Allocator information instruction
3263 /// \param Name Name of call Instruction for OMP_Align_Alloc
3264 ///
3265 /// \returns CallInst to the OMP_Align_Alloc call
3266 LLVM_ABI CallInst *createOMPAlignedAlloc(const LocationDescription &Loc,
3267 Value *Align, Value *Size,
3268 Value *Allocator,
3269 std::string Name = "");
3270
3271 /// Create a runtime call for kmpc_free
3272 ///
3273 /// \param Loc The insert and source location description.
3274 /// \param Addr Address of memory space to be freed
3275 /// \param Allocator Allocator information instruction
3276 /// \param Name Name of call Instruction for OMP_Free
3277 ///
3278 /// \returns CallInst to the OMP_Free call
3279 LLVM_ABI CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
3280 Value *Allocator, std::string Name = "");
3281
3282 /// Create a runtime call for kmpc_alloc_shared.
3283 ///
3284 /// \param Loc The insert and source location description.
3285 /// \param Size Size of allocated memory space.
3286 /// \param Name Name of call Instruction.
3287 ///
3288 /// \returns CallInst to the kmpc_alloc_shared call.
3289 LLVM_ABI CallInst *createOMPAllocShared(const LocationDescription &Loc,
3290 Value *Size,
3291 const Twine &Name = Twine(""));
3292
3293 /// Create a runtime call for kmpc_alloc_shared.
3294 ///
3295 /// \param Loc The insert and source location description.
3296 /// \param VarType Type of variable to be allocated.
3297 /// \param Name Name of call Instruction.
3298 ///
3299 /// \returns CallInst to the kmpc_alloc_shared call.
3300 LLVM_ABI CallInst *createOMPAllocShared(const LocationDescription &Loc,
3301 Type *VarType,
3302 const Twine &Name = Twine(""));
3303
3304 /// Create a runtime call for kmpc_free_shared.
3305 ///
3306 /// \param Loc The insert and source location description.
3307 /// \param Addr Value obtained from the corresponding kmpc_alloc_shared call.
3308 /// \param Size Size of allocated memory space.
3309 /// \param Name Name of call Instruction.
3310 ///
3311 /// \returns CallInst to the kmpc_free_shared call.
3312 LLVM_ABI CallInst *createOMPFreeShared(const LocationDescription &Loc,
3313 Value *Addr, Value *Size,
3314 const Twine &Name = Twine(""));
3315
3316 /// Create a runtime call for kmpc_free_shared.
3317 ///
3318 /// \param Loc The insert and source location description.
3319 /// \param Addr Value obtained from the corresponding kmpc_alloc_shared call.
3320 /// \param VarType Type of variable to be freed.
3321 /// \param Name Name of call Instruction.
3322 ///
3323 /// \returns CallInst to the kmpc_free_shared call.
3324 LLVM_ABI CallInst *createOMPFreeShared(const LocationDescription &Loc,
3325 Value *Addr, Type *VarType,
3326 const Twine &Name = Twine(""));
3327
3328 /// Create a runtime call for kmpc_threadprivate_cached
3329 ///
3330 /// \param Loc The insert and source location description.
3331 /// \param Pointer pointer to data to be cached
3332 /// \param Size size of data to be cached
3333 /// \param Name Name of call Instruction for callinst
3334 ///
3335 /// \returns CallInst to the thread private cache call.
3336 LLVM_ABI CallInst *
3337 createCachedThreadPrivate(const LocationDescription &Loc,
3339 const llvm::Twine &Name = Twine(""));
3340
3341 /// Create a runtime call for __tgt_interop_init
3342 ///
3343 /// \param Loc The insert and source location description.
3344 /// \param InteropVar variable to be allocated
3345 /// \param InteropType type of interop operation
3346 /// \param Device device to which offloading will occur
3347 /// \param NumDependences number of dependence variables
3348 /// \param DependenceAddress pointer to dependence variables
3349 /// \param HaveNowaitClause does nowait clause exist
3350 ///
3351 /// \returns CallInst to the __tgt_interop_init call
3352 LLVM_ABI CallInst *createOMPInteropInit(const LocationDescription &Loc,
3353 Value *InteropVar,
3354 omp::OMPInteropType InteropType,
3355 Value *Device, Value *NumDependences,
3356 Value *DependenceAddress,
3357 bool HaveNowaitClause);
3358
3359 /// Create a runtime call for __tgt_interop_destroy
3360 ///
3361 /// \param Loc The insert and source location description.
3362 /// \param InteropVar variable to be allocated
3363 /// \param Device device to which offloading will occur
3364 /// \param NumDependences number of dependence variables
3365 /// \param DependenceAddress pointer to dependence variables
3366 /// \param HaveNowaitClause does nowait clause exist
3367 ///
3368 /// \returns CallInst to the __tgt_interop_destroy call
3369 LLVM_ABI CallInst *createOMPInteropDestroy(const LocationDescription &Loc,
3370 Value *InteropVar, Value *Device,
3371 Value *NumDependences,
3372 Value *DependenceAddress,
3373 bool HaveNowaitClause);
3374
3375 /// Create a runtime call for __tgt_interop_use
3376 ///
3377 /// \param Loc The insert and source location description.
3378 /// \param InteropVar variable to be allocated
3379 /// \param Device device to which offloading will occur
3380 /// \param NumDependences number of dependence variables
3381 /// \param DependenceAddress pointer to dependence variables
3382 /// \param HaveNowaitClause does nowait clause exist
3383 ///
3384 /// \returns CallInst to the __tgt_interop_use call
3385 LLVM_ABI CallInst *createOMPInteropUse(const LocationDescription &Loc,
3386 Value *InteropVar, Value *Device,
3387 Value *NumDependences,
3388 Value *DependenceAddress,
3389 bool HaveNowaitClause);
3390
3391 /// The `omp target` interface
3392 ///
3393 /// For more information about the usage of this interface,
3394 /// \see openmp/device/include/Interface.h
3395 ///
3396 ///{
3397
3398 /// Create a runtime call for kmpc_target_init
3399 ///
3400 /// \param Loc The insert and source location description.
3401 /// \param Attrs Structure containing the default attributes, including
3402 /// numbers of threads and teams to launch the kernel with.
3404 const LocationDescription &Loc,
3406
3407 /// Create a runtime call for kmpc_target_deinit
3408 ///
3409 /// \param Loc The insert and source location description.
3410 /// \param TeamsReductionDataSize The maximal size of all the reduction data
3411 /// for teams reduction.
3412 /// \param TeamsReductionBufferLength The number of elements (each of up to
3413 /// \p TeamsReductionDataSize size), in the teams reduction buffer.
3414 LLVM_ABI void createTargetDeinit(const LocationDescription &Loc,
3415 int32_t TeamsReductionDataSize = 0,
3416 int32_t TeamsReductionBufferLength = 1024);
3417
3418 ///}
3419
3420 /// Helpers to read/write kernel annotations from the IR.
3421 ///
3422 ///{
3423
3424 /// Read/write a bounds on threads for \p Kernel. Read will return 0 if none
3425 /// is set.
3426 LLVM_ABI static std::pair<int32_t, int32_t>
3427 readThreadBoundsForKernel(const Triple &T, Function &Kernel);
3428 LLVM_ABI static void writeThreadBoundsForKernel(const Triple &T,
3429 Function &Kernel, int32_t LB,
3430 int32_t UB);
3431
3432 /// Read/write a bounds on teams for \p Kernel. Read will return 0 if none
3433 /// is set.
3434 LLVM_ABI static std::pair<int32_t, int32_t>
3435 readTeamBoundsForKernel(const Triple &T, Function &Kernel);
3436 LLVM_ABI static void writeTeamsForKernel(const Triple &T, Function &Kernel,
3437 int32_t LB, int32_t UB);
3438 ///}
3439
3440private:
3441 // Sets the function attributes expected for the outlined function
3442 void setOutlinedTargetRegionFunctionAttributes(Function *OutlinedFn);
3443
3444 // Creates the function ID/Address for the given outlined function.
3445 // In the case of an embedded device function the address of the function is
3446 // used, in the case of a non-offload function a constant is created.
3447 Constant *createOutlinedFunctionID(Function *OutlinedFn,
3448 StringRef EntryFnIDName);
3449
3450 // Creates the region entry address for the outlined function
3451 Constant *createTargetRegionEntryAddr(Function *OutlinedFunction,
3452 StringRef EntryFnName);
3453
3454public:
3455 /// Functions used to generate a function with the given name.
3457 std::function<Expected<Function *>(StringRef FunctionName)>;
3458
3459 /// Create a unique name for the entry function using the source location
3460 /// information of the current target region. The name will be something like:
3461 ///
3462 /// __omp_offloading_DD_FFFF_PP_lBB[_CC]
3463 ///
3464 /// where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
3465 /// mangled name of the function that encloses the target region and BB is the
3466 /// line number of the target region. CC is a count added when more than one
3467 /// region is located at the same location.
3468 ///
3469 /// If this target outline function is not an offload entry, we don't need to
3470 /// register it. This may happen if it is guarded by an if clause that is
3471 /// false at compile time, or no target archs have been specified.
3472 ///
3473 /// The created target region ID is used by the runtime library to identify
3474 /// the current target region, so it only has to be unique and not
3475 /// necessarily point to anything. It could be the pointer to the outlined
3476 /// function that implements the target region, but we aren't using that so
3477 /// that the compiler doesn't need to keep that, and could therefore inline
3478 /// the host function if proven worthwhile during optimization. On the other
3479 /// hand, if emitting code for the device, the ID has to be the function
3480 /// address so that it can be retrieved from the offloading entry and launched
3481 /// by the runtime library. We also mark the outlined function to have
3482 /// external linkage in case we are emitting code for the device, because
3483 /// these functions will be entry points to the device.
3484 ///
3485 /// \param InfoManager The info manager keeping track of the offload entries
3486 /// \param EntryInfo The entry information about the function
3487 /// \param GenerateFunctionCallback The callback function to generate the code
3488 /// \param OutlinedFunction Pointer to the outlined function
3489 /// \param EntryFnIDName Name of the ID to be created
3491 TargetRegionEntryInfo &EntryInfo,
3492 FunctionGenCallback &GenerateFunctionCallback, bool IsOffloadEntry,
3493 Function *&OutlinedFn, Constant *&OutlinedFnID);
3494
3495 /// Registers the given function and sets up the attributes of the function
3496 /// Returns the FunctionID.
3497 ///
3498 /// \param InfoManager The info manager keeping track of the offload entries
3499 /// \param EntryInfo The entry information about the function
3500 /// \param OutlinedFunction Pointer to the outlined function
3501 /// \param EntryFnName Name of the outlined function
3502 /// \param EntryFnIDName Name of the ID to be created
3505 Function *OutlinedFunction,
3506 StringRef EntryFnName, StringRef EntryFnIDName);
3507
3508 /// Type of BodyGen to use for region codegen
3509 ///
3510 /// Priv: If device pointer privatization is required, emit the body of the
3511 /// region here. It will have to be duplicated: with and without
3512 /// privatization.
3513 /// DupNoPriv: If we need device pointer privatization, we need
3514 /// to emit the body of the region with no privatization in the 'else' branch
3515 /// of the conditional.
3516 /// NoPriv: If we don't require privatization of device
3517 /// pointers, we emit the body in between the runtime calls. This avoids
3518 /// duplicating the body code.
3520
3521 /// Callback type for creating the map infos for the kernel parameters.
3522 /// \param CodeGenIP is the insertion point where code should be generated,
3523 /// if any.
3526
3527private:
3528 /// Emit the array initialization or deletion portion for user-defined mapper
3529 /// code generation. First, it evaluates whether an array section is mapped
3530 /// and whether the \a MapType instructs to delete this section. If \a IsInit
3531 /// is true, and \a MapType indicates to not delete this array, array
3532 /// initialization code is generated. If \a IsInit is false, and \a MapType
3533 /// indicates to delete this array, array deletion code is generated.
3534 void emitUDMapperArrayInitOrDel(Function *MapperFn, llvm::Value *MapperHandle,
3535 llvm::Value *Base, llvm::Value *Begin,
3536 llvm::Value *Size, llvm::Value *MapType,
3537 llvm::Value *MapName, TypeSize ElementSize,
3538 llvm::BasicBlock *ExitBB, bool IsInit);
3539
3540public:
3541 /// Emit the user-defined mapper function. The code generation follows the
3542 /// pattern in the example below.
3543 /// \code
3544 /// void .omp_mapper.<type_name>.<mapper_id>.(void *rt_mapper_handle,
3545 /// void *base, void *begin,
3546 /// int64_t size, int64_t type,
3547 /// void *name = nullptr) {
3548 /// // Allocate space for an array section first or add a base/begin for
3549 /// // pointer dereference.
3550 /// if ((size > 1 || (base != begin && maptype.IsPtrAndObj)) &&
3551 /// !maptype.IsDelete)
3552 /// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
3553 /// size*sizeof(Ty), clearToFromMember(type));
3554 /// // Map members.
3555 /// for (unsigned i = 0; i < size; i++) {
3556 /// // For each component specified by this mapper:
3557 /// for (auto c : begin[i]->all_components) {
3558 /// if (c.hasMapper())
3559 /// (*c.Mapper())(rt_mapper_handle, c.arg_base, c.arg_begin,
3560 /// c.arg_size,
3561 /// c.arg_type, c.arg_name);
3562 /// else
3563 /// __tgt_push_mapper_component(rt_mapper_handle, c.arg_base,
3564 /// c.arg_begin, c.arg_size, c.arg_type,
3565 /// c.arg_name);
3566 /// }
3567 /// }
3568 /// // Delete the array section.
3569 /// if (size > 1 && maptype.IsDelete)
3570 /// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
3571 /// size*sizeof(Ty), clearToFromMember(type));
3572 /// }
3573 /// \endcode
3574 ///
3575 /// \param PrivAndGenMapInfoCB Callback that privatizes code and populates the
3576 /// MapInfos and returns.
3577 /// \param ElemTy DeclareMapper element type.
3578 /// \param FuncName Optional param to specify mapper function name.
3579 /// \param CustomMapperCB Optional callback to generate code related to
3580 /// custom mappers.
3583 InsertPointTy CodeGenIP, llvm::Value *PtrPHI, llvm::Value *BeginArg)>
3584 PrivAndGenMapInfoCB,
3585 llvm::Type *ElemTy, StringRef FuncName,
3586 CustomMapperCallbackTy CustomMapperCB);
3587
3588 /// Generator for '#omp target data'
3589 ///
3590 /// \param Loc The location where the target data construct was encountered.
3591 /// \param AllocaIP The insertion points to be used for allocations.
3592 /// \param CodeGenIP The insertion point at which the target directive code
3593 /// should be placed.
3594 /// \param DeallocBlocks The insertion blocks at which explicit deallocations
3595 /// should be placed, if needed.
3596 /// \param IsBegin If true then emits begin mapper call otherwise emits
3597 /// end mapper call.
3598 /// \param DeviceID Stores the DeviceID from the device clause.
3599 /// \param IfCond Value which corresponds to the if clause condition.
3600 /// \param Info Stores all information related to the Target Data directive.
3601 /// \param GenMapInfoCB Callback that populates the MapInfos and returns.
3602 /// \param CustomMapperCB Callback to generate code related to
3603 /// custom mappers.
3604 /// \param BodyGenCB Optional Callback to generate the region code.
3605 /// \param DeviceAddrCB Optional callback to generate code related to
3606 /// use_device_ptr and use_device_addr.
3608 const LocationDescription &Loc, InsertPointTy AllocaIP,
3609 InsertPointTy CodeGenIP, ArrayRef<BasicBlock *> DeallocBlocks,
3610 Value *DeviceID, Value *IfCond, TargetDataInfo &Info,
3611 GenMapInfoCallbackTy GenMapInfoCB, CustomMapperCallbackTy CustomMapperCB,
3612 omp::RuntimeFunction *MapperFunc = nullptr,
3614 BodyGenTy BodyGenType)>
3615 BodyGenCB = nullptr,
3616 function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr,
3617 Value *SrcLocInfo = nullptr);
3618
3620 InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
3621 ArrayRef<BasicBlock *> DeallocBlocks)>;
3622
3624 Argument &Arg, Value *Input, Value *&RetVal, InsertPointTy AllocaIP,
3625 InsertPointTy CodeGenIP, ArrayRef<InsertPointTy> DeallocIPs)>;
3626
3627 /// Generator for '#omp target'
3628 ///
3629 /// \param Loc where the target data construct was encountered.
3630 /// \param IsOffloadEntry whether it is an offload entry.
3631 /// \param CodeGenIP The insertion point where the call to the outlined
3632 /// function should be emitted.
3633 /// \param DeallocBlocks The insertion points at which explicit deallocations
3634 /// should be placed, if needed.
3635 /// \param Info Stores all information related to the Target directive.
3636 /// \param EntryInfo The entry information about the function.
3637 /// \param DefaultAttrs Structure containing the default attributes, including
3638 /// numbers of threads and teams to launch the kernel with.
3639 /// \param RuntimeAttrs Structure containing the runtime numbers of threads
3640 /// and teams to launch the kernel with.
3641 /// \param IfCond value of the `if` clause.
3642 /// \param Inputs The input values to the region that will be passed.
3643 /// as arguments to the outlined function.
3644 /// \param BodyGenCB Callback that will generate the region code.
3645 /// \param ArgAccessorFuncCB Callback that will generate accessors
3646 /// instructions for passed in target arguments where necessary
3647 /// \param CustomMapperCB Callback to generate code related to
3648 /// custom mappers.
3649 /// \param Dependencies A vector of DependData objects that carry
3650 /// dependency information as passed in the depend clause
3651 /// \param HasNowait Whether the target construct has a `nowait` clause or
3652 /// not.
3653 /// \param DynCGroupMem The size of the dynamic groupprivate memory for each
3654 /// cgroup.
3655 /// \param DynCGroupMemFallback The fallback mechanism to execute if the requested
3656 /// cgroup memory cannot be provided.
3658 const LocationDescription &Loc, bool IsOffloadEntry,
3661 ArrayRef<BasicBlock *> DeallocBlocks, TargetDataInfo &Info,
3662 TargetRegionEntryInfo &EntryInfo,
3663 const TargetKernelDefaultAttrs &DefaultAttrs,
3664 const TargetKernelRuntimeAttrs &RuntimeAttrs, Value *IfCond,
3665 SmallVectorImpl<Value *> &Inputs, GenMapInfoCallbackTy GenMapInfoCB,
3666 TargetBodyGenCallbackTy BodyGenCB,
3667 TargetGenArgAccessorsCallbackTy ArgAccessorFuncCB,
3668 CustomMapperCallbackTy CustomMapperCB,
3669 const DependenciesInfo &Dependencies = {}, bool HasNowait = false,
3670 Value *DynCGroupMem = nullptr,
3671 omp::OMPDynGroupprivateFallbackType DynCGroupMemFallback =
3673
3674 /// Returns __kmpc_for_static_init_* runtime function for the specified
3675 /// size \a IVSize and sign \a IVSigned. Will create a distribute call
3676 /// __kmpc_distribute_static_init* if \a IsGPUDistribute is set.
3678 bool IVSigned,
3679 bool IsGPUDistribute);
3680
3681 /// Returns __kmpc_dispatch_init_* runtime function for the specified
3682 /// size \a IVSize and sign \a IVSigned.
3684 bool IVSigned);
3685
3686 /// Returns __kmpc_dispatch_next_* runtime function for the specified
3687 /// size \a IVSize and sign \a IVSigned.
3689 bool IVSigned);
3690
3691 /// Returns __kmpc_dispatch_fini_* runtime function for the specified
3692 /// size \a IVSize and sign \a IVSigned.
3694 bool IVSigned);
3695
3696 /// Returns __kmpc_dispatch_deinit runtime function.
3698
3699 /// Declarations for LLVM-IR types (simple, array, function and structure) are
3700 /// generated below. Their names are defined and used in OpenMPKinds.def. Here
3701 /// we provide the declarations, the initializeTypes function will provide the
3702 /// values.
3703 ///
3704 ///{
3705#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
3706#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
3707 ArrayType *VarName##Ty = nullptr; \
3708 PointerType *VarName##PtrTy = nullptr;
3709#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
3710 FunctionType *VarName = nullptr; \
3711 PointerType *VarName##Ptr = nullptr;
3712#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
3713 StructType *VarName = nullptr; \
3714 PointerType *VarName##Ptr = nullptr;
3715#include "llvm/Frontend/OpenMP/OMPKinds.def"
3716
3717 ///}
3718
3719private:
3720 /// Create all simple and struct types exposed by the runtime and remember
3721 /// the llvm::PointerTypes of them for easy access later.
3722 void initializeTypes(Module &M);
3723
3724 /// Common interface for generating entry calls for OMP Directives.
3725 /// if the directive has a region/body, It will set the insertion
3726 /// point to the body
3727 ///
3728 /// \param OMPD Directive to generate entry blocks for
3729 /// \param EntryCall Call to the entry OMP Runtime Function
3730 /// \param ExitBB block where the region ends.
3731 /// \param Conditional indicate if the entry call result will be used
3732 /// to evaluate a conditional of whether a thread will execute
3733 /// body code or not.
3734 ///
3735 /// \return The insertion position in exit block
3736 InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
3737 BasicBlock *ExitBB,
3738 bool Conditional = false);
3739
3740 /// Common interface to finalize the region
3741 ///
3742 /// \param OMPD Directive to generate exiting code for
3743 /// \param FinIP Insertion point for emitting Finalization code and exit call.
3744 /// This block must not contain any non-finalization code.
3745 /// \param ExitCall Call to the ending OMP Runtime Function
3746 /// \param HasFinalize indicate if the directive will require finalization
3747 /// and has a finalization callback in the stack that
3748 /// should be called.
3749 ///
3750 /// \return The insertion position in exit block
3751 InsertPointOrErrorTy emitCommonDirectiveExit(omp::Directive OMPD,
3752 InsertPointTy FinIP,
3753 Instruction *ExitCall,
3754 bool HasFinalize = true);
3755
3756 /// Common Interface to generate OMP inlined regions
3757 ///
3758 /// \param OMPD Directive to generate inlined region for
3759 /// \param EntryCall Call to the entry OMP Runtime Function
3760 /// \param ExitCall Call to the ending OMP Runtime Function
3761 /// \param BodyGenCB Body code generation callback.
3762 /// \param FiniCB Finalization Callback. Will be called when finalizing region
3763 /// \param Conditional indicate if the entry call result will be used
3764 /// to evaluate a conditional of whether a thread will execute
3765 /// body code or not.
3766 /// \param HasFinalize indicate if the directive will require finalization
3767 /// and has a finalization callback in the stack that
3768 /// should be called.
3769 /// \param IsCancellable if HasFinalize is set to true, indicate if the
3770 /// the directive should be cancellable.
3771 /// \return The insertion point after the region
3773 EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
3774 Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
3775 FinalizeCallbackTy FiniCB, bool Conditional = false,
3776 bool HasFinalize = true, bool IsCancellable = false);
3777
3778 /// Get the platform-specific name separator.
3779 /// \param Parts different parts of the final name that needs separation
3780 /// \param FirstSeparator First separator used between the initial two
3781 /// parts of the name.
3782 /// \param Separator separator used between all of the rest consecutive
3783 /// parts of the name
3784 static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
3785 StringRef FirstSeparator,
3786 StringRef Separator);
3787
3788 /// Returns corresponding lock object for the specified critical region
3789 /// name. If the lock object does not exist it is created, otherwise the
3790 /// reference to the existing copy is returned.
3791 /// \param CriticalName Name of the critical region.
3792 ///
3793 Value *getOMPCriticalRegionLock(StringRef CriticalName);
3794
3795 /// Callback type for Atomic Expression update
3796 /// ex:
3797 /// \code{.cpp}
3798 /// unsigned x = 0;
3799 /// #pragma omp atomic update
3800 /// x = Expr(x_old); //Expr() is any legal operation
3801 /// \endcode
3802 ///
3803 /// \param XOld the value of the atomic memory address to use for update
3804 /// \param IRB reference to the IRBuilder to use
3805 ///
3806 /// \returns Value to update X to.
3807 using AtomicUpdateCallbackTy =
3808 const function_ref<Expected<Value *>(Value *XOld, IRBuilder<> &IRB)>;
3809
3810private:
3811 enum AtomicKind { Read, Write, Update, Capture, Compare };
3812
3813 /// Determine whether to emit flush or not
3814 ///
3815 /// \param Loc The insert and source location description.
3816 /// \param AO The required atomic ordering
3817 /// \param AK The OpenMP atomic operation kind used.
3818 ///
3819 /// \returns whether a flush was emitted or not
3820 bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
3821 AtomicOrdering AO, AtomicKind AK);
3822
3823 /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
3824 /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
3825 /// Only Scalar data types.
3826 ///
3827 /// \param AllocaIP The insertion point to be used for alloca
3828 /// instructions.
3829 /// \param X The target atomic pointer to be updated
3830 /// \param XElemTy The element type of the atomic pointer.
3831 /// \param Expr The value to update X with.
3832 /// \param AO Atomic ordering of the generated atomic
3833 /// instructions.
3834 /// \param RMWOp The binary operation used for update. If
3835 /// operation is not supported by atomicRMW,
3836 /// or belong to {FADD, FSUB, BAD_BINOP}.
3837 /// Then a `cmpExch` based atomic will be generated.
3838 /// \param UpdateOp Code generator for complex expressions that cannot be
3839 /// expressed through atomicrmw instruction.
3840 /// \param VolatileX true if \a X volatile?
3841 /// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
3842 /// update expression, false otherwise.
3843 /// (e.g. true for X = X BinOp Expr)
3844 ///
3845 /// \returns A pair of the old value of X before the update, and the value
3846 /// used for the update.
3847 Expected<std::pair<Value *, Value *>>
3848 emitAtomicUpdate(InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
3850 AtomicUpdateCallbackTy &UpdateOp, bool VolatileX,
3851 bool IsXBinopExpr, bool IsIgnoreDenormalMode,
3852 bool IsFineGrainedMemory, bool IsRemoteMemory);
3853
3854 /// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
3855 ///
3856 /// \Return The instruction
3857 Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
3858 AtomicRMWInst::BinOp RMWOp);
3859
3860 bool IsFinalized;
3861
3862public:
3863 /// a struct to pack relevant information while generating atomic Ops
3865 Value *Var = nullptr;
3866 Type *ElemTy = nullptr;
3867 bool IsSigned = false;
3868 bool IsVolatile = false;
3869 };
3870
3871 /// Emit atomic Read for : V = X --- Only Scalar data types.
3872 ///
3873 /// \param Loc The insert and source location description.
3874 /// \param X The target pointer to be atomically read
3875 /// \param V Memory address where to store atomically read
3876 /// value
3877 /// \param AO Atomic ordering of the generated atomic
3878 /// instructions.
3879 /// \param AllocaIP Insert point for allocas
3880 //
3881 /// \return Insertion point after generated atomic read IR.
3884 AtomicOrdering AO,
3885 InsertPointTy AllocaIP);
3886
3887 /// Emit atomic write for : X = Expr --- Only Scalar data types.
3888 ///
3889 /// \param Loc The insert and source location description.
3890 /// \param X The target pointer to be atomically written to
3891 /// \param Expr The value to store.
3892 /// \param AO Atomic ordering of the generated atomic
3893 /// instructions.
3894 /// \param AllocaIP Insert point for allocas
3895 ///
3896 /// \return Insertion point after generated atomic Write IR.
3898 AtomicOpValue &X, Value *Expr,
3899 AtomicOrdering AO,
3900 InsertPointTy AllocaIP);
3901
3902 /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
3903 /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
3904 /// Only Scalar data types.
3905 ///
3906 /// \param Loc The insert and source location description.
3907 /// \param AllocaIP The insertion point to be used for alloca instructions.
3908 /// \param X The target atomic pointer to be updated
3909 /// \param Expr The value to update X with.
3910 /// \param AO Atomic ordering of the generated atomic instructions.
3911 /// \param RMWOp The binary operation used for update. If operation
3912 /// is not supported by atomicRMW, or belong to
3913 /// {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based
3914 /// atomic will be generated.
3915 /// \param UpdateOp Code generator for complex expressions that cannot be
3916 /// expressed through atomicrmw instruction.
3917 /// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
3918 /// update expression, false otherwise.
3919 /// (e.g. true for X = X BinOp Expr)
3920 ///
3921 /// \return Insertion point after generated atomic update IR.
3924 Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
3925 AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr,
3926 bool IsIgnoreDenormalMode = false, bool IsFineGrainedMemory = false,
3927 bool IsRemoteMemory = false);
3928
3929 /// Emit atomic update for constructs: --- Only Scalar data types
3930 /// V = X; X = X BinOp Expr ,
3931 /// X = X BinOp Expr; V = X,
3932 /// V = X; X = Expr BinOp X,
3933 /// X = Expr BinOp X; V = X,
3934 /// V = X; X = UpdateOp(X),
3935 /// X = UpdateOp(X); V = X,
3936 ///
3937 /// \param Loc The insert and source location description.
3938 /// \param AllocaIP The insertion point to be used for alloca instructions.
3939 /// \param X The target atomic pointer to be updated
3940 /// \param V Memory address where to store captured value
3941 /// \param Expr The value to update X with.
3942 /// \param AO Atomic ordering of the generated atomic instructions
3943 /// \param RMWOp The binary operation used for update. If
3944 /// operation is not supported by atomicRMW, or belong to
3945 /// {FADD, FSUB, BAD_BINOP}. Then a cmpExch based
3946 /// atomic will be generated.
3947 /// \param UpdateOp Code generator for complex expressions that cannot be
3948 /// expressed through atomicrmw instruction.
3949 /// \param UpdateExpr true if X is an in place update of the form
3950 /// X = X BinOp Expr or X = Expr BinOp X
3951 /// \param IsXBinopExpr true if X is Left H.S. in Right H.S. part of the
3952 /// update expression, false otherwise.
3953 /// (e.g. true for X = X BinOp Expr)
3954 /// \param IsPostfixUpdate true if original value of 'x' must be stored in
3955 /// 'v', not an updated one.
3956 ///
3957 /// \return Insertion point after generated atomic capture IR.
3960 AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
3961 AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
3962 bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr,
3963 bool IsIgnoreDenormalMode = false, bool IsFineGrainedMemory = false,
3964 bool IsRemoteMemory = false);
3965
3966 /// Emit atomic compare for constructs: --- Only scalar data types
3967 /// cond-expr-stmt:
3968 /// x = x ordop expr ? expr : x;
3969 /// x = expr ordop x ? expr : x;
3970 /// x = x == e ? d : x;
3971 /// x = e == x ? d : x; (this one is not in the spec)
3972 /// cond-update-stmt:
3973 /// if (x ordop expr) { x = expr; }
3974 /// if (expr ordop x) { x = expr; }
3975 /// if (x == e) { x = d; }
3976 /// if (e == x) { x = d; } (this one is not in the spec)
3977 /// conditional-update-capture-atomic:
3978 /// v = x; cond-update-stmt; (IsPostfixUpdate=true, IsFailOnly=false)
3979 /// cond-update-stmt; v = x; (IsPostfixUpdate=false, IsFailOnly=false)
3980 /// if (x == e) { x = d; } else { v = x; } (IsPostfixUpdate=false,
3981 /// IsFailOnly=true)
3982 /// r = x == e; if (r) { x = d; } (IsPostfixUpdate=false, IsFailOnly=false)
3983 /// r = x == e; if (r) { x = d; } else { v = x; } (IsPostfixUpdate=false,
3984 /// IsFailOnly=true)
3985 ///
3986 /// \param Loc The insert and source location description.
3987 /// \param X The target atomic pointer to be updated.
3988 /// \param V Memory address where to store captured value (for
3989 /// compare capture only).
3990 /// \param R Memory address where to store comparison result
3991 /// (for compare capture with '==' only).
3992 /// \param E The expected value ('e') for forms that use an
3993 /// equality comparison or an expression ('expr') for
3994 /// forms that use 'ordop' (logically an atomic maximum or
3995 /// minimum).
3996 /// \param D The desired value for forms that use an equality
 3997 /// comparison. For forms that use 'ordop', it should be
3998 /// \p nullptr.
3999 /// \param AO Atomic ordering of the generated atomic instructions.
4000 /// \param Op Atomic compare operation. It can only be ==, <, or >.
4001 /// \param IsXBinopExpr True if the conditional statement is in the form where
4002 /// x is on LHS. It only matters for < or >.
4003 /// \param IsPostfixUpdate True if original value of 'x' must be stored in
4004 /// 'v', not an updated one (for compare capture
4005 /// only).
4006 /// \param IsFailOnly True if the original value of 'x' is stored to 'v'
4007 /// only when the comparison fails. This is only valid for
4008 /// the case the comparison is '=='.
4009 ///
4010 /// \return Insertion point after generated atomic capture IR.
4015 bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly);
4019 omp::OMPAtomicCompareOp Op, bool IsXBinopExpr, bool IsPostfixUpdate,
4020 bool IsFailOnly, AtomicOrdering Failure);
4021
4022 /// Create the control flow structure of a canonical OpenMP loop.
4023 ///
4024 /// The emitted loop will be disconnected, i.e. no edge to the loop's
4025 /// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
4026 /// IRBuilder location is not preserved.
4027 ///
4028 /// \param DL DebugLoc used for the instructions in the skeleton.
4029 /// \param TripCount Value to be used for the trip count.
4030 /// \param F Function in which to insert the BasicBlocks.
4031 /// \param PreInsertBefore Where to insert BBs that execute before the body,
4032 /// typically the body itself.
4033 /// \param PostInsertBefore Where to insert BBs that execute after the body.
4034 /// \param Name Base name used to derive BB
4035 /// and instruction names.
4036 ///
4037 /// \returns The CanonicalLoopInfo that represents the emitted loop.
4039 Function *F,
4040 BasicBlock *PreInsertBefore,
4041 BasicBlock *PostInsertBefore,
4042 const Twine &Name = {});
4043 /// OMP Offload Info Metadata name string
4044 const std::string ompOffloadInfoName = "omp_offload.info";
4045
4046 /// Loads all the offload entries information from the host IR
4047 /// metadata. This function is only meant to be used with device code
4048 /// generation.
4049 ///
4050 /// \param M Module to load Metadata info from. Module passed maybe
4051 /// loaded from bitcode file, i.e, different from OpenMPIRBuilder::M module.
4053
4054 /// Loads all the offload entries information from the host IR
4055 /// metadata read from the file passed in as the HostFilePath argument. This
4056 /// function is only meant to be used with device code generation.
4057 ///
4058 /// \param HostFilePath The path to the host IR file,
4059 /// used to load in offload metadata for the device, allowing host and device
4060 /// to maintain the same metadata mapping.
4062 StringRef HostFilePath);
4063
4064 /// Gets (if variable with the given name already exist) or creates
4065 /// internal global variable with the specified Name. The created variable has
4066 /// linkage CommonLinkage by default and is initialized by null value.
4067 /// \param Ty Type of the global variable. If it is exist already the type
4068 /// must be the same.
4069 /// \param Name Name of the variable.
4072 std::optional<unsigned> AddressSpace = {});
4073
4075 InsertPointTy BodyIP, llvm::Value *LinearIV)>;
4076
4077 /// Create a canonical iterator loop at the current insertion point.
4078 ///
4079 /// This helper splits the current block and builds a canonical loop
4080 /// using createLoopSkeleton(). The resulting control flow looks like:
4081 ///
4082 /// CurBB -> Preheader -> Header -> Body -> Latch -> After -> ContBB
4083 ///
4084 /// The body of the loop is produced by calling \p BodyGen with the insertion
4085 /// point for the loop body and the induction variable.
4086 /// Unlike createCanonicalLoop(), this function is intended for \p BodyGen
4087 /// that may perform region lowering (e.g., translating MLIR regions) and are
4088 /// not guaranteed to preserve the canonical skeleton's body terminator. In
4089 /// particular:
4090 ///
4091 /// - The skeleton’s unconditional branch from the loop body is removed
4092 /// before invoking \p BodyGen.
4093 /// - \p BodyGen may freely emit instructions and temporarily introduce
4094 /// control flow.
4095 /// - If the loop body does not end with a terminator after \p BodyGen
4096 /// returns, a branch to the latch is inserted to restore canonical form.
4097 ///
4098 /// \param Loc The location where the iterator modifier was encountered.
4099 /// \param TripCount Number of loop iterations.
4100 /// \param BodyGen Callback to generate the loop body.
4101 /// \param Name Base name used for creating the loop
4102 /// \returns The insertion position *after* the iterator loop
4105 IteratorBodyGenTy BodyGen, llvm::StringRef Name = "iterator");
4106
4107 /// Kind of parameter in a function with 'declare simd' directive.
4116
4117 /// Attribute set of the `declare simd` parameter.
4124
4130
4131 /// Emit x86 vector-function ABI attributes for a `declare simd` function.
4132 ///
4133 /// Generates and attaches `_ZGV*` vector function ABI attributes to \p Fn
4134 /// following the x86 vector ABI used by OpenMP `declare simd`. For each
4135 /// supported ISA (SSE, AVX, AVX2, AVX512) and masking variant, this
4136 /// constructs the appropriate mangled vector-function name and adds it as a
4137 /// function attribute.
4138 ///
4139 /// \param Fn The scalar function to which vector-function attributes
4140 /// are attached.
4141 /// \param NumElements Number of elements used to derive the vector length
4142 /// when
4143 /// \p VLENVal is not specified.
4144 /// \param VLENVal User provided vector length.
4145 /// \param ParamAttrs Array of attribute set of the `declare simd` parameter.
4146 /// \param Branch `undefined`, `inbranch` or `notinbranch` clause.
4148 llvm::Function *Fn, unsigned NumElements, const llvm::APSInt &VLENVal,
4150
4151 /// Emit AArch64 vector-function ABI attributes for a `declare simd` function.
4152 ///
4153 /// Generates and attaches `_ZGV*` vector function ABI attributes to \p Fn
4154 /// following the AArch64 vector-function ABI. The emitted names depend on the
4155 /// selected ISA, user-specified vector length, parameter attribute mangling,
4156 /// and the declare simd branch clause.
4157 ///
4158 /// \param Fn The scalar function to which vector-function
4159 /// attributes are attached.
4160 /// \param VLENVal User provided vector length.
4161 /// \param ParamAttrs Array of attribute set of the `declare simd`
4162 /// parameter.
4163 /// \param Branch `undefined`, `inbranch` or `notinbranch`
4164 /// clause.
4165 /// \param ISA `'n'` for Advanced SIMD or `'s'` for SVE.
4166 /// \param NarrowestDataSize Narrowest data size in bits used to infer the
4167 /// default vector length when \p VLENVal is
4168 /// absent.
4169 /// \param OutputBecomesInput Whether result values are represented as input
4170 /// parameters in the emitted vector-function ABI
4171 /// name.
4173 llvm::Function *Fn, unsigned VLENVal,
4175 char ISA, unsigned NarrowestDataSize, bool OutputBecomesInput);
4176};
4177
4178/// Class to represent the control flow structure of an OpenMP canonical loop.
4179///
4180/// The control-flow structure is standardized for easy consumption by
4181/// directives associated with loops. For instance, the worksharing-loop
4182/// construct may change this control flow such that each loop iteration is
4183/// executed on only one thread. The constraints of a canonical loop in brief
4184/// are:
4185///
4186/// * The number of loop iterations must have been computed before entering the
4187/// loop.
4188///
4189/// * Has an (unsigned) logical induction variable that starts at zero and
4190/// increments by one.
4191///
4192/// * The loop's CFG itself has no side-effects. The OpenMP specification
4193/// itself allows side-effects, but the order in which they happen, including
4194/// how often or whether at all, is unspecified. We expect that the frontend
4195/// will emit those side-effect instructions somewhere (e.g. before the loop)
4196/// such that the CanonicalLoopInfo itself can be side-effect free.
4197///
4198/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
4199/// execution of a loop body that satisfies these constraints. It does NOT
4200/// represent arbitrary SESE regions that happen to contain a loop. Do not use
4201/// CanonicalLoopInfo for such purposes.
4202///
4203/// The control flow can be described as follows:
4204///
4205/// Preheader
4206/// |
4207/// /-> Header
4208/// | |
4209/// | Cond---\
4210/// | | |
4211/// | Body |
4212/// | | | |
4213/// | <...> |
4214/// | | | |
4215/// \--Latch |
4216/// |
4217/// Exit
4218/// |
4219/// After
4220///
4221/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
4222/// including) and end at AfterIP (at the After's first instruction, excluding).
4223/// That is, instructions in the Preheader and After blocks (except the
4224/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
4225/// side-effects. Typically, the Preheader is used to compute the loop's trip
4226/// count. The instructions from BodyIP (at the Body block's first instruction,
4227/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
4228/// control and thus can have side-effects. The body block is the single entry
4229/// point into the loop body, which may contain arbitrary control flow as long
4230/// as all control paths eventually branch to the Latch block.
4231///
4232/// TODO: Consider adding another standardized BasicBlock between Body CFG and
4233/// Latch to guarantee that there is only a single edge to the latch. It would
4234/// make loop transformations easier to not needing to consider multiple
4235/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
4236/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
4237/// executes after each body iteration.
4238///
4239/// There must be no loop-carried dependencies through llvm::Values. This is
4240/// equivalent to saying that the Latch has no PHINode and the Header's only PHINode is
4241/// for the induction variable.
4242///
4243/// All code in Header, Cond, Latch and Exit (plus the terminator of the
4244/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
4245/// by assertOK(). They are expected to not be modified unless explicitly
4246/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
4247/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
4248/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
4249/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
4250/// anymore as its underlying control flow may not exist anymore.
4251/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
4252/// may also return a new CanonicalLoopInfo that can be passed to other
4253/// loop-associated construct implementing methods. These loop-transforming
4254/// methods may either create a new CanonicalLoopInfo usually using
4255/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
4256/// modify one of the input CanonicalLoopInfo and return it as representing the
4257/// modified loop. What is done is an implementation detail of
4258/// transformation-implementing method and callers should always assume that the
4259/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
4260/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
4261/// created by createCanonicalLoop, such that transforming methods do not have
4262/// to special case where the CanonicalLoopInfo originated from.
4263///
4264/// Generally, methods consuming CanonicalLoopInfo do not need an
4265/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
4266/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
4267/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
4268/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
4269/// any InsertPoint in the Preheader, After or Block can still be used after
4270/// calling such a method.
4271///
4272/// TODO: Provide mechanisms for exception handling and cancellation points.
4273///
4274/// Defined outside OpenMPIRBuilder because nested classes cannot be
4275/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
4277 friend class OpenMPIRBuilder;
4278
4279private:
4280 BasicBlock *Header = nullptr;
4281 BasicBlock *Cond = nullptr;
4282 BasicBlock *Latch = nullptr;
4283 BasicBlock *Exit = nullptr;
4284
4285 // Hold the MLIR value for the `lastiter` of the canonical loop.
4286 Value *LastIter = nullptr;
4287
4288 /// Add the control blocks of this loop to \p BBs.
4289 ///
4290 /// This does not include any block from the body, including the one returned
4291 /// by getBody().
4292 ///
4293 /// FIXME: This currently includes the Preheader and After blocks even though
4294 /// their content is (mostly) not under CanonicalLoopInfo's control.
 4295 /// Re-evaluate whether this makes sense.
4296 void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
4297
4298 /// Sets the number of loop iterations to the given value. This value must be
4299 /// valid in the condition block (i.e., defined in the preheader) and is
4300 /// interpreted as an unsigned integer.
4301 void setTripCount(Value *TripCount);
4302
4303 /// Replace all uses of the canonical induction variable in the loop body with
4304 /// a new one.
4305 ///
4306 /// The intended use case is to update the induction variable for an updated
4307 /// iteration space such that it can stay normalized in the 0...tripcount-1
4308 /// range.
4309 ///
4310 /// The \p Updater is called with the (presumable updated) current normalized
4311 /// induction variable and is expected to return the value that uses of the
4312 /// pre-updated induction values should use instead, typically dependent on
4313 /// the new induction variable. This is a lambda (instead of e.g. just passing
4314 /// the new value) to be able to distinguish the uses of the pre-updated
4315 /// induction variable and uses of the induction varible to compute the
4316 /// updated induction variable value.
4317 void mapIndVar(llvm::function_ref<Value *(Instruction *)> Updater);
4318
4319public:
4320 /// Sets the last iteration variable for this loop.
4321 void setLastIter(Value *IterVar) { LastIter = std::move(IterVar); }
4322
 4323 /// Returns the last iteration variable for this loop.
 4324 /// Certain use-cases (like translation of linear clause) may access
 4325 /// this variable even after a loop transformation. Hence, do not guard
 4326 /// this getter function by `isValid`. It is the responsibility of the
 4327 /// caller to ensure this functionality is not invoked by a non-outlined
 4328 /// CanonicalLoopInfo object (in which case, `setLastIter` will never be
 4329 /// invoked and `LastIter` will be by default `nullptr`).
 4330 Value *getLastIter() { return LastIter; }
4331
4332 /// Returns whether this object currently represents the IR of a loop. If
4333 /// returning false, it may have been consumed by a loop transformation or not
4334 /// been initialized. Do not use in this case;
4335 bool isValid() const { return Header; }
4336
4337 /// The preheader ensures that there is only a single edge entering the loop.
 4338 /// Code that must be executed before any loop iteration can be emitted here,
4339 /// such as computing the loop trip count and begin lifetime markers. Code in
4340 /// the preheader is not considered part of the canonical loop.
4342
4343 /// The header is the entry for each iteration. In the canonical control flow,
4344 /// it only contains the PHINode for the induction variable.
4346 assert(isValid() && "Requires a valid canonical loop");
4347 return Header;
4348 }
4349
4350 /// The condition block computes whether there is another loop iteration. If
4351 /// yes, branches to the body; otherwise to the exit block.
4353 assert(isValid() && "Requires a valid canonical loop");
4354 return Cond;
4355 }
4356
4357 /// The body block is the single entry for a loop iteration and not controlled
4358 /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
4359 /// eventually branch to the \p Latch block.
4361 assert(isValid() && "Requires a valid canonical loop");
4362 return cast<CondBrInst>(Cond->getTerminator())->getSuccessor(0);
4363 }
4364
4365 /// Reaching the latch indicates the end of the loop body code. In the
4366 /// canonical control flow, it only contains the increment of the induction
4367 /// variable.
4369 assert(isValid() && "Requires a valid canonical loop");
4370 return Latch;
4371 }
4372
4373 /// Reaching the exit indicates no more iterations are being executed.
4375 assert(isValid() && "Requires a valid canonical loop");
4376 return Exit;
4377 }
4378
4379 /// The after block is intended for clean-up code such as lifetime end
4380 /// markers. It is separate from the exit block to ensure, analogous to the
4381 /// preheader, it having just a single entry edge and being free from PHI
4382 /// nodes should there be multiple loop exits (such as from break
4383 /// statements/cancellations).
4385 assert(isValid() && "Requires a valid canonical loop");
4386 return Exit->getSingleSuccessor();
4387 }
4388
4389 /// Returns the llvm::Value containing the number of loop iterations. It must
4390 /// be valid in the preheader and always interpreted as an unsigned integer of
4391 /// any bit-width.
4393 assert(isValid() && "Requires a valid canonical loop");
4394 Instruction *CmpI = &Cond->front();
4395 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
4396 return CmpI->getOperand(1);
4397 }
4398
4399 /// Returns the instruction representing the current logical induction
4400 /// variable. Always unsigned, always starting at 0 with an increment of one.
4402 assert(isValid() && "Requires a valid canonical loop");
4403 Instruction *IndVarPHI = &Header->front();
4404 assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
4405 return IndVarPHI;
4406 }
4407
4408 /// Return the type of the induction variable (and the trip count).
4410 assert(isValid() && "Requires a valid canonical loop");
4411 return getIndVar()->getType();
4412 }
4413
4414 /// Return the insertion point for user code before the loop.
4416 assert(isValid() && "Requires a valid canonical loop");
4417 BasicBlock *Preheader = getPreheader();
4418 return {Preheader, std::prev(Preheader->end())};
4419 };
4420
4421 /// Return the insertion point for user code in the body.
4423 assert(isValid() && "Requires a valid canonical loop");
4424 BasicBlock *Body = getBody();
4425 return {Body, Body->begin()};
4426 };
4427
4428 /// Return the insertion point for user code after the loop.
4430 assert(isValid() && "Requires a valid canonical loop");
4431 BasicBlock *After = getAfter();
4432 return {After, After->begin()};
4433 };
4434
4436 assert(isValid() && "Requires a valid canonical loop");
4437 return Header->getParent();
4438 }
4439
4440 /// Consistency self-check.
4441 LLVM_ABI void assertOK() const;
4442
4443 /// Invalidate this loop. That is, the underlying IR does not fulfill the
4444 /// requirements of an OpenMP canonical loop anymore.
4445 LLVM_ABI void invalidate();
4446};
4447
4448/// ScanInfo holds the information to assist in lowering of Scan reduction.
4449/// Before lowering, the body of the for loop specifying scan reduction is
4450/// expected to have the following structure
4451///
4452/// Loop Body Entry
4453/// |
4454/// Code before the scan directive
4455/// |
4456/// Scan Directive
4457/// |
4458/// Code after the scan directive
4459/// |
4460/// Loop Body Exit
4461/// When `createCanonicalScanLoops` is executed, the bodyGen callback of it
4462/// transforms the body to:
4463///
4464/// Loop Body Entry
4465/// |
4466/// OMPScanDispatch
4467///
4468/// OMPBeforeScanBlock
4469/// |
4470/// OMPScanLoopExit
4471/// |
4472/// Loop Body Exit
4473///
4474/// The insert point is updated to the first insert point of OMPBeforeScanBlock.
4475/// It dominates the control flow of code generated until
4476/// scan directive is encountered and OMPAfterScanBlock dominates the
4477/// control flow of code generated after scan is encountered. The successor
4478/// of OMPScanDispatch can be OMPBeforeScanBlock or OMPAfterScanBlock based
4479/// on 1.whether it is in Input phase or Scan Phase , 2. whether it is an
4480/// exclusive or inclusive scan. This jump is added when `createScan` is
4481/// executed. If input loop is being generated, if it is inclusive scan,
4482/// `OMPAfterScanBlock` succeeds `OMPScanDispatch` , if exclusive,
4483/// `OMPBeforeScanBlock` succeeds `OMPScanDispatch` and vice versa for scan loop. At
4484/// the end of the input loop, temporary buffer is populated and at the
4485/// beginning of the scan loop, temporary buffer is read. After scan directive
4486/// is encountered, insertion point is updated to `OMPAfterScanBlock` as it is
4487/// expected to dominate the code after the scan directive. Both Before and
4488/// After scan blocks are succeeded by `OMPScanLoopExit`.
4489/// Temporary buffer allocations are done in `ScanLoopInit` block before the
4490/// lowering of for-loop. The results are copied back to reduction variable in
4491/// `ScanLoopFinish` block.
4493public:
4494 /// Dominates the body of the loop before scan directive
4496
 4497 /// Dominates the body of the loop after scan directive
4499
4500 /// Controls the flow to before or after scan blocks
4502
4503 /// Exit block of loop body
4505
4506 /// Block before loop body where scan initializations are done
4508
4509 /// Block after loop body where scan finalizations are done
4511
4512 /// If true, it indicates Input phase is lowered; else it indicates
4513 /// ScanPhase is lowered
4514 bool OMPFirstScanLoop = false;
4515
4516 /// Maps the private reduction variable to the pointer of the temporary
4517 /// buffer
4519
4520 /// Keeps track of value of iteration variable for input/scan loop to be
4521 /// used for Scan directive lowering
4522 llvm::Value *IV = nullptr;
4523
4524 /// Stores the span of canonical loop being lowered to be used for temporary
4525 /// buffer allocation or Finalization.
4526 llvm::Value *Span = nullptr;
4527
4531 ScanInfo(ScanInfo &) = delete;
4532 ScanInfo &operator=(const ScanInfo &) = delete;
4533
4534 ~ScanInfo() { delete (ScanBuffPtrs); }
4535};
4536
4537} // end namespace llvm
4538
4539#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
arc branch finalize
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file defines the BumpPtrAllocator interface.
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
DXIL Finalize Linkage
Hexagon Hardware Loops
Module.h This file contains the declarations for the Module class.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
Machine Check Debug Module
#define T
This file defines constans and helpers used when dealing with OpenMP.
Provides definitions for Target specific Grid Values.
const SmallVectorImpl< MachineOperand > & Cond
Basic Register Allocator
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
std::unordered_set< BasicBlock * > BlockSet
This file implements a set that has insertion order iteration characteristics.
Value * RHS
Value * LHS
The Input class is used to parse a yaml document into in-memory structs and vectors.
An arbitrary precision integer that knows its signedness.
Definition APSInt.h:24
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
Align AtomicAlign
Definition Atomic.h:23
bool UseLibcall
Definition Atomic.h:25
IRBuilderBase * Builder
Definition Atomic.h:19
uint64_t AtomicSizeInBits
Definition Atomic.h:21
uint64_t ValueSizeInBits
Definition Atomic.h:22
IRBuilderBase::InsertPoint AllocaIP
Definition Atomic.h:26
Align ValueAlign
Definition Atomic.h:24
BinOp
This enumeration lists the possible modifications atomicrmw can make.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator end()
Definition BasicBlock.h:474
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
This class represents a function call, abstracting a target machine's calling convention.
Class to represented the control flow structure of an OpenMP canonical loop.
Value * getTripCount() const
Returns the llvm::Value containing the number of loop iterations.
BasicBlock * getHeader() const
The header is the entry for each iteration.
LLVM_ABI void assertOK() const
Consistency self-check.
Type * getIndVarType() const
Return the type of the induction variable (and the trip count).
BasicBlock * getBody() const
The body block is the single entry for a loop iteration and not controlled by CanonicalLoopInfo.
bool isValid() const
Returns whether this object currently represents the IR of a loop.
void setLastIter(Value *IterVar)
Sets the last iteration variable for this loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const
Return the insertion point for user code after the loop.
Value * getLastIter()
Returns the last iteration variable for this loop.
OpenMPIRBuilder::InsertPointTy getBodyIP() const
Return the insertion point for user code in the body.
BasicBlock * getAfter() const
The after block is intended for clean-up code such as lifetime end markers.
Function * getFunction() const
LLVM_ABI void invalidate()
Invalidate this loop.
BasicBlock * getLatch() const
Reaching the latch indicates the end of the loop body code.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const
Return the insertion point for user code before the loop.
BasicBlock * getCond() const
The condition block computes whether there is another loop iteration.
BasicBlock * getExit() const
Reaching the exit indicates no more iterations are being executed.
LLVM_ABI BasicBlock * getPreheader() const
The preheader ensures that there is only a single edge entering the loop.
Instruction * getIndVar() const
Returns the instruction representing the current logical induction variable.
Utility class for extracting code into a new function.
This is the shared class of boolean and integer constants.
Definition Constants.h:87
This is an important base class in LLVM.
Definition Constant.h:43
A debug info location.
Definition DebugLoc.h:123
Lightweight error class with error context and mandatory checking.
Definition Error.h:159
Tagged union holding either a T or a Error.
Definition Error.h:485
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition GlobalValue.h:52
InsertPoint - A saved insertion point.
Definition IRBuilder.h:298
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2847
Class to represent integer types.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:589
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
OffloadEntryInfoDeviceGlobalVar(unsigned Order, OMPTargetGlobalVarEntryKind Flags)
OffloadEntryInfoDeviceGlobalVar(unsigned Order, Constant *Addr, int64_t VarSize, OMPTargetGlobalVarEntryKind Flags, GlobalValue::LinkageTypes Linkage, const std::string &VarName)
static bool classof(const OffloadEntryInfo *Info)
OffloadEntryInfoTargetRegion(unsigned Order, Constant *Addr, Constant *ID, OMPTargetRegionEntryKind Flags)
@ OffloadingEntryInfoTargetRegion
Entry is a target region.
@ OffloadingEntryInfoDeviceGlobalVar
Entry is a declare target variable.
OffloadingEntryInfoKinds getKind() const
OffloadEntryInfo(OffloadingEntryInfoKinds Kind)
static bool classof(const OffloadEntryInfo *Info)
OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags)
Class that manages information about offload code regions and data.
function_ref< void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)> OffloadDeviceGlobalVarEntryInfoActTy
Applies action Action on all registered entries.
OMPTargetDeviceClauseKind
Kind of device clause for declare target variables and functions NOTE: Currently not used as a part o...
@ OMPTargetDeviceClauseNoHost
The target is marked for non-host devices.
@ OMPTargetDeviceClauseAny
The target is marked for all devices.
@ OMPTargetDeviceClauseNone
The target is marked as having no clause.
@ OMPTargetDeviceClauseHost
The target is marked for host devices.
LLVM_ABI void registerDeviceGlobalVarEntryInfo(StringRef VarName, Constant *Addr, int64_t VarSize, OMPTargetGlobalVarEntryKind Flags, GlobalValue::LinkageTypes Linkage)
Register device global variable entry.
LLVM_ABI void initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order)
Initialize device global variable entry.
LLVM_ABI void actOnDeviceGlobalVarEntriesInfo(const OffloadDeviceGlobalVarEntryInfoActTy &Action)
OMPTargetRegionEntryKind
Kind of the target registry entry.
@ OMPTargetRegionEntryTargetRegion
Mark the entry as target region.
OffloadEntriesInfoManager(OpenMPIRBuilder *builder)
LLVM_ABI void getTargetRegionEntryFnName(SmallVectorImpl< char > &Name, const TargetRegionEntryInfo &EntryInfo)
LLVM_ABI bool hasTargetRegionEntryInfo(TargetRegionEntryInfo EntryInfo, bool IgnoreAddressId=false) const
Return true if a target region entry with the provided information exists.
LLVM_ABI void registerTargetRegionEntryInfo(TargetRegionEntryInfo EntryInfo, Constant *Addr, Constant *ID, OMPTargetRegionEntryKind Flags)
Register target region entry.
LLVM_ABI void actOnTargetRegionEntriesInfo(const OffloadTargetRegionEntryInfoActTy &Action)
unsigned size() const
Return number of entries defined so far.
LLVM_ABI void initializeTargetRegionEntryInfo(const TargetRegionEntryInfo &EntryInfo, unsigned Order)
Initialize target region entry.
OMPTargetGlobalVarEntryKind
Kind of the global variable entry.
@ OMPTargetGlobalVarEntryEnter
Mark the entry as a declare target enter.
@ OMPTargetGlobalVarEntryNone
Mark the entry as having no declare target entry kind.
@ OMPTargetGlobalRegisterRequires
Mark the entry as a register requires global.
@ OMPTargetGlobalVarEntryIndirect
Mark the entry as a declare target indirect global.
@ OMPTargetGlobalVarEntryLink
Mark the entry as a declare target link.
@ OMPTargetGlobalVarEntryTo
Mark the entry as a declare target to.
@ OMPTargetGlobalVarEntryIndirectVTable
Mark the entry as a declare target indirect vtable.
function_ref< void(const TargetRegionEntryInfo &EntryInfo, const OffloadEntryInfoTargetRegion &)> OffloadTargetRegionEntryInfoActTy
Applies action Action on all registered entries.
bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const
Checks if the variable with the given name has been registered already.
LLVM_ABI bool empty() const
Return true if there are no entries defined.
Captures attributes that affect generating LLVM-IR using the OpenMPIRBuilder and related classes.
std::optional< bool > IsTargetDevice
Flag to define whether to generate code for the role of the OpenMP host (if set to false) or device (...
std::optional< bool > IsGPU
Flag for specifying if the compilation is done for an accelerator.
std::optional< StringRef > FirstSeparator
First separator used between the initial two parts of a name.
StringRef separator() const
LLVM_ABI int64_t getRequiresFlags() const
Returns requires directive clauses as flags compatible with those expected by libomptarget.
void setFirstSeparator(StringRef FS)
void setDefaultTargetAS(unsigned AS)
StringRef firstSeparator() const
std::optional< bool > OpenMPOffloadMandatory
Flag for specifying if offloading is mandatory.
std::optional< bool > EmitLLVMUsedMetaInfo
Flag for specifying if LLVMUsed information should be emitted.
SmallVector< Triple > TargetTriples
When compilation is being done for the OpenMP host (i.e.
LLVM_ABI void setHasRequiresReverseOffload(bool Value)
LLVM_ABI bool hasRequiresUnifiedSharedMemory() const
LLVM_ABI void setHasRequiresUnifiedSharedMemory(bool Value)
unsigned getDefaultTargetAS() const
std::optional< StringRef > Separator
Separator used between all of the rest consecutive parts of a name.
LLVM_ABI bool hasRequiresDynamicAllocators() const
bool openMPOffloadMandatory() const
CallingConv::ID getRuntimeCC() const
LLVM_ABI void setHasRequiresUnifiedAddress(bool Value)
void setOpenMPOffloadMandatory(bool Value)
void setIsTargetDevice(bool Value)
void setSeparator(StringRef S)
void setRuntimeCC(CallingConv::ID CC)
LLVM_ABI void setHasRequiresDynamicAllocators(bool Value)
void setEmitLLVMUsed(bool Value=true)
std::optional< omp::GV > GridValue
LLVM_ABI bool hasRequiresReverseOffload() const
LLVM_ABI bool hasRequiresUnifiedAddress() const
llvm::AllocaInst * CreateAlloca(llvm::Type *Ty, const llvm::Twine &Name) const override
void decorateWithTBAA(llvm::Instruction *I) override
AtomicInfo(IRBuilder<> *Builder, llvm::Type *Ty, uint64_t AtomicSizeInBits, uint64_t ValueSizeInBits, llvm::Align AtomicAlign, llvm::Align ValueAlign, bool UseLibcall, IRBuilderBase::InsertPoint AllocaIP, llvm::Value *AtomicVar)
llvm::Value * getAtomicPointer() const override
Struct that keeps the information that should be kept throughout a 'target data' region.
TargetDataInfo(bool RequiresDevicePointerInfo, bool SeparateBeginEndCalls)
SmallMapVector< const Value *, std::pair< Value *, Value * >, 4 > DevicePtrInfoMap
void clearArrayInfo()
Clear information about the data arrays.
unsigned NumberOfPtrs
The total number of pointers passed to the runtime library.
bool HasNoWait
Whether the target ... data directive has a nowait clause.
bool isValid()
Return true if the current target data information has valid arrays.
bool HasMapper
Indicate whether any user-defined mapper exists.
An interface to create LLVM-IR for OpenMP directives.
LLVM_ABI InsertPointOrErrorTy createOrderedThreadsSimd(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsThreads)
Generator for 'omp ordered [threads | simd]'.
LLVM_ABI void emitAArch64DeclareSimdFunction(llvm::Function *Fn, unsigned VLENVal, llvm::ArrayRef< DeclareSimdAttrTy > ParamAttrs, DeclareSimdBranch Branch, char ISA, unsigned NarrowestDataSize, bool OutputBecomesInput)
Emit AArch64 vector-function ABI attributes for a declare simd function.
LLVM_ABI Constant * getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize, omp::IdentFlag Flags=omp::IdentFlag(0), unsigned Reserve2Flags=0)
Return an ident_t* encoding the source location SrcLocStr and Flags.
LLVM_ABI FunctionCallee getOrCreateRuntimeFunction(Module &M, omp::RuntimeFunction FnID)
Return the function declaration for the runtime function with FnID.
LLVM_ABI InsertPointOrErrorTy createCancel(const LocationDescription &Loc, Value *IfCondition, omp::Directive CanceledDirective)
Generator for 'omp cancel'.
std::function< Expected< Function * >(StringRef FunctionName)> FunctionGenCallback
Functions used to generate a function with the given name.
LLVM_ABI CallInst * createOMPAllocShared(const LocationDescription &Loc, Value *Size, const Twine &Name=Twine(""))
Create a runtime call for kmpc_alloc_shared.
ReductionGenCBKind
Enum class for the ReductionGen CallBack type to be used.
LLVM_ABI CanonicalLoopInfo * collapseLoops(DebugLoc DL, ArrayRef< CanonicalLoopInfo * > Loops, InsertPointTy ComputeIP)
Collapse a loop nest into a single loop.
LLVM_ABI void createTaskyield(const LocationDescription &Loc)
Generator for 'omp taskyield'.
std::function< Error(InsertPointTy CodeGenIP)> FinalizeCallbackTy
Callback type for variable finalization (think destructors).
LLVM_ABI void emitBranch(BasicBlock *Target)
LLVM_ABI Error emitCancelationCheckImpl(Value *CancelFlag, omp::Directive CanceledDirective)
Generate control flow and cleanup for cancellation.
static LLVM_ABI void writeThreadBoundsForKernel(const Triple &T, Function &Kernel, int32_t LB, int32_t UB)
EvalKind
Enum class for reduction evaluation types scalar, complex and aggregate.
LLVM_ABI void emitTaskwaitImpl(const LocationDescription &Loc)
Generate a taskwait runtime call.
LLVM_ABI Constant * registerTargetRegionFunction(TargetRegionEntryInfo &EntryInfo, Function *OutlinedFunction, StringRef EntryFnName, StringRef EntryFnIDName)
Registers the given function and sets up the attributes of the function. Returns the FunctionID.
LLVM_ABI GlobalVariable * emitKernelExecutionMode(StringRef KernelName, omp::OMPTgtExecModeFlags Mode)
Emit the kernel execution mode.
LLVM_ABI void initialize()
Initialize the internal state, this will put structures types and potentially other helpers into the ...
LLVM_ABI void createTargetDeinit(const LocationDescription &Loc, int32_t TeamsReductionDataSize=0, int32_t TeamsReductionBufferLength=1024)
Create a runtime call for kmpc_target_deinit.
std::function< InsertPointTy(InsertPointTy CodeGenIP, unsigned Index, Value **LHS, Value **RHS, Function *CurFn)> ReductionGenClangCBTy
ReductionGen CallBack for Clang.
LLVM_ABI InsertPointTy createAtomicWrite(const LocationDescription &Loc, AtomicOpValue &X, Value *Expr, AtomicOrdering AO, InsertPointTy AllocaIP)
Emit atomic write for : X = Expr — Only Scalar data types.
LLVM_ABI void loadOffloadInfoMetadata(Module &M)
Loads all the offload entries information from the host IR metadata.
function_ref< MapInfosTy &(InsertPointTy CodeGenIP)> GenMapInfoCallbackTy
Callback type for creating the map infos for the kernel parameters.
LLVM_ABI Error emitOffloadingArrays(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo, TargetDataInfo &Info, CustomMapperCallbackTy CustomMapperCB, bool IsNonContiguous=false, function_ref< void(unsigned int, Value *)> DeviceAddrCB=nullptr)
Emit the arrays used to pass the captures and map information to the offloading runtime library.
LLVM_ABI void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop)
Fully unroll a loop.
function_ref< Error(InsertPointTy CodeGenIP, Value *IndVar)> LoopBodyGenCallbackTy
Callback type for loop body code generation.
LLVM_ABI InsertPointOrErrorTy emitScanReduction(const LocationDescription &Loc, ArrayRef< llvm::OpenMPIRBuilder::ReductionInfo > ReductionInfos, ScanInfo *ScanRedInfo)
This function performs the scan reduction of the values updated in the input phase.
LLVM_ABI void emitFlush(const LocationDescription &Loc)
Generate a flush runtime call.
LLVM_ABI InsertPointOrErrorTy createScope(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsNowait)
Generator for 'omp scope'.
static LLVM_ABI std::pair< int32_t, int32_t > readThreadBoundsForKernel(const Triple &T, Function &Kernel)
}
OpenMPIRBuilderConfig Config
The OpenMPIRBuilder Configuration.
LLVM_ABI CallInst * createOMPInteropDestroy(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause)
Create a runtime call for __tgt_interop_destroy.
std::function< InsertPointOrErrorTy( InsertPointTy CodeGenIP, Value *LHS, Value *RHS, Value *&Res)> ReductionGenCBTy
ReductionGen CallBack for MLIR.
LLVM_ABI void emitUsed(StringRef Name, ArrayRef< llvm::WeakTrackingVH > List)
Emit the llvm.used metadata.
void setConfig(OpenMPIRBuilderConfig C)
LLVM_ABI InsertPointOrErrorTy createSingle(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsNowait, ArrayRef< llvm::Value * > CPVars={}, ArrayRef< llvm::Function * > CPFuncs={})
Generator for 'omp single'.
LLVM_ABI InsertPointOrErrorTy createTeams(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, Value *NumTeamsLower=nullptr, Value *NumTeamsUpper=nullptr, Value *ThreadLimit=nullptr, Value *IfExpr=nullptr)
Generator for #omp teams
std::forward_list< CanonicalLoopInfo > LoopInfos
Collection of owned canonical loop objects that eventually need to be free'd.
LLVM_ABI void createTaskwait(const LocationDescription &Loc)
Generator for 'omp taskwait'.
LLVM_ABI llvm::StructType * getKmpTaskAffinityInfoTy()
Return the LLVM struct type matching runtime kmp_task_affinity_info_t.
LLVM_ABI CanonicalLoopInfo * createLoopSkeleton(DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore, BasicBlock *PostInsertBefore, const Twine &Name={})
Create the control flow structure of a canonical OpenMP loop.
SmallVector< uint64_t, 4 > MapDimArrayTy
std::function< Error(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef< BasicBlock * > DeallocBlocks)> StorableBodyGenCallbackTy
LLVM_ABI std::string createPlatformSpecificName(ArrayRef< StringRef > Parts) const
Create a name using the platform-specific separators.
LLVM_ABI FunctionCallee createDispatchNextFunction(unsigned IVSize, bool IVSigned)
Returns __kmpc_dispatch_next_* runtime function for the specified size IVSize and sign IVSigned.
static LLVM_ABI void getKernelArgsVector(TargetKernelArgs &KernelArgs, IRBuilderBase &Builder, SmallVector< Value * > &ArgsVector)
Create the kernel args vector used by emitTargetKernel.
SmallVector< Constant *, 4 > MapNamesArrayTy
LLVM_ABI InsertPointOrErrorTy createTarget(const LocationDescription &Loc, bool IsOffloadEntry, OpenMPIRBuilder::InsertPointTy AllocaIP, OpenMPIRBuilder::InsertPointTy CodeGenIP, ArrayRef< BasicBlock * > DeallocBlocks, TargetDataInfo &Info, TargetRegionEntryInfo &EntryInfo, const TargetKernelDefaultAttrs &DefaultAttrs, const TargetKernelRuntimeAttrs &RuntimeAttrs, Value *IfCond, SmallVectorImpl< Value * > &Inputs, GenMapInfoCallbackTy GenMapInfoCB, TargetBodyGenCallbackTy BodyGenCB, TargetGenArgAccessorsCallbackTy ArgAccessorFuncCB, CustomMapperCallbackTy CustomMapperCB, const DependenciesInfo &Dependencies={}, bool HasNowait=false, Value *DynCGroupMem=nullptr, omp::OMPDynGroupprivateFallbackType DynCGroupMemFallback=omp::OMPDynGroupprivateFallbackType::Abort)
Generator for 'omp target'.
LLVM_ABI void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop)
Fully or partially unroll a loop.
LLVM_ABI omp::OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position)
Get OMP_MAP_MEMBER_OF flag with extra bits reserved based on the position given.
LLVM_ABI void addAttributes(omp::RuntimeFunction FnID, Function &Fn)
Add attributes known for FnID to Fn.
Module & M
The underlying LLVM-IR module.
StringMap< Constant * > SrcLocStrMap
Map to remember source location strings.
LLVM_ABI void createMapperAllocas(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumOperands, struct MapperAllocas &MapperAllocas)
Create the allocas instruction used in call to mapper functions.
LLVM_ABI Constant * getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize)
Return the (LLVM-IR) string describing the source location LocStr.
LLVM_ABI Error emitTargetRegionFunction(TargetRegionEntryInfo &EntryInfo, FunctionGenCallback &GenerateFunctionCallback, bool IsOffloadEntry, Function *&OutlinedFn, Constant *&OutlinedFnID)
Create a unique name for the entry function using the source location information of the current targ...
LLVM_ABI InsertPointOrErrorTy createIteratorLoop(LocationDescription Loc, llvm::Value *TripCount, IteratorBodyGenTy BodyGen, llvm::StringRef Name="iterator")
Create a canonical iterator loop at the current insertion point.
LLVM_ABI Expected< SmallVector< llvm::CanonicalLoopInfo * > > createCanonicalScanLoops(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, InsertPointTy ComputeIP, const Twine &Name, ScanInfo *ScanRedInfo)
Generator for the control flow structure of an OpenMP canonical loops if the parent directive has an ...
LLVM_ABI FunctionCallee createDispatchFiniFunction(unsigned IVSize, bool IVSigned)
Returns __kmpc_dispatch_fini_* runtime function for the specified size IVSize and sign IVSigned.
function_ref< InsertPointOrErrorTy( InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef< BasicBlock * > DeallocBlocks)> TargetBodyGenCallbackTy
LLVM_ABI void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor, CanonicalLoopInfo **UnrolledCLI)
Partially unroll a loop.
function_ref< Error(Value *DeviceID, Value *RTLoc, IRBuilderBase::InsertPoint TargetTaskAllocaIP)> TargetTaskBodyCallbackTy
Callback type for generating the bodies of device directives that require outer target tasks (e....
Expected< MapInfosTy & > MapInfosOrErrorTy
SmallVector< omp::OpenMPOffloadMappingFlags, 4 > MapFlagsArrayTy
LLVM_ABI void emitTaskyieldImpl(const LocationDescription &Loc)
Generate a taskyield runtime call.
LLVM_ABI void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc, Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg, struct MapperAllocas &MapperAllocas, int64_t DeviceID, unsigned NumOperands)
Create the call for the target mapper function.
LLVM_ABI InsertPointOrErrorTy createDistribute(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< BasicBlock * > DeallocBlocks, BodyGenCallbackTy BodyGenCB)
Generator for #omp distribute
LLVM_ABI InsertPointOrErrorTy createTask(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< BasicBlock * > DeallocBlocks, BodyGenCallbackTy BodyGenCB, bool Tied=true, Value *Final=nullptr, Value *IfCondition=nullptr, const DependenciesInfo &Dependencies={}, const AffinityData &Affinities={}, bool Mergeable=false, Value *EventHandle=nullptr, Value *Priority=nullptr)
Generator for #omp task
function_ref< Expected< Function * >(unsigned int)> CustomMapperCallbackTy
LLVM_ABI InsertPointTy createAtomicCompare(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOpValue &R, Value *E, Value *D, AtomicOrdering AO, omp::OMPAtomicCompareOp Op, bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly)
Emit atomic compare for constructs: — Only scalar data types cond-expr-stmt: x = x ordop expr ?
LLVM_ABI InsertPointTy createOrderedDepend(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumLoops, ArrayRef< llvm::Value * > StoreValues, const Twine &Name, bool IsDependSource)
Generator for 'omp ordered depend (source | sink)'.
LLVM_ABI InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, llvm::IntegerType *IntPtrTy, bool BranchtoEnd=true)
Generate conditional branch and relevant BasicBlocks through which private threads copy the 'copyin' ...
SmallVector< MapValuesArrayTy, 4 > MapNonContiguousArrayTy
function_ref< InsertPointOrErrorTy( InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original, Value &Inner, Value *&ReplVal)> PrivatizeCallbackTy
Callback type for variable privatization (think copy & default constructor).
LLVM_ABI bool isFinalized()
Check whether the finalize function has already run.
SmallVector< DeviceInfoTy, 4 > MapDeviceInfoArrayTy
SmallVector< FinalizationInfo, 8 > FinalizationStack
The finalization stack made up of finalize callbacks currently in-flight, wrapped into FinalizationIn...
LLVM_ABI std::vector< CanonicalLoopInfo * > tileLoops(DebugLoc DL, ArrayRef< CanonicalLoopInfo * > Loops, ArrayRef< Value * > TileSizes)
Tile a loop nest.
LLVM_ABI CallInst * createOMPInteropInit(const LocationDescription &Loc, Value *InteropVar, omp::OMPInteropType InteropType, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause)
Create a runtime call for __tgt_interop_init.
LLVM_ABI Error emitIfClause(Value *Cond, BodyGenCallbackTy ThenGen, BodyGenCallbackTy ElseGen, InsertPointTy AllocaIP={}, ArrayRef< BasicBlock * > DeallocBlocks={})
Emits code for OpenMP 'if' clause using specified BodyGenCallbackTy Here is the logic: if (Cond) { Th...
LLVM_ABI Function * getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID)
std::function< InsertPointOrErrorTy( InsertPointTy, Value *ByRefVal, Value *&Res)> ReductionGenDataPtrPtrCBTy
void addOutlineInfo(std::unique_ptr< OutlineInfo > &&OI)
Add a new region that will be outlined later.
LLVM_ABI InsertPointTy createTargetInit(const LocationDescription &Loc, const llvm::OpenMPIRBuilder::TargetKernelDefaultAttrs &Attrs)
The omp target interface.
LLVM_ABI InsertPointOrErrorTy createReductions(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< ReductionInfo > ReductionInfos, ArrayRef< bool > IsByRef, bool IsNoWait=false, bool IsTeamsReduction=false)
Generator for 'omp reduction'.
const Triple T
The target triple of the underlying module.
DenseMap< std::pair< Constant *, uint64_t >, Constant * > IdentMap
Map to remember existing ident_t*.
LLVM_ABI CallInst * createOMPFree(const LocationDescription &Loc, Value *Addr, Value *Allocator, std::string Name="")
Create a runtime call for kmpc_free.
LLVM_ABI FunctionCallee createForStaticInitFunction(unsigned IVSize, bool IVSigned, bool IsGPUDistribute)
Returns __kmpc_for_static_init_* runtime function for the specified size IVSize and sign IVSigned.
LLVM_ABI CallInst * createOMPAlloc(const LocationDescription &Loc, Value *Size, Value *Allocator, std::string Name="")
Create a runtime call for kmpc_alloc.
LLVM_ABI void emitNonContiguousDescriptor(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo, TargetDataInfo &Info)
Emit an array of struct descriptors to be assigned to the offload args.
SmallVector< Value *, 4 > MapValuesArrayTy
LLVM_ABI InsertPointOrErrorTy createSection(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB)
Generator for 'omp section'.
LLVM_ABI InsertPointOrErrorTy createTaskgroup(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< BasicBlock * > DeallocBlocks, BodyGenCallbackTy BodyGenCB)
Generator for the taskgroup construct.
LLVM_ABI InsertPointOrErrorTy createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< BasicBlock * > DeallocBlocks, BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, omp::ProcBindKind ProcBind, bool IsCancellable)
Generator for 'omp parallel'.
function_ref< InsertPointOrErrorTy(InsertPointTy)> EmitFallbackCallbackTy
Callback function type for functions emitting the host fallback code that is executed when the kernel...
static LLVM_ABI TargetRegionEntryInfo getTargetEntryUniqueInfo(FileIdentifierInfoCallbackTy CallBack, vfs::FileSystem &VFS, StringRef ParentName="")
Creates a unique info for a target entry when provided a filename and line number from.
LLVM_ABI void emitTaskDependency(IRBuilderBase &Builder, Value *Entry, const DependData &Dep)
Store one kmp_depend_info entry at the given Entry pointer.
LLVM_ABI void emitBlock(BasicBlock *BB, Function *CurFn, bool IsFinished=false)
LLVM_ABI Value * getOrCreateThreadID(Value *Ident)
Return the current thread ID.
LLVM_ABI InsertPointOrErrorTy createMaster(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB)
Generator for 'omp master'.
void pushFinalizationCB(const FinalizationInfo &FI)
Push a finalization callback on the finalization stack.
LLVM_ABI InsertPointOrErrorTy createTargetData(const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef< BasicBlock * > DeallocBlocks, Value *DeviceID, Value *IfCond, TargetDataInfo &Info, GenMapInfoCallbackTy GenMapInfoCB, CustomMapperCallbackTy CustomMapperCB, omp::RuntimeFunction *MapperFunc=nullptr, function_ref< InsertPointOrErrorTy(InsertPointTy CodeGenIP, BodyGenTy BodyGenType)> BodyGenCB=nullptr, function_ref< void(unsigned int, Value *)> DeviceAddrCB=nullptr, Value *SrcLocInfo=nullptr)
Generator for 'omp target data'.
CallInst * createRuntimeFunctionCall(FunctionCallee Callee, ArrayRef< Value * > Args, StringRef Name="")
LLVM_ABI InsertPointOrErrorTy emitKernelLaunch(const LocationDescription &Loc, Value *OutlinedFnID, EmitFallbackCallbackTy EmitTargetCallFallbackCB, TargetKernelArgs &Args, Value *DeviceID, Value *RTLoc, InsertPointTy AllocaIP)
Generate a target region entry call and host fallback call.
InsertPointTy getInsertionPoint()
}
StringMap< GlobalVariable *, BumpPtrAllocator > InternalVars
An ordered map of auto-generated variables to their unique names.
LLVM_ABI InsertPointOrErrorTy createCancellationPoint(const LocationDescription &Loc, omp::Directive CanceledDirective)
Generator for 'omp cancellation point'.
LLVM_ABI CallInst * createOMPAlignedAlloc(const LocationDescription &Loc, Value *Align, Value *Size, Value *Allocator, std::string Name="")
Create a runtime call for kmpc_align_alloc.
LLVM_ABI FunctionCallee createDispatchInitFunction(unsigned IVSize, bool IVSigned)
Returns __kmpc_dispatch_init_* runtime function for the specified size IVSize and sign IVSigned.
LLVM_ABI InsertPointOrErrorTy createScan(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< llvm::Value * > ScanVars, ArrayRef< llvm::Type * > ScanVarsType, bool IsInclusive, ScanInfo *ScanRedInfo)
This directive split and directs the control flow to input phase blocks or scan phase blocks based on...
LLVM_ABI CallInst * createOMPFreeShared(const LocationDescription &Loc, Value *Addr, Value *Size, const Twine &Name=Twine(""))
Create a runtime call for kmpc_free_shared.
LLVM_ABI CallInst * createOMPInteropUse(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause)
Create a runtime call for __tgt_interop_use.
IRBuilder<>::InsertPoint InsertPointTy
Type used throughout for insertion points.
LLVM_ABI GlobalVariable * getOrCreateInternalVariable(Type *Ty, const StringRef &Name, std::optional< unsigned > AddressSpace={})
Gets (if variable with the given name already exist) or creates internal global variable with the spe...
LLVM_ABI GlobalVariable * createOffloadMapnames(SmallVectorImpl< llvm::Constant * > &Names, std::string VarName)
Create the global variable holding the offload names information.
std::forward_list< ScanInfo > ScanInfos
Collection of owned ScanInfo objects that eventually need to be free'd.
static LLVM_ABI void writeTeamsForKernel(const Triple &T, Function &Kernel, int32_t LB, int32_t UB)
std::function< InsertPointOrErrorTy( InsertPointTy, Type *, Value *, Value *)> ReductionGenAtomicCBTy
Functions used to generate atomic reductions.
LLVM_ABI Value * calculateCanonicalLoopTripCount(const LocationDescription &Loc, Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, const Twine &Name="loop")
Calculate the trip count of a canonical loop.
DeclareSimdKindTy
Kind of parameter in a function with 'declare simd' directive.
LLVM_ABI InsertPointOrErrorTy createBarrier(const LocationDescription &Loc, omp::Directive Kind, bool ForceSimpleCall=false, bool CheckCancelFlag=true)
Emitter methods for OpenMP directives.
LLVM_ABI void setCorrectMemberOfFlag(omp::OpenMPOffloadMappingFlags &Flags, omp::OpenMPOffloadMappingFlags MemberOfFlag)
Given an initial flag set, this function modifies it to contain the passed in MemberOfFlag generated ...
LLVM_ABI Error emitOffloadingArraysAndArgs(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, TargetDataInfo &Info, TargetDataRTArgs &RTArgs, MapInfosTy &CombinedInfo, CustomMapperCallbackTy CustomMapperCB, bool IsNonContiguous=false, bool ForEndCall=false, function_ref< void(unsigned int, Value *)> DeviceAddrCB=nullptr)
Allocates memory for and populates the arrays required for offloading (offload_{baseptrs|ptrs|mappers...
LLVM_ABI Constant * getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize)
Return the (LLVM-IR) string describing the default source location.
LLVM_ABI InsertPointOrErrorTy createCritical(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst)
Generator for 'omp critical'.
LLVM_ABI void createOffloadEntry(Constant *ID, Constant *Addr, uint64_t Size, int32_t Flags, GlobalValue::LinkageTypes, StringRef Name="")
Creates offloading entry for the provided entry ID ID, address Addr, size Size, and flags Flags.
static LLVM_ABI unsigned getOpenMPDefaultSimdAlign(const Triple &TargetTriple, const StringMap< bool > &Features)
Get the default alignment value for given target.
LLVM_ABI unsigned getFlagMemberOffset()
Get the offset of the OMP_MAP_MEMBER_OF field.
LLVM_ABI InsertPointOrErrorTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, bool NeedsBarrier, llvm::omp::ScheduleKind SchedKind=llvm::omp::OMP_SCHEDULE_Default, Value *ChunkSize=nullptr, bool HasSimdModifier=false, bool HasMonotonicModifier=false, bool HasNonmonotonicModifier=false, bool HasOrderedClause=false, omp::WorksharingLoopType LoopType=omp::WorksharingLoopType::ForStaticLoop, bool NoLoop=false, bool HasDistSchedule=false, Value *DistScheduleChunkSize=nullptr)
Modifies the canonical loop to be a workshare loop.
LLVM_ABI InsertPointOrErrorTy createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, AtomicOpValue &V, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr, bool IsIgnoreDenormalMode=false, bool IsFineGrainedMemory=false, bool IsRemoteMemory=false)
Emit atomic update for constructs: — Only Scalar data types V = X; X = X BinOp Expr ,...
LLVM_ABI void createOffloadEntriesAndInfoMetadata(EmitMetadataErrorReportFunctionTy &ErrorReportFunction)
LLVM_ABI void applySimd(CanonicalLoopInfo *Loop, MapVector< Value *, Value * > AlignedVars, Value *IfCond, omp::OrderKind Order, ConstantInt *Simdlen, ConstantInt *Safelen)
Add metadata to simd-ize a loop.
SmallVector< std::unique_ptr< OutlineInfo >, 16 > OutlineInfos
Collection of regions that need to be outlined during finalization.
LLVM_ABI InsertPointOrErrorTy createAtomicUpdate(const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr, bool IsIgnoreDenormalMode=false, bool IsFineGrainedMemory=false, bool IsRemoteMemory=false)
Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X For complex Operations: X = ...
std::function< std::tuple< std::string, uint64_t >()> FileIdentifierInfoCallbackTy
bool isLastFinalizationInfoCancellable(omp::Directive DK)
Return true if the last entry in the finalization stack is of kind DK and cancellable.
LLVM_ABI InsertPointTy emitTargetKernel(const LocationDescription &Loc, InsertPointTy AllocaIP, Value *&Return, Value *Ident, Value *DeviceID, Value *NumTeams, Value *NumThreads, Value *HostPtr, ArrayRef< Value * > KernelArgs)
Generate a target region entry call.
LLVM_ABI GlobalVariable * createOffloadMaptypes(SmallVectorImpl< uint64_t > &Mappings, std::string VarName)
Create the global variable holding the offload mappings information.
LLVM_ABI CallInst * createCachedThreadPrivate(const LocationDescription &Loc, llvm::Value *Pointer, llvm::ConstantInt *Size, const llvm::Twine &Name=Twine(""))
Create a runtime call for kmpc_threadprivate_cached.
IRBuilder Builder
The LLVM-IR Builder used to create IR.
LLVM_ABI GlobalValue * createGlobalFlag(unsigned Value, StringRef Name)
Create a hidden global flag Name in the module with initial value Value.
LLVM_ABI void emitOffloadingArraysArgument(IRBuilderBase &Builder, OpenMPIRBuilder::TargetDataRTArgs &RTArgs, OpenMPIRBuilder::TargetDataInfo &Info, bool ForEndCall=false)
Emit the arguments to be passed to the runtime library based on the arrays of base pointers,...
LLVM_ABI InsertPointOrErrorTy createMasked(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, Value *Filter)
Generator for 'omp masked'.
LLVM_ABI Expected< CanonicalLoopInfo * > createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *TripCount, const Twine &Name="loop")
Generator for the control flow structure of an OpenMP canonical loop.
function_ref< Expected< InsertPointTy >( InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value *DestPtr, Value *SrcPtr)> TaskDupCallbackTy
Callback type for task duplication function code generation.
LLVM_ABI Value * getSizeInBytes(Value *BasePtr)
Computes the size of type in bytes.
llvm::function_ref< llvm::Error( InsertPointTy BodyIP, llvm::Value *LinearIV)> IteratorBodyGenTy
OpenMPIRBuilder(Module &M)
Create a new OpenMPIRBuilder operating on the given module M.
LLVM_ABI InsertPointOrErrorTy createReductionsGPU(const LocationDescription &Loc, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef< ReductionInfo > ReductionInfos, ArrayRef< bool > IsByRef, bool IsNoWait=false, bool IsTeamsReduction=false, ReductionGenCBKind ReductionGenCBKind=ReductionGenCBKind::MLIR, std::optional< omp::GV > GridValue={}, unsigned ReductionBufNum=1024, Value *SrcLocInfo=nullptr)
Design of OpenMP reductions on the GPU.
LLVM_ABI Expected< Function * > emitUserDefinedMapper(function_ref< MapInfosOrErrorTy(InsertPointTy CodeGenIP, llvm::Value *PtrPHI, llvm::Value *BeginArg)> PrivAndGenMapInfoCB, llvm::Type *ElemTy, StringRef FuncName, CustomMapperCallbackTy CustomMapperCB)
Emit the user-defined mapper function.
LLVM_ABI FunctionCallee createDispatchDeinitFunction()
Returns __kmpc_dispatch_deinit runtime function.
LLVM_ABI void registerTargetGlobalVariable(OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind CaptureClause, OffloadEntriesInfoManager::OMPTargetDeviceClauseKind DeviceClause, bool IsDeclaration, bool IsExternallyVisible, TargetRegionEntryInfo EntryInfo, StringRef MangledName, std::vector< GlobalVariable * > &GeneratedRefs, bool OpenMPSIMD, std::vector< Triple > TargetTriple, std::function< Constant *()> GlobalInitializer, std::function< GlobalValue::LinkageTypes()> VariableLinkage, Type *LlvmPtrTy, Constant *Addr)
Registers a target variable for device or host.
BodyGenTy
Type of BodyGen to use for region codegen.
LLVM_ABI CanonicalLoopInfo * fuseLoops(DebugLoc DL, ArrayRef< CanonicalLoopInfo * > Loops)
Fuse a sequence of loops.
LLVM_ABI void emitX86DeclareSimdFunction(llvm::Function *Fn, unsigned NumElements, const llvm::APSInt &VLENVal, llvm::ArrayRef< DeclareSimdAttrTy > ParamAttrs, DeclareSimdBranch Branch)
Emit x86 vector-function ABI attributes for a declare simd function.
SmallVector< llvm::Function *, 16 > ConstantAllocaRaiseCandidates
A collection of candidate target functions whose constant allocas will attempt to be raised on a cal...
OffloadEntriesInfoManager OffloadInfoManager
Info manager to keep track of target regions.
static LLVM_ABI std::pair< int32_t, int32_t > readTeamBoundsForKernel(const Triple &T, Function &Kernel)
Read/write the bounds on teams for Kernel.
const std::string ompOffloadInfoName
OMP Offload Info Metadata name string.
Expected< InsertPointTy > InsertPointOrErrorTy
Type used to represent an insertion point or an error value.
LLVM_ABI InsertPointTy createCopyPrivate(const LocationDescription &Loc, llvm::Value *BufSize, llvm::Value *CpyBuf, llvm::Value *CpyFn, llvm::Value *DidIt)
Generator for __kmpc_copyprivate.
void popFinalizationCB()
Pop the last finalization callback from the finalization stack.
LLVM_ABI InsertPointOrErrorTy createSections(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef< StorableBodyGenCallbackTy > SectionCBs, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait)
Generator for 'omp sections'.
std::function< void(EmitMetadataErrorKind, TargetRegionEntryInfo)> EmitMetadataErrorReportFunctionTy
Callback function type.
function_ref< InsertPointOrErrorTy( Argument &Arg, Value *Input, Value *&RetVal, InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef< InsertPointTy > DeallocIPs)> TargetGenArgAccessorsCallbackTy
LLVM_ABI Expected< ScanInfo * > scanInfoInitialize()
Creates a ScanInfo object, allocates and returns the pointer.
LLVM_ABI InsertPointOrErrorTy emitTargetTask(TargetTaskBodyCallbackTy TaskBodyCB, Value *DeviceID, Value *RTLoc, OpenMPIRBuilder::InsertPointTy AllocaIP, const DependenciesInfo &Dependencies, const TargetDataRTArgs &RTArgs, bool HasNoWait)
Generate a target-task for the target construct.
LLVM_ABI InsertPointTy createAtomicRead(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOrdering AO, InsertPointTy AllocaIP)
Emit atomic Read for : V = X — Only Scalar data types.
function_ref< Error(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, ArrayRef< BasicBlock * > DeallocBlocks)> BodyGenCallbackTy
Callback type for body (=inner region) code generation.
bool updateToLocation(const LocationDescription &Loc)
Update the internal location to Loc.
LLVM_ABI void createFlush(const LocationDescription &Loc)
Generator for 'omp flush'.
LLVM_ABI Constant * getAddrOfDeclareTargetVar(OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind CaptureClause, OffloadEntriesInfoManager::OMPTargetDeviceClauseKind DeviceClause, bool IsDeclaration, bool IsExternallyVisible, TargetRegionEntryInfo EntryInfo, StringRef MangledName, std::vector< GlobalVariable * > &GeneratedRefs, bool OpenMPSIMD, std::vector< Triple > TargetTriple, Type *LlvmPtrTy, std::function< Constant *()> GlobalInitializer, std::function< GlobalValue::LinkageTypes()> VariableLinkage)
Retrieve (or create if non-existent) the address of a declare target variable, used in conjunction wi...
EmitMetadataErrorKind
The kind of errors that can occur when emitting the offload entries and metadata.
ScanInfo holds the information to assist in lowering of Scan reduction.
llvm::SmallDenseMap< llvm::Value *, llvm::Value * > * ScanBuffPtrs
Maps the private reduction variable to the pointer of the temporary buffer.
llvm::BasicBlock * OMPScanLoopExit
Exit block of loop body.
llvm::Value * IV
Keeps track of value of iteration variable for input/scan loop to be used for Scan directive lowering...
llvm::BasicBlock * OMPAfterScanBlock
Dominates the body of the loop before scan directive.
llvm::BasicBlock * OMPScanInit
Block before loop body where scan initializations are done.
llvm::BasicBlock * OMPBeforeScanBlock
Dominates the body of the loop before scan directive.
llvm::BasicBlock * OMPScanFinish
Block after loop body where scan finalizations are done.
ScanInfo & operator=(const ScanInfo &)=delete
llvm::Value * Span
Stores the span of canonical loop being lowered to be used for temporary buffer allocation or Finaliz...
bool OMPFirstScanLoop
If true, it indicates Input phase is lowered; else it indicates ScanPhase is lowered.
ScanInfo(ScanInfo &)=delete
llvm::BasicBlock * OMPScanDispatch
Controls the flow to before or after scan blocks.
A vector that has set insertion semantics.
Definition SetVector.h:57
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
Definition StringMap.h:133
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
Value * getOperand(unsigned i) const
Definition User.h:207
See the file comment.
Definition ValueMap.h:84
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:393
Value handle that is nullable, but tries to track the Value.
An efficient, type-erasing, non-owning reference to a callable.
The virtual file system interface.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
OpenMPOffloadMappingFlags
Values for bit flags used to specify the mapping type for offloading.
IdentFlag
IDs for all omp runtime library ident_t flag encodings (see their definition in openmp/runtime/src/kmp...
RTLDependenceKindTy
Dependence kind for RTL.
RuntimeFunction
IDs for all omp runtime library (RTL) functions.
OMPDynGroupprivateFallbackType
The fallback types for the dyn_groupprivate clause.
WorksharingLoopType
A type of worksharing loop construct.
OMPAtomicCompareOp
Atomic compare operations. Currently OpenMP only supports ==, >, and <.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI BasicBlock * splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch, llvm::Twine Suffix=".split")
Like splitBB, but reuses the current block's name for the new name.
@ Offset
Definition DWP.cpp:557
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI BasicBlock * splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch, DebugLoc DL, llvm::Twine Name={})
Split a BasicBlock at an InsertPoint, even if the block is degenerate (missing the terminator).
auto cast_or_null(const Y &Val)
Definition Casting.h:714
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AtomicOrdering
Atomic ordering for LLVM's memory model.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1916
LLVM_ABI void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New, bool CreateBranch, DebugLoc DL)
Move the instruction after an InsertPoint to the beginning of another BasicBlock.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
A struct to pack the relevant information for an OpenMP affinity clause.
A struct to pack relevant information while generating atomic Ops
Attribute set of the declare simd parameter.
DependData(omp::RTLDependenceKindTy DepKind, Type *DepValueType, Value *DepVal)
omp::RTLDependenceKindTy DepKind
A struct to pack static and dynamic dependency information for a task.
DependenciesInfo(SmallVector< DependData > D)
const omp::Directive DK
The directive kind of the innermost directive that has an associated region which might require final...
const bool IsCancellable
Flag to indicate if the directive is cancellable.
Error mergeFiniBB(IRBuilderBase &Builder, BasicBlock *ExistingFiniBB)
For cases where there is an unavoidable existing finalization block (e.g.
FinalizationInfo(FinalizeCallbackTy FiniCB, omp::Directive DK, bool IsCancellable)
Expected< BasicBlock * > getFiniBB(IRBuilderBase &Builder)
The basic block to which control should be transferred to implement the FiniCB.
Description of a LLVM-IR insertion point (IP) and a debug/source location (filename,...
LocationDescription(const InsertPointTy &IP)
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
LocationDescription(const IRBuilderBase &IRB)
This structure contains combined information generated for mappable clauses, including base pointers,...
void append(MapInfosTy &CurInfo)
Append arrays in CurInfo.
MapDeviceInfoArrayTy DevicePointers
StructNonContiguousInfo NonContigInfo
Helper that contains information about regions we need to outline during finalization.
LLVM_ABI void collectBlocks(SmallPtrSetImpl< BasicBlock * > &BlockSet, SmallVectorImpl< BasicBlock * > &BlockVector)
Collect all blocks in between EntryBB and ExitBB in both the given vector and set.
Function * getFunction() const
Return the function that contains the region to be outlined.
SmallVector< Value *, 2 > ExcludeArgsFromAggregate
virtual LLVM_ABI ~OutlineInfo()=default
virtual LLVM_ABI std::unique_ptr< CodeExtractor > createCodeExtractor(ArrayRef< BasicBlock * > Blocks, bool ArgsInZeroAddressSpace, Twine Suffix=Twine(""))
Create a CodeExtractor instance based on the information stored in this structure,...
std::function< void(Function &)> PostOutlineCBTy
SmallVector< BasicBlock * > OuterDeallocBBs
EvalKind EvaluationKind
Reduction evaluation kind - scalar, complex or aggregate.
ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable, EvalKind EvaluationKind, ReductionGenCBTy ReductionGen, ReductionGenClangCBTy ReductionGenClang, ReductionGenAtomicCBTy AtomicReductionGen, ReductionGenDataPtrPtrCBTy DataPtrPtrGen, Type *ByRefAllocatedType=nullptr, Type *ByRefElementType=nullptr)
ReductionGenAtomicCBTy AtomicReductionGen
Callback for generating the atomic reduction body, may be null.
ReductionGenCBTy ReductionGen
Callback for generating the reduction body.
ReductionInfo(Value *PrivateVariable)
Type * ByRefAllocatedType
For by-ref reductions, we need to keep track of 2 extra types that are potentially different:
Value * Variable
Reduction variable of pointer type.
Value * PrivateVariable
Thread-private partial reduction variable.
ReductionGenClangCBTy ReductionGenClang
Clang callback for generating the reduction body.
Type * ElementType
Reduction element type, must match pointee type of variable.
ReductionGenDataPtrPtrCBTy DataPtrPtrGen
Container for the arguments used to pass data to the runtime library.
Value * SizesArray
The array of sizes passed to the runtime library.
TargetDataRTArgs(Value *BasePointersArray, Value *PointersArray, Value *SizesArray, Value *MapTypesArray, Value *MapTypesArrayEnd, Value *MappersArray, Value *MapNamesArray)
Value * PointersArray
The array of section pointers passed to the runtime library.
Value * MappersArray
The array of user-defined mappers passed to the runtime library.
Value * MapTypesArrayEnd
The array of map types passed to the runtime library for the end of the region, or nullptr if there a...
Value * BasePointersArray
The array of base pointer passed to the runtime library.
Value * MapTypesArray
The array of map types passed to the runtime library for the beginning of the region or for the entir...
Value * MapNamesArray
The array of original declaration names of mapped pointers sent to the runtime library for debugging.
Data structure that contains the needed information to construct the kernel args vector.
ArrayRef< Value * > NumThreads
The number of threads.
TargetDataRTArgs RTArgs
Arguments passed to the runtime library.
Value * NumIterations
The number of iterations.
Value * DynCGroupMem
The size of the dynamic shared memory.
TargetKernelArgs(unsigned NumTargetItems, TargetDataRTArgs RTArgs, Value *NumIterations, ArrayRef< Value * > NumTeams, ArrayRef< Value * > NumThreads, Value *DynCGroupMem, bool HasNoWait, omp::OMPDynGroupprivateFallbackType DynCGroupMemFallback)
unsigned NumTargetItems
Number of arguments passed to the runtime library.
bool HasNoWait
True if the kernel has 'no wait' clause.
ArrayRef< Value * > NumTeams
The number of teams.
omp::OMPDynGroupprivateFallbackType DynCGroupMemFallback
The fallback mechanism for the shared memory.
Container to pass the default attributes with which a kernel must be launched, used to set kernel att...
Container to pass LLVM IR runtime values or constants related to the number of teams and threads with...
Value * DeviceID
Device ID value used in the kernel launch.
Value * MaxThreads
'parallel' construct 'num_threads' clause value, if present and it is an SPMD kernel.
Value * LoopTripCount
Total number of iterations of the SPMD or Generic-SPMD kernel or null if it is a generic kernel.
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276
Data structure to contain the information needed to uniquely identify a target entry.
static LLVM_ABI void getTargetRegionEntryFnName(SmallVectorImpl< char > &Name, StringRef ParentName, unsigned DeviceID, unsigned FileID, unsigned Line, unsigned Count)
static constexpr const char * KernelNamePrefix
The prefix used for kernel names.
bool operator<(const TargetRegionEntryInfo &RHS) const
TargetRegionEntryInfo(StringRef ParentName, unsigned DeviceID, unsigned FileID, unsigned Line, unsigned Count=0)
Defines various target-specific GPU grid values that must be consistent between host RTL (plugin),...