//===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the OpenMPIRBuilder class, which is used as a
/// convenient way to create LLVM instructions for OpenMP directives.
///
//===----------------------------------------------------------------------===//

#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CodeExtractor.h"

#include <cstdint>
#include <optional>
#include <stack>

#define DEBUG_TYPE "openmp-ir-builder"

using namespace llvm;
using namespace omp;

static cl::opt<bool>
    OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden,
                         cl::desc("Use optimistic attributes describing "
                                  "'as-if' properties of runtime calls."),
                         cl::init(false));

static cl::opt<double> UnrollThresholdFactor(
    "openmp-ir-builder-unroll-threshold-factor", cl::Hidden,
    cl::desc("Factor for the unroll threshold to account for code "
             "simplifications still taking place"),
    cl::init(1.5));

#ifndef NDEBUG
/// Return whether IP1 and IP2 are ambiguous, i.e. whether inserting
/// instructions at position IP1 may change the meaning of IP2 or vice-versa.
/// This is because an InsertPoint stores the instruction before something is
/// inserted. For instance, if both point to the same instruction, two
/// IRBuilders alternately creating instructions will cause them to be
/// interleaved.
static bool isConflictIP(IRBuilder<>::InsertPoint IP1,
                         IRBuilder<>::InsertPoint IP2) {
  if (!IP1.isSet() || !IP2.isSet())
    return false;
  return IP1.getBlock() == IP2.getBlock() && IP1.getPoint() == IP2.getPoint();
}
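
/// Return whether \p SchedType encodes a valid combination of base schedule
/// algorithm, ordering, and monotonicity modifiers for a worksharing loop.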
static bool isValidWorkshareLoopScheduleType(OMPScheduleType SchedType) {
  // Valid ordered/unordered and base algorithm combinations.
  switch (SchedType & ~OMPScheduleType::MonotonicityMask) {
  case OMPScheduleType::UnorderedStaticChunked:
  case OMPScheduleType::UnorderedStatic:
  case OMPScheduleType::UnorderedDynamicChunked:
  case OMPScheduleType::UnorderedGuidedChunked:
  case OMPScheduleType::UnorderedRuntime:
  case OMPScheduleType::UnorderedAuto:
  case OMPScheduleType::UnorderedTrapezoidal:
  case OMPScheduleType::UnorderedGreedy:
  case OMPScheduleType::UnorderedBalanced:
  case OMPScheduleType::UnorderedGuidedIterativeChunked:
  case OMPScheduleType::UnorderedGuidedAnalyticalChunked:
  case OMPScheduleType::UnorderedSteal:
  case OMPScheduleType::UnorderedStaticBalancedChunked:
  case OMPScheduleType::UnorderedGuidedSimd:
  case OMPScheduleType::UnorderedRuntimeSimd:
  case OMPScheduleType::OrderedStaticChunked:
  case OMPScheduleType::OrderedStatic:
  case OMPScheduleType::OrderedDynamicChunked:
  case OMPScheduleType::OrderedGuidedChunked:
  case OMPScheduleType::OrderedRuntime:
  case OMPScheduleType::OrderedAuto:
  case OMPScheduleType::OrderdTrapezoidal:
  case OMPScheduleType::NomergeUnorderedStaticChunked:
  case OMPScheduleType::NomergeUnorderedStatic:
  case OMPScheduleType::NomergeUnorderedDynamicChunked:
  case OMPScheduleType::NomergeUnorderedGuidedChunked:
  case OMPScheduleType::NomergeUnorderedRuntime:
  case OMPScheduleType::NomergeUnorderedAuto:
  case OMPScheduleType::NomergeUnorderedTrapezoidal:
  case OMPScheduleType::NomergeUnorderedGreedy:
  case OMPScheduleType::NomergeUnorderedBalanced:
  case OMPScheduleType::NomergeUnorderedGuidedIterativeChunked:
  case OMPScheduleType::NomergeUnorderedGuidedAnalyticalChunked:
  case OMPScheduleType::NomergeUnorderedSteal:
  case OMPScheduleType::NomergeOrderedStaticChunked:
  case OMPScheduleType::NomergeOrderedStatic:
  case OMPScheduleType::NomergeOrderedDynamicChunked:
  case OMPScheduleType::NomergeOrderedGuidedChunked:
  case OMPScheduleType::NomergeOrderedRuntime:
  case OMPScheduleType::NomergeOrderedAuto:
  case OMPScheduleType::NomergeOrderedTrapezoidal:
    break;
  default:
    return false;
  }

  // Must not set both monotonicity modifiers at the same time.
  OMPScheduleType MonotonicityFlags =
      SchedType & OMPScheduleType::MonotonicityMask;
  if (MonotonicityFlags == OMPScheduleType::MonotonicityMask)
    return false;

  return true;
}
#endif
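
/// Return the GPU grid-value constants for the target \p T, selecting the
/// wavefront-size-64 variant on AMDGPU when \p Kernel's target-features
/// request it.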
static const omp::GV &getGridValue(const Triple &T, Function *Kernel) {
  if (T.isAMDGPU()) {
    StringRef Features =
        Kernel->getFnAttribute("target-features").getValueAsString();
    if (Features.count("+wavefrontsize64"))
      return omp::getAMDGPUGridValues<64>();
    return omp::getAMDGPUGridValues<32>();
  }
  if (T.isNVPTX())
    return omp::NVPTXGridValues;
  llvm_unreachable("No grid value available for this architecture!");
}

/// Determine which scheduling algorithm to use from the schedule clause
/// arguments.
static OMPScheduleType
getOpenMPBaseScheduleType(llvm::omp::ScheduleKind ClauseKind, bool HasChunks,
                          bool HasSimdModifier) {
  // Currently, the default schedule is static.
  switch (ClauseKind) {
  case OMP_SCHEDULE_Default:
  case OMP_SCHEDULE_Static:
    return HasChunks ? OMPScheduleType::BaseStaticChunked
                     : OMPScheduleType::BaseStatic;
  case OMP_SCHEDULE_Dynamic:
    return OMPScheduleType::BaseDynamicChunked;
  case OMP_SCHEDULE_Guided:
    return HasSimdModifier ? OMPScheduleType::BaseGuidedSimd
                           : OMPScheduleType::BaseGuidedChunked;
  case OMP_SCHEDULE_Auto:
    return OMPScheduleType::BaseAuto;
  case OMP_SCHEDULE_Runtime:
    return HasSimdModifier ? OMPScheduleType::BaseRuntimeSimd
                           : OMPScheduleType::BaseRuntime;
  }
  llvm_unreachable("unhandled schedule clause argument");
}

/// Adds ordering modifier flags to schedule type.
static OMPScheduleType
getOpenMPOrderingScheduleType(OMPScheduleType BaseScheduleType,
                              bool HasOrderedClause) {
  assert((BaseScheduleType & OMPScheduleType::ModifierMask) ==
             OMPScheduleType::None &&
         "Must not have ordering nor monotonicity flags already set");

  OMPScheduleType OrderingModifier = HasOrderedClause
                                         ? OMPScheduleType::ModifierOrdered
                                         : OMPScheduleType::ModifierUnordered;
  OMPScheduleType OrderingScheduleType = BaseScheduleType | OrderingModifier;

  // Unsupported combinations
  if (OrderingScheduleType ==
      (OMPScheduleType::BaseGuidedSimd | OMPScheduleType::ModifierOrdered))
    return OMPScheduleType::OrderedGuidedChunked;
  else if (OrderingScheduleType == (OMPScheduleType::BaseRuntimeSimd |
                                    OMPScheduleType::ModifierOrdered))
    return OMPScheduleType::OrderedRuntime;

  return OrderingScheduleType;
}

/// Adds monotonicity modifier flags to schedule type.
static OMPScheduleType
getOpenMPMonotonicityScheduleType(OMPScheduleType ScheduleType,
                                  bool HasSimdModifier, bool HasMonotonic,
                                  bool HasNonmonotonic, bool HasOrderedClause) {
  assert((ScheduleType & OMPScheduleType::MonotonicityMask) ==
             OMPScheduleType::None &&
         "Must not have monotonicity flags already set");
  assert((!HasMonotonic || !HasNonmonotonic) &&
         "Monotonic and Nonmonotonic are contradicting each other");

  if (HasMonotonic) {
    return ScheduleType | OMPScheduleType::ModifierMonotonic;
  } else if (HasNonmonotonic) {
    return ScheduleType | OMPScheduleType::ModifierNonmonotonic;
  } else {
    // OpenMP 5.1, 2.11.4 Worksharing-Loop Construct, Description.
    // If the static schedule kind is specified or if the ordered clause is
    // specified, and if the nonmonotonic modifier is not specified, the
    // effect is as if the monotonic modifier is specified. Otherwise, unless
    // the monotonic modifier is specified, the effect is as if the
    // nonmonotonic modifier is specified.
    OMPScheduleType BaseScheduleType =
        ScheduleType & ~OMPScheduleType::ModifierMask;
    if ((BaseScheduleType == OMPScheduleType::BaseStatic) ||
        (BaseScheduleType == OMPScheduleType::BaseStaticChunked) ||
        HasOrderedClause) {
      // The monotonic modifier is the default in the OpenMP runtime library,
      // so there is no need to set it.
      return ScheduleType;
    } else {
      return ScheduleType | OMPScheduleType::ModifierNonmonotonic;
    }
  }
}

/// Determine the schedule type using schedule and ordering clause arguments.
static OMPScheduleType
computeOpenMPScheduleType(ScheduleKind ClauseKind, bool HasChunks,
                          bool HasSimdModifier, bool HasMonotonicModifier,
                          bool HasNonmonotonicModifier, bool HasOrderedClause) {
  OMPScheduleType BaseSchedule =
      getOpenMPBaseScheduleType(ClauseKind, HasChunks, HasSimdModifier);
  OMPScheduleType OrderedSchedule =
      getOpenMPOrderingScheduleType(BaseSchedule, HasOrderedClause);
  OMPScheduleType Result = getOpenMPMonotonicityScheduleType(
      OrderedSchedule, HasSimdModifier, HasMonotonicModifier,
      HasNonmonotonicModifier, HasOrderedClause);

  assert(isValidWorkshareLoopScheduleType(Result) && "Invalid schedule type");
  return Result;
}

/// Make \p Source branch to \p Target.
///
/// Handles two situations:
/// * \p Source already has an unconditional branch.
/// * \p Source is a degenerate block (no terminator because the BB is
///   the current head of the IR construction).
static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) {
  if (Instruction *Term = Source->getTerminator()) {
    auto *Br = cast<BranchInst>(Term);
    assert(!Br->isConditional() &&
           "BB's terminator must be an unconditional branch (or degenerate)");
    BasicBlock *Succ = Br->getSuccessor(0);
    Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true);
    Br->setSuccessor(0, Target);
    return;
  }

  auto *NewBr = BranchInst::Create(Target, Source);
  NewBr->setDebugLoc(DL);
}
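
/// Move the instructions following \p IP into \p New and, if \p CreateBranch
/// is set, terminate the old block with an unconditional branch to \p New.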
void llvm::spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New,
                    bool CreateBranch) {
  assert(New->getFirstInsertionPt() == New->begin() &&
         "Target BB must not have PHI nodes");

  // Move instructions to new block.
  BasicBlock *Old = IP.getBlock();
  New->splice(New->begin(), Old, IP.getPoint(), Old->end());

  if (CreateBranch)
    BranchInst::Create(New, Old);
}

void llvm::spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch) {
  DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
  BasicBlock *Old = Builder.GetInsertBlock();

  spliceBB(Builder.saveIP(), New, CreateBranch);
  if (CreateBranch)
    Builder.SetInsertPoint(Old->getTerminator());
  else
    Builder.SetInsertPoint(Old);

  // SetInsertPoint also updates the Builder's debug location, but we want to
  // keep the one the Builder was configured to use.
  Builder.SetCurrentDebugLocation(DebugLoc);
}
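
/// Split the block at \p IP and return the new block that receives all
/// trailing instructions; \p Name defaults to the old block's name.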
BasicBlock *llvm::splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
                          llvm::Twine Name) {
  BasicBlock *Old = IP.getBlock();
  BasicBlock *New = BasicBlock::Create(
      Old->getContext(), Name.isTriviallyEmpty() ? Old->getName() : Name,
      Old->getParent(), Old->getNextNode());
  spliceBB(IP, New, CreateBranch);
  New->replaceSuccessorsPhiUsesWith(Old, New);
  return New;
}

BasicBlock *llvm::splitBB(IRBuilderBase &Builder, bool CreateBranch,
                          llvm::Twine Name) {
  DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
  BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, Name);
  if (CreateBranch)
    Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
  else
    Builder.SetInsertPoint(Builder.GetInsertBlock());
  // SetInsertPoint also updates the Builder's debug location, but we want to
  // keep the one the Builder was configured to use.
  Builder.SetCurrentDebugLocation(DebugLoc);
  return New;
}

BasicBlock *llvm::splitBB(IRBuilder<> &Builder, bool CreateBranch,
                          llvm::Twine Name) {
  DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
  BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, Name);
  if (CreateBranch)
    Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
  else
    Builder.SetInsertPoint(Builder.GetInsertBlock());
  // SetInsertPoint also updates the Builder's debug location, but we want to
  // keep the one the Builder was configured to use.
  Builder.SetCurrentDebugLocation(DebugLoc);
  return New;
}

BasicBlock *llvm::splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
                                    llvm::Twine Suffix) {
  BasicBlock *Old = Builder.GetInsertBlock();
  return splitBB(Builder, CreateBranch, Old->getName() + Suffix);
}

// This function creates a fake integer value and a fake use for the integer
// value. It returns the fake value created. This is useful in modeling the
// extra arguments to the outlined functions.
static Value *createFakeIntVal(IRBuilder<> &Builder,
                               OpenMPIRBuilder::InsertPointTy OuterAllocaIP,
                               std::stack<Instruction *> &ToBeDeleted,
                               OpenMPIRBuilder::InsertPointTy InnerAllocaIP,
                               const Twine &Name = "", bool AsPtr = true) {
  Builder.restoreIP(OuterAllocaIP);
  Instruction *FakeVal;
  AllocaInst *FakeValAddr =
      Builder.CreateAlloca(Builder.getInt32Ty(), nullptr, Name + ".addr");
  ToBeDeleted.push(FakeValAddr);

  if (AsPtr) {
    FakeVal = FakeValAddr;
  } else {
    FakeVal =
        Builder.CreateLoad(Builder.getInt32Ty(), FakeValAddr, Name + ".val");
    ToBeDeleted.push(FakeVal);
  }

  // Generate a fake use of this value.
  Builder.restoreIP(InnerAllocaIP);
  Instruction *UseFakeVal;
  if (AsPtr) {
    UseFakeVal =
        Builder.CreateLoad(Builder.getInt32Ty(), FakeVal, Name + ".use");
  } else {
    UseFakeVal =
        cast<BinaryOperator>(Builder.CreateAdd(FakeVal, Builder.getInt32(10)));
  }
  ToBeDeleted.push(UseFakeVal);
  return FakeVal;
}

//===----------------------------------------------------------------------===//
// OpenMPIRBuilderConfig
//===----------------------------------------------------------------------===//

namespace {

/// Values for bit flags for marking which requires clauses have been used.
enum OpenMPOffloadingRequiresDirFlags {
  /// flag undefined.
  OMP_REQ_UNDEFINED = 0x000,
  /// no requires directive present.
  OMP_REQ_NONE = 0x001,
  /// reverse_offload clause.
  OMP_REQ_REVERSE_OFFLOAD = 0x002,
  /// unified_address clause.
  OMP_REQ_UNIFIED_ADDRESS = 0x004,
  /// unified_shared_memory clause.
  OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
  /// dynamic_allocators clause.
  OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
};

} // anonymous namespace

OpenMPIRBuilderConfig::OpenMPIRBuilderConfig()
    : RequiresFlags(OMP_REQ_UNDEFINED) {}

OpenMPIRBuilderConfig::OpenMPIRBuilderConfig(
    bool IsTargetDevice, bool IsGPU, bool OpenMPOffloadMandatory,
    bool HasRequiresReverseOffload, bool HasRequiresUnifiedAddress,
    bool HasRequiresUnifiedSharedMemory, bool HasRequiresDynamicAllocators)
    : IsTargetDevice(IsTargetDevice), IsGPU(IsGPU),
      OpenMPOffloadMandatory(OpenMPOffloadMandatory),
      RequiresFlags(OMP_REQ_UNDEFINED) {
  if (HasRequiresReverseOffload)
    RequiresFlags |= OMP_REQ_REVERSE_OFFLOAD;
  if (HasRequiresUnifiedAddress)
    RequiresFlags |= OMP_REQ_UNIFIED_ADDRESS;
  if (HasRequiresUnifiedSharedMemory)
    RequiresFlags |= OMP_REQ_UNIFIED_SHARED_MEMORY;
  if (HasRequiresDynamicAllocators)
    RequiresFlags |= OMP_REQ_DYNAMIC_ALLOCATORS;
}

bool OpenMPIRBuilderConfig::hasRequiresReverseOffload() const {
  return RequiresFlags & OMP_REQ_REVERSE_OFFLOAD;
}

bool OpenMPIRBuilderConfig::hasRequiresUnifiedAddress() const {
  return RequiresFlags & OMP_REQ_UNIFIED_ADDRESS;
}

bool OpenMPIRBuilderConfig::hasRequiresUnifiedSharedMemory() const {
  return RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY;
}

bool OpenMPIRBuilderConfig::hasRequiresDynamicAllocators() const {
  return RequiresFlags & OMP_REQ_DYNAMIC_ALLOCATORS;
}

int64_t OpenMPIRBuilderConfig::getRequiresFlags() const {
  return hasRequiresFlags() ? RequiresFlags
                            : static_cast<int64_t>(OMP_REQ_NONE);
}

void OpenMPIRBuilderConfig::setHasRequiresReverseOffload(bool Value) {
  if (Value)
    RequiresFlags |= OMP_REQ_REVERSE_OFFLOAD;
  else
    RequiresFlags &= ~OMP_REQ_REVERSE_OFFLOAD;
}

void OpenMPIRBuilderConfig::setHasRequiresUnifiedAddress(bool Value) {
  if (Value)
    RequiresFlags |= OMP_REQ_UNIFIED_ADDRESS;
  else
    RequiresFlags &= ~OMP_REQ_UNIFIED_ADDRESS;
}

void OpenMPIRBuilderConfig::setHasRequiresUnifiedSharedMemory(bool Value) {
  if (Value)
    RequiresFlags |= OMP_REQ_UNIFIED_SHARED_MEMORY;
  else
    RequiresFlags &= ~OMP_REQ_UNIFIED_SHARED_MEMORY;
}

void OpenMPIRBuilderConfig::setHasRequiresDynamicAllocators(bool Value) {
  if (Value)
    RequiresFlags |= OMP_REQ_DYNAMIC_ALLOCATORS;
  else
    RequiresFlags &= ~OMP_REQ_DYNAMIC_ALLOCATORS;
}

//===----------------------------------------------------------------------===//
// OpenMPIRBuilder
//===----------------------------------------------------------------------===//
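
/// Populate \p ArgsVector with the arguments expected by __tgt_target_kernel,
/// derived from \p KernelArgs; team and thread counts are encoded as
/// {value, 0, 0} triples.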
void OpenMPIRBuilder::getKernelArgsVector(TargetKernelArgs &KernelArgs,
                                          IRBuilderBase &Builder,
                                          SmallVector<Value *> &ArgsVector) {
  Value *Version = Builder.getInt32(OMP_KERNEL_ARG_VERSION);
  Value *PointerNum = Builder.getInt32(KernelArgs.NumTargetItems);
  auto Int32Ty = Type::getInt32Ty(Builder.getContext());
  Value *ZeroArray = Constant::getNullValue(ArrayType::get(Int32Ty, 3));
  Value *Flags = Builder.getInt64(KernelArgs.HasNoWait);

  Value *NumTeams3D =
      Builder.CreateInsertValue(ZeroArray, KernelArgs.NumTeams, {0});
  Value *NumThreads3D =
      Builder.CreateInsertValue(ZeroArray, KernelArgs.NumThreads, {0});

  ArgsVector = {Version,
                PointerNum,
                KernelArgs.RTArgs.BasePointersArray,
                KernelArgs.RTArgs.PointersArray,
                KernelArgs.RTArgs.SizesArray,
                KernelArgs.RTArgs.MapTypesArray,
                KernelArgs.RTArgs.MapNamesArray,
                KernelArgs.RTArgs.MappersArray,
                KernelArgs.NumIterations,
                Flags,
                NumTeams3D,
                NumThreads3D,
                KernelArgs.DynCGGroupMem};
}
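
/// Attach the attribute sets listed in OMPKinds.def to the runtime function
/// \p Fn, mapping i32 sign/zero-extension attributes onto the target's
/// conventions via TargetLibraryInfo.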
void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
  LLVMContext &Ctx = Fn.getContext();

  // Get the function's current attributes.
  auto Attrs = Fn.getAttributes();
  auto FnAttrs = Attrs.getFnAttrs();
  auto RetAttrs = Attrs.getRetAttrs();
  SmallVector<AttributeSet, 4> ArgAttrs;
  for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo)
    ArgAttrs.emplace_back(Attrs.getParamAttrs(ArgNo));

  // Add AS to FnAS while taking special care with integer extensions.
  auto addAttrSet = [&](AttributeSet &FnAS, const AttributeSet &AS,
                        bool Param = true) -> void {
    bool HasSignExt = AS.hasAttribute(Attribute::SExt);
    bool HasZeroExt = AS.hasAttribute(Attribute::ZExt);
    if (HasSignExt || HasZeroExt) {
      assert(AS.getNumAttributes() == 1 &&
             "Currently not handling extension attr combined with others.");
      if (Param) {
        if (auto AK = TargetLibraryInfo::getExtAttrForI32Param(T, HasSignExt))
          FnAS = FnAS.addAttribute(Ctx, AK);
      } else if (auto AK =
                     TargetLibraryInfo::getExtAttrForI32Return(T, HasSignExt))
        FnAS = FnAS.addAttribute(Ctx, AK);
    } else {
      FnAS = FnAS.addAttributes(Ctx, AS);
    }
  };

#define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

  // Add attributes to the function declaration.
  switch (FnID) {
#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets)                \
  case Enum:                                                                   \
    FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet);                           \
    addAttrSet(RetAttrs, RetAttrSet, /*Param*/ false);                         \
    for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo)                \
      addAttrSet(ArgAttrs[ArgNo], ArgAttrSets[ArgNo]);                         \
    Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs));    \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  default:
    // Attributes are optional.
    break;
  }
}
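
/// Return the declaration of the runtime function \p FnID, creating it in the
/// module if necessary and annotating fork calls with callback metadata.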
FunctionCallee
OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) {
  FunctionType *FnTy = nullptr;
  Function *Fn = nullptr;

  // Try to find the declaration in the module first.
  switch (FnID) {
#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...)                          \
  case Enum:                                                                   \
    FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__},        \
                             IsVarArg);                                        \
    Fn = M.getFunction(Str);                                                   \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  }

  if (!Fn) {
    // Create a new declaration if we need one.
    switch (FnID) {
#define OMP_RTL(Enum, Str, ...)                                                \
  case Enum:                                                                   \
    Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M);         \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
    }

    // Add information if the runtime function takes a callback function.
    if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) {
      if (!Fn->hasMetadata(LLVMContext::MD_callback)) {
        LLVMContext &Ctx = Fn->getContext();
        MDBuilder MDB(Ctx);
        // Annotate the callback behavior of the runtime function:
        // - The callback callee is argument number 2 (microtask).
        // - The first two arguments of the callback callee are unknown (-1).
        // - All variadic arguments to the runtime function are passed to the
        //   callback callee.
        Fn->addMetadata(
            LLVMContext::MD_callback,
            *MDNode::get(Ctx, {MDB.createCallbackEncoding(
                                  2, {-1, -1}, /* VarArgsArePassed */ true)}));
      }
    }

    LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()
                      << " with type " << *Fn->getFunctionType() << "\n");
    addAttributes(FnID, *Fn);

  } else {
    LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()
                      << " with type " << *Fn->getFunctionType() << "\n");
  }

  assert(Fn && "Failed to create OpenMP runtime function");

  return {FnTy, Fn};
}

Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) {
  FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID);
  auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee());
  assert(Fn && "Failed to create OpenMP runtime function pointer");
  return Fn;
}

void OpenMPIRBuilder::initialize() { initializeTypes(M); }
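
/// Hoist constant-sized allocas out of all non-entry blocks of \p Function
/// into its entry block, where later passes expect user stack allocations.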
static void raiseUserConstantDataAllocasToEntryBlock(IRBuilderBase &Builder,
                                                     Function *Function) {
  BasicBlock &EntryBlock = Function->getEntryBlock();
  Instruction *MoveLocInst = EntryBlock.getFirstNonPHI();

  // Loop over blocks looking for constant allocas, skipping the entry block
  // as any allocas there are already in the desired location.
  for (auto Block = std::next(Function->begin(), 1); Block != Function->end();
       Block++) {
    for (auto Inst = Block->getReverseIterator()->begin();
         Inst != Block->getReverseIterator()->end();) {
      if (auto *AllocaInst = dyn_cast_if_present<llvm::AllocaInst>(Inst)) {
        Inst++;
        if (!isa<ConstantData>(AllocaInst->getArraySize()))
          continue;
        AllocaInst->moveBeforePreserving(MoveLocInst);
      } else {
        Inst++;
      }
    }
  }
}
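
/// Outline all regions registered via addOutlineInfo that belong to \p Fn (or
/// all pending regions if \p Fn is null), raise candidate allocas, and emit
/// offload entries and metadata.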
void OpenMPIRBuilder::finalize(Function *Fn) {
  SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
  SmallVector<BasicBlock *, 32> Blocks;
  SmallVector<OutlineInfo, 16> DeferredOutlines;
  for (OutlineInfo &OI : OutlineInfos) {
    // Skip functions that have not finalized yet; may happen with nested
    // function generation.
    if (Fn && OI.getFunction() != Fn) {
      DeferredOutlines.push_back(OI);
      continue;
    }

    ParallelRegionBlockSet.clear();
    Blocks.clear();
    OI.collectBlocks(ParallelRegionBlockSet, Blocks);

    Function *OuterFn = OI.getFunction();
    CodeExtractorAnalysisCache CEAC(*OuterFn);
    // If we generate code for the target device, we need to allocate the
    // struct for aggregate params in the device default alloca address space.
    // The OpenMP runtime requires that the params of the extracted functions
    // are passed as zero address space pointers. This flag ensures that
    // CodeExtractor generates correct code for extracted functions
    // which are used by the OpenMP runtime.
    bool ArgsInZeroAddressSpace = Config.isTargetDevice();
    CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
                            /* AggregateArgs */ true,
                            /* BlockFrequencyInfo */ nullptr,
                            /* BranchProbabilityInfo */ nullptr,
                            /* AssumptionCache */ nullptr,
                            /* AllowVarArgs */ true,
                            /* AllowAlloca */ true,
                            /* AllocaBlock*/ OI.OuterAllocaBB,
                            /* Suffix */ ".omp_par", ArgsInZeroAddressSpace);

    LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n");
    LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()
                      << " Exit: " << OI.ExitBB->getName() << "\n");
    assert(Extractor.isEligible() &&
           "Expected OpenMP outlining to be possible!");

    for (auto *V : OI.ExcludeArgsFromAggregate)
      Extractor.excludeArgFromAggregate(V);

    Function *OutlinedFn = Extractor.extractCodeRegion(CEAC);

    // Forward target-cpu, target-features attributes to the outlined function.
    auto TargetCpuAttr = OuterFn->getFnAttribute("target-cpu");
    if (TargetCpuAttr.isStringAttribute())
      OutlinedFn->addFnAttr(TargetCpuAttr);

    auto TargetFeaturesAttr = OuterFn->getFnAttribute("target-features");
    if (TargetFeaturesAttr.isStringAttribute())
      OutlinedFn->addFnAttr(TargetFeaturesAttr);

    LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n");
    LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n");
    assert(OutlinedFn->getReturnType()->isVoidTy() &&
           "OpenMP outlined functions should not return a value!");

    // For compatibility with the clang CG we move the outlined function after
    // the one with the parallel region.
    OutlinedFn->removeFromParent();
    M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn);

    // Remove the artificial entry introduced by the extractor right away; we
    // made our own entry block after all.
    {
      BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock();
      assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB);
      assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry);
      // Move instructions from the to-be-deleted ArtificialEntry to the entry
      // basic block of the parallel region. CodeExtractor generates
      // instructions to unwrap the aggregate argument and may sink
      // allocas/bitcasts for values that are solely used in the outlined
      // region and do not escape.
      assert(!ArtificialEntry.empty() &&
             "Expected instructions to add in the outlined region entry");
      for (BasicBlock::reverse_iterator It = ArtificialEntry.rbegin(),
                                        End = ArtificialEntry.rend();
           It != End;) {
        Instruction &I = *It;
        It++;

        if (I.isTerminator())
          continue;

        I.moveBeforePreserving(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
      }

      OI.EntryBB->moveBefore(&ArtificialEntry);
      ArtificialEntry.eraseFromParent();
    }
    assert(&OutlinedFn->getEntryBlock() == OI.EntryBB);
    assert(OutlinedFn && OutlinedFn->getNumUses() == 1);

    // Run a user callback, e.g. to add attributes.
    if (OI.PostOutlineCB)
      OI.PostOutlineCB(*OutlinedFn);
  }

  // Remove work items that have been completed.
  OutlineInfos = std::move(DeferredOutlines);

  // The createTarget functions embed user-written code into the target region,
  // which may inject allocas that then need to be moved to the entry block of
  // our target, or we risk malformed optimisations by later passes. This is
  // only relevant for the device pass, which appears to be a little more
  // delicate when it comes to optimisations (however, we do not block on that
  // here; it's up to the inserter into the list to do so).
  // This notably has to occur after the OutlineInfo candidates have been
  // extracted, so we have an end product that will not be implicitly adversely
  // affected by any raises unless intentionally appended to the list.
  // NOTE: This only does so for ConstantData; it could be extended to
  // ConstantExprs with further effort, however, they should largely be folded
  // when they get here. Extending it to runtime-defined/read+writeable
  // allocation sizes would be non-trivial (we would need to factor in movement
  // of any stores to variables the allocation size depends on, as well as the
  // usual loads; otherwise it'll yield the wrong result after movement) and
  // would likely be more suitable as an LLVM optimisation pass.
  for (Function *F : ConstantAllocaRaiseCandidates)
    raiseUserConstantDataAllocasToEntryBlock(Builder, F);

  EmitMetadataErrorReportFunctionTy &&ErrorReportFn =
      [](EmitMetadataErrorKind Kind,
         const TargetRegionEntryInfo &EntryInfo) -> void {
    errs() << "Error of kind: " << Kind
           << " when emitting offload entries and metadata during "
              "OMPIRBuilder finalization \n";
  };

  if (!OffloadInfoManager.empty())
    createOffloadEntriesAndInfoMetadata(ErrorReportFn);
}

OpenMPIRBuilder::~OpenMPIRBuilder() {
  assert(OutlineInfos.empty() && "There must be no outstanding outlinings");
}

GlobalValue *OpenMPIRBuilder::createGlobalFlag(unsigned Value, StringRef Name) {
  Type *I32Ty = Type::getInt32Ty(M.getContext());
  auto *GV =
      new GlobalVariable(M, I32Ty,
                         /* isConstant = */ true, GlobalValue::WeakODRLinkage,
                         ConstantInt::get(I32Ty, Value), Name);
  GV->setVisibility(GlobalValue::HiddenVisibility);

  return GV;
}

Constant *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
                                            uint32_t SrcLocStrSize,
                                            IdentFlag LocFlags,
                                            unsigned Reserve2Flags) {
  // Enable "C-mode".
  LocFlags |= OMP_IDENT_FLAG_KMPC;

  Constant *&Ident =
      IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}];
  if (!Ident) {
    Constant *I32Null = ConstantInt::getNullValue(Int32);
    Constant *IdentData[] = {I32Null,
                             ConstantInt::get(Int32, uint32_t(LocFlags)),
                             ConstantInt::get(Int32, Reserve2Flags),
                             ConstantInt::get(Int32, SrcLocStrSize), SrcLocStr};
    Constant *Initializer =
        ConstantStruct::get(OpenMPIRBuilder::Ident, IdentData);

    // Look for existing encoding of the location + flags, not needed but
    // minimizes the difference to the existing solution while we transition.
    for (GlobalVariable &GV : M.globals())
      if (GV.getValueType() == OpenMPIRBuilder::Ident && GV.hasInitializer())
        if (GV.getInitializer() == Initializer)
          Ident = &GV;

    if (!Ident) {
      auto *GV = new GlobalVariable(
          M, OpenMPIRBuilder::Ident,
          /* isConstant = */ true, GlobalValue::PrivateLinkage, Initializer, "",
          nullptr, GlobalValue::NotThreadLocal,
          M.getDataLayout().getDefaultGlobalsAddressSpace());
      GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
      GV->setAlignment(Align(8));
      Ident = GV;
    }
  }

  return ConstantExpr::getPointerBitCastOrAddrSpaceCast(Ident, IdentPtr);
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr,
                                                uint32_t &SrcLocStrSize) {
  SrcLocStrSize = LocStr.size();
  Constant *&SrcLocStr = SrcLocStrMap[LocStr];
  if (!SrcLocStr) {
    Constant *Initializer =
        ConstantDataArray::getString(M.getContext(), LocStr);

    // Look for existing encoding of the location, not needed but minimizes the
    // difference to the existing solution while we transition.
    for (GlobalVariable &GV : M.globals())
      if (GV.isConstant() && GV.hasInitializer() &&
          GV.getInitializer() == Initializer)
        return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr);

    SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "",
                                              /* AddressSpace */ 0, &M);
  }
  return SrcLocStr;
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName,
                                                StringRef FileName,
                                                unsigned Line, unsigned Column,
                                                uint32_t &SrcLocStrSize) {
  SmallString<128> Buffer;
  Buffer.push_back(';');
  Buffer.append(FileName);
  Buffer.push_back(';');
  Buffer.append(FunctionName);
  Buffer.push_back(';');
  Buffer.append(std::to_string(Line));
  Buffer.push_back(';');
  Buffer.append(std::to_string(Column));
  Buffer.push_back(';');
  Buffer.push_back(';');
  return getOrCreateSrcLocStr(Buffer.str(), SrcLocStrSize);
}

Constant *
OpenMPIRBuilder::getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize) {
  StringRef UnknownLoc = ";unknown;unknown;0;0;;";
  return getOrCreateSrcLocStr(UnknownLoc, SrcLocStrSize);
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL,
                                                uint32_t &SrcLocStrSize,
                                                Function *F) {
  DILocation *DIL = DL.get();
  if (!DIL)
    return getOrCreateDefaultSrcLocStr(SrcLocStrSize);
  StringRef FileName = M.getName();
  if (DIFile *DIF = DIL->getFile())
    if (std::optional<StringRef> Source = DIF->getSource())
      FileName = *Source;
  StringRef Function = DIL->getScope()->getSubprogram()->getName();
  if (Function.empty() && F)
    Function = F->getName();
  return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(),
                              DIL->getColumn(), SrcLocStrSize);
}

Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc,
                                                uint32_t &SrcLocStrSize) {
  return getOrCreateSrcLocStr(Loc.DL, SrcLocStrSize,
                              Loc.IP.getBlock()->getParent());
}

Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) {
  return Builder.CreateCall(
      getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident,
      "omp_global_thread_num");
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK,
                               bool ForceSimpleCall, bool CheckCancelFlag) {
  if (!updateToLocation(Loc))
    return Loc.IP;
  return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag);
}
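
/// Emit either __kmpc_barrier or, inside a cancellable parallel region,
/// __kmpc_cancel_barrier, optionally checking the returned cancellation flag.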
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind,
                                 bool ForceSimpleCall, bool CheckCancelFlag) {
  // Build call __kmpc_cancel_barrier(loc, thread_id) or
  //            __kmpc_barrier(loc, thread_id);

  IdentFlag BarrierLocFlags;
  switch (Kind) {
  case OMPD_for:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR;
    break;
  case OMPD_sections:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS;
    break;
  case OMPD_single:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE;
    break;
  case OMPD_barrier:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL;
    break;
  default:
    BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL;
    break;
  }

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Args[] = {
      getOrCreateIdent(SrcLocStr, SrcLocStrSize, BarrierLocFlags),
      getOrCreateThreadID(getOrCreateIdent(SrcLocStr, SrcLocStrSize))};

  // If we are in a cancellable parallel region, barriers are cancellation
  // points.
  // TODO: Check why we would force simple calls or to ignore the cancel flag.
  bool UseCancelBarrier =
      !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel);

  Value *Result =
      Builder.CreateCall(getOrCreateRuntimeFunctionPtr(
                             UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier
                                              : OMPRTL___kmpc_barrier),
                         Args);

  if (UseCancelBarrier && CheckCancelFlag)
    emitCancelationCheckImpl(Result, OMPD_parallel);

  return Builder.saveIP();
}

OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createCancel(const LocationDescription &Loc,
                              Value *IfCondition,
                              omp::Directive CanceledDirective) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  // LLVM utilities like blocks with terminators.
  auto *UI = Builder.CreateUnreachable();

  Instruction *ThenTI = UI, *ElseTI = nullptr;
  if (IfCondition)
    SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
  Builder.SetInsertPoint(ThenTI);

  Value *CancelKind = nullptr;
  switch (CanceledDirective) {
#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value)                       \
  case DirectiveEnum:                                                          \
    CancelKind = Builder.getInt32(Value);                                      \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  default:
    llvm_unreachable("Unknown cancel kind!");
  }

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind};
  Value *Result = Builder.CreateCall(
      getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args);
  auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) {
    if (CanceledDirective == OMPD_parallel) {
      IRBuilder<>::InsertPointGuard IPG(Builder);
      Builder.restoreIP(IP);
      createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
                    omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
                    /* CheckCancelFlag */ false);
    }
  };

  // The actual cancel logic is shared with others, e.g., cancel_barriers.
  emitCancelationCheckImpl(Result, CanceledDirective, ExitCB);

  // Update the insertion point and remove the terminator we introduced.
  Builder.SetInsertPoint(UI->getParent());
  UI->eraseFromParent();

  return Builder.saveIP();
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitTargetKernel(
    const LocationDescription &Loc, InsertPointTy AllocaIP, Value *&Return,
    Value *Ident, Value *DeviceID, Value *NumTeams, Value *NumThreads,
    Value *HostPtr, ArrayRef<Value *> KernelArgs) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  Builder.restoreIP(AllocaIP);
  auto *KernelArgsPtr =
      Builder.CreateAlloca(OpenMPIRBuilder::KernelArgs, nullptr, "kernel_args");
  Builder.restoreIP(Loc.IP);

  for (unsigned I = 0, Size = KernelArgs.size(); I != Size; ++I) {
    llvm::Value *Arg =
        Builder.CreateStructGEP(OpenMPIRBuilder::KernelArgs, KernelArgsPtr, I);
    Builder.CreateAlignedStore(
        KernelArgs[I], Arg,
        M.getDataLayout().getPrefTypeAlign(KernelArgs[I]->getType()));
  }

  SmallVector<Value *> OffloadingArgs{Ident,      DeviceID, NumTeams,
                                      NumThreads, HostPtr,  KernelArgsPtr};

  Return = Builder.CreateCall(
      getOrCreateRuntimeFunction(M, OMPRTL___tgt_target_kernel),
      OffloadingArgs);

  return Builder.saveIP();
}

OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitKernelLaunch(
    const LocationDescription &Loc, Function *OutlinedFn, Value *OutlinedFnID,
    EmitFallbackCallbackTy emitTargetCallFallbackCB, TargetKernelArgs &Args,
    Value *DeviceID, Value *RTLoc, InsertPointTy AllocaIP) {

  if (!updateToLocation(Loc))
    return Loc.IP;

  Builder.restoreIP(Loc.IP);
  // On top of the arrays that were filled up, the target offloading call
  // takes as arguments the device id as well as the host pointer. The host
  // pointer is used by the runtime library to identify the current target
  // region, so it only has to be unique and not necessarily point to
  // anything. It could be the pointer to the outlined function that
  // implements the target region, but we aren't using that so that the
  // compiler doesn't need to keep that, and could therefore inline the host
  // function if proven worthwhile during optimization.

  // From this point on, we need to have an ID of the target region defined.
  assert(OutlinedFnID && "Invalid outlined function ID!");
  (void)OutlinedFnID;

  // Return value of the runtime offloading call.
  Value *Return = nullptr;

  // Arguments for the target kernel.
  SmallVector<Value *> ArgsVector;
  getKernelArgsVector(Args, Builder, ArgsVector);

  // The target region is an outlined function launched by the runtime
  // via calls to __tgt_target_kernel().
  //
  // Note that on the host and CPU targets, the runtime implementation of
  // these calls simply call the outlined function without forking threads.
  // The outlined functions themselves have runtime calls to
  // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
  // the compiler in emitTeamsCall() and emitParallelCall().
  //
  // In contrast, on the NVPTX target, the implementation of
  // __tgt_target_teams() launches a GPU kernel with the requested number
  // of teams and threads so no additional calls to the runtime are required.
  // Check the error code and execute the host version if required.
  Builder.restoreIP(emitTargetKernel(Builder, AllocaIP, Return, RTLoc, DeviceID,
                                     Args.NumTeams, Args.NumThreads,
                                     OutlinedFnID, ArgsVector));

  BasicBlock *OffloadFailedBlock =
      BasicBlock::Create(Builder.getContext(), "omp_offload.failed");
  BasicBlock *OffloadContBlock =
      BasicBlock::Create(Builder.getContext(), "omp_offload.cont");
  Value *Failed = Builder.CreateIsNotNull(Return);
  Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);

  auto CurFn = Builder.GetInsertBlock()->getParent();
  emitBlock(OffloadFailedBlock, CurFn);
  Builder.restoreIP(emitTargetCallFallbackCB(Builder.saveIP()));
  emitBranch(OffloadContBlock);
  emitBlock(OffloadContBlock, CurFn, /*IsFinished=*/true);
  return Builder.saveIP();
}
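
/// Branch on \p CancelFlag: a zero flag continues in the region, while a
/// non-zero flag runs the optional \p ExitCB plus the pending finalization
/// callback and jumps to the cancellation block.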
void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag,
                                               omp::Directive CanceledDirective,
                                               FinalizeCallbackTy ExitCB) {
  assert(isLastFinalizationInfoCancellable(CanceledDirective) &&
         "Unexpected cancellation!");

  // For a cancel barrier we create two new blocks.
  BasicBlock *BB = Builder.GetInsertBlock();
  BasicBlock *NonCancellationBlock;
  if (Builder.GetInsertPoint() == BB->end()) {
    // TODO: This branch will not be needed once we moved to the
    // OpenMPIRBuilder codegen completely.
    NonCancellationBlock = BasicBlock::Create(
        BB->getContext(), BB->getName() + ".cont", BB->getParent());
  } else {
    NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint());
    BB->getTerminator()->eraseFromParent();
    Builder.SetInsertPoint(BB);
  }
  BasicBlock *CancellationBlock = BasicBlock::Create(
      BB->getContext(), BB->getName() + ".cncl", BB->getParent());

  // Jump to them based on the return value.
  Value *Cmp = Builder.CreateIsNull(CancelFlag);
  Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock,
                       /* TODO weight */ nullptr, nullptr);

  // From the cancellation block we finalize all variables and go to the
  // post finalization block that is known to the FiniCB callback.
  Builder.SetInsertPoint(CancellationBlock);
  if (ExitCB)
    ExitCB(Builder.saveIP());
  auto &FI = FinalizationStack.back();
  FI.FiniCB(Builder.saveIP());

  // The continuation block is where code generation continues.
  Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
}

// Callback used to create OpenMP runtime calls to support
// omp parallel clause for the device.
// We need to use this callback to replace the call to the OutlinedFn in
// OuterFn with a call to the OpenMP DeviceRTL runtime function
// (kmpc_parallel_51).
static void targetParallelCallback(
    OpenMPIRBuilder *OMPIRBuilder, Function &OutlinedFn, Function *OuterFn,
    BasicBlock *OuterAllocaBB, Value *Ident, Value *IfCondition,
    Value *NumThreads, Instruction *PrivTID, AllocaInst *PrivTIDAddr,
    Value *ThreadID, const SmallVector<Instruction *, 4> &ToBeDeleted) {
  // Add some known attributes.
  IRBuilder<> &Builder = OMPIRBuilder->Builder;
  OutlinedFn.addParamAttr(0, Attribute::NoAlias);
  OutlinedFn.addParamAttr(1, Attribute::NoAlias);
  OutlinedFn.addParamAttr(0, Attribute::NoUndef);
  OutlinedFn.addParamAttr(1, Attribute::NoUndef);
  OutlinedFn.addFnAttr(Attribute::NoUnwind);

  assert(OutlinedFn.arg_size() >= 2 &&
         "Expected at least tid and bounded tid as arguments");
  unsigned NumCapturedVars = OutlinedFn.arg_size() - /* tid & bounded tid */ 2;

  CallInst *CI = cast<CallInst>(OutlinedFn.user_back());
  assert(CI && "Expected call instruction to outlined function");
  CI->getParent()->setName("omp_parallel");

  Builder.SetInsertPoint(CI);
  Type *PtrTy = OMPIRBuilder->VoidPtr;
  Value *NullPtrValue = Constant::getNullValue(PtrTy);

  // Add alloca for kernel args.
  OpenMPIRBuilder::InsertPointTy CurrentIP = Builder.saveIP();
  Builder.SetInsertPoint(OuterAllocaBB, OuterAllocaBB->getFirstInsertionPt());
  AllocaInst *ArgsAlloca =
      Builder.CreateAlloca(ArrayType::get(PtrTy, NumCapturedVars));
  Value *Args = ArgsAlloca;
  // Add address space cast if array for storing arguments is not allocated
  // in address space 0.
  if (ArgsAlloca->getAddressSpace())
    Args = Builder.CreatePointerCast(ArgsAlloca, PtrTy);
  Builder.restoreIP(CurrentIP);

  // Store captured vars which are used by kmpc_parallel_51.
  for (unsigned Idx = 0; Idx < NumCapturedVars; Idx++) {
    Value *V = *(CI->arg_begin() + 2 + Idx);
    Value *StoreAddress = Builder.CreateConstInBoundsGEP2_64(
        ArrayType::get(PtrTy, NumCapturedVars), Args, 0, Idx);
    Builder.CreateStore(V, StoreAddress);
  }

  Value *Cond =
      IfCondition ? Builder.CreateSExtOrTrunc(IfCondition, OMPIRBuilder->Int32)
                  : Builder.getInt32(1);

  // Build kmpc_parallel_51 call.
  Value *Parallel51CallArgs[] = {
      /* identifier*/ Ident,
      /* global thread num*/ ThreadID,
      /* if expression */ Cond,
      /* number of threads */ NumThreads ? NumThreads : Builder.getInt32(-1),
      /* Proc bind */ Builder.getInt32(-1),
      /* outlined function */
      Builder.CreateBitCast(&OutlinedFn, OMPIRBuilder->ParallelTaskPtr),
      /* wrapper function */ NullPtrValue,
      /* arguments of the outlined function */ Args,
      /* number of arguments */ Builder.getInt64(NumCapturedVars)};

  FunctionCallee RTLFn =
      OMPIRBuilder->getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_parallel_51);

  Builder.CreateCall(RTLFn, Parallel51CallArgs);

  LLVM_DEBUG(dbgs() << "With kmpc_parallel_51 placed: "
                    << *Builder.GetInsertBlock()->getParent() << "\n");

  // Initialize the local TID stack location with the argument value.
  Builder.SetInsertPoint(PrivTID);
  Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin();
  Builder.CreateStore(Builder.CreateLoad(OMPIRBuilder->Int32, OutlinedAI),
                      PrivTIDAddr);

  // Remove the redundant call to the outlined function.
  CI->eraseFromParent();

  for (Instruction *I : ToBeDeleted) {
    I->eraseFromParent();
  }
}

// Callback used to create OpenMP runtime calls to support
// omp parallel clause for the host.
// We need to use this callback to replace the call to the OutlinedFn in
// OuterFn with a call to the OpenMP host runtime function
// (__kmpc_fork_call[_if]).
static void
hostParallelCallback(OpenMPIRBuilder *OMPIRBuilder, Function &OutlinedFn,
                     Function *OuterFn, Value *Ident, Value *IfCondition,
                     Instruction *PrivTID, AllocaInst *PrivTIDAddr,
                     const SmallVector<Instruction *, 4> &ToBeDeleted) {
  IRBuilder<> &Builder = OMPIRBuilder->Builder;
  FunctionCallee RTLFn;
  if (IfCondition) {
    RTLFn =
        OMPIRBuilder->getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call_if);
  } else {
    RTLFn =
        OMPIRBuilder->getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call);
  }
  if (auto *F = dyn_cast<Function>(RTLFn.getCallee())) {
    if (!F->hasMetadata(LLVMContext::MD_callback)) {
      LLVMContext &Ctx = F->getContext();
      MDBuilder MDB(Ctx);
      // Annotate the callback behavior of the __kmpc_fork_call:
      // - The callback callee is argument number 2 (microtask).
      // - The first two arguments of the callback callee are unknown (-1).
      // - All variadic arguments to the __kmpc_fork_call are passed to the
      //   callback callee.
      F->addMetadata(LLVMContext::MD_callback,
                     *MDNode::get(Ctx, {MDB.createCallbackEncoding(
                                           2, {-1, -1},
                                           /* VarArgsArePassed */ true)}));
    }
  }
  // Add some known attributes.
  OutlinedFn.addParamAttr(0, Attribute::NoAlias);
  OutlinedFn.addParamAttr(1, Attribute::NoAlias);
  OutlinedFn.addFnAttr(Attribute::NoUnwind);

  assert(OutlinedFn.arg_size() >= 2 &&
         "Expected at least tid and bounded tid as arguments");
  unsigned NumCapturedVars = OutlinedFn.arg_size() - /* tid & bounded tid */ 2;

  CallInst *CI = cast<CallInst>(OutlinedFn.user_back());
  CI->getParent()->setName("omp_parallel");
  Builder.SetInsertPoint(CI);

  // Build call __kmpc_fork_call[_if](Ident, n, microtask, var1, .., varn);
  Value *ForkCallArgs[] = {
      Ident, Builder.getInt32(NumCapturedVars),
      Builder.CreateBitCast(&OutlinedFn, OMPIRBuilder->ParallelTaskPtr)};

  SmallVector<Value *, 16> RealArgs;
  RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs));
  if (IfCondition) {
    Value *Cond = Builder.CreateSExtOrTrunc(IfCondition, OMPIRBuilder->Int32);
    RealArgs.push_back(Cond);
  }
  RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end());

  // __kmpc_fork_call_if always expects a void ptr as the last argument.
  // If there are no arguments, pass a null pointer.
  auto PtrTy = OMPIRBuilder->VoidPtr;
  if (IfCondition && NumCapturedVars == 0) {
    Value *NullPtrValue = Constant::getNullValue(PtrTy);
    RealArgs.push_back(NullPtrValue);
  }
  if (IfCondition && RealArgs.back()->getType() != PtrTy)
    RealArgs.back() = Builder.CreateBitCast(RealArgs.back(), PtrTy);

  Builder.CreateCall(RTLFn, RealArgs);

  LLVM_DEBUG(dbgs() << "With fork_call placed: "
                    << *Builder.GetInsertBlock()->getParent() << "\n");

  // Initialize the local TID stack location with the argument value.
  Builder.SetInsertPoint(PrivTID);
  Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin();
  Builder.CreateStore(Builder.CreateLoad(OMPIRBuilder->Int32, OutlinedAI),
                      PrivTIDAddr);

  // Remove the redundant call to the outlined function.
  CI->eraseFromParent();

  for (Instruction *I : ToBeDeleted) {
    I->eraseFromParent();
  }
}
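
// createParallel models the region with temporary tid/zero-address allocas and
// an artificial entry/body/pre-finalize/exit block skeleton; the actual
// outlining and emission of the fork call happen later in finalize() through
// the PostOutlineCB registered below.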
1340
1342 const LocationDescription &Loc, InsertPointTy OuterAllocaIP,
1343 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
1344 FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads,
1345 omp::ProcBindKind ProcBind, bool IsCancellable) {
1346 assert(!isConflictIP(Loc.IP, OuterAllocaIP) && "IPs must not be ambiguous");
1347
1348 if (!updateToLocation(Loc))
1349 return Loc.IP;
1350
1351 uint32_t SrcLocStrSize;
1352 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1353 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1354 Value *ThreadID = getOrCreateThreadID(Ident);
1355 // If we generate code for the target device, we need to allocate
1356 // struct for aggregate params in the device default alloca address space.
1357 // OpenMP runtime requires that the params of the extracted functions are
1358 // passed as zero address space pointers. This flag ensures that extracted
1359 // function arguments are declared in zero address space
1360 bool ArgsInZeroAddressSpace = Config.isTargetDevice();
1361
1362 // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads)
1363 // only if we compile for host side.
1364 if (NumThreads && !Config.isTargetDevice()) {
1365 Value *Args[] = {
1366 Ident, ThreadID,
1367 Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)};
1369 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args);
1370 }
1371
1372 if (ProcBind != OMP_PROC_BIND_default) {
1373 // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind)
1374 Value *Args[] = {
1375 Ident, ThreadID,
1376 ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)};
1378 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args);
1379 }
1380
1381 BasicBlock *InsertBB = Builder.GetInsertBlock();
1382 Function *OuterFn = InsertBB->getParent();
1383
1384 // Save the outer alloca block because the insertion iterator may get
1385 // invalidated and we still need this later.
1386 BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock();
1387
1388 // Vector to remember instructions we used only during the modeling but which
1389 // we want to delete at the end.
1391
1392 // Change the location to the outer alloca insertion point to create and
1393 // initialize the allocas we pass into the parallel region.
1394 Builder.restoreIP(OuterAllocaIP);
1395 AllocaInst *TIDAddrAlloca = Builder.CreateAlloca(Int32, nullptr, "tid.addr");
1396 AllocaInst *ZeroAddrAlloca =
1397 Builder.CreateAlloca(Int32, nullptr, "zero.addr");
1398 Instruction *TIDAddr = TIDAddrAlloca;
1399 Instruction *ZeroAddr = ZeroAddrAlloca;
1400 if (ArgsInZeroAddressSpace && M.getDataLayout().getAllocaAddrSpace() != 0) {
1401 // Add additional casts to enforce pointers in zero address space
1402 TIDAddr = new AddrSpaceCastInst(
1403 TIDAddrAlloca, PointerType ::get(M.getContext(), 0), "tid.addr.ascast");
1404 TIDAddr->insertAfter(TIDAddrAlloca);
1405 ToBeDeleted.push_back(TIDAddr);
1406 ZeroAddr = new AddrSpaceCastInst(ZeroAddrAlloca,
1407 PointerType ::get(M.getContext(), 0),
1408 "zero.addr.ascast");
1409 ZeroAddr->insertAfter(ZeroAddrAlloca);
1410 ToBeDeleted.push_back(ZeroAddr);
1411 }
1412
1413 // We only need TIDAddr and ZeroAddr for modeling purposes to get the
1414 // associated arguments in the outlined function, so we delete them later.
1415 ToBeDeleted.push_back(TIDAddrAlloca);
1416 ToBeDeleted.push_back(ZeroAddrAlloca);
1417
1418 // Create an artificial insertion point that will also ensure the blocks we
1419 // are about to split are not degenerated.
1420 auto *UI = new UnreachableInst(Builder.getContext(), InsertBB);
1421
1422 BasicBlock *EntryBB = UI->getParent();
1423 BasicBlock *PRegEntryBB = EntryBB->splitBasicBlock(UI, "omp.par.entry");
1424 BasicBlock *PRegBodyBB = PRegEntryBB->splitBasicBlock(UI, "omp.par.region");
1425 BasicBlock *PRegPreFiniBB =
1426 PRegBodyBB->splitBasicBlock(UI, "omp.par.pre_finalize");
1427 BasicBlock *PRegExitBB = PRegPreFiniBB->splitBasicBlock(UI, "omp.par.exit");
1428
1429 auto FiniCBWrapper = [&](InsertPointTy IP) {
1430 // Hide "open-ended" blocks from the given FiniCB by setting the right jump
1431 // target to the region exit block.
1432 if (IP.getBlock()->end() == IP.getPoint()) {
1434 Builder.restoreIP(IP);
1435 Instruction *I = Builder.CreateBr(PRegExitBB);
1436 IP = InsertPointTy(I->getParent(), I->getIterator());
1437 }
1438 assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 &&
1439 IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB &&
1440 "Unexpected insertion point for finalization call!");
1441 return FiniCB(IP);
1442 };
1443
1444 FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable});
1445
1446 // Generate the privatization allocas in the block that will become the entry
1447 // of the outlined function.
1448 Builder.SetInsertPoint(PRegEntryBB->getTerminator());
1449 InsertPointTy InnerAllocaIP = Builder.saveIP();
1450
1451 AllocaInst *PrivTIDAddr =
1452 Builder.CreateAlloca(Int32, nullptr, "tid.addr.local");
1453 Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid");
1454
1455 // Add some fake uses for OpenMP provided arguments.
1456 ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use"));
1457 Instruction *ZeroAddrUse =
1458 Builder.CreateLoad(Int32, ZeroAddr, "zero.addr.use");
1459 ToBeDeleted.push_back(ZeroAddrUse);
1460
1461 // EntryBB
1462 // |
1463 // V
1464 // PRegionEntryBB <- Privatization allocas are placed here.
1465 // |
1466 // V
1467 // PRegionBodyBB <- BodeGen is invoked here.
1468 // |
1469 // V
1470 // PRegPreFiniBB <- The block we will start finalization from.
1471 // |
1472 // V
1473 // PRegionExitBB <- A common exit to simplify block collection.
1474 //
1475
1476 LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n");
1477
1478 // Let the caller create the body.
1479 assert(BodyGenCB && "Expected body generation callback!");
1480 InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin());
1481 BodyGenCB(InnerAllocaIP, CodeGenIP);
1482
1483 LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n");
1484
1485 OutlineInfo OI;
1486 if (Config.isTargetDevice()) {
1487 // Generate OpenMP target specific runtime call
1488 OI.PostOutlineCB = [=, ToBeDeletedVec =
1489 std::move(ToBeDeleted)](Function &OutlinedFn) {
1490 targetParallelCallback(this, OutlinedFn, OuterFn, OuterAllocaBlock, Ident,
1491 IfCondition, NumThreads, PrivTID, PrivTIDAddr,
1492 ThreadID, ToBeDeletedVec);
1493 };
1494 } else {
1495 // Generate OpenMP host runtime call
1496 OI.PostOutlineCB = [=, ToBeDeletedVec =
1497 std::move(ToBeDeleted)](Function &OutlinedFn) {
1498 hostParallelCallback(this, OutlinedFn, OuterFn, Ident, IfCondition,
1499 PrivTID, PrivTIDAddr, ToBeDeletedVec);
1500 };
1501 }
1502
1503 // Adjust the finalization stack, verify the adjustment, and call the
1504 // finalize function a last time to finalize values between the pre-fini
1505 // block and the exit block if we left the parallel "the normal way".
1506 auto FiniInfo = FinalizationStack.pop_back_val();
1507 (void)FiniInfo;
1508 assert(FiniInfo.DK == OMPD_parallel &&
1509 "Unexpected finalization stack state!");
1510
1511 Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator();
1512
1513 InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator());
1514 FiniCB(PreFiniIP);
1515
1516 OI.OuterAllocaBB = OuterAllocaBlock;
1517 OI.EntryBB = PRegEntryBB;
1518 OI.ExitBB = PRegExitBB;
1519
1520 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
1522 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
1523
1524 // Ensure a single exit node for the outlined region by creating one.
1525 // We might have multiple incoming edges to the exit now due to finalizations,
1526 // e.g., cancel calls that cause the control flow to leave the region.
1527 BasicBlock *PRegOutlinedExitBB = PRegExitBB;
1528 PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt());
1529 PRegOutlinedExitBB->setName("omp.par.outlined.exit");
1530 Blocks.push_back(PRegOutlinedExitBB);
1531
1532 CodeExtractorAnalysisCache CEAC(*OuterFn);
1533 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
1534 /* AggregateArgs */ false,
1535 /* BlockFrequencyInfo */ nullptr,
1536 /* BranchProbabilityInfo */ nullptr,
1537 /* AssumptionCache */ nullptr,
1538 /* AllowVarArgs */ true,
1539 /* AllowAlloca */ true,
1540 /* AllocationBlock */ OuterAllocaBlock,
1541 /* Suffix */ ".omp_par", ArgsInZeroAddressSpace);
1542
1543 // Find inputs to, outputs from the code region.
1544 BasicBlock *CommonExit = nullptr;
1545 SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands;
1546 Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
1547 Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands);
1548
1549 LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n");
1550
1551 FunctionCallee TIDRTLFn =
1552 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num);
1553
1554 auto PrivHelper = [&](Value &V) {
1555 if (&V == TIDAddr || &V == ZeroAddr) {
1556 OI.ExcludeArgsFromAggregate.push_back(&V);
1557 return;
1558 }
1559
1561 for (Use &U : V.uses())
1562 if (auto *UserI = dyn_cast<Instruction>(U.getUser()))
1563 if (ParallelRegionBlockSet.count(UserI->getParent()))
1564 Uses.insert(&U);
1565
1566 // __kmpc_fork_call expects extra arguments as pointers. If the input
1567 // already has a pointer type, everything is fine. Otherwise, store the
1568 // value onto stack and load it back inside the to-be-outlined region. This
1569 // will ensure only the pointer will be passed to the function.
1570 // FIXME: if there are more than 15 trailing arguments, they must be
1571 // additionally packed in a struct.
1572 Value *Inner = &V;
1573 if (!V.getType()->isPointerTy()) {
1575 LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n");
1576
1577 Builder.restoreIP(OuterAllocaIP);
1578 Value *Ptr =
1579 Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded");
1580
1581 // Store to stack at end of the block that currently branches to the entry
1582 // block of the to-be-outlined region.
1583 Builder.SetInsertPoint(InsertBB,
1584 InsertBB->getTerminator()->getIterator());
1585 Builder.CreateStore(&V, Ptr);
1586
1587 // Load back next to allocations in the to-be-outlined region.
1588 Builder.restoreIP(InnerAllocaIP);
1589 Inner = Builder.CreateLoad(V.getType(), Ptr);
1590 }
1591
1592 Value *ReplacementValue = nullptr;
1593 CallInst *CI = dyn_cast<CallInst>(&V);
1594 if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) {
1595 ReplacementValue = PrivTID;
1596 } else {
1597 Builder.restoreIP(
1598 PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue));
1599 assert(ReplacementValue &&
1600 "Expected copy/create callback to set replacement value!");
1601 if (ReplacementValue == &V)
1602 return;
1603 }
1604
1605 for (Use *UPtr : Uses)
1606 UPtr->set(ReplacementValue);
1607 };
1608
1609 // Reset the inner alloca insertion as it will be used for loading the values
1610 // wrapped into pointers before passing them into the to-be-outlined region.
1611 // Configure it to insert immediately after the fake use of zero address so
1612 // that they are available in the generated body and so that the
1613 // OpenMP-related values (thread ID and zero address pointers) remain leading
1614 // in the argument list.
1615 InnerAllocaIP = IRBuilder<>::InsertPoint(
1616 ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator());
1617
1618 // Reset the outer alloca insertion point to the entry of the relevant block
1619 // in case it was invalidated.
1620 OuterAllocaIP = IRBuilder<>::InsertPoint(
1621 OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt());
1622
1623 for (Value *Input : Inputs) {
1624 LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n");
1625 PrivHelper(*Input);
1626 }
1627 LLVM_DEBUG({
1628 for (Value *Output : Outputs)
1629 LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n");
1630 });
1631 assert(Outputs.empty() &&
1632 "OpenMP outlining should not produce live-out values!");
1633
1634 LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n");
1635 LLVM_DEBUG({
1636 for (auto *BB : Blocks)
1637 dbgs() << " PBR: " << BB->getName() << "\n";
1638 });
1639
1640 // Register the outlined info.
1641 addOutlineInfo(std::move(OI));
1642
1643 InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end());
1644 UI->eraseFromParent();
1645
1646 return AfterIP;
1647}
1648
1649void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) {
1650 // Build call void __kmpc_flush(ident_t *loc)
1651 uint32_t SrcLocStrSize;
1652 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1653 Value *Args[] = {getOrCreateIdent(SrcLocStr, SrcLocStrSize)};
1654
1655 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args);
1656}
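// The net effect is a single runtime call (illustrative shape only):
//   call void @__kmpc_flush(ptr @<ident>)
// where the operand is the ident_t source-location descriptor built above.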
1657
1658void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) {
1659 if (!updateToLocation(Loc))
1660 return;
1661 emitFlush(Loc);
1662}
1663
1664void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) {
1665 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
1666 // global_tid);
1667 uint32_t SrcLocStrSize;
1668 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1669 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1670 Value *Args[] = {Ident, getOrCreateThreadID(Ident)};
1671
1672 // Ignore return result until untied tasks are supported.
1673 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait),
1674 Args);
1675}
1676
1677void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) {
1678 if (!updateToLocation(Loc))
1679 return;
1680 emitTaskwaitImpl(Loc);
1681}
1682
1683void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) {
1684 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
1685 uint32_t SrcLocStrSize;
1686 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1687 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1688 Constant *I32Null = ConstantInt::getNullValue(Int32);
1689 Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null};
1690
1691 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield),
1692 Args);
1693}
1694
1695void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) {
1696 if (!updateToLocation(Loc))
1697 return;
1698 emitTaskyieldImpl(Loc);
1699}
1700
1701OpenMPIRBuilder::InsertPointTy
1702OpenMPIRBuilder::createTask(const LocationDescription &Loc,
1703 InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB,
1704 bool Tied, Value *Final, Value *IfCondition,
1705 SmallVector<DependData> Dependencies) {
1706
1707 if (!updateToLocation(Loc))
1708 return InsertPointTy();
1709
1710 uint32_t SrcLocStrSize;
1711 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1712 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1713 // The current basic block is split into four basic blocks. After outlining,
1714 // they will be mapped as follows:
1715 // ```
1716 // def current_fn() {
1717 // current_basic_block:
1718 // br label %task.exit
1719 // task.exit:
1720 // ; instructions after task
1721 // }
1722 // def outlined_fn() {
1723 // task.alloca:
1724 // br label %task.body
1725 // task.body:
1726 // ret void
1727 // }
1728 // ```
1729 BasicBlock *TaskExitBB = splitBB(Builder, /*CreateBranch=*/true, "task.exit");
1730 BasicBlock *TaskBodyBB = splitBB(Builder, /*CreateBranch=*/true, "task.body");
1731 BasicBlock *TaskAllocaBB =
1732 splitBB(Builder, /*CreateBranch=*/true, "task.alloca");
1733
1734 InsertPointTy TaskAllocaIP =
1735 InsertPointTy(TaskAllocaBB, TaskAllocaBB->begin());
1736 InsertPointTy TaskBodyIP = InsertPointTy(TaskBodyBB, TaskBodyBB->begin());
1737 BodyGenCB(TaskAllocaIP, TaskBodyIP);
1738
1739 OutlineInfo OI;
1740 OI.EntryBB = TaskAllocaBB;
1741 OI.OuterAllocaBB = AllocaIP.getBlock();
1742 OI.ExitBB = TaskExitBB;
1743
1744 // Add the thread ID argument.
1745 std::stack<Instruction *> ToBeDeleted;
1746 OI.ExcludeArgsFromAggregate.push_back(createFakeIntVal(
1747 Builder, AllocaIP, ToBeDeleted, TaskAllocaIP, "global.tid", false));
1748
1749 OI.PostOutlineCB = [this, Ident, Tied, Final, IfCondition, Dependencies,
1750 TaskAllocaBB, ToBeDeleted](Function &OutlinedFn) mutable {
1751 // Replace the stale call instruction with the appropriate RTL call.
1752 assert(OutlinedFn.getNumUses() == 1 &&
1753 "there must be a single user for the outlined function");
1754 CallInst *StaleCI = cast<CallInst>(OutlinedFn.user_back());
1755
1756 // HasShareds is true if any variables are captured in the outlined region,
1757 // false otherwise.
1758 bool HasShareds = StaleCI->arg_size() > 1;
1759 Builder.SetInsertPoint(StaleCI);
1760
1761 // Gather the arguments for emitting the runtime call for
1762 // @__kmpc_omp_task_alloc
1763 Function *TaskAllocFn =
1764 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_alloc);
1765
1766 // Arguments - `loc_ref` (Ident) and `gtid` (ThreadID) for the runtime
1767 // call.
1768 Value *ThreadID = getOrCreateThreadID(Ident);
1769
1770 // Argument - `flags`
1771 // Task is tied iff (Flags & 1) == 1.
1772 // Task is untied iff (Flags & 1) == 0.
1773 // Task is final iff (Flags & 2) == 2.
1774 // Task is not final iff (Flags & 2) == 0.
1775 // TODO: Handle the other flags.
1776 Value *Flags = Builder.getInt32(Tied);
1777 if (Final) {
1778 Value *FinalFlag =
1779 Builder.CreateSelect(Final, Builder.getInt32(2), Builder.getInt32(0));
1780 Flags = Builder.CreateOr(FinalFlag, Flags);
1781 }
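// Worked example of the encoding above (illustration only): a tied task
// whose `final` clause evaluates to true ends up with Flags == 1 | 2 == 3;
// an untied task without a final clause keeps Flags == 0. Other flag bits
// are not handled here, per the TODO above.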
1782
1783 // Argument - `sizeof_kmp_task_t` (TaskSize)
1784 // Tasksize refers to the size in bytes of kmp_task_t data structure
1785 // including private vars accessed in task.
1786 // TODO: add kmp_task_t_with_privates (privates)
1787 Value *TaskSize = Builder.getInt64(
1788 divideCeil(M.getDataLayout().getTypeSizeInBits(Task), 8));
1789
1790 // Argument - `sizeof_shareds` (SharedsSize)
1791 // SharedsSize refers to the shareds array size in the kmp_task_t data
1792 // structure.
1793 Value *SharedsSize = Builder.getInt64(0);
1794 if (HasShareds) {
1795 AllocaInst *ArgStructAlloca =
1796 dyn_cast<AllocaInst>(StaleCI->getArgOperand(1));
1797 assert(ArgStructAlloca &&
1798 "Unable to find the alloca instruction corresponding to arguments "
1799 "for extracted function");
1800 StructType *ArgStructType =
1801 dyn_cast<StructType>(ArgStructAlloca->getAllocatedType());
1802 assert(ArgStructType && "Unable to find struct type corresponding to "
1803 "arguments for extracted function");
1804 SharedsSize =
1805 M.getDataLayout().getTypeStoreSize(ArgStructType);
1806 }
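// For example (illustrative, assuming a typical 64-bit data layout): a task
// capturing one i32 and one pointer gets an argument struct { i32, ptr },
// so SharedsSize would be 16 bytes including padding.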
1807 // Emit the @__kmpc_omp_task_alloc runtime call
1808 // The runtime call returns a pointer to an area where the task captured
1809 // variables must be copied before the task is run (TaskData)
1810 CallInst *TaskData = Builder.CreateCall(
1811 TaskAllocFn, {/*loc_ref=*/Ident, /*gtid=*/ThreadID, /*flags=*/Flags,
1812 /*sizeof_task=*/TaskSize, /*sizeof_shared=*/SharedsSize,
1813 /*task_func=*/&OutlinedFn});
1814
1815 // Copy the arguments for outlined function
1816 if (HasShareds) {
1817 Value *Shareds = StaleCI->getArgOperand(1);
1818 Align Alignment = TaskData->getPointerAlignment(M.getDataLayout());
1819 Value *TaskShareds = Builder.CreateLoad(VoidPtr, TaskData);
1820 Builder.CreateMemCpy(TaskShareds, Alignment, Shareds, Alignment,
1821 SharedsSize);
1822 }
1823
1824 Value *DepArray = nullptr;
1825 if (Dependencies.size()) {
1826 InsertPointTy OldIP = Builder.saveIP();
1827 Builder.SetInsertPoint(
1828 &OldIP.getBlock()->getParent()->getEntryBlock().back());
1829
1830 Type *DepArrayTy = ArrayType::get(DependInfo, Dependencies.size());
1831 DepArray = Builder.CreateAlloca(DepArrayTy, nullptr, ".dep.arr.addr");
1832
1833 unsigned P = 0;
1834 for (const DependData &Dep : Dependencies) {
1835 Value *Base =
1836 Builder.CreateConstInBoundsGEP2_64(DepArrayTy, DepArray, 0, P);
1837 // Store the pointer to the variable
1838 Value *Addr = Builder.CreateStructGEP(
1839 DependInfo, Base,
1840 static_cast<unsigned int>(RTLDependInfoFields::BaseAddr));
1841 Value *DepValPtr =
1842 Builder.CreatePtrToInt(Dep.DepVal, Builder.getInt64Ty());
1843 Builder.CreateStore(DepValPtr, Addr);
1844 // Store the size of the variable
1845 Value *Size = Builder.CreateStructGEP(
1846 DependInfo, Base,
1847 static_cast<unsigned int>(RTLDependInfoFields::Len));
1848 Builder.CreateStore(Builder.getInt64(M.getDataLayout().getTypeStoreSize(
1849 Dep.DepValueType)),
1850 Size);
1851 // Store the dependency kind
1852 Value *Flags = Builder.CreateStructGEP(
1853 DependInfo, Base,
1854 static_cast<unsigned int>(RTLDependInfoFields::Flags));
1855 Builder.CreateStore(
1856 ConstantInt::get(Builder.getInt8Ty(),
1857 static_cast<unsigned int>(Dep.DepKind)),
1858 Flags);
1859 ++P;
1860 }
1861
1862 Builder.restoreIP(OldIP);
1863 }
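// Illustrative shape of one kmp_depend_info record built above, e.g. for
// depend(in: x) (fields per RTLDependInfoFields, concrete values invented):
//   { base_addr = ptrtoint ptr %x to i64, len = sizeof(x), flags = <in> }
// One such record is stored per dependency at index P of .dep.arr.addr.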
1864
1865 // In the presence of the `if` clause, the following IR is generated:
1866 // ...
1867 // %data = call @__kmpc_omp_task_alloc(...)
1868 // br i1 %if_condition, label %then, label %else
1869 // then:
1870 // call @__kmpc_omp_task(...)
1871 // br label %exit
1872 // else:
1873 // call @__kmpc_omp_task_begin_if0(...)
1874 // call @outlined_fn(...)
1875 // call @__kmpc_omp_task_complete_if0(...)
1876 // br label %exit
1877 // exit:
1878 // ...
1879 if (IfCondition) {
1880 // `SplitBlockAndInsertIfThenElse` requires the block to have a
1881 // terminator.
1882 splitBB(Builder, /*CreateBranch=*/true, "if.end");
1883 Instruction *IfTerminator =
1884 Builder.GetInsertPoint()->getParent()->getTerminator();
1885 Instruction *ThenTI = IfTerminator, *ElseTI = nullptr;
1886 Builder.SetInsertPoint(IfTerminator);
1887 SplitBlockAndInsertIfThenElse(IfCondition, IfTerminator, &ThenTI,
1888 &ElseTI);
1889 Builder.SetInsertPoint(ElseTI);
1890 Function *TaskBeginFn =
1891 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_begin_if0);
1892 Function *TaskCompleteFn =
1893 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_complete_if0);
1894 Builder.CreateCall(TaskBeginFn, {Ident, ThreadID, TaskData});
1895 CallInst *CI = nullptr;
1896 if (HasShareds)
1897 CI = Builder.CreateCall(&OutlinedFn, {ThreadID, TaskData});
1898 else
1899 CI = Builder.CreateCall(&OutlinedFn, {ThreadID});
1900 CI->setDebugLoc(StaleCI->getDebugLoc());
1901 Builder.CreateCall(TaskCompleteFn, {Ident, ThreadID, TaskData});
1902 Builder.SetInsertPoint(ThenTI);
1903 }
1904
1905 if (Dependencies.size()) {
1906 Function *TaskFn =
1907 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_with_deps);
1908 Builder.CreateCall(
1909 TaskFn,
1910 {Ident, ThreadID, TaskData, Builder.getInt32(Dependencies.size()),
1911 DepArray, ConstantInt::get(Builder.getInt32Ty(), 0),
1912 ConstantPointerNull::get(PointerType::getUnqual(M.getContext()))});
1913
1914 } else {
1915 // Emit the @__kmpc_omp_task runtime call to spawn the task
1916 Function *TaskFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task);
1917 Builder.CreateCall(TaskFn, {Ident, ThreadID, TaskData});
1918 }
1919
1920 StaleCI->eraseFromParent();
1921
1922 Builder.SetInsertPoint(TaskAllocaBB, TaskAllocaBB->begin());
1923 if (HasShareds) {
1924 LoadInst *Shareds = Builder.CreateLoad(VoidPtr, OutlinedFn.getArg(1));
1925 OutlinedFn.getArg(1)->replaceUsesWithIf(
1926 Shareds, [Shareds](Use &U) { return U.getUser() != Shareds; });
1927 }
1928
1929 while (!ToBeDeleted.empty()) {
1930 ToBeDeleted.top()->eraseFromParent();
1931 ToBeDeleted.pop();
1932 }
1933 };
1934
1935 addOutlineInfo(std::move(OI));
1936 Builder.SetInsertPoint(TaskExitBB, TaskExitBB->begin());
1937
1938 return Builder.saveIP();
1939}
1940
1941OpenMPIRBuilder::InsertPointTy
1942OpenMPIRBuilder::createTaskgroup(const LocationDescription &Loc,
1943 InsertPointTy AllocaIP,
1944 BodyGenCallbackTy BodyGenCB) {
1945 if (!updateToLocation(Loc))
1946 return InsertPointTy();
1947
1948 uint32_t SrcLocStrSize;
1949 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1950 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1951 Value *ThreadID = getOrCreateThreadID(Ident);
1952
1953 // Emit the @__kmpc_taskgroup runtime call to start the taskgroup
1954 Function *TaskgroupFn =
1955 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_taskgroup);
1956 Builder.CreateCall(TaskgroupFn, {Ident, ThreadID});
1957
1958 BasicBlock *TaskgroupExitBB = splitBB(Builder, true, "taskgroup.exit");
1959 BodyGenCB(AllocaIP, Builder.saveIP());
1960
1961 Builder.SetInsertPoint(TaskgroupExitBB);
1962 // Emit the @__kmpc_end_taskgroup runtime call to end the taskgroup
1963 Function *EndTaskgroupFn =
1964 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_taskgroup);
1965 Builder.CreateCall(EndTaskgroupFn, {Ident, ThreadID});
1966
1967 return Builder.saveIP();
1968}
1969
1970OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections(
1971 const LocationDescription &Loc, InsertPointTy AllocaIP,
1972 ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB,
1973 FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) {
1974 assert(!isConflictIP(AllocaIP, Loc.IP) && "Dedicated IP allocas required");
1975
1976 if (!updateToLocation(Loc))
1977 return Loc.IP;
1978
1979 auto FiniCBWrapper = [&](InsertPointTy IP) {
1980 if (IP.getBlock()->end() != IP.getPoint())
1981 return FiniCB(IP);
1982 // This must be done otherwise any nested constructs using FinalizeOMPRegion
1983 // will fail because that function requires the Finalization Basic Block to
1984 // have a terminator, which is already removed by EmitOMPRegionBody.
1985 // IP is currently at cancelation block.
1986 // We need to backtrack to the condition block to fetch
1987 // the exit block and create a branch from cancelation
1988 // to exit block.
1990 Builder.restoreIP(IP);
1991 auto *CaseBB = IP.getBlock()->getSinglePredecessor();
1992 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
1993 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
1994 Instruction *I = Builder.CreateBr(ExitBB);
1995 IP = InsertPointTy(I->getParent(), I->getIterator());
1996 return FiniCB(IP);
1997 };
1998
1999 FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable});
2000
2001 // Each section is emitted as a switch case
2002 // Each finalization callback is handled from clang.EmitOMPSectionDirective()
2003 // -> OMP.createSection() which generates the IR for each section
2004 // Iterate through all sections and emit a switch construct:
2005 // switch (IV) {
2006 // case 0:
2007 // <SectionStmt[0]>;
2008 // break;
2009 // ...
2010 // case <NumSection> - 1:
2011 // <SectionStmt[<NumSection> - 1]>;
2012 // break;
2013 // }
2014 // ...
2015 // section_loop.after:
2016 // <FiniCB>;
2017 auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) {
2018 Builder.restoreIP(CodeGenIP);
2019 BasicBlock *Continue =
2020 splitBBWithSuffix(Builder, /*CreateBranch=*/false, ".sections.after");
2021 Function *CurFn = Continue->getParent();
2022 SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, Continue);
2023
2024 unsigned CaseNumber = 0;
2025 for (auto SectionCB : SectionCBs) {
2026 BasicBlock *CaseBB = BasicBlock::Create(
2027 M.getContext(), "omp_section_loop.body.case", CurFn, Continue);
2028 SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB);
2029 Builder.SetInsertPoint(CaseBB);
2030 BranchInst *CaseEndBr = Builder.CreateBr(Continue);
2031 SectionCB(InsertPointTy(),
2032 {CaseEndBr->getParent(), CaseEndBr->getIterator()});
2033 CaseNumber++;
2034 }
2035 // Remove the existing terminator from the body BB since there can be no
2036 // terminators after a switch/case.
2037 };
2038 // Loop body ends here.
2039 // LowerBound, UpperBound, and Stride for createCanonicalLoop.
2040 Type *I32Ty = Type::getInt32Ty(M.getContext());
2041 Value *LB = ConstantInt::get(I32Ty, 0);
2042 Value *UB = ConstantInt::get(I32Ty, SectionCBs.size());
2043 Value *ST = ConstantInt::get(I32Ty, 1);
2044 CanonicalLoopInfo *LoopInfo = createCanonicalLoop(
2045 Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop");
2046 InsertPointTy AfterIP =
2047 applyStaticWorkshareLoop(Loc.DL, LoopInfo, AllocaIP, !IsNowait);
2048
2049 // Apply the finalization callback in LoopAfterBB
2050 auto FiniInfo = FinalizationStack.pop_back_val();
2051 assert(FiniInfo.DK == OMPD_sections &&
2052 "Unexpected finalization stack state!");
2053 if (FinalizeCallbackTy &CB = FiniInfo.FiniCB) {
2054 Builder.restoreIP(AfterIP);
2055 BasicBlock *FiniBB =
2056 splitBBWithSuffix(Builder, /*CreateBranch=*/true, "sections.fini");
2057 CB(Builder.saveIP());
2058 AfterIP = {FiniBB, FiniBB->begin()};
2059 }
2060
2061 return AfterIP;
2062}
2063
2064OpenMPIRBuilder::InsertPointTy
2065OpenMPIRBuilder::createSection(const LocationDescription &Loc,
2066 BodyGenCallbackTy BodyGenCB,
2067 FinalizeCallbackTy FiniCB) {
2068 if (!updateToLocation(Loc))
2069 return Loc.IP;
2070
2071 auto FiniCBWrapper = [&](InsertPointTy IP) {
2072 if (IP.getBlock()->end() != IP.getPoint())
2073 return FiniCB(IP);
2074 // This must be done otherwise any nested constructs using FinalizeOMPRegion
2075 // will fail because that function requires the Finalization Basic Block to
2076 // have a terminator, which is already removed by EmitOMPRegionBody.
2077 // IP is currently at cancelation block.
2078 // We need to backtrack to the condition block to fetch
2079 // the exit block and create a branch from cancelation
2080 // to exit block.
2082 Builder.restoreIP(IP);
2083 auto *CaseBB = Loc.IP.getBlock();
2084 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
2085 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
2086 Instruction *I = Builder.CreateBr(ExitBB);
2087 IP = InsertPointTy(I->getParent(), I->getIterator());
2088 return FiniCB(IP);
2089 };
2090
2091 Directive OMPD = Directive::OMPD_sections;
2092 // Since we are using Finalization Callback here, HasFinalize
2093 // and IsCancellable have to be true
2094 return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper,
2095 /*Conditional*/ false, /*hasFinalize*/ true,
2096 /*IsCancellable*/ true);
2097}
2098
2099/// Create a function with a unique name and a "void (i8*, i8*)" signature in
2100/// the given module and return it.
2101static Function *getFreshReductionFunc(Module &M) {
2102 Type *VoidTy = Type::getVoidTy(M.getContext());
2103 Type *Int8PtrTy = PointerType::getUnqual(M.getContext());
2104 auto *FuncTy =
2105 FunctionType::get(VoidTy, {Int8PtrTy, Int8PtrTy}, /* IsVarArg */ false);
2106 return Function::Create(FuncTy, GlobalVariable::InternalLinkage,
2107 M.getDataLayout().getDefaultGlobalsAddressSpace(),
2108 ".omp.reduction.func", &M);
2109}
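// The function created here therefore has the shape (illustrative):
//   define internal void @.omp.reduction.func(ptr %lhs, ptr %rhs)
// Both arguments are type-erased arrays of pointers to the values being
// reduced; the body is filled in later by createReductions.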
2110
2111OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createReductions(
2112 const LocationDescription &Loc, InsertPointTy AllocaIP,
2113 ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait) {
2114 for (const ReductionInfo &RI : ReductionInfos) {
2115 (void)RI;
2116 assert(RI.Variable && "expected non-null variable");
2117 assert(RI.PrivateVariable && "expected non-null private variable");
2118 assert(RI.ReductionGen && "expected non-null reduction generator callback");
2119 assert(RI.Variable->getType() == RI.PrivateVariable->getType() &&
2120 "expected variables and their private equivalents to have the same "
2121 "type");
2122 assert(RI.Variable->getType()->isPointerTy() &&
2123 "expected variables to be pointers");
2124 }
2125
2126 if (!updateToLocation(Loc))
2127 return InsertPointTy();
2128
2129 BasicBlock *InsertBlock = Loc.IP.getBlock();
2130 BasicBlock *ContinuationBlock =
2131 InsertBlock->splitBasicBlock(Loc.IP.getPoint(), "reduce.finalize");
2132 InsertBlock->getTerminator()->eraseFromParent();
2133
2134 // Create and populate array of type-erased pointers to private reduction
2135 // values.
2136 unsigned NumReductions = ReductionInfos.size();
2137 Type *RedArrayTy = ArrayType::get(Builder.getPtrTy(), NumReductions);
2138 Builder.restoreIP(AllocaIP);
2139 Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array");
2140
2141 Builder.SetInsertPoint(InsertBlock, InsertBlock->end());
2142
2143 for (auto En : enumerate(ReductionInfos)) {
2144 unsigned Index = En.index();
2145 const ReductionInfo &RI = En.value();
2146 Value *RedArrayElemPtr = Builder.CreateConstInBoundsGEP2_64(
2147 RedArrayTy, RedArray, 0, Index, "red.array.elem." + Twine(Index));
2148 Builder.CreateStore(RI.PrivateVariable, RedArrayElemPtr);
2149 }
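// Illustrative result for two reductions (names invented):
//   %red.array = alloca [2 x ptr]
//   store ptr %a.private, ptr %red.array.elem.0
//   store ptr %b.private, ptr %red.array.elem.1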
2150
2151 // Emit a call to the runtime function that orchestrates the reduction.
2152 // Declare the reduction function in the process.
2153 Function *Func = Builder.GetInsertBlock()->getParent();
2154 Module *Module = Func->getParent();
2155 uint32_t SrcLocStrSize;
2156 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
2157 bool CanGenerateAtomic =
2158 llvm::all_of(ReductionInfos, [](const ReductionInfo &RI) {
2159 return RI.AtomicReductionGen;
2160 });
2161 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize,
2162 CanGenerateAtomic
2163 ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE
2164 : IdentFlag(0));
2165 Value *ThreadId = getOrCreateThreadID(Ident);
2166 Constant *NumVariables = Builder.getInt32(NumReductions);
2167 const DataLayout &DL = Module->getDataLayout();
2168 unsigned RedArrayByteSize = DL.getTypeStoreSize(RedArrayTy);
2169 Constant *RedArraySize = Builder.getInt64(RedArrayByteSize);
2170 Function *ReductionFunc = getFreshReductionFunc(*Module);
2171 Value *Lock = getOMPCriticalRegionLock(".reduction");
2172 Function *ReduceFunc = getOrCreateRuntimeFunctionPtr(
2173 IsNoWait ? RuntimeFunction::OMPRTL___kmpc_reduce_nowait
2174 : RuntimeFunction::OMPRTL___kmpc_reduce);
2175 CallInst *ReduceCall =
2176 Builder.CreateCall(ReduceFunc,
2177 {Ident, ThreadId, NumVariables, RedArraySize, RedArray,
2178 ReductionFunc, Lock},
2179 "reduce");
2180
2181 // Create final reduction entry blocks for the atomic and non-atomic case.
2182 // Emit IR that dispatches control flow to one of the blocks based on the
2183 // reduction supporting the atomic mode.
2184 BasicBlock *NonAtomicRedBlock =
2185 BasicBlock::Create(Module->getContext(), "reduce.switch.nonatomic", Func);
2186 BasicBlock *AtomicRedBlock =
2187 BasicBlock::Create(Module->getContext(), "reduce.switch.atomic", Func);
2188 SwitchInst *Switch =
2189 Builder.CreateSwitch(ReduceCall, ContinuationBlock, /* NumCases */ 2);
2190 Switch->addCase(Builder.getInt32(1), NonAtomicRedBlock);
2191 Switch->addCase(Builder.getInt32(2), AtomicRedBlock);
2192
2193 // Populate the non-atomic reduction using the elementwise reduction function.
2194 // This loads the elements from the global and private variables and reduces
2195 // them before storing back the result to the global variable.
2196 Builder.SetInsertPoint(NonAtomicRedBlock);
2197 for (auto En : enumerate(ReductionInfos)) {
2198 const ReductionInfo &RI = En.value();
2199 Type *ValueType = RI.ElementType;
2200 Value *RedValue = Builder.CreateLoad(ValueType, RI.Variable,
2201 "red.value." + Twine(En.index()));
2202 Value *PrivateRedValue =
2204 "red.private.value." + Twine(En.index()));
2205 Value *Reduced;
2206 Builder.restoreIP(
2207 RI.ReductionGen(Builder.saveIP(), RedValue, PrivateRedValue, Reduced));
2208 if (!Builder.GetInsertBlock())
2209 return InsertPointTy();
2210 Builder.CreateStore(Reduced, RI.Variable);
2211 }
2212 Function *EndReduceFunc = getOrCreateRuntimeFunctionPtr(
2213 IsNoWait ? RuntimeFunction::OMPRTL___kmpc_end_reduce_nowait
2214 : RuntimeFunction::OMPRTL___kmpc_end_reduce);
2215 Builder.CreateCall(EndReduceFunc, {Ident, ThreadId, Lock});
2216 Builder.CreateBr(ContinuationBlock);
2217
2218 // Populate the atomic reduction using the atomic elementwise reduction
2219 // function. There are no loads/stores here because they will be happening
2220 // inside the atomic elementwise reduction.
2221 Builder.SetInsertPoint(AtomicRedBlock);
2222 if (CanGenerateAtomic) {
2223 for (const ReductionInfo &RI : ReductionInfos) {
2224 Builder.restoreIP(RI.AtomicReductionGen(Builder.saveIP(), RI.ElementType,
2225 RI.Variable, RI.PrivateVariable));
2226 if (!Builder.GetInsertBlock())
2227 return InsertPointTy();
2228 }
2229 Builder.CreateBr(ContinuationBlock);
2230 } else {
2231 Builder.CreateUnreachable();
2232 }
2233
2234 // Populate the outlined reduction function using the elementwise reduction
2235 // function. Partial values are extracted from the type-erased array of
2236 // pointers to private variables.
2237 BasicBlock *ReductionFuncBlock =
2238 BasicBlock::Create(Module->getContext(), "", ReductionFunc);
2239 Builder.SetInsertPoint(ReductionFuncBlock);
2240 Value *LHSArrayPtr = ReductionFunc->getArg(0);
2241 Value *RHSArrayPtr = ReductionFunc->getArg(1);
2242
2243 for (auto En : enumerate(ReductionInfos)) {
2244 const ReductionInfo &RI = En.value();
2245 Value *LHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
2246 RedArrayTy, LHSArrayPtr, 0, En.index());
2247 Value *LHSI8Ptr = Builder.CreateLoad(Builder.getPtrTy(), LHSI8PtrPtr);
2248 Value *LHSPtr = Builder.CreateBitCast(LHSI8Ptr, RI.Variable->getType());
2249 Value *LHS = Builder.CreateLoad(RI.ElementType, LHSPtr);
2250 Value *RHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
2251 RedArrayTy, RHSArrayPtr, 0, En.index());
2252 Value *RHSI8Ptr = Builder.CreateLoad(Builder.getPtrTy(), RHSI8PtrPtr);
2253 Value *RHSPtr =
2254 Builder.CreateBitCast(RHSI8Ptr, RI.PrivateVariable->getType());
2255 Value *RHS = Builder.CreateLoad(RI.ElementType, RHSPtr);
2256 Value *Reduced;
2257 Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced));
2258 if (!Builder.GetInsertBlock())
2259 return InsertPointTy();
2260 Builder.CreateStore(Reduced, LHSPtr);
2261 }
2262 Builder.CreateRetVoid();
2263
2264 Builder.SetInsertPoint(ContinuationBlock);
2265 return Builder.saveIP();
2266}
2267
2268OpenMPIRBuilder::InsertPointTy
2269OpenMPIRBuilder::createMaster(const LocationDescription &Loc,
2270 BodyGenCallbackTy BodyGenCB,
2271 FinalizeCallbackTy FiniCB) {
2272
2273 if (!updateToLocation(Loc))
2274 return Loc.IP;
2275
2276 Directive OMPD = Directive::OMPD_master;
2277 uint32_t SrcLocStrSize;
2278 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
2279 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2280 Value *ThreadId = getOrCreateThreadID(Ident);
2281 Value *Args[] = {Ident, ThreadId};
2282
2283 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master);
2284 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
2285
2286 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master);
2287 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
2288
2289 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
2290 /*Conditional*/ true, /*hasFinalize*/ true);
2291}
2292
2293OpenMPIRBuilder::InsertPointTy
2294OpenMPIRBuilder::createMasked(const LocationDescription &Loc,
2295 BodyGenCallbackTy BodyGenCB,
2296 FinalizeCallbackTy FiniCB, Value *Filter) {
2297 if (!updateToLocation(Loc))
2298 return Loc.IP;
2299
2300 Directive OMPD = Directive::OMPD_masked;
2301 uint32_t SrcLocStrSize;
2302 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
2303 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2304 Value *ThreadId = getOrCreateThreadID(Ident);
2305 Value *Args[] = {Ident, ThreadId, Filter};
2306 Value *ArgsEnd[] = {Ident, ThreadId};
2307
2308 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked);
2309 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
2310
2311 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked);
2312 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd);
2313
2314 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
2315 /*Conditional*/ true, /*hasFinalize*/ true);
2316}
2317
2318CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
2319 DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
2320 BasicBlock *PostInsertBefore, const Twine &Name) {
2321 Module *M = F->getParent();
2322 LLVMContext &Ctx = M->getContext();
2323 Type *IndVarTy = TripCount->getType();
2324
2325 // Create the basic block structure.
2326 BasicBlock *Preheader =
2327 BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore);
2328 BasicBlock *Header =
2329 BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore);
2330 BasicBlock *Cond =
2331 BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore);
2332 BasicBlock *Body =
2333 BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore);
2334 BasicBlock *Latch =
2335 BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore);
2336 BasicBlock *Exit =
2337 BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore);
2338 BasicBlock *After =
2339 BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore);
2340
2341 // Use specified DebugLoc for new instructions.
2342 Builder.SetCurrentDebugLocation(DL);
2343
2344 Builder.SetInsertPoint(Preheader);
2345 Builder.CreateBr(Header);
2346
2347 Builder.SetInsertPoint(Header);
2348 PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv");
2349 IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader);
2350 Builder.CreateBr(Cond);
2351
2352 Builder.SetInsertPoint(Cond);
2353 Value *Cmp =
2354 Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp");
2355 Builder.CreateCondBr(Cmp, Body, Exit);
2356
2357 Builder.SetInsertPoint(Body);
2358 Builder.CreateBr(Latch);
2359
2360 Builder.SetInsertPoint(Latch);
2361 Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1),
2362 "omp_" + Name + ".next", /*HasNUW=*/true);
2363 Builder.CreateBr(Header);
2364 IndVarPHI->addIncoming(Next, Latch);
2365
2366 Builder.SetInsertPoint(Exit);
2367 Builder.CreateBr(After);
2368
2369 // Remember and return the canonical control flow.
2370 LoopInfos.emplace_front();
2371 CanonicalLoopInfo *CL = &LoopInfos.front();
2372
2373 CL->Header = Header;
2374 CL->Cond = Cond;
2375 CL->Latch = Latch;
2376 CL->Exit = Exit;
2377
2378#ifndef NDEBUG
2379 CL->assertOK();
2380#endif
2381 return CL;
2382}
2383
2384CanonicalLoopInfo *
2385OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc,
2386 LoopBodyGenCallbackTy BodyGenCB,
2387 Value *TripCount, const Twine &Name) {
2388 BasicBlock *BB = Loc.IP.getBlock();
2389 BasicBlock *NextBB = BB->getNextNode();
2390
2391 CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(),
2392 NextBB, NextBB, Name);
2393 BasicBlock *After = CL->getAfter();
2394
2395 // If location is not set, don't connect the loop.
2396 if (updateToLocation(Loc)) {
2397 // Split the loop at the insertion point: Branch to the preheader and move
2398 // every following instruction to after the loop (the After BB). Also, the
2399 // new successor is the loop's after block.
2400 spliceBB(Builder, After, /*CreateBranch=*/false);
2401 Builder.CreateBr(CL->getPreheader());
2402 }
2403
2404 // Emit the body content. We do it after connecting the loop to the CFG so
2405 // that the callback does not encounter degenerate BBs.
2406 BodyGenCB(CL->getBodyIP(), CL->getIndVar());
2407
2408#ifndef NDEBUG
2409 CL->assertOK();
2410#endif
2411 return CL;
2412}
2413
2414CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop(
2415 const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
2416 Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
2417 InsertPointTy ComputeIP, const Twine &Name) {
2418
2419 // Consider the following difficulties (assuming 8-bit signed integers):
2420 // * Adding \p Step to the loop counter which passes \p Stop may overflow:
2421 // DO I = 1, 100, 50
2422 // * A \p Step of INT_MIN cannot be normalized to a positive direction:
2423 // DO I = 100, 0, -128
2424
2425 // Start, Stop and Step must be of the same integer type.
2426 auto *IndVarTy = cast<IntegerType>(Start->getType());
2427 assert(IndVarTy == Stop->getType() && "Stop type mismatch");
2428 assert(IndVarTy == Step->getType() && "Step type mismatch");
2429
2430 LocationDescription ComputeLoc =
2431 ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
2432 updateToLocation(ComputeLoc);
2433
2434 ConstantInt *Zero = ConstantInt::get(IndVarTy, 0);
2435 ConstantInt *One = ConstantInt::get(IndVarTy, 1);
2436
2437 // Like Step, but always positive.
2438 Value *Incr = Step;
2439
2440 // Distance between Start and Stop; always positive.
2441 Value *Span;
2442
2443 // Condition checking whether no iterations are executed at all, e.g.
2444 // because UB < LB.
2445 Value *ZeroCmp;
2446
2447 if (IsSigned) {
2448 // Ensure that increment is positive. If not, negate and invert LB and UB.
2449 Value *IsNeg = Builder.CreateICmpSLT(Step, Zero);
2450 Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step);
2451 Value *LB = Builder.CreateSelect(IsNeg, Stop, Start);
2452 Value *UB = Builder.CreateSelect(IsNeg, Start, Stop);
2453 Span = Builder.CreateSub(UB, LB, "", false, true);
2454 ZeroCmp = Builder.CreateICmp(
2455 InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB);
2456 } else {
2457 Span = Builder.CreateSub(Stop, Start, "", true);
2458 ZeroCmp = Builder.CreateICmp(
2459 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start);
2460 }
2461
2462 Value *CountIfLooping;
2463 if (InclusiveStop) {
2464 CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One);
2465 } else {
2466 // Avoid incrementing past stop since it could overflow.
2467 Value *CountIfTwo = Builder.CreateAdd(
2468 Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One);
2469 Value *OneCmp = Builder.CreateICmp(CmpInst::ICMP_ULE, Span, Incr);
2470 CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo);
2471 }
2472 Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping,
2473 "omp_" + Name + ".tripcount");
2474
2475 auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) {
2476 Builder.restoreIP(CodeGenIP);
2477 Value *Span = Builder.CreateMul(IV, Step);
2478 Value *IndVar = Builder.CreateAdd(Span, Start);
2479 BodyGenCB(Builder.saveIP(), IndVar);
2480 };
2481 LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP();
2482 return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name);
2483}
2484
2485// Returns an LLVM function to call for initializing loop bounds using OpenMP
2486// static scheduling depending on `type`. Only i32 and i64 are supported by the
2487// runtime. Always interpret integers as unsigned similarly to
2488// CanonicalLoopInfo.
2489static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M,
2490 OpenMPIRBuilder &OMPBuilder) {
2491 unsigned Bitwidth = Ty->getIntegerBitWidth();
2492 if (Bitwidth == 32)
2493 return OMPBuilder.getOrCreateRuntimeFunction(
2494 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u);
2495 if (Bitwidth == 64)
2496 return OMPBuilder.getOrCreateRuntimeFunction(
2497 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u);
2498 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
2499}
2500
2501OpenMPIRBuilder::InsertPointTy
2502OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
2503 InsertPointTy AllocaIP,
2504 bool NeedsBarrier) {
2505 assert(CLI->isValid() && "Requires a valid canonical loop");
2506 assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) &&
2507 "Require dedicated allocate IP");
2508
2509 // Set up the source location value for OpenMP runtime.
2510 Builder.restoreIP(CLI->getPreheaderIP());
2511 Builder.SetCurrentDebugLocation(DL);
2512
2513 uint32_t SrcLocStrSize;
2514 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
2515 Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2516
2517 // Declare useful OpenMP runtime functions.
2518 Value *IV = CLI->getIndVar();
2519 Type *IVTy = IV->getType();
2520 FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this);
2521 FunctionCallee StaticFini =
2522 getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);
2523
2524 // Allocate space for computed loop bounds as expected by the "init" function.
2525 Builder.restoreIP(AllocaIP);
2526 Type *I32Type = Type::getInt32Ty(M.getContext());
2527 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
2528 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
2529 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
2530 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");
2531
2532 // At the end of the preheader, prepare for calling the "init" function by
2533 // storing the current loop bounds into the allocated space. A canonical loop
2534 // always iterates from 0 to trip-count with step 1. Note that "init" expects
2535 // and produces an inclusive upper bound.
2536 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
2537 Constant *Zero = ConstantInt::get(IVTy, 0);
2538 Constant *One = ConstantInt::get(IVTy, 1);
2539 Builder.CreateStore(Zero, PLowerBound);
2540 Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One);
2541 Builder.CreateStore(UpperBound, PUpperBound);
2542 Builder.CreateStore(One, PStride);
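// Worked example (illustration): for a trip count of 10 this stores
// *plower = 0, *pupper = 9 (inclusive!) and *pstride = 1; the runtime then
// overwrites the bounds with this thread's contiguous slice, e.g.
// lower = 0, upper = 4 for thread 0 of 2 threads.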
2543
2544 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
2545
2546 Constant *SchedulingType = ConstantInt::get(
2547 I32Type, static_cast<int>(OMPScheduleType::UnorderedStatic));
2548
2549 // Call the "init" function and update the trip count of the loop with the
2550 // value it produced.
2551 Builder.CreateCall(StaticInit,
2552 {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound,
2553 PUpperBound, PStride, One, Zero});
2554 Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound);
2555 Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound);
2556 Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound);
2557 Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One);
2558 CLI->setTripCount(TripCount);
2559
2560 // Update all uses of the induction variable except the one in the condition
2561 // block that compares it with the actual upper bound, and the increment in
2562 // the latch block.
2563
2564 CLI->mapIndVar([&](Instruction *OldIV) -> Value * {
2565 Builder.SetInsertPoint(CLI->getBody(),
2566 CLI->getBody()->getFirstInsertionPt());
2567 Builder.SetCurrentDebugLocation(DL);
2568 return Builder.CreateAdd(OldIV, LowerBound);
2569 });
2570
2571 // In the "exit" block, call the "fini" function.
2572 Builder.SetInsertPoint(CLI->getExit(),
2573 CLI->getExit()->getTerminator()->getIterator());
2574 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
2575
2576 // Add the barrier if requested.
2577 if (NeedsBarrier)
2578 createBarrier(LocationDescription(Builder.saveIP(), DL),
2579 omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
2580 /* CheckCancelFlag */ false);
2581
2582 InsertPointTy AfterIP = CLI->getAfterIP();
2583 CLI->invalidate();
2584
2585 return AfterIP;
2586}
2587
2588OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyStaticChunkedWorkshareLoop(
2589 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
2590 bool NeedsBarrier, Value *ChunkSize) {
2591 assert(CLI->isValid() && "Requires a valid canonical loop");
2592 assert(ChunkSize && "Chunk size is required");
2593
2594 LLVMContext &Ctx = CLI->getFunction()->getContext();
2595 Value *IV = CLI->getIndVar();
2596 Value *OrigTripCount = CLI->getTripCount();
2597 Type *IVTy = IV->getType();
2598 assert(IVTy->getIntegerBitWidth() <= 64 &&
2599 "Max supported tripcount bitwidth is 64 bits");
2600 Type *InternalIVTy = IVTy->getIntegerBitWidth() <= 32 ? Type::getInt32Ty(Ctx)
2601 : Type::getInt64Ty(Ctx);
2602 Type *I32Type = Type::getInt32Ty(M.getContext());
2603 Constant *Zero = ConstantInt::get(InternalIVTy, 0);
2604 Constant *One = ConstantInt::get(InternalIVTy, 1);
2605
2606 // Declare useful OpenMP runtime functions.
2607 FunctionCallee StaticInit =
2608 getKmpcForStaticInitForType(InternalIVTy, M, *this);
2609 FunctionCallee StaticFini =
2610 getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);
2611
2612 // Allocate space for computed loop bounds as expected by the "init" function.
2613 Builder.restoreIP(AllocaIP);
2614 Builder.SetCurrentDebugLocation(DL);
2615 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
2616 Value *PLowerBound =
2617 Builder.CreateAlloca(InternalIVTy, nullptr, "p.lowerbound");
2618 Value *PUpperBound =
2619 Builder.CreateAlloca(InternalIVTy, nullptr, "p.upperbound");
2620 Value *PStride = Builder.CreateAlloca(InternalIVTy, nullptr, "p.stride");
2621
2622 // Set up the source location value for the OpenMP runtime.
2623 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
2624 Builder.SetCurrentDebugLocation(DL);
2625
2626 // TODO: Detect overflow in ubsan or max-out with current tripcount.
2627 Value *CastedChunkSize =
2628 Builder.CreateZExtOrTrunc(ChunkSize, InternalIVTy, "chunksize");
2629 Value *CastedTripCount =
2630 Builder.CreateZExt(OrigTripCount, InternalIVTy, "tripcount");
2631
2632 Constant *SchedulingType = ConstantInt::get(
2633 I32Type, static_cast<int>(OMPScheduleType::UnorderedStaticChunked));
2634 Builder.CreateStore(Zero, PLowerBound);
2635 Value *OrigUpperBound = Builder.CreateSub(CastedTripCount, One);
2636 Builder.CreateStore(OrigUpperBound, PUpperBound);
2637 Builder.CreateStore(One, PStride);
2638
2639 // Call the "init" function and update the trip count of the loop with the
2640 // value it produced.
2641 uint32_t SrcLocStrSize;
2642 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
2643 Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2644 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
2645 Builder.CreateCall(StaticInit,
2646 {/*loc=*/SrcLoc, /*global_tid=*/ThreadNum,
2647 /*schedtype=*/SchedulingType, /*plastiter=*/PLastIter,
2648 /*plower=*/PLowerBound, /*pupper=*/PUpperBound,
2649 /*pstride=*/PStride, /*incr=*/One,
2650 /*chunk=*/CastedChunkSize});
2651
2652 // Load values written by the "init" function.
2653 Value *FirstChunkStart =
2654 Builder.CreateLoad(InternalIVTy, PLowerBound, "omp_firstchunk.lb");
2655 Value *FirstChunkStop =
2656 Builder.CreateLoad(InternalIVTy, PUpperBound, "omp_firstchunk.ub");
2657 Value *FirstChunkEnd = Builder.CreateAdd(FirstChunkStop, One);
2658 Value *ChunkRange =
2659 Builder.CreateSub(FirstChunkEnd, FirstChunkStart, "omp_chunk.range");
2660 Value *NextChunkStride =
2661 Builder.CreateLoad(InternalIVTy, PStride, "omp_dispatch.stride");
2662
2663 // Create outer "dispatch" loop for enumerating the chunks.
2664 BasicBlock *DispatchEnter = splitBB(Builder, true);
2665 Value *DispatchCounter;
2666 CanonicalLoopInfo *DispatchCLI = createCanonicalLoop(
2667 {Builder.saveIP(), DL},
2668 [&](InsertPointTy BodyIP, Value *Counter) { DispatchCounter = Counter; },
2669 FirstChunkStart, CastedTripCount, NextChunkStride,
2670 /*IsSigned=*/false, /*InclusiveStop=*/false, /*ComputeIP=*/{},
2671 "dispatch");
2672
2673 // Remember the BasicBlocks of the dispatch loop we need, then invalidate
2674 // it so we do not have to preserve the canonical invariant.
2675 BasicBlock *DispatchBody = DispatchCLI->getBody();
2676 BasicBlock *DispatchLatch = DispatchCLI->getLatch();
2677 BasicBlock *DispatchExit = DispatchCLI->getExit();
2678 BasicBlock *DispatchAfter = DispatchCLI->getAfter();
2679 DispatchCLI->invalidate();
2680
2681 // Rewire the original loop to become the chunk loop inside the dispatch loop.
2682 redirectTo(DispatchAfter, CLI->getAfter(), DL);
2683 redirectTo(CLI->getExit(), DispatchLatch, DL);
2684 redirectTo(DispatchBody, DispatchEnter, DL);
2685
2686 // Prepare the prolog of the chunk loop.
2687 Builder.restoreIP(CLI->getPreheaderIP());
2688 Builder.SetCurrentDebugLocation(DL);
2689
2690 // Compute the number of iterations of the chunk loop.
2691 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
2692 Value *ChunkEnd = Builder.CreateAdd(DispatchCounter, ChunkRange);
2693 Value *IsLastChunk =
2694 Builder.CreateICmpUGE(ChunkEnd, CastedTripCount, "omp_chunk.is_last");
2695 Value *CountUntilOrigTripCount =
2696 Builder.CreateSub(CastedTripCount, DispatchCounter);
2697 Value *ChunkTripCount = Builder.CreateSelect(
2698 IsLastChunk, CountUntilOrigTripCount, ChunkRange, "omp_chunk.tripcount");
2699 Value *BackcastedChunkTC =
2700 Builder.CreateTrunc(ChunkTripCount, IVTy, "omp_chunk.tripcount.trunc");
2701 CLI->setTripCount(BackcastedChunkTC);
2702
2703 // Update all uses of the induction variable except the one in the condition
2704 // block that compares it with the actual upper bound, and the increment in
2705 // the latch block.
2706 Value *BackcastedDispatchCounter =
2707 Builder.CreateTrunc(DispatchCounter, IVTy, "omp_dispatch.iv.trunc");
2708 CLI->mapIndVar([&](Instruction *) -> Value * {
2709 Builder.restoreIP(CLI->getBodyIP());
2710 return Builder.CreateAdd(IV, BackcastedDispatchCounter);
2711 });
2712
2713 // In the "exit" block, call the "fini" function.
2714 Builder.SetInsertPoint(DispatchExit, DispatchExit->getFirstInsertionPt());
2715 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
2716
2717 // Add the barrier if requested.
2718 if (NeedsBarrier)
2719 createBarrier(LocationDescription(Builder.saveIP(), DL), OMPD_for,
2720 /*ForceSimpleCall=*/false, /*CheckCancelFlag=*/false);
2721
2722#ifndef NDEBUG
2723 // Even though we currently do not support applying additional methods to it,
2724 // the chunk loop should remain a canonical loop.
2725 CLI->assertOK();
2726#endif
2727
2728 return {DispatchAfter, DispatchAfter->getFirstInsertionPt()};
2729}
2730
2731// Returns an LLVM function to call for executing an OpenMP static worksharing
2732// for loop depending on `type`. Only i32 and i64 are supported by the runtime.
2733// Always interpret integers as unsigned similarly to CanonicalLoopInfo.
2734static FunctionCallee
2735getKmpcForStaticLoopForType(Type *Ty, OpenMPIRBuilder *OMPBuilder,
2736 WorksharingLoopType LoopType) {
2737 unsigned Bitwidth = Ty->getIntegerBitWidth();
2738 Module &M = OMPBuilder->M;
2739 switch (LoopType) {
2740 case WorksharingLoopType::ForStaticLoop:
2741 if (Bitwidth == 32)
2742 return OMPBuilder->getOrCreateRuntimeFunction(
2743 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_loop_4u);
2744 if (Bitwidth == 64)
2745 return OMPBuilder->getOrCreateRuntimeFunction(
2746 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_loop_8u);
2747 break;
2748 case WorksharingLoopType::DistributeStaticLoop:
2749 if (Bitwidth == 32)
2750 return OMPBuilder->getOrCreateRuntimeFunction(
2751 M, omp::RuntimeFunction::OMPRTL___kmpc_distribute_static_loop_4u);
2752 if (Bitwidth == 64)
2753 return OMPBuilder->getOrCreateRuntimeFunction(
2754 M, omp::RuntimeFunction::OMPRTL___kmpc_distribute_static_loop_8u);
2755 break;
2756 case WorksharingLoopType::DistributeForStaticLoop:
2757 if (Bitwidth == 32)
2758 return OMPBuilder->getOrCreateRuntimeFunction(
2759 M, omp::RuntimeFunction::OMPRTL___kmpc_distribute_for_static_loop_4u);
2760 if (Bitwidth == 64)
2761 return OMPBuilder->getOrCreateRuntimeFunction(
2762 M, omp::RuntimeFunction::OMPRTL___kmpc_distribute_for_static_loop_8u);
2763 break;
2764 }
2765 if (Bitwidth != 32 && Bitwidth != 64) {
2766 llvm_unreachable("Unknown OpenMP loop iterator bitwidth");
2767 }
2768 llvm_unreachable("Unknown type of OpenMP worksharing loop");
2769}
2770
2771// Inserts a call to the proper OpenMP Device RTL function which handles
2772// loop worksharing.
2773static void createTargetLoopWorkshareCall(
2774 OpenMPIRBuilder *OMPBuilder, WorksharingLoopType LoopType,
2775 BasicBlock *InsertBlock, Value *Ident, Value *LoopBodyArg,
2776 Type *ParallelTaskPtr, Value *TripCount, Function &LoopBodyFn) {
2777 Type *TripCountTy = TripCount->getType();
2778 Module &M = OMPBuilder->M;
2779 IRBuilder<> &Builder = OMPBuilder->Builder;
2780 FunctionCallee RTLFn =
2781 getKmpcForStaticLoopForType(TripCountTy, OMPBuilder, LoopType);
2782 SmallVector<Value *, 8> RealArgs;
2783 RealArgs.push_back(Ident);
2784 RealArgs.push_back(Builder.CreateBitCast(&LoopBodyFn, ParallelTaskPtr));
2785 RealArgs.push_back(LoopBodyArg);
2786 RealArgs.push_back(TripCount);
2787 if (LoopType == WorksharingLoopType::DistributeStaticLoop) {
2788 RealArgs.push_back(ConstantInt::get(TripCountTy, 0));
2789 Builder.CreateCall(RTLFn, RealArgs);
2790 return;
2791 }
2792 FunctionCallee RTLNumThreads = OMPBuilder->getOrCreateRuntimeFunction(
2793 M, omp::RuntimeFunction::OMPRTL_omp_get_num_threads);
2794 Builder.restoreIP({InsertBlock, std::prev(InsertBlock->end())});
2795 Value *NumThreads = Builder.CreateCall(RTLNumThreads, {});
2796
2797 RealArgs.push_back(
2798 Builder.CreateZExtOrTrunc(NumThreads, TripCountTy, "num.threads.cast"));
2799 RealArgs.push_back(ConstantInt::get(TripCountTy, 0));
2800 if (LoopType == WorksharingLoopType::DistributeForStaticLoop) {
2801 RealArgs.push_back(ConstantInt::get(TripCountTy, 0));
2802 }
2803
2804 Builder.CreateCall(RTLFn, RealArgs);
2805}
2806
2807static void
2808 workshareLoopTargetCallback(OpenMPIRBuilder *OMPIRBuilder,
2809 CanonicalLoopInfo *CLI, Value *Ident,
2810 Function &OutlinedFn, Type *ParallelTaskPtr,
2811 const SmallVector<Instruction *, 4> &ToBeDeleted,
2812 WorksharingLoopType LoopType) {
2813 IRBuilder<> &Builder = OMPIRBuilder->Builder;
2814 BasicBlock *Preheader = CLI->getPreheader();
2815 Value *TripCount = CLI->getTripCount();
2816
2817 // After loop body outlining, the loop body contains only the setup of the
2818 // loop body argument structure and the call to the outlined loop body
2819 // function. First, we need to move the setup of the loop body args into
2820 // the loop preheader.
2821 Preheader->splice(std::prev(Preheader->end()), CLI->getBody(),
2822 CLI->getBody()->begin(), std::prev(CLI->getBody()->end()));
2823
2824 // The next step is to remove the whole loop: we do not need it anymore.
2825 // That's why we make an unconditional branch from the loop preheader to
2826 // the loop exit block.
2827 Builder.restoreIP({Preheader, Preheader->end()});
2828 Preheader->getTerminator()->eraseFromParent();
2829 Builder.CreateBr(CLI->getExit());
2830
2831 // Delete dead loop blocks
2832 OpenMPIRBuilder::OutlineInfo CleanUpInfo;
2833 SmallPtrSet<BasicBlock *, 32> RegionBlockSet;
2834 SmallVector<BasicBlock *, 32> BlocksToBeRemoved;
2835 CleanUpInfo.EntryBB = CLI->getHeader();
2836 CleanUpInfo.ExitBB = CLI->getExit();
2837 CleanUpInfo.collectBlocks(RegionBlockSet, BlocksToBeRemoved);
2838 DeleteDeadBlocks(BlocksToBeRemoved);
2839
2840 // Find the instruction which corresponds to the loop body argument
2841 // structure and remove the call to the loop body function.
2842 Value *LoopBodyArg;
2843 User *OutlinedFnUser = OutlinedFn.getUniqueUndroppableUser();
2844 assert(OutlinedFnUser &&
2845 "Expected unique undroppable user of outlined function");
2846 CallInst *OutlinedFnCallInstruction = dyn_cast<CallInst>(OutlinedFnUser);
2847 assert(OutlinedFnCallInstruction && "Expected outlined function call");
2848 assert((OutlinedFnCallInstruction->getParent() == Preheader) &&
2849 "Expected outlined function call to be located in loop preheader");
2850 // Check in case no argument structure has been passed.
2851 if (OutlinedFnCallInstruction->arg_size() > 1)
2852 LoopBodyArg = OutlinedFnCallInstruction->getArgOperand(1);
2853 else
2854 LoopBodyArg = Constant::getNullValue(Builder.getPtrTy());
2855 OutlinedFnCallInstruction->eraseFromParent();
2856
2857 createTargetLoopWorkshareCall(OMPIRBuilder, LoopType, Preheader, Ident,
2858 LoopBodyArg, ParallelTaskPtr, TripCount,
2859 OutlinedFn);
2860
2861 for (auto &ToBeDeletedItem : ToBeDeleted)
2862 ToBeDeletedItem->eraseFromParent();
2863 CLI->invalidate();
2864}
2865
2866OpenMPIRBuilder::InsertPointTy
2867OpenMPIRBuilder::applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI,
2868 InsertPointTy AllocaIP,
2869 WorksharingLoopType LoopType) {
2870 uint32_t SrcLocStrSize;
2871 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
2872 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2873
2874 OutlineInfo OI;
2875 OI.OuterAllocaBB = CLI->getPreheader();
2876 Function *OuterFn = CLI->getPreheader()->getParent();
2877
2878 // Instructions which need to be deleted at the end of code generation
2879 SmallVector<Instruction *, 4> ToBeDeleted;
2880
2881 OI.OuterAllocaBB = AllocaIP.getBlock();
2882
2883 // Mark the loop body as the region which needs to be extracted.
2884 OI.EntryBB = CLI->getBody();
2885 OI.ExitBB = CLI->getLatch()->splitBasicBlock(CLI->getLatch()->begin(),
2886 "omp.prelatch", true);
2887
2888 // Prepare loop body for extraction
2889 Builder.restoreIP({CLI->getPreheader(), CLI->getPreheader()->begin()});
2890
2891 // Insert a new loop counter variable which will be used only in the loop
2892 // body.
2893 AllocaInst *NewLoopCnt = Builder.CreateAlloca(CLI->getIndVarType(), 0, "");
2894 Instruction *NewLoopCntLoad =
2895 Builder.CreateLoad(CLI->getIndVarType(), NewLoopCnt);
2896 // The new loop counter instructions are redundant in the loop preheader
2897 // once code generation for the workshare loop is finished. That's why we
2898 // mark them as ready for deletion.
2899 ToBeDeleted.push_back(NewLoopCntLoad);
2900 ToBeDeleted.push_back(NewLoopCnt);
2901
2902 // Analyse the loop body region. Find all input variables which are used
2903 // inside the loop body region.
2904 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
2905 SmallVector<BasicBlock *, 32> Blocks;
2906 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
2907 SmallVector<BasicBlock *, 32> BlocksT(ParallelRegionBlockSet.begin(),
2908 ParallelRegionBlockSet.end());
2909
2910 CodeExtractorAnalysisCache CEAC(*OuterFn);
2911 CodeExtractor Extractor(Blocks,
2912 /* DominatorTree */ nullptr,
2913 /* AggregateArgs */ true,
2914 /* BlockFrequencyInfo */ nullptr,
2915 /* BranchProbabilityInfo */ nullptr,
2916 /* AssumptionCache */ nullptr,
2917 /* AllowVarArgs */ true,
2918 /* AllowAlloca */ true,
2919 /* AllocationBlock */ CLI->getPreheader(),
2920 /* Suffix */ ".omp_wsloop",
2921 /* AggrArgsIn0AddrSpace */ true);
2922
2923 BasicBlock *CommonExit = nullptr;
2924 SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands;
2925
2926 // Find allocas outside the loop body region which are used inside loop
2927 // body
2928 Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
2929
2930 // We need to model the loop body region as the function f(cnt, loop_arg).
2931 // That's why we replace the loop induction variable with the new counter,
2932 // which will be one of the loop body function arguments.
2933 SmallVector<User *> Users(CLI->getIndVar()->user_begin(),
2934 CLI->getIndVar()->user_end());
2935 for (auto Use : Users) {
2936 if (Instruction *Inst = dyn_cast<Instruction>(Use)) {
2937 if (ParallelRegionBlockSet.count(Inst->getParent())) {
2938 Inst->replaceUsesOfWith(CLI->getIndVar(), NewLoopCntLoad);
2939 }
2940 }
2941 }
2942 // Make sure that the loop counter variable is not merged into the loop
2943 // body function argument structure and is passed as a separate variable.
2944 OI.ExcludeArgsFromAggregate.push_back(NewLoopCntLoad);
2945
2946 // The PostOutline CB is invoked when the loop body function is outlined
2947 // and the loop body is replaced by a call to the outlined function. We
2948 // need to add a call to the OpenMP device RTL inside the loop preheader;
2949 // the OpenMP device RTL function will handle the loop control logic.
2950 //
2951 OI.PostOutlineCB = [=, ToBeDeletedVec =
2952 std::move(ToBeDeleted)](Function &OutlinedFn) {
2953 workshareLoopTargetCallback(this, CLI, Ident, OutlinedFn, ParallelTaskPtr,
2954 ToBeDeletedVec, LoopType);
2955 };
2956 addOutlineInfo(std::move(OI));
2957 return CLI->getAfterIP();
2958}
2959
2960OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyWorkshareLoop(
2961 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
2962 bool NeedsBarrier, omp::ScheduleKind SchedKind, Value *ChunkSize,
2963 bool HasSimdModifier, bool HasMonotonicModifier,
2964 bool HasNonmonotonicModifier, bool HasOrderedClause,
2965 WorksharingLoopType LoopType) {
2966 if (Config.isTargetDevice())
2967 return applyWorkshareLoopTarget(DL, CLI, AllocaIP, LoopType);
2968 OMPScheduleType EffectiveScheduleType = computeOpenMPScheduleType(
2969 SchedKind, ChunkSize, HasSimdModifier, HasMonotonicModifier,
2970 HasNonmonotonicModifier, HasOrderedClause);
2971
2972 bool IsOrdered = (EffectiveScheduleType & OMPScheduleType::ModifierOrdered) ==
2973 OMPScheduleType::ModifierOrdered;
2974 switch (EffectiveScheduleType & ~OMPScheduleType::ModifierMask) {
2975 case OMPScheduleType::BaseStatic:
2976 assert(!ChunkSize && "No chunk size with static-chunked schedule");
2977 if (IsOrdered)
2978 return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType,
2979 NeedsBarrier, ChunkSize);
2980 // FIXME: Monotonicity ignored?
2981 return applyStaticWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier);
2982
2983 case OMPScheduleType::BaseStaticChunked:
2984 if (IsOrdered)
2985 return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType,
2986 NeedsBarrier, ChunkSize);
2987 // FIXME: Monotonicity ignored?
2988 return applyStaticChunkedWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier,
2989 ChunkSize);
2990
2991 case OMPScheduleType::BaseRuntime:
2992 case OMPScheduleType::BaseAuto:
2993 case OMPScheduleType::BaseGreedy:
2994 case OMPScheduleType::BaseBalanced:
2995 case OMPScheduleType::BaseSteal:
2996 case OMPScheduleType::BaseGuidedSimd:
2997 case OMPScheduleType::BaseRuntimeSimd:
2998 assert(!ChunkSize &&
2999 "schedule type does not support user-defined chunk sizes");
3000 [[fallthrough]];
3001 case OMPScheduleType::BaseDynamicChunked:
3002 case OMPScheduleType::BaseGuidedChunked:
3003 case OMPScheduleType::BaseGuidedIterativeChunked:
3004 case OMPScheduleType::BaseGuidedAnalyticalChunked:
3005 case OMPScheduleType::BaseStaticBalancedChunked:
3006 return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType,
3007 NeedsBarrier, ChunkSize);
3008
3009 default:
3010 llvm_unreachable("Unknown/unimplemented schedule kind");
3011 }
3012}
3013
3014/// Returns an LLVM function to call for initializing loop bounds using OpenMP
3015/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
3016/// the runtime. Always interpret integers as unsigned similarly to
3017/// CanonicalLoopInfo.
3018static FunctionCallee
3019getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
3020 unsigned Bitwidth = Ty->getIntegerBitWidth();
3021 if (Bitwidth == 32)
3022 return OMPBuilder.getOrCreateRuntimeFunction(
3023 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u);
3024 if (Bitwidth == 64)
3025 return OMPBuilder.getOrCreateRuntimeFunction(
3026 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u);
3027 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
3028}
3029
3030/// Returns an LLVM function to call for updating the next loop using OpenMP
3031/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
3032/// the runtime. Always interpret integers as unsigned similarly to
3033/// CanonicalLoopInfo.
3034static FunctionCallee
3035getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
3036 unsigned Bitwidth = Ty->getIntegerBitWidth();
3037 if (Bitwidth == 32)
3038 return OMPBuilder.getOrCreateRuntimeFunction(
3039 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u);
3040 if (Bitwidth == 64)
3041 return OMPBuilder.getOrCreateRuntimeFunction(
3042 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u);
3043 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
3044}
3045
3046/// Returns an LLVM function to call for finalizing the dynamic loop,
3047/// depending on `type`. Only i32 and i64 are supported by the runtime. Always
3048/// interpret integers as unsigned similarly to CanonicalLoopInfo.
3049static FunctionCallee
3050getKmpcForDynamicFiniForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
3051 unsigned Bitwidth = Ty->getIntegerBitWidth();
3052 if (Bitwidth == 32)
3053 return OMPBuilder.getOrCreateRuntimeFunction(
3054 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_fini_4u);
3055 if (Bitwidth == 64)
3056 return OMPBuilder.getOrCreateRuntimeFunction(
3057 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_fini_8u);
3058 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
3059}
3060
3061OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
3062 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
3063 OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk) {
3064 assert(CLI->isValid() && "Requires a valid canonical loop");
3065 assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) &&
3066 "Require dedicated allocate IP");
3068 "Require valid schedule type");
3069
3070 bool Ordered = (SchedType & OMPScheduleType::ModifierOrdered) ==
3071 OMPScheduleType::ModifierOrdered;
3072
3073 // Set up the source location value for OpenMP runtime.
3074 Builder.SetCurrentDebugLocation(DL);
3075
3076 uint32_t SrcLocStrSize;
3077 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
3078 Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3079
3080 // Declare useful OpenMP runtime functions.
3081 Value *IV = CLI->getIndVar();
3082 Type *IVTy = IV->getType();
3083 FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this);
3084 FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this);
3085
3086 // Allocate space for computed loop bounds as expected by the "init" function.
3087 Builder.restoreIP(AllocaIP);
3088 Type *I32Type = Type::getInt32Ty(M.getContext());
3089 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
3090 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
3091 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
3092 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");
3093
3094 // At the end of the preheader, prepare for calling the "init" function by
3095 // storing the current loop bounds into the allocated space. A canonical loop
3096 // always iterates from 0 to trip-count with step 1. Note that "init" expects
3097 // and produces an inclusive upper bound.
3098 BasicBlock *PreHeader = CLI->getPreheader();
3099 Builder.SetInsertPoint(PreHeader->getTerminator());
3100 Constant *One = ConstantInt::get(IVTy, 1);
3101 Builder.CreateStore(One, PLowerBound);
3102 Value *UpperBound = CLI->getTripCount();
3103 Builder.CreateStore(UpperBound, PUpperBound);
3104 Builder.CreateStore(One, PStride);
3105
3106 BasicBlock *Header = CLI->getHeader();
3107 BasicBlock *Exit = CLI->getExit();
3108 BasicBlock *Cond = CLI->getCond();
3109 BasicBlock *Latch = CLI->getLatch();
3110 InsertPointTy AfterIP = CLI->getAfterIP();
3111
3112 // The CLI will be "broken" in the code below, as the loop is no longer
3113 // a valid canonical loop.
3114
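// The dispatch runtime requires a chunk size; for dynamic schedules the
// default is one iteration per chunk.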
3115 if (!Chunk)
3116 Chunk = One;
3117
3118 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
3119
3120 Constant *SchedulingType =
3121 ConstantInt::get(I32Type, static_cast<int>(SchedType));
3122
3123 // Call the "init" function.
3124 Builder.CreateCall(DynamicInit,
3125 {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One,
3126 UpperBound, /* step */ One, Chunk});
3127
3128 // An outer loop around the existing one.
3129 BasicBlock *OuterCond = BasicBlock::Create(
3130 PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond",
3131 PreHeader->getParent());
3132 // The result of dispatch_next is always 32-bit, so the comparison below needs an i32 zero rather than an IV-typed constant.
3133 Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt());
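// __kmpc_dispatch_next_* returns nonzero while the runtime hands out another
// chunk; its out-parameters receive the chunk's bounds and stride.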
3134 Value *Res =
3135 Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter,
3136 PLowerBound, PUpperBound, PStride});
3137 Constant *Zero32 = ConstantInt::get(I32Type, 0);
3138 Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32);
3139 Value *LowerBound =
3140 Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb");
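// The bounds were seeded 1-based for the runtime above, so subtract the
// initial lower bound of 1 to recover the 0-based induction variable value.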
3141 Builder.CreateCondBr(MoreWork, Header, Exit);
3142
3143 // Change PHI-node in loop header to use outer cond rather than preheader,
3144 // and set IV to the LowerBound.
3145 Instruction *Phi = &Header->front();
3146 auto *PI = cast<PHINode>(Phi);
3147 PI->setIncomingBlock(0, OuterCond);
3148 PI->setIncomingValue(0, LowerBound);
3149
3150 // Then set the pre-header to jump to the OuterCond
3151 Instruction *Term = PreHeader->getTerminator();
3152 auto *Br = cast<BranchInst>(Term);
3153 Br->setSuccessor(0, OuterCond);
3154
3155 // Modify the inner condition:
3156 // * Use the UpperBound returned from the DynamicNext call.
3157 // * Jump to the outer loop when done with one of the inner loops.
3158 Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt());
3159 UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub");
3160 Instruction *Comp = &*Builder.GetInsertPoint();
3161 auto *CI = cast<CmpInst>(Comp);
3162 CI->setOperand(1, UpperBound);
3163 // Redirect the inner exit to branch to outer condition.
3164 Instruction *Branch = &Cond->back();
3165 auto *BI = cast<BranchInst>(Branch);
3166 assert(BI->getSuccessor(1) == Exit);
3167 BI->setSuccessor(1, OuterCond);
3168
3169 // Call the "fini" function if "ordered" is present in wsloop directive.
3170 if (Ordered) {
3171 Builder.SetInsertPoint(&Latch->back());
3172 FunctionCallee DynamicFini = getKmpcForDynamicFiniForType(IVTy, M, *this);
3173 Builder.CreateCall(DynamicFini, {SrcLoc, ThreadNum});
3174 }
3175
3176 // Add the barrier if requested.
3177 if (NeedsBarrier) {
3178 Builder.SetInsertPoint(&Exit->back());
3179 createBarrier(LocationDescription(Builder.saveIP(), DL),
3180 omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
3181 /* CheckCancelFlag */ false);
3182 }
3183
3184 CLI->invalidate();
3185 return AfterIP;
3186}
3187
3188/// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is,
3189/// after this \p OldTarget will be orphaned.
3190 static void redirectAllPredecessorsTo(BasicBlock *OldTarget,
3191 BasicBlock *NewTarget, DebugLoc DL) {
3192 for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget)))
3193 redirectTo(Pred, NewTarget, DL);
3194}
3195
3196 /// Determine which blocks in \p BBs are still referenced from outside and
3197 /// erase from the function the ones that are not.
3198 static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) {
3199 SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()};
3200 auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) {
3201 for (Use &U : BB->uses()) {
3202 auto *UseInst = dyn_cast<Instruction>(U.getUser());
3203 if (!UseInst)
3204 continue;
3205 if (BBsToErase.count(UseInst->getParent()))
3206 continue;
3207 return true;
3208 }
3209 return false;
3210 };
3211
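// Iterate to a fixed point: keeping a block because it is still used can
// introduce new uses that force other blocks in the set to be kept as well.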
3212 while (true) {
3213 bool Changed = false;
3214 for (BasicBlock *BB : make_early_inc_range(BBsToErase)) {
3215 if (HasRemainingUses(BB)) {
3216 BBsToErase.erase(BB);
3217 Changed = true;
3218 }
3219 }
3220 if (!Changed)
3221 break;
3222 }
3223
3224 SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end());
3225 DeleteDeadBlocks(BBVec);
3226}
3227
3228 CanonicalLoopInfo *
3229 OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
3230 InsertPointTy ComputeIP) {
3231 assert(Loops.size() >= 1 && "At least one loop required");
3232 size_t NumLoops = Loops.size();
3233
3234 // Nothing to do if there is already just one loop.
3235 if (NumLoops == 1)
3236 return Loops.front();
3237
3238 CanonicalLoopInfo *Outermost = Loops.front();
3239 CanonicalLoopInfo *Innermost = Loops.back();
3240 BasicBlock *OrigPreheader = Outermost->getPreheader();
3241 BasicBlock *OrigAfter = Outermost->getAfter();
3242 Function *F = OrigPreheader->getParent();
3243
3244 // Loop control blocks that may become orphaned later.
3245 SmallVector<BasicBlock *, 12> OldControlBBs;
3246 OldControlBBs.reserve(6 * Loops.size());
3247 for (CanonicalLoopInfo *Loop : Loops)
3248 Loop->collectControlBlocks(OldControlBBs);
3249
3250 // Setup the IRBuilder for inserting the trip count computation.
3251 Builder.SetCurrentDebugLocation(DL);
3252 if (ComputeIP.isSet())
3253 Builder.restoreIP(ComputeIP);
3254 else
3255 Builder.restoreIP(Outermost->getPreheaderIP());
3256
3257 // Derive the collapsed loop's trip count.
3258 // TODO: Find common/largest indvar type.
3259 Value *CollapsedTripCount = nullptr;
3260 for (CanonicalLoopInfo *L : Loops) {
3261 assert(L->isValid() &&
3262 "All loops to collapse must be valid canonical loops");
3263 Value *OrigTripCount = L->getTripCount();
3264 if (!CollapsedTripCount) {
3265 CollapsedTripCount = OrigTripCount;
3266 continue;
3267 }
3268
3269 // TODO: Enable UndefinedBehaviorSanitizer to diagnose an overflow here.
3270 CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount,
3271 {}, /*HasNUW=*/true);
3272 }
3273
3274 // Create the collapsed loop control flow.
3275 CanonicalLoopInfo *Result =
3276 createLoopSkeleton(DL, CollapsedTripCount, F,
3277 OrigPreheader->getNextNode(), OrigAfter, "collapsed");
3278
3279 // Build the collapsed loop body code.
3280 // Start with deriving the input loop induction variables from the collapsed
3281 // one, using a divmod scheme. To preserve the original loops' order, the
3282 // innermost loop uses the least significant bits.
3283 Builder.restoreIP(Result->getBodyIP());
3284
3285 Value *Leftover = Result->getIndVar();
3286 SmallVector<Value *> NewIndVars;
3287 NewIndVars.resize(NumLoops);
3288 for (int i = NumLoops - 1; i >= 1; --i) {
3289 Value *OrigTripCount = Loops[i]->getTripCount();
3290
3291 Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount);
3292 NewIndVars[i] = NewIndVar;
3293
3294 Leftover = Builder.CreateUDiv(Leftover, OrigTripCount);
3295 }
3296 // Outermost loop gets all the remaining bits.
3297 NewIndVars[0] = Leftover;
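// E.g. for two collapsed loops with trip counts M and N, the collapsed IV c
// yields i = c / N for the outer loop and j = c % N for the inner one.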
3298
3299 // Construct the loop body control flow.
3300 // We progressively construct the branch structure following the direction of
3301 // the control flow: the leading in-between code, the loop nest body, the
3302 // trailing in-between code, and finally rejoining the collapsed loop's latch.
3303 // ContinueBlock and ContinuePred keep track of the source(s) of next edge. If
3304 // the ContinueBlock is set, continue with that block. If ContinuePred, use
3305 // its predecessors as sources.
3306 BasicBlock *ContinueBlock = Result->getBody();
3307 BasicBlock *ContinuePred = nullptr;
3308 auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest,
3309 BasicBlock *NextSrc) {
3310 if (ContinueBlock)
3311 redirectTo(ContinueBlock, Dest, DL);
3312 else
3313 redirectAllPredecessorsTo(ContinuePred, Dest, DL);
3314
3315 ContinueBlock = nullptr;
3316 ContinuePred = NextSrc;
3317 };
3318
3319 // The code before the nested loop of each level.
3320 // Because we are sinking it into the nest, it will be executed more often
3321 // than in the original loop. More sophisticated schemes could keep track of what
3322 // the in-between code is and instantiate it only once per thread.
3323 for (size_t i = 0; i < NumLoops - 1; ++i)
3324 ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader());
3325
3326 // Connect the loop nest body.
3327 ContinueWith(Innermost->getBody(), Innermost->getLatch());
3328
3329 // The code after the nested loop at each level.
3330 for (size_t i = NumLoops - 1; i > 0; --i)
3331 ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch());
3332
3333 // Connect the finished loop to the collapsed loop latch.
3334 ContinueWith(Result->getLatch(), nullptr);
3335
3336 // Replace the input loops with the new collapsed loop.
3337 redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL);
3338 redirectTo(Result->getAfter(), Outermost->getAfter(), DL);
3339
3340 // Replace the input loop indvars with the derived ones.
3341 for (size_t i = 0; i < NumLoops; ++i)
3342 Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]);
3343
3344 // Remove unused parts of the input loops.
3345 removeUnusedBlocksFromParent(OldControlBBs);
3346
3347 for (CanonicalLoopInfo *L : Loops)
3348 L->invalidate();
3349
3350#ifndef NDEBUG
3351 Result->assertOK();
3352#endif
3353 return Result;
3354}
3355
3356std::vector<CanonicalLoopInfo *>
3357 OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
3358 ArrayRef<Value *> TileSizes) {
3359 assert(TileSizes.size() == Loops.size() &&
3360 "Must pass as many tile sizes as there are loops");
3361 int NumLoops = Loops.size();
3362 assert(NumLoops >= 1 && "At least one loop to tile required");
3363
3364 CanonicalLoopInfo *OutermostLoop = Loops.front();
3365 CanonicalLoopInfo *InnermostLoop = Loops.back();
3366 Function *F = OutermostLoop->getBody()->getParent();
3367 BasicBlock *InnerEnter = InnermostLoop->getBody();
3368 BasicBlock *InnerLatch = InnermostLoop->getLatch();
3369
3370 // Loop control blocks that may become orphaned later.
3371 SmallVector<BasicBlock *, 12> OldControlBBs;
3372 OldControlBBs.reserve(6 * Loops.size());
3373 for (CanonicalLoopInfo *Loop : Loops)
3374 Loop->collectControlBlocks(OldControlBBs);
3375
3376 // Collect original trip counts and induction variables to be accessible by
3377 // index. Also, the structure of the original loops is not preserved during
3378 // the construction of the tiled loops, so do it before we scavenge the BBs of
3379 // any original CanonicalLoopInfo.
3380 SmallVector<Value *, 4> OrigTripCounts, OrigIndVars;
3381 for (CanonicalLoopInfo *L : Loops) {
3382 assert(L->isValid() && "All input loops must be valid canonical loops");
3383 OrigTripCounts.push_back(L->getTripCount());
3384 OrigIndVars.push_back(L->getIndVar());
3385 }
3386
3387 // Collect the code between loop headers. These may contain SSA definitions
3388 // that are used in the loop nest body. To be usable within the innermost
3389 // body, these BasicBlocks will be sunk into the loop nest body. That is,
3390 // these instructions may be executed more often than before the tiling.
3391 // TODO: It would be sufficient to only sink them into body of the
3392 // corresponding tile loop.
3393 SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode;
3394 for (int i = 0; i < NumLoops - 1; ++i) {
3395 CanonicalLoopInfo *Surrounding = Loops[i];
3396 CanonicalLoopInfo *Nested = Loops[i + 1];
3397
3398 BasicBlock *EnterBB = Surrounding->getBody();
3399 BasicBlock *ExitBB = Nested->getHeader();
3400 InbetweenCode.emplace_back(EnterBB, ExitBB);
3401 }
3402
3403 // Compute the trip counts of the floor loops.
3404 Builder.SetCurrentDebugLocation(DL);
3405 Builder.restoreIP(OutermostLoop->getPreheaderIP());
3406 SmallVector<Value *, 4> FloorCount, FloorRems;
3407 for (int i = 0; i < NumLoops; ++i) {
3408 Value *TileSize = TileSizes[i];
3409 Value *OrigTripCount = OrigTripCounts[i];
3410 Type *IVType = OrigTripCount->getType();
3411
3412 Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize);
3413 Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize);
3414
3415 // 0 if the tilesize divides the tripcount, 1 otherwise.
3416 // 1 means we need an additional iteration for a partial tile.
3417 //
3418 // Unfortunately we cannot just use the roundup-formula
3419 // (tripcount + tilesize - 1)/tilesize
3420 // because the summation might overflow. We do not want to introduce undefined
3421 // behavior when the untiled loop nest did not.
3422 Value *FloorTripOverflow =
3423 Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0));
3424
3425 FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType);
3426 FloorTripCount =
3427 Builder.CreateAdd(FloorTripCount, FloorTripOverflow,
3428 "omp_floor" + Twine(i) + ".tripcount", true);
3429
3430 // Remember some values for later use.
3431 FloorCount.push_back(FloorTripCount);
3432 FloorRems.push_back(FloorTripRem);
3433 }
3434
3435 // Generate the new loop nest, from the outermost to the innermost.
3436 std::vector<CanonicalLoopInfo *> Result;
3437 Result.reserve(NumLoops * 2);
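// The result vector holds the NumLoops floor loops first, followed by the
// NumLoops tile loops.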
3438
3439 // The basic block of the surrounding loop that enters the newly generated
3440 // loop nest.
3441 BasicBlock *Enter = OutermostLoop->getPreheader();
3442
3443 // The basic block of the surrounding loop where the inner code should
3444 // continue.
3445 BasicBlock *Continue = OutermostLoop->getAfter();
3446
3447 // Where the next loop basic block should be inserted.
3448 BasicBlock *OutroInsertBefore = InnermostLoop->getExit();
3449
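// Splice a new loop level in between Enter and Continue, then update both so
// the next level is embedded inside this one.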
3450 auto EmbeddNewLoop =
3451 [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore](
3452 Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * {
3453 CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton(
3454 DL, TripCount, F, InnerEnter, OutroInsertBefore, Name);
3455 redirectTo(Enter, EmbeddedLoop->getPreheader(), DL);
3456 redirectTo(EmbeddedLoop->getAfter(), Continue, DL);
3457
3458 // Setup the position where the next embedded loop connects to this loop.
3459 Enter = EmbeddedLoop->getBody();
3460 Continue = EmbeddedLoop->getLatch();
3461 OutroInsertBefore = EmbeddedLoop->getLatch();
3462 return EmbeddedLoop;
3463 };
3464
3465 auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts,
3466 const Twine &NameBase) {
3467 for (auto P : enumerate(TripCounts)) {
3468 CanonicalLoopInfo *EmbeddedLoop =
3469 EmbeddNewLoop(P.value(), NameBase + Twine(P.index()));
3470 Result.push_back(EmbeddedLoop);
3471 }
3472 };
3473
3474 EmbeddNewLoops(FloorCount, "floor");
3475
3476 // Within the innermost floor loop, emit the code that computes the tile
3477 // sizes.
3478 Builder.restoreIP(Result.back()->getBodyIP());
3479 SmallVector<Value *, 4> TileCounts;
3480 for (int i = 0; i < NumLoops; ++i) {
3481 CanonicalLoopInfo *FloorLoop = Result[i];
3482 Value *TileSize = TileSizes[i];
3483
3484 Value *FloorIsEpilogue =
3485 Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]);
3486 Value *TileTripCount =
3487 Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize);
3488
3489 TileCounts.push_back(TileTripCount);
3490 }
3491
3492 // Create the tile loops.
3493 EmbeddNewLoops(TileCounts, "tile");
3494
3495 // Insert the inbetween code into the body.
3496 BasicBlock *BodyEnter = Enter;
3497 BasicBlock *BodyEntered = nullptr;
3498 for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) {
3499 BasicBlock *EnterBB = P.first;
3500 BasicBlock *ExitBB = P.second;
3501
3502 if (BodyEnter)
3503 redirectTo(BodyEnter, EnterBB, DL);
3504 else
3505 redirectAllPredecessorsTo(BodyEntered, EnterBB, DL);
3506
3507 BodyEnter = nullptr;
3508 BodyEntered = ExitBB;
3509 }
3510
3511 // Append the original loop nest body into the generated loop nest body.
3512 if (BodyEnter)
3513 redirectTo(BodyEnter, InnerEnter, DL);
3514 else
3515 redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL);
3516 redirectAllPredecessorsTo(InnerLatch, Result.back()->getLatch(), DL);
3517
3518 // Replace the original induction variable with an induction variable computed
3519 // from the tile and floor induction variables.
3520 Builder.restoreIP(Result.back()->getBodyIP());
3521 for (int i = 0; i < NumLoops; ++i) {
3522 CanonicalLoopInfo *FloorLoop = Result[i];
3523 CanonicalLoopInfo *TileLoop = Result[NumLoops + i];
3524 Value *OrigIndVar = OrigIndVars[i];
3525 Value *Size = TileSizes[i];
3526
3527 Value *Scale =
3528 Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true);
3529 Value *Shift =
3530 Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true);
3531 OrigIndVar->replaceAllUsesWith(Shift);
3532 }
3533
3534 // Remove unused parts of the original loops.
3535 removeUnusedBlocksFromParent(OldControlBBs);
3536
3537 for (CanonicalLoopInfo *L : Loops)
3538 L->invalidate();
3539
3540#ifndef NDEBUG
3541 for (CanonicalLoopInfo *GenL : Result)
3542 GenL->assertOK();
3543#endif
3544 return Result;
3545}
3546
3547/// Attach metadata \p Properties to the basic block described by \p BB. If the
3548/// basic block already has metadata, the basic block properties are appended.
3549 static void addBasicBlockMetadata(BasicBlock *BB,
3550 ArrayRef<Metadata *> Properties) {
3551 // Nothing to do if no property to attach.
3552 if (Properties.empty())
3553 return;
3554
3555 LLVMContext &Ctx = BB->getContext();
3556 SmallVector<Metadata *> NewProperties;
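// Reserve the first operand for the self-reference that identifies the
// metadata node; it is filled in once the node exists.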
3557 NewProperties.push_back(nullptr);
3558
3559 // If the basic block already has metadata, prepend it to the new metadata.
3560 MDNode *Existing = BB->getTerminator()->getMetadata(LLVMContext::MD_loop);
3561 if (Existing)
3562 append_range(NewProperties, drop_begin(Existing->operands(), 1));
3563
3564 append_range(NewProperties, Properties);
3565 MDNode *BasicBlockID = MDNode::getDistinct(Ctx, NewProperties);
3566 BasicBlockID->replaceOperandWith(0, BasicBlockID);
3567
3568 BB->getTerminator()->setMetadata(LLVMContext::MD_loop, BasicBlockID);
3569}
3570
3571/// Attach loop metadata \p Properties to the loop described by \p Loop. If the
3572/// loop already has metadata, the loop properties are appended.
3573 static void addLoopMetadata(CanonicalLoopInfo *Loop,
3574 ArrayRef<Metadata *> Properties) {
3575 assert(Loop->isValid() && "Expecting a valid CanonicalLoopInfo");
3576
3577 // Attach metadata to the loop's latch
3578 BasicBlock *Latch = Loop->getLatch();
3579 assert(Latch && "A valid CanonicalLoopInfo must have a unique latch");
3580 addBasicBlockMetadata(Latch, Properties);
3581}
3582
3583/// Attach llvm.access.group metadata to the memref instructions of \p Block
3584static void addSimdMetadata(BasicBlock *Block, MDNode *AccessGroup,
3585 LoopInfo &LI) {
3586 for (Instruction &I : *Block) {
3587 if (I.mayReadOrWriteMemory()) {
3588 // TODO: This instruction may already have access group from
3589 // other pragmas e.g. #pragma clang loop vectorize. Append
3590 // so that the existing metadata is not overwritten.
3591 I.setMetadata(LLVMContext::MD_access_group, AccessGroup);
3592 }
3593 }
3594}
3595
3596 void OpenMPIRBuilder::unrollLoopFull(DebugLoc, CanonicalLoopInfo *Loop) {
3597 LLVMContext &Ctx = Builder.getContext();
3598 addLoopMetadata(
3599 Loop, {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
3600 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.full"))});
3601}
3602
3603 void OpenMPIRBuilder::unrollLoopHeuristic(DebugLoc, CanonicalLoopInfo *Loop) {
3604 LLVMContext &Ctx = Builder.getContext();
3605 addLoopMetadata(
3606 Loop, {
3607 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
3608 });
3609}
3610
3611void OpenMPIRBuilder::createIfVersion(CanonicalLoopInfo *CanonicalLoop,
3612 Value *IfCond, ValueToValueMapTy &VMap,
3613 const Twine &NamePrefix) {
3614 Function *F = CanonicalLoop->getFunction();
3615
3616 // Define where the if branch should be inserted
3617 Instruction *SplitBefore;
3618 if (Instruction::classof(IfCond)) {
3619 SplitBefore = dyn_cast<Instruction>(IfCond);
3620 } else {
3621 SplitBefore = CanonicalLoop->getPreheader()->getTerminator();
3622 }
3623
3624 // TODO: We should not rely on pass manager. Currently we use pass manager
3625 // only for getting llvm::Loop which corresponds to given CanonicalLoopInfo
3626 // object. We should have a method which returns all blocks between
3627 // CanonicalLoopInfo::getHeader() and CanonicalLoopInfo::getAfter()
3628 FunctionAnalysisManager FAM;
3629 FAM.registerPass([]() { return DominatorTreeAnalysis(); });
3630 FAM.registerPass([]() { return LoopAnalysis(); });
3631 FAM.registerPass([]() { return PassInstrumentationAnalysis(); });
3632
3633 // Get the loop which needs to be cloned
3634 LoopAnalysis LIA;
3635 LoopInfo &&LI = LIA.run(*F, FAM);
3636 Loop *L = LI.getLoopFor(CanonicalLoop->getHeader());
3637
3638 // Create additional blocks for the if statement
3639 BasicBlock *Head = SplitBefore->getParent();
3640 Instruction *HeadOldTerm = Head->getTerminator();
3641 llvm::LLVMContext &C = Head->getContext();
3642 llvm::BasicBlock *ThenBlock = llvm::BasicBlock::Create(
3643 C, NamePrefix + ".if.then", Head->getParent(), Head->getNextNode());
3644 llvm::BasicBlock *ElseBlock = llvm::BasicBlock::Create(
3645 C, NamePrefix + ".if.else", Head->getParent(), CanonicalLoop->getExit());
3646
3647 // Create if condition branch.
3648 Builder.SetInsertPoint(HeadOldTerm);
3649 Instruction *BrInstr =
3650 Builder.CreateCondBr(IfCond, ThenBlock, /*ifFalse*/ ElseBlock);
3651 InsertPointTy IP{BrInstr->getParent(), ++BrInstr->getIterator()};
3652 // The then block contains the branch to the OpenMP loop, which is to be vectorized
3653 spliceBB(IP, ThenBlock, false);
3654 ThenBlock->replaceSuccessorsPhiUsesWith(Head, ThenBlock);
3655
3656 Builder.SetInsertPoint(ElseBlock);
3657
3658 // Clone loop for the else branch
3659 SmallVector<BasicBlock *, 8> NewBlocks;
3660
3661 VMap[CanonicalLoop->getPreheader()] = ElseBlock;
3662 for (BasicBlock *Block : L->getBlocks()) {
3663 BasicBlock *NewBB = CloneBasicBlock(Block, VMap, "", F);
3664 NewBB->moveBefore(CanonicalLoop->getExit());
3665 VMap[Block] = NewBB;
3666 NewBlocks.push_back(NewBB);
3667 }
3668 remapInstructionsInBlocks(NewBlocks, VMap);
3669 Builder.CreateBr(NewBlocks.front());
3670}
3671
3672unsigned
3673 OpenMPIRBuilder::getOpenMPDefaultSimdAlign(const Triple &TargetTriple,
3674 const StringMap<bool> &Features) {
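// The returned widths are SIMD register sizes in bits; 0 means no
// target-specific default is known.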
3675 if (TargetTriple.isX86()) {
3676 if (Features.lookup("avx512f"))
3677 return 512;
3678 else if (Features.lookup("avx"))
3679 return 256;
3680 return 128;
3681 }
3682 if (TargetTriple.isPPC())
3683 return 128;
3684 if (TargetTriple.isWasm())
3685 return 128;
3686 return 0;
3687}
3688
3689 void OpenMPIRBuilder::applySimd(CanonicalLoopInfo *CanonicalLoop,
3690 MapVector<Value *, Value *> AlignedVars,
3691 Value *IfCond, OrderKind Order,
3692 ConstantInt *Simdlen, ConstantInt *Safelen) {
3693 LLVMContext &Ctx = Builder.getContext();
3694
3695 Function *F = CanonicalLoop->getFunction();
3696
3697 // TODO: We should not rely on pass manager. Currently we use pass manager
3698 // only for getting llvm::Loop which corresponds to given CanonicalLoopInfo
3699 // object. We should have a method which returns all blocks between
3700 // CanonicalLoopInfo::getHeader() and CanonicalLoopInfo::getAfter()
3701 FunctionAnalysisManager FAM;
3702 FAM.registerPass([]() { return DominatorTreeAnalysis(); });
3703 FAM.registerPass([]() { return LoopAnalysis(); });
3704 FAM.registerPass([]() { return PassInstrumentationAnalysis(); });
3705
3706 LoopAnalysis LIA;
3707 LoopInfo &&LI = LIA.run(*F, FAM);
3708
3709 Loop *L = LI.getLoopFor(CanonicalLoop->getHeader());
3710 if (AlignedVars.size()) {
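// Emit llvm.assume alignment assumptions in the preheader so the vectorizer
// can use aligned accesses for these pointers.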
3711 InsertPointTy IP = Builder.saveIP();
3712 Builder.SetInsertPoint(CanonicalLoop->getPreheader()->getTerminator());
3713 for (auto &AlignedItem : AlignedVars) {
3714 Value *AlignedPtr = AlignedItem.first;
3715 Value *Alignment = AlignedItem.second;
3716 Builder.CreateAlignmentAssumption(F->getParent()->getDataLayout(),
3717 AlignedPtr, Alignment);
3718 }
3719 Builder.restoreIP(IP);
3720 }
3721
3722 if (IfCond) {
3723 ValueToValueMapTy VMap;
3724 createIfVersion(CanonicalLoop, IfCond, VMap, "simd");
3725 // Add metadata to the cloned loop which disables vectorization
3726 Value *MappedLatch = VMap.lookup(CanonicalLoop->getLatch());
3727 assert(MappedLatch &&
3728 "Cannot find value which corresponds to original loop latch");
3729 assert(isa<BasicBlock>(MappedLatch) &&
3730 "Cannot cast mapped latch block value to BasicBlock");
3731 BasicBlock *NewLatchBlock = dyn_cast<BasicBlock>(MappedLatch);
3732 ConstantAsMetadata *BoolConst =
3733 ConstantAsMetadata::get(ConstantInt::getFalse(Type::getInt1Ty(Ctx)));
3734 addBasicBlockMetadata(
3735 NewLatchBlock,
3736 {MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"),
3737 BoolConst})});
3738 }
3739
3740 SmallSet<BasicBlock *, 8> Reachable;
3741
3742 // Get the basic blocks from the loop in which memref instructions
3743 // can be found.
3744 // TODO: Generalize getting all blocks inside a CanonicalizeLoopInfo,
3745 // preferably without running any passes.
3746 for (BasicBlock *Block : L->getBlocks()) {
3747 if (Block == CanonicalLoop->getCond() ||
3748 Block == CanonicalLoop->getHeader())
3749 continue;
3750 Reachable.insert(Block);
3751 }
3752
3753 SmallVector<Metadata *> LoopMDList;
3754
3755 // In presence of finite 'safelen', it may be unsafe to mark all
3756 // the memory instructions parallel, because loop-carried
3757 // dependences of 'safelen' iterations are possible.
3758 // If clause order(concurrent) is specified then the memory instructions
3759 // are marked parallel even if 'safelen' is finite.
3760 if ((Safelen == nullptr) || (Order == OrderKind::OMP_ORDER_concurrent)) {
3761 // Add access group metadata to memory-access instructions.
3762 MDNode *AccessGroup = MDNode::getDistinct(Ctx, {});
3763 for (BasicBlock *BB : Reachable)
3764 addSimdMetadata(BB, AccessGroup, LI);
3765 // TODO: If the loop has existing parallel access metadata, have
3766 // to combine two lists.
3767 LoopMDList.push_back(MDNode::get(
3768 Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), AccessGroup}));
3769 }
3770
3771 // Use the above access group metadata to create loop level
3772 // metadata, which should be distinct for each loop.
3773 ConstantAsMetadata *BoolConst =
3774 ConstantAsMetadata::get(ConstantInt::getTrue(Type::getInt1Ty(Ctx)));
3775 LoopMDList.push_back(MDNode::get(
3776 Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"), BoolConst}));
3777
3778 if (Simdlen || Safelen) {
3779 // If both simdlen and safelen clauses are specified, the value of the
3780 // simdlen parameter must be less than or equal to the value of the safelen
3781 // parameter. Therefore, use safelen only in the absence of simdlen.
3782 ConstantInt *VectorizeWidth = Simdlen == nullptr ? Safelen : Simdlen;
3783 LoopMDList.push_back(
3784 MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.width"),
3785 ConstantAsMetadata::get(VectorizeWidth)}));
3786 }
3787
3788 addLoopMetadata(CanonicalLoop, LoopMDList);
3789}
3790
3791/// Create the TargetMachine object to query the backend for optimization
3792/// preferences.
3793///
3794/// Ideally, this would be passed from the front-end to the OpenMPBuilder, but
3795/// e.g. Clang does not pass it to its CodeGen layer and creates it only when
3796 /// needed for the LLVM pass pipeline. We use some default options to avoid
3797/// having to pass too many settings from the frontend that probably do not
3798/// matter.
3799///
3800/// Currently, TargetMachine is only used sometimes by the unrollLoopPartial
3801/// method. If we are going to use TargetMachine for more purposes, especially
3802/// those that are sensitive to TargetOptions, RelocModel and CodeModel, it
3803 /// might become worth requiring front-ends to pass on their TargetMachine,
3804 /// or at least cache it between methods. Note that while frontends such as Clang
3805/// have just a single main TargetMachine per translation unit, "target-cpu" and
3806/// "target-features" that determine the TargetMachine are per-function and can
3807 /// be overridden using __attribute__((target("OPTIONS"))).
3808static std::unique_ptr<TargetMachine>
3809 createTargetMachine(Function *F, CodeGenOptLevel OptLevel) {
3810 Module *M = F->getParent();
3811
3812 StringRef CPU = F->getFnAttribute("target-cpu").getValueAsString();
3813 StringRef Features = F->getFnAttribute("target-features").getValueAsString();
3814 const std::string &Triple = M->getTargetTriple();
3815
3816 std::string Error;
3817 const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
3818 if (!TheTarget)
3819 return {};
3820
3821 llvm::TargetOptions Options;
3822 return std::unique_ptr<TargetMachine>(TheTarget->createTargetMachine(
3823 Triple, CPU, Features, Options, /*RelocModel=*/std::nullopt,
3824 /*CodeModel=*/std::nullopt, OptLevel));
3825}
3826
3827 /// Heuristically determine the best-performing unroll factor for \p CLI. This
3828/// depends on the target processor. We are re-using the same heuristics as the
3829/// LoopUnrollPass.
3830 static unsigned computeHeuristicUnrollFactor(CanonicalLoopInfo *CLI) {
3831 Function *F = CLI->getFunction();
3832
3833 // Assume the user requests the most aggressive unrolling, even if the rest of
3834 // the code is optimized using a lower setting.
3835 CodeGenOptLevel OptLevel = CodeGenOptLevel::Aggressive;
3836 std::unique_ptr<TargetMachine> TM = createTargetMachine(F, OptLevel);
3837
3838 FunctionAnalysisManager FAM;
3839 FAM.registerPass([]() { return TargetLibraryAnalysis(); });
3840 FAM.registerPass([]() { return AssumptionAnalysis(); });
3841 FAM.registerPass([]() { return DominatorTreeAnalysis(); });
3842 FAM.registerPass([]() { return LoopAnalysis(); });
3843 FAM.registerPass([]() { return ScalarEvolutionAnalysis(); });
3844 FAM.registerPass([]() { return PassInstrumentationAnalysis(); });
3845 TargetIRAnalysis TIRA;
3846 if (TM)
3847 TIRA = TargetIRAnalysis(
3848 [&](const Function &F) { return TM->getTargetTransformInfo(F); });
3849 FAM.registerPass([&]() { return TIRA; });
3850
3851 TargetIRAnalysis::Result &&TTI = TIRA.run(*F, FAM);
3852 ScalarEvolutionAnalysis SEA;
3853 ScalarEvolution &&SE = SEA.run(*F, FAM);
3854 DominatorTreeAnalysis DTA;
3855 DominatorTree &&DT = DTA.run(*F, FAM);
3856 LoopAnalysis LIA;
3857 LoopInfo &&LI = LIA.run(*F, FAM);
3858 AssumptionAnalysis ACT;
3859 AssumptionCache &&AC = ACT.run(*F, FAM);
3860 OptimizationRemarkEmitter ORE{F};
3861
3862 Loop *L = LI.getLoopFor(CLI->getHeader());
3863 assert(L && "Expecting CanonicalLoopInfo to be recognized as a loop");
3864
3865 TargetTransformInfo::UnrollingPreferences UP = gatherUnrollingPreferences(
3866 L, SE, TTI,
3867 /*BlockFrequencyInfo=*/nullptr,
3868 /*ProfileSummaryInfo=*/nullptr, ORE, static_cast<int>(OptLevel),
3869 /*UserThreshold=*/std::nullopt,
3870 /*UserCount=*/std::nullopt,
3871 /*UserAllowPartial=*/true,
3872 /*UserAllowRuntime=*/true,
3873 /*UserUpperBound=*/std::nullopt,
3874 /*UserFullUnrollMaxCount=*/std::nullopt);
3875
3876 UP.Force = true;
3877
3878 // Account for additional optimizations taking place before the LoopUnrollPass
3880 // would unroll the loop.
3881 UP.Threshold *= UnrollThresholdFactor;
3882 UP.PartialThreshold *= UnrollThresholdFactor;
3883 // Use normal unroll factors even if the rest of the code is optimized for
3884 // size.
3885 UP.OptSizeThreshold = UP.Threshold;
3886 UP.PartialOptSizeThreshold = UP.PartialThreshold;
3887
3888 LLVM_DEBUG(dbgs() << "Unroll heuristic thresholds:\n"
3889 << " Threshold=" << UP.Threshold << "\n"
3890 << " PartialThreshold=" << UP.PartialThreshold << "\n"
3891 << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n"
3892 << " PartialOptSizeThreshold="
3893 << UP.PartialOptSizeThreshold << "\n");
3894
3895 // Disable peeling.
3896 TargetTransformInfo::PeelingPreferences PP =
3897 gatherPeelingPreferences(L, SE, TTI,
3898 /*UserAllowPeeling=*/false,
3899 /*UserAllowProfileBasedPeeling=*/false,
3900 /*UnrollingSpecficValues=*/false);
3901
3902 SmallPtrSet<const Value *, 32> EphValues;
3903 CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
3904
3905 // Assume that reads and writes to stack variables can be eliminated by
3906 // Mem2Reg, SROA or LICM. That is, don't count them towards the loop body's
3907 // size.
3908 for (BasicBlock *BB : L->blocks()) {
3909 for (Instruction &I : *BB) {
3910 Value *Ptr;
3911 if (auto *Load = dyn_cast<LoadInst>(&I)) {
3912 Ptr = Load->getPointerOperand();
3913 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
3914 Ptr = Store->getPointerOperand();
3915 } else
3916 continue;
3917
3918 Ptr = Ptr->stripPointerCasts();
3919
3920 if (auto *Alloca = dyn_cast<AllocaInst>(Ptr)) {
3921 if (Alloca->getParent() == &F->getEntryBlock())
3922 EphValues.insert(&I);
3923 }
3924 }
3925 }
3926
3927 UnrollCostEstimator UCE(L, TTI, EphValues, UP.BEInsns);
3928
3929 // Loop is not unrollable if the loop contains certain instructions.
3930 if (!UCE.canUnroll() || UCE.Convergent) {
3931 LLVM_DEBUG(dbgs() << "Loop not considered unrollable\n");
3932 return 1;
3933 }
3934
3935 LLVM_DEBUG(dbgs() << "Estimated loop size is " << UCE.getRolledLoopSize()
3936 << "\n");
3937
3938 // TODO: Determine trip count of \p CLI if constant, computeUnrollCount might
3939 // be able to use it.
3940 int TripCount = 0;
3941 int MaxTripCount = 0;
3942 bool MaxOrZero = false;
3943 unsigned TripMultiple = 0;
3944
3945 bool UseUpperBound = false;
3946 computeUnrollCount(L, TTI, DT, &LI, &AC, SE, EphValues, &ORE, TripCount,
3947 MaxTripCount, MaxOrZero, TripMultiple, UCE, UP, PP,
3948 UseUpperBound);
3949 unsigned Factor = UP.Count;
3950 LLVM_DEBUG(dbgs() << "Suggesting unroll factor of " << Factor << "\n");
3951
3952 // This function returns 1 to signal that the loop should not be unrolled.
3953 if (Factor == 0)
3954 return 1;
3955 return Factor;
3956}
3957
3958 void OpenMPIRBuilder::unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop,
3959 int32_t Factor,
3960 CanonicalLoopInfo **UnrolledCLI) {
3961 assert(Factor >= 0 && "Unroll factor must not be negative");
3962
3963 Function *F = Loop->getFunction();
3964 LLVMContext &Ctx = F->getContext();
3965
3966 // If the unrolled loop is not used for another loop-associated directive, it
3967 // is sufficient to add metadata for the LoopUnrollPass.
3968 if (!UnrolledCLI) {
3969 SmallVector<Metadata *, 2> LoopMetadata;
3970 LoopMetadata.push_back(
3971 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")));
3972
3973 if (Factor >= 1) {
3974 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get(
3975 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor)));
3976 LoopMetadata.push_back(MDNode::get(
3977 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst}));
3978 }
3979
3980 addLoopMetadata(Loop, LoopMetadata);
3981 return;
3982 }
3983
3984 // Heuristically determine the unroll factor.
3985 if (Factor == 0)
3986 Factor = computeHeuristicUnrollFactor(Loop);
3987
3988 // No change required with unroll factor 1.
3989 if (Factor == 1) {
3990 *UnrolledCLI = Loop;
3991 return;
3992 }
3993
3994 assert(Factor >= 2 &&
3995 "unrolling only makes sense with a factor of 2 or larger");
3996
3997 Type *IndVarTy = Loop->getIndVarType();
3998
3999 // Apply partial unrolling by tiling the loop by the unroll-factor, then fully
4000 // unroll the inner loop.
4001 Value *FactorVal =
4002 ConstantInt::get(IndVarTy, APInt(IndVarTy->getIntegerBitWidth(), Factor,
4003 /*isSigned=*/false));
4004 std::vector<CanonicalLoopInfo *> LoopNest =
4005 tileLoops(DL, {Loop}, {FactorVal});
4006 assert(LoopNest.size() == 2 && "Expect 2 loops after tiling");
4007 *UnrolledCLI = LoopNest[0];
4008 CanonicalLoopInfo *InnerLoop = LoopNest[1];
4009
4010 // LoopUnrollPass can only fully unroll loops with constant trip count.
4011 // Unroll by the unroll factor with a fallback epilog for the remainder
4012 // iterations if necessary.
4013 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get(
4014 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor)));
4015 addLoopMetadata(
4016 InnerLoop,
4017 {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
4018 MDNode::get(
4019 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})});
4020
4021#ifndef NDEBUG
4022 (*UnrolledCLI)->assertOK();
4023#endif
4024}
4025
4026 OpenMPIRBuilder::InsertPointTy
4027 OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc,
4028 llvm::Value *BufSize, llvm::Value *CpyBuf,
4029 llvm::Value *CpyFn, llvm::Value *DidIt) {
4030 if (!updateToLocation(Loc))
4031 return Loc.IP;
4032
4033 uint32_t SrcLocStrSize;
4034 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4035 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4036 Value *ThreadId = getOrCreateThreadID(Ident);
4037
4038 llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt);
4039
4040 Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD};
4041
4042 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate);
4043 Builder.CreateCall(Fn, Args);
4044
4045 return Builder.saveIP();
4046}
4047
4048 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSingle(
4049 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
4050 FinalizeCallbackTy FiniCB, bool IsNowait, llvm::Value *DidIt) {
4051
4052 if (!updateToLocation(Loc))
4053 return Loc.IP;
4054
4055 // If needed (i.e. not null), initialize `DidIt` with 0
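// DidIt records whether this thread executed the single region;
// createCopyPrivate later loads it to drive the copyprivate broadcast.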
4056 if (DidIt) {
4057 Builder.CreateStore(Builder.getInt32(0), DidIt);
4058 }
4059
4060 Directive OMPD = Directive::OMPD_single;
4061 uint32_t SrcLocStrSize;
4062 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4063 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4064 Value *ThreadId = getOrCreateThreadID(Ident);
4065 Value *Args[] = {Ident, ThreadId};
4066
4067 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single);
4068 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
4069
4070 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single);
4071 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
4072
4073 // generates the following:
4074 // if (__kmpc_single()) {
4075 // .... single region ...
4076 // __kmpc_end_single
4077 // }
4078 // __kmpc_barrier
4079
4080 EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
4081 /*Conditional*/ true,
4082 /*hasFinalize*/ true);
4083 if (!IsNowait)
4084 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
4085 omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
4086 /* CheckCancelFlag */ false);
4087 return Builder.saveIP();
4088}
4089
4090 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical(
4091 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
4092 FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) {
4093
4094 if (!updateToLocation(Loc))
4095 return Loc.IP;
4096
4097 Directive OMPD = Directive::OMPD_critical;
4098 uint32_t SrcLocStrSize;
4099 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4100 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4101 Value *ThreadId = getOrCreateThreadID(Ident);
4102 Value *LockVar = getOMPCriticalRegionLock(CriticalName);
4103 Value *Args[] = {Ident, ThreadId, LockVar};
4104
4105 SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args));
4106 Function *RTFn = nullptr;
4107 if (HintInst) {
4108 // Add Hint to entry Args and create call
4109 EnterArgs.push_back(HintInst);
4110 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint);
4111 } else {
4112 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical);
4113 }
4114 Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs);
4115
4116 Function *ExitRTLFn =
4117 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical);
4118 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
4119
4120 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
4121 /*Conditional*/ false, /*hasFinalize*/ true);
4122}
4123
4124 OpenMPIRBuilder::InsertPointTy
4125 OpenMPIRBuilder::createOrderedDepend(const LocationDescription &Loc,
4126 InsertPointTy AllocaIP, unsigned NumLoops,
4127 ArrayRef<llvm::Value *> StoreValues,
4128 const Twine &Name, bool IsDependSource) {
4129 assert(
4130 llvm::all_of(StoreValues,
4131 [](Value *SV) { return SV->getType()->isIntegerTy(64); }) &&
4132 "OpenMP runtime requires depend vec with i64 type");
4133
4134 if (!updateToLocation(Loc))
4135 return Loc.IP;
4136
4137 // Allocate space for vector and generate alloc instruction.
4138 auto *ArrI64Ty = ArrayType::get(Int64, NumLoops);
4139 Builder.restoreIP(AllocaIP);
4140 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI64Ty, nullptr, Name);
4141 ArgsBase->setAlignment(Align(8));
4142 Builder.restoreIP(Loc.IP);
4143
4144 // Store the index value with offset in depend vector.
4145 for (unsigned I = 0; I < NumLoops; ++I) {
4146 Value *DependAddrGEPIter = Builder.CreateInBoundsGEP(
4147 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(I)});
4148 StoreInst *STInst = Builder.CreateStore(StoreValues[I], DependAddrGEPIter);
4149 STInst->setAlignment(Align(8));
4150 }
4151
4152 Value *DependBaseAddrGEP = Builder.CreateInBoundsGEP(
4153 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(0)});
4154
4155 uint32_t SrcLocStrSize;
4156 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4157 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4158 Value *ThreadId = getOrCreateThreadID(Ident);
4159 Value *Args[] = {Ident, ThreadId, DependBaseAddrGEP};
4160
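// depend(source) posts completion of the current iteration; depend(sink)
// waits for the iterations listed in the depend vector.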
4161 Function *RTLFn = nullptr;
4162 if (IsDependSource)
4163 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_post);
4164 else
4165 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_wait);
4166 Builder.CreateCall(RTLFn, Args);
4167
4168 return Builder.saveIP();
4169}
4170
4171 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createOrderedThreadsSimd(
4172 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
4173 FinalizeCallbackTy FiniCB, bool IsThreads) {
4174 if (!updateToLocation(Loc))
4175 return Loc.IP;
4176
4177 Directive OMPD = Directive::OMPD_ordered;
4178 Instruction *EntryCall = nullptr;
4179 Instruction *ExitCall = nullptr;
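// For 'ordered simd' (IsThreads == false) no runtime calls are required; the
// region is emitted with null entry/exit calls.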
4180
4181 if (IsThreads) {
4182 uint32_t SrcLocStrSize;
4183 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4184 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4185 Value *ThreadId = getOrCreateThreadID(Ident);
4186 Value *Args[] = {Ident, ThreadId};
4187
4188 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_ordered);
4189 EntryCall = Builder.CreateCall(EntryRTLFn, Args);
4190
4191 Function *ExitRTLFn =
4192 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_ordered);
4193 ExitCall = Builder.CreateCall(ExitRTLFn, Args);
4194 }
4195
4196 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
4197 /*Conditional*/ false, /*hasFinalize*/ true);
4198}
4199
4200OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion(
4201 Directive OMPD, Instruction *EntryCall, Instruction *ExitCall,
4202 BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional,
4203 bool HasFinalize, bool IsCancellable) {
4204
4205 if (HasFinalize)
4206 FinalizationStack.push_back({FiniCB, OMPD, IsCancellable});
4207
4208 // Create inlined region's entry and body blocks, in preparation
4209 // for conditional creation
4210 BasicBlock *EntryBB = Builder.GetInsertBlock();
4211 Instruction *SplitPos = EntryBB->getTerminator();
4212 if (!isa_and_nonnull<BranchInst>(SplitPos))
4213 SplitPos = new UnreachableInst(Builder.getContext(), EntryBB);
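// A temporary unreachable terminator gives splitBasicBlock a split point; it
// is erased again before returning.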
4214 BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end");
4215 BasicBlock *FiniBB =
4216 EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize");
4217
4218 Builder.SetInsertPoint(EntryBB->getTerminator());
4219 emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional);
4220
4221 // generate body
4222 BodyGenCB(/* AllocaIP */ InsertPointTy(),
4223 /* CodeGenIP */ Builder.saveIP());
4224
4225 // emit exit call and do any needed finalization.
4226 auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt());
4227 assert(FiniBB->getTerminator()->getNumSuccessors() == 1 &&
4228 FiniBB->getTerminator()->getSuccessor(0) == ExitBB &&
4229 "Unexpected control flow graph state!!");
4230 emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize);
4231 assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB &&
4232 "Unexpected Control Flow State!");
4234
4235 // If we are skipping the region of a non-conditional directive, remove the exit
4236 // block, and clear the builder's insertion point.
4237 assert(SplitPos->getParent() == ExitBB &&
4238 "Unexpected Insertion point location!");
4239 auto merged = MergeBlockIntoPredecessor(ExitBB);
4240 BasicBlock *ExitPredBB = SplitPos->getParent();
4241 auto InsertBB = merged ? ExitPredBB : ExitBB;
4242 if (!isa_and_nonnull<BranchInst>(SplitPos))
4243 SplitPos->eraseFromParent();
4244 Builder.SetInsertPoint(InsertBB);
4245
4246 return Builder.saveIP();
4247}
4248
4249OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry(
4250 Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) {
4251 // If there is nothing to do, return the current insertion point.
4252 if (!Conditional || !EntryCall)
4253 return Builder.saveIP();
4254
4255 BasicBlock *EntryBB = Builder.GetInsertBlock();
4256 Value *CallBool = Builder.CreateIsNotNull(EntryCall);
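// The entry runtime call returns nonzero for the thread that is selected to
// execute the region body.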
4257 auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body");
4258 auto *UI = new UnreachableInst(Builder.getContext(), ThenBB);
4259
4260 // Emit thenBB and set the Builder's insertion point there for
4261 // body generation next. Place the block after the current block.
4262 Function *CurFn = EntryBB->getParent();
4263 CurFn->insert(std::next(EntryBB->getIterator()), ThenBB);
4264
4265 // Move Entry branch to end of ThenBB, and replace with conditional
4266 // branch (If-stmt)
4267 Instruction *EntryBBTI = EntryBB->getTerminator();
4268 Builder.CreateCondBr(CallBool, ThenBB, ExitBB);
4269 EntryBBTI->removeFromParent();
4270 Builder.SetInsertPoint(UI);
4271 Builder.Insert(EntryBBTI);
4272 UI->eraseFromParent();
4273 Builder.SetInsertPoint(ThenBB->getTerminator());
4274
4275 // return an insertion point to ExitBB.
4276 return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt());
4277}
4278
4279OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit(
4280 omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall,
4281 bool HasFinalize) {
4282
4283 Builder.restoreIP(FinIP);
4284
4285 // If there is finalization to do, emit it before the exit call
4286 if (HasFinalize) {
4287 assert(!FinalizationStack.empty() &&
4288 "Unexpected finalization stack state!");
4289
4290 FinalizationInfo Fi = FinalizationStack.pop_back_val();
4291 assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!");
4292
4293 Fi.FiniCB(FinIP);
4294
4295 BasicBlock *FiniBB = FinIP.getBlock();
4296 Instruction *FiniBBTI = FiniBB->getTerminator();
4297
4298 // set Builder IP for call creation
4299 Builder.SetInsertPoint(FiniBBTI);
4300 }
4301
4302 if (!ExitCall)
4303 return Builder.saveIP();
4304
4305 // Place the exit call as the last instruction before the finalization block terminator
4306 ExitCall->removeFromParent();
4307 Builder.Insert(ExitCall);
4308
4309 return IRBuilder<>::InsertPoint(ExitCall->getParent(),
4310 ExitCall->getIterator());
4311}
4312
4314 InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr,
4315 llvm::IntegerType *IntPtrTy, bool BranchtoEnd) {
4316 if (!IP.isSet())
4317 return IP;
4318
4319 IRBuilder<>::InsertPointGuard IPG(Builder);
4320
4321 // creates the following CFG structure
4322 // OMP_Entry : (MasterAddr != PrivateAddr)?
4323 // F T
4324 // | \
4325 // | copyin.not.master
4326 // | /
4327 // v /
4328 // copyin.not.master.end
4329 // |
4330 // v
4331 // OMP.Entry.Next
4332
4333 BasicBlock *OMP_Entry = IP.getBlock();
4334 Function *CurFn = OMP_Entry->getParent();
4335 BasicBlock *CopyBegin =
4336 BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn);
4337 BasicBlock *CopyEnd = nullptr;
4338
4339 // If the entry block is terminated, split to preserve the branch to the following
4340 // basic block (i.e. OMP.Entry.Next), otherwise, leave everything as is.
4341 if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) {
4342 CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(),
4343 "copyin.not.master.end");
4344 OMP_Entry->getTerminator()->eraseFromParent();
4345 } else {
4346 CopyEnd =
4347 BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn);
4348 }
4349
4350 Builder.SetInsertPoint(OMP_Entry);
4351 Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy);
4352 Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy);
4353 Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr);
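// Only threads whose private copy is at a different address than the master
// copy perform the copy-in; the master thread itself is skipped.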
4354 Builder.CreateCondBr(cmp, CopyBegin, CopyEnd);
4355
4356 Builder.SetInsertPoint(CopyBegin);
4357 if (BranchtoEnd)
4358 Builder.SetInsertPoint(Builder.CreateBr(CopyEnd));
4359
4360 return Builder.saveIP();
4361}
4362
4363 CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc,
4364 Value *Size, Value *Allocator,
4365 std::string Name) {
4366 IRBuilder<>::InsertPointGuard IPG(Builder);
4367 Builder.restoreIP(Loc.IP);
4368
4369 uint32_t SrcLocStrSize;
4370 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4371 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4372 Value *ThreadId = getOrCreateThreadID(Ident);
4373 Value *Args[] = {ThreadId, Size, Allocator};
4374
4375 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc);
4376
4377 return Builder.CreateCall(Fn, Args, Name);
4378}
4379
4380 CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc,
4381 Value *Addr, Value *Allocator,
4382 std::string Name) {
4384 Builder.restoreIP(Loc.IP);
4385
4386 uint32_t SrcLocStrSize;
4387 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4388 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4389 Value *ThreadId = getOrCreateThreadID(Ident);
4390 Value *Args[] = {ThreadId, Addr, Allocator};
4391 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free);
4392 return Builder.CreateCall(Fn, Args, Name);
4393}
4394
4395 CallInst *OpenMPIRBuilder::createOMPInteropInit(
4396 const LocationDescription &Loc, Value *InteropVar,
4397 omp::OMPInteropType InteropType, Value *Device, Value *NumDependences,
4398 Value *DependenceAddress, bool HaveNowaitClause) {
4400 Builder.restoreIP(Loc.IP);
4401
4402 uint32_t SrcLocStrSize;
4403 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4404 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4405 Value *ThreadId = getOrCreateThreadID(Ident);
4406 if (Device == nullptr)
4407 Device = ConstantInt::get(Int32, -1);
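// With no device clause, -1 is passed so the runtime uses the default
// device; an absent dependence list becomes a zero count with a null address.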
4408 Constant *InteropTypeVal = ConstantInt::get(Int32, (int)InteropType);
4409 if (NumDependences == nullptr) {
4410 NumDependences = ConstantInt::get(Int32, 0);
4411 PointerType *PointerTypeVar = PointerType::getUnqual(M.getContext());
4412 DependenceAddress = ConstantPointerNull::get(PointerTypeVar);
4413 }
4414 Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause);
4415 Value *Args[] = {
4416 Ident, ThreadId, InteropVar, InteropTypeVal,
4417 Device, NumDependences, DependenceAddress, HaveNowaitClauseVal};
4418
4419 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_init);
4420
4421 return Builder.CreateCall(Fn, Args);
4422}
4423
4424 CallInst *OpenMPIRBuilder::createOMPInteropDestroy(
4425 const LocationDescription &Loc, Value *InteropVar, Value *Device,
4426 Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause) {
4428 Builder.restoreIP(Loc.IP);
4429
4430 uint32_t SrcLocStrSize;
4431 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4432 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4433 Value *ThreadId = getOrCreateThreadID(Ident);
4434 if (Device == nullptr)
4435 Device = ConstantInt::get(Int32, -1);
4436 if (NumDependences == nullptr) {
4437 NumDependences = ConstantInt::get(Int32, 0);
4438 PointerType *PointerTypeVar = PointerType::getUnqual(M.getContext());
4439 DependenceAddress = ConstantPointerNull::get(PointerTypeVar);
4440 }
4441 Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause);
4442 Value *Args[] = {
4443 Ident, ThreadId, InteropVar, Device,
4444 NumDependences, DependenceAddress, HaveNowaitClauseVal};
4445
4446 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_destroy);
4447
4448 return Builder.CreateCall(Fn, Args);
4449}
4450
4451 CallInst *OpenMPIRBuilder::createOMPInteropUse(const LocationDescription &Loc,
4452 Value *InteropVar, Value *Device,
4453 Value *NumDependences,
4454 Value *DependenceAddress,
4455 bool HaveNowaitClause) {
4457 Builder.restoreIP(Loc.IP);
4458 uint32_t SrcLocStrSize;
4459 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4460 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4461 Value *ThreadId = getOrCreateThreadID(Ident);
4462 if (Device == nullptr)
4463 Device = ConstantInt::get(Int32, -1);
4464 if (NumDependences == nullptr) {
4465 NumDependences = ConstantInt::get(Int32, 0);
4466 PointerType *PointerTypeVar = PointerType::getUnqual(M.getContext());
4467 DependenceAddress = ConstantPointerNull::get(PointerTypeVar);
4468 }
4469 Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause);
4470 Value *Args[] = {
4471 Ident, ThreadId, InteropVar, Device,
4472 NumDependences, DependenceAddress, HaveNowaitClauseVal};
4473
4474 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_use);
4475
4476 return Builder.CreateCall(Fn, Args);
4477}
4478
4479 CallInst *OpenMPIRBuilder::createCachedThreadPrivate(
4480 const LocationDescription &Loc, llvm::Value *Pointer,
4481 llvm::ConstantInt *Size, const llvm::Twine &Name) {
4482 IRBuilder<>::InsertPointGuard IPG(Builder);
4483 Builder.restoreIP(Loc.IP);
4484
4485 uint32_t SrcLocStrSize;
4486 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4487 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4488 Value *ThreadId = getOrCreateThreadID(Ident);
4489 Constant *ThreadPrivateCache =
4490 getOrCreateInternalVariable(Int8PtrPtr,