LLVM 19.0.0git
OMPIRBuilder.cpp
1//===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file implements the OpenMPIRBuilder class, which is used as a
11/// convenient way to create LLVM instructions for OpenMP directives.
12///
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/SmallSet.h"
18#include "llvm/ADT/StringRef.h"
28#include "llvm/IR/Attributes.h"
29#include "llvm/IR/BasicBlock.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/CallingConv.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/Constants.h"
36#include "llvm/IR/Function.h"
38#include "llvm/IR/IRBuilder.h"
39#include "llvm/IR/LLVMContext.h"
40#include "llvm/IR/MDBuilder.h"
41#include "llvm/IR/Metadata.h"
42#include "llvm/IR/PassManager.h"
43#include "llvm/IR/Value.h"
55
56#include <cstdint>
57#include <optional>
58
59#define DEBUG_TYPE "openmp-ir-builder"
60
61using namespace llvm;
62using namespace omp;
63
64static cl::opt<bool>
65 OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden,
66 cl::desc("Use optimistic attributes describing "
67 "'as-if' properties of runtime calls."),
68 cl::init(false));
69
70static cl::opt<double> UnrollThresholdFactor(
71 "openmp-ir-builder-unroll-threshold-factor", cl::Hidden,
72 cl::desc("Factor for the unroll threshold to account for code "
73 "simplifications still taking place"),
74 cl::init(1.5));
75
76#ifndef NDEBUG
77/// Return whether IP1 and IP2 are ambiguous, i.e., whether inserting
78/// instructions at position IP1 may change the meaning of IP2 or vice-versa.
79/// This is because an InsertPoint stores the instruction before which new
80/// instructions are inserted. For instance, if both point to the same
81/// instruction, two IRBuilders alternately creating instructions will interleave them.
82static bool isConflictIP(IRBuilder<>::InsertPoint IP1,
83 IRBuilder<>::InsertPoint IP2) {
84 if (!IP1.isSet() || !IP2.isSet())
85 return false;
86 return IP1.getBlock() == IP2.getBlock() && IP1.getPoint() == IP2.getPoint();
87}
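// A minimal sketch of the ambiguity this predicate guards against (assuming
// two builders A and B whose insert points were both saved before the same
// instruction %next; A, B, X, Y are illustrative names only):
//
//   A.restoreIP(IP);
//   B.restoreIP(IP);      // same block, same point -> isConflictIP is true
//   A.CreateAdd(X, Y);    // inserted immediately before %next
//   B.CreateMul(X, Y);    // also inserted immediately before %next
//
// The relative order of the two new instructions now depends only on which
// builder emitted first, i.e. the instruction streams interleave.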
88
89static bool isValidWorkshareLoopScheduleType(OMPScheduleType SchedType) {
90 // Valid ordered/unordered and base algorithm combinations.
91 switch (SchedType & ~OMPScheduleType::MonotonicityMask) {
92 case OMPScheduleType::UnorderedStaticChunked:
93 case OMPScheduleType::UnorderedStatic:
94 case OMPScheduleType::UnorderedDynamicChunked:
95 case OMPScheduleType::UnorderedGuidedChunked:
96 case OMPScheduleType::UnorderedRuntime:
97 case OMPScheduleType::UnorderedAuto:
98 case OMPScheduleType::UnorderedTrapezoidal:
99 case OMPScheduleType::UnorderedGreedy:
100 case OMPScheduleType::UnorderedBalanced:
101 case OMPScheduleType::UnorderedGuidedIterativeChunked:
102 case OMPScheduleType::UnorderedGuidedAnalyticalChunked:
103 case OMPScheduleType::UnorderedSteal:
104 case OMPScheduleType::UnorderedStaticBalancedChunked:
105 case OMPScheduleType::UnorderedGuidedSimd:
106 case OMPScheduleType::UnorderedRuntimeSimd:
107 case OMPScheduleType::OrderedStaticChunked:
108 case OMPScheduleType::OrderedStatic:
109 case OMPScheduleType::OrderedDynamicChunked:
110 case OMPScheduleType::OrderedGuidedChunked:
111 case OMPScheduleType::OrderedRuntime:
112 case OMPScheduleType::OrderedAuto:
113 case OMPScheduleType::OrderdTrapezoidal:
114 case OMPScheduleType::NomergeUnorderedStaticChunked:
115 case OMPScheduleType::NomergeUnorderedStatic:
116 case OMPScheduleType::NomergeUnorderedDynamicChunked:
117 case OMPScheduleType::NomergeUnorderedGuidedChunked:
118 case OMPScheduleType::NomergeUnorderedRuntime:
119 case OMPScheduleType::NomergeUnorderedAuto:
120 case OMPScheduleType::NomergeUnorderedTrapezoidal:
121 case OMPScheduleType::NomergeUnorderedGreedy:
122 case OMPScheduleType::NomergeUnorderedBalanced:
123 case OMPScheduleType::NomergeUnorderedGuidedIterativeChunked:
124 case OMPScheduleType::NomergeUnorderedGuidedAnalyticalChunked:
125 case OMPScheduleType::NomergeUnorderedSteal:
126 case OMPScheduleType::NomergeOrderedStaticChunked:
127 case OMPScheduleType::NomergeOrderedStatic:
128 case OMPScheduleType::NomergeOrderedDynamicChunked:
129 case OMPScheduleType::NomergeOrderedGuidedChunked:
130 case OMPScheduleType::NomergeOrderedRuntime:
131 case OMPScheduleType::NomergeOrderedAuto:
132 case OMPScheduleType::NomergeOrderedTrapezoidal:
133 break;
134 default:
135 return false;
136 }
137
138 // Must not set both monotonicity modifiers at the same time.
139 OMPScheduleType MonotonicityFlags =
140 SchedType & OMPScheduleType::MonotonicityMask;
141 if (MonotonicityFlags == OMPScheduleType::MonotonicityMask)
142 return false;
143
144 return true;
145}
146#endif
147
148static const omp::GV &getGridValue(const Triple &T, Function *Kernel) {
149 if (T.isAMDGPU()) {
150 StringRef Features =
151 Kernel->getFnAttribute("target-features").getValueAsString();
152 if (Features.count("+wavefrontsize64"))
153 return omp::getAMDGPUGridValues<64>();
154 return omp::getAMDGPUGridValues<32>();
155 }
156 if (T.isNVPTX())
157 return omp::NVPTXGridValues;
158 llvm_unreachable("No grid value available for this architecture!");
159}
160
161/// Determine which scheduling algorithm to use based on the schedule clause
162/// arguments.
163static OMPScheduleType
164getOpenMPBaseScheduleType(llvm::omp::ScheduleKind ClauseKind, bool HasChunks,
165 bool HasSimdModifier) {
166 // Currently, the default schedule is static.
167 switch (ClauseKind) {
168 case OMP_SCHEDULE_Default:
169 case OMP_SCHEDULE_Static:
170 return HasChunks ? OMPScheduleType::BaseStaticChunked
171 : OMPScheduleType::BaseStatic;
172 case OMP_SCHEDULE_Dynamic:
173 return OMPScheduleType::BaseDynamicChunked;
174 case OMP_SCHEDULE_Guided:
175 return HasSimdModifier ? OMPScheduleType::BaseGuidedSimd
176 : OMPScheduleType::BaseGuidedChunked;
177 case OMP_SCHEDULE_Auto:
178 return OMPScheduleType::BaseAuto;
179 case OMP_SCHEDULE_Runtime:
180 return HasSimdModifier ? OMPScheduleType::BaseRuntimeSimd
181 : OMPScheduleType::BaseRuntime;
182 }
183 llvm_unreachable("unhandled schedule clause argument");
184}
185
186/// Adds ordering modifier flags to schedule type.
187static OMPScheduleType
188getOpenMPOrderingScheduleType(OMPScheduleType BaseScheduleType,
189 bool HasOrderedClause) {
190 assert((BaseScheduleType & OMPScheduleType::ModifierMask) ==
191 OMPScheduleType::None &&
192 "Must not have ordering nor monotonicity flags already set");
193
194 OMPScheduleType OrderingModifier = HasOrderedClause
195 ? OMPScheduleType::ModifierOrdered
196 : OMPScheduleType::ModifierUnordered;
197 OMPScheduleType OrderingScheduleType = BaseScheduleType | OrderingModifier;
198
199 // Unsupported combinations are mapped to the closest supported schedule type.
200 if (OrderingScheduleType ==
201 (OMPScheduleType::BaseGuidedSimd | OMPScheduleType::ModifierOrdered))
202 return OMPScheduleType::OrderedGuidedChunked;
203 else if (OrderingScheduleType == (OMPScheduleType::BaseRuntimeSimd |
204 OMPScheduleType::ModifierOrdered))
205 return OMPScheduleType::OrderedRuntime;
206
207 return OrderingScheduleType;
208}
209
210/// Adds monotonicity modifier flags to schedule type.
211static OMPScheduleType
212getOpenMPMonotonicityScheduleType(OMPScheduleType ScheduleType,
213 bool HasSimdModifier, bool HasMonotonic,
214 bool HasNonmonotonic, bool HasOrderedClause) {
215 assert((ScheduleType & OMPScheduleType::MonotonicityMask) ==
216 OMPScheduleType::None &&
217 "Must not have monotonicity flags already set");
218 assert((!HasMonotonic || !HasNonmonotonic) &&
219 "Monotonic and Nonmonotonic are contradicting each other");
220
221 if (HasMonotonic) {
222 return ScheduleType | OMPScheduleType::ModifierMonotonic;
223 } else if (HasNonmonotonic) {
224 return ScheduleType | OMPScheduleType::ModifierNonmonotonic;
225 } else {
226 // OpenMP 5.1, 2.11.4 Worksharing-Loop Construct, Description.
227 // If the static schedule kind is specified or if the ordered clause is
228 // specified, and if the nonmonotonic modifier is not specified, the
229 // effect is as if the monotonic modifier is specified. Otherwise, unless
230 // the monotonic modifier is specified, the effect is as if the
231 // nonmonotonic modifier is specified.
232 OMPScheduleType BaseScheduleType =
233 ScheduleType & ~OMPScheduleType::ModifierMask;
234 if ((BaseScheduleType == OMPScheduleType::BaseStatic) ||
235 (BaseScheduleType == OMPScheduleType::BaseStaticChunked) ||
236 HasOrderedClause) {
237 // Monotonic is the default in the OpenMP runtime library, so there is no
238 // need to set it explicitly.
239 return ScheduleType;
240 } else {
241 return ScheduleType | OMPScheduleType::ModifierNonmonotonic;
242 }
243 }
244}
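// Worked example of the OpenMP 5.1 default rule implemented above (a sketch,
// using the flag names from this file):
//   schedule(static)               -> returned unchanged (monotonic is the
//                                     runtime default for static/ordered)
//   schedule(dynamic), no ordered  -> BaseDynamicChunked | ModifierNonmonotonic
//   schedule(dynamic) with ordered -> ordered acts "as if monotonic", so the
//                                     schedule type is returned unchanged.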
245
246/// Determine the schedule type using schedule and ordering clause arguments.
247static OMPScheduleType
248computeOpenMPScheduleType(ScheduleKind ClauseKind, bool HasChunks,
249 bool HasSimdModifier, bool HasMonotonicModifier,
250 bool HasNonmonotonicModifier, bool HasOrderedClause) {
251 OMPScheduleType BaseSchedule =
252 getOpenMPBaseScheduleType(ClauseKind, HasChunks, HasSimdModifier);
253 OMPScheduleType OrderedSchedule =
254 getOpenMPOrderingScheduleType(BaseSchedule, HasOrderedClause);
255 OMPScheduleType Result = getOpenMPMonotonicityScheduleType(
256 OrderedSchedule, HasSimdModifier, HasMonotonicModifier,
257 HasNonmonotonicModifier, HasOrderedClause);
258
260 return Result;
261}
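// End-to-end sketch of the composition above: for `schedule(guided, 4) ordered`,
// getOpenMPBaseScheduleType yields BaseGuidedChunked, getOpenMPOrderingScheduleType
// ORs in ModifierOrdered (giving OrderedGuidedChunked), and the monotonicity step
// leaves the value unchanged because the ordered clause already implies
// monotonic behaviour.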
262
263/// Make \p Source branch to \p Target.
264///
265/// Handles two situations:
266/// * \p Source already has an unconditional branch.
267/// * \p Source is a degenerate block (no terminator because the BB is
268/// the current head of the IR construction).
269static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) {
270 if (Instruction *Term = Source->getTerminator()) {
271 auto *Br = cast<BranchInst>(Term);
272 assert(!Br->isConditional() &&
273 "BB's terminator must be an unconditional branch (or degenerate)");
274 BasicBlock *Succ = Br->getSuccessor(0);
275 Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true);
276 Br->setSuccessor(0, Target);
277 return;
278 }
279
280 auto *NewBr = BranchInst::Create(Target, Source);
281 NewBr->setDebugLoc(DL);
282}
283
285 bool CreateBranch) {
286 assert(New->getFirstInsertionPt() == New->begin() &&
287 "Target BB must not have PHI nodes");
288
289 // Move instructions to new block.
290 BasicBlock *Old = IP.getBlock();
291 New->splice(New->begin(), Old, IP.getPoint(), Old->end());
292
293 if (CreateBranch)
294 BranchInst::Create(New, Old);
295}
296
297void llvm::spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch) {
299 BasicBlock *Old = Builder.GetInsertBlock();
300
301 spliceBB(Builder.saveIP(), New, CreateBranch);
302 if (CreateBranch)
303 Builder.SetInsertPoint(Old->getTerminator());
304 else
305 Builder.SetInsertPoint(Old);
306
307 // SetInsertPoint also updates the Builder's debug location, but we want to
308 // keep the one the Builder was configured to use.
310}
311
314 BasicBlock *Old = IP.getBlock();
316 Old->getContext(), Name.isTriviallyEmpty() ? Old->getName() : Name,
317 Old->getParent(), Old->getNextNode());
318 spliceBB(IP, New, CreateBranch);
319 New->replaceSuccessorsPhiUsesWith(Old, New);
320 return New;
321}
322
323BasicBlock *llvm::splitBB(IRBuilderBase &Builder, bool CreateBranch,
326 BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, Name);
327 if (CreateBranch)
328 Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
329 else
330 Builder.SetInsertPoint(Builder.GetInsertBlock());
331 // SetInsertPoint also updates the Builder's debug location, but we want to
332 // keep the one the Builder was configured to use.
334 return New;
335}
336
337BasicBlock *llvm::splitBB(IRBuilder<> &Builder, bool CreateBranch,
340 BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, Name);
341 if (CreateBranch)
342 Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
343 else
344 Builder.SetInsertPoint(Builder.GetInsertBlock());
345 // SetInsertPoint also updates the Builder's debug location, but we want to
346 // keep the one the Builder was configured to use.
348 return New;
349}
350
351BasicBlock *llvm::splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
352 llvm::Twine Suffix) {
353 BasicBlock *Old = Builder.GetInsertBlock();
354 return splitBB(Builder, CreateBranch, Old->getName() + Suffix);
355}
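// Typical use of the split helpers above (a sketch; "Cont" and ".cont" are
// illustrative names): splitting moves everything from the current insert
// point onwards into a new block and, with CreateBranch set, leaves the
// builder positioned at the new unconditional branch in the old block.
//
//   BasicBlock *Cont = splitBB(Builder, /*CreateBranch=*/true, ".cont");
//   // ... emit instructions that still belong to the old block here ...
//   Builder.SetInsertPoint(Cont);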
356
357// This function creates a fake integer value and a fake use for the integer
358// value. It returns the fake value created. This is useful in modeling the
359// extra arguments to the outlined functions.
360static Value *createFakeIntVal(IRBuilder<> &Builder,
361 OpenMPIRBuilder::InsertPointTy OuterAllocaIP,
362 std::stack<Instruction *> &ToBeDeleted,
363 OpenMPIRBuilder::InsertPointTy InnerAllocaIP,
364 const Twine &Name = "", bool AsPtr = true) {
365 Builder.restoreIP(OuterAllocaIP);
366 Instruction *FakeVal;
367 AllocaInst *FakeValAddr =
368 Builder.CreateAlloca(Builder.getInt32Ty(), nullptr, Name + ".addr");
369 ToBeDeleted.push(FakeValAddr);
370
371 if (AsPtr) {
372 FakeVal = FakeValAddr;
373 } else {
374 FakeVal =
375 Builder.CreateLoad(Builder.getInt32Ty(), FakeValAddr, Name + ".val");
376 ToBeDeleted.push(FakeVal);
377 }
378
379 // Generate a fake use of this value
380 Builder.restoreIP(InnerAllocaIP);
381 Instruction *UseFakeVal;
382 if (AsPtr) {
383 UseFakeVal =
384 Builder.CreateLoad(Builder.getInt32Ty(), FakeVal, Name + ".use");
385 } else {
386 UseFakeVal =
387 cast<BinaryOperator>(Builder.CreateAdd(FakeVal, Builder.getInt32(10)));
388 }
389 ToBeDeleted.push(UseFakeVal);
390 return FakeVal;
391}
392
393//===----------------------------------------------------------------------===//
394// OpenMPIRBuilderConfig
395//===----------------------------------------------------------------------===//
396
397namespace {
399/// Values for bit flags for marking which requires clauses have been used.
400enum OpenMPOffloadingRequiresDirFlags {
401 /// flag undefined.
402 OMP_REQ_UNDEFINED = 0x000,
403 /// no requires directive present.
404 OMP_REQ_NONE = 0x001,
405 /// reverse_offload clause.
406 OMP_REQ_REVERSE_OFFLOAD = 0x002,
407 /// unified_address clause.
408 OMP_REQ_UNIFIED_ADDRESS = 0x004,
409 /// unified_shared_memory clause.
410 OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
411 /// dynamic_allocators clause.
412 OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
413 LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
414};
415
416} // anonymous namespace
417
419 : RequiresFlags(OMP_REQ_UNDEFINED) {}
420
422 bool IsTargetDevice, bool IsGPU, bool OpenMPOffloadMandatory,
423 bool HasRequiresReverseOffload, bool HasRequiresUnifiedAddress,
424 bool HasRequiresUnifiedSharedMemory, bool HasRequiresDynamicAllocators)
425 : IsTargetDevice(IsTargetDevice), IsGPU(IsGPU),
426 OpenMPOffloadMandatory(OpenMPOffloadMandatory),
427 RequiresFlags(OMP_REQ_UNDEFINED) {
428 if (HasRequiresReverseOffload)
429 RequiresFlags |= OMP_REQ_REVERSE_OFFLOAD;
430 if (HasRequiresUnifiedAddress)
431 RequiresFlags |= OMP_REQ_UNIFIED_ADDRESS;
432 if (HasRequiresUnifiedSharedMemory)
433 RequiresFlags |= OMP_REQ_UNIFIED_SHARED_MEMORY;
434 if (HasRequiresDynamicAllocators)
435 RequiresFlags |= OMP_REQ_DYNAMIC_ALLOCATORS;
436}
437
439 return RequiresFlags & OMP_REQ_REVERSE_OFFLOAD;
440}
441
443 return RequiresFlags & OMP_REQ_UNIFIED_ADDRESS;
444}
445
447 return RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY;
448}
449
451 return RequiresFlags & OMP_REQ_DYNAMIC_ALLOCATORS;
452}
453
455 return hasRequiresFlags() ? RequiresFlags
456 : static_cast<int64_t>(OMP_REQ_NONE);
457}
458
460 if (Value)
461 RequiresFlags |= OMP_REQ_REVERSE_OFFLOAD;
462 else
463 RequiresFlags &= ~OMP_REQ_REVERSE_OFFLOAD;
464}
465
467 if (Value)
468 RequiresFlags |= OMP_REQ_UNIFIED_ADDRESS;
469 else
470 RequiresFlags &= ~OMP_REQ_UNIFIED_ADDRESS;
471}
472
474 if (Value)
475 RequiresFlags |= OMP_REQ_UNIFIED_SHARED_MEMORY;
476 else
477 RequiresFlags &= ~OMP_REQ_UNIFIED_SHARED_MEMORY;
478}
479
481 if (Value)
482 RequiresFlags |= OMP_REQ_DYNAMIC_ALLOCATORS;
483 else
484 RequiresFlags &= ~OMP_REQ_DYNAMIC_ALLOCATORS;
485}
486
487//===----------------------------------------------------------------------===//
488// OpenMPIRBuilder
489//===----------------------------------------------------------------------===//
490
492 IRBuilderBase &Builder,
493 SmallVector<Value *> &ArgsVector) {
495 Value *PointerNum = Builder.getInt32(KernelArgs.NumTargetItems);
496 auto Int32Ty = Type::getInt32Ty(Builder.getContext());
497 Value *ZeroArray = Constant::getNullValue(ArrayType::get(Int32Ty, 3));
498 Value *Flags = Builder.getInt64(KernelArgs.HasNoWait);
499
500 Value *NumTeams3D =
501 Builder.CreateInsertValue(ZeroArray, KernelArgs.NumTeams, {0});
502 Value *NumThreads3D =
503 Builder.CreateInsertValue(ZeroArray, KernelArgs.NumThreads, {0});
504
505 ArgsVector = {Version,
506 PointerNum,
507 KernelArgs.RTArgs.BasePointersArray,
508 KernelArgs.RTArgs.PointersArray,
509 KernelArgs.RTArgs.SizesArray,
510 KernelArgs.RTArgs.MapTypesArray,
511 KernelArgs.RTArgs.MapNamesArray,
512 KernelArgs.RTArgs.MappersArray,
513 KernelArgs.NumIterations,
514 Flags,
515 NumTeams3D,
516 NumThreads3D,
517 KernelArgs.DynCGGroupMem};
518}
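// For reference, the vector built above lists the kernel arguments in the
// order they are later stored into the kernel-arguments structure passed to
// __tgt_target_kernel (a sketch based solely on the assignments above):
//   { Version, NumTargetItems, BasePointersArray, PointersArray, SizesArray,
//     MapTypesArray, MapNamesArray, MappersArray, NumIterations, Flags,
//     NumTeams[3], NumThreads[3], DynCGGroupMem }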
519
521 LLVMContext &Ctx = Fn.getContext();
522
523 // Get the function's current attributes.
524 auto Attrs = Fn.getAttributes();
525 auto FnAttrs = Attrs.getFnAttrs();
526 auto RetAttrs = Attrs.getRetAttrs();
528 for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo)
529 ArgAttrs.emplace_back(Attrs.getParamAttrs(ArgNo));
530
531 // Add AS to FnAS while taking special care with integer extensions.
532 auto addAttrSet = [&](AttributeSet &FnAS, const AttributeSet &AS,
533 bool Param = true) -> void {
534 bool HasSignExt = AS.hasAttribute(Attribute::SExt);
535 bool HasZeroExt = AS.hasAttribute(Attribute::ZExt);
536 if (HasSignExt || HasZeroExt) {
537 assert(AS.getNumAttributes() == 1 &&
538 "Currently not handling extension attr combined with others.");
539 if (Param) {
540 if (auto AK = TargetLibraryInfo::getExtAttrForI32Param(T, HasSignExt))
541 FnAS = FnAS.addAttribute(Ctx, AK);
542 } else if (auto AK =
543 TargetLibraryInfo::getExtAttrForI32Return(T, HasSignExt))
544 FnAS = FnAS.addAttribute(Ctx, AK);
545 } else {
546 FnAS = FnAS.addAttributes(Ctx, AS);
547 }
548 };
549
550#define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
551#include "llvm/Frontend/OpenMP/OMPKinds.def"
552
553 // Add attributes to the function declaration.
554 switch (FnID) {
555#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets) \
556 case Enum: \
557 FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet); \
558 addAttrSet(RetAttrs, RetAttrSet, /*Param*/ false); \
559 for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo) \
560 addAttrSet(ArgAttrs[ArgNo], ArgAttrSets[ArgNo]); \
561 Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs)); \
562 break;
563#include "llvm/Frontend/OpenMP/OMPKinds.def"
564 default:
565 // Attributes are optional.
566 break;
567 }
568}
569
572 FunctionType *FnTy = nullptr;
573 Function *Fn = nullptr;
574
575 // Try to find the declaration in the module first.
576 switch (FnID) {
577#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) \
578 case Enum: \
579 FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__}, \
580 IsVarArg); \
581 Fn = M.getFunction(Str); \
582 break;
583#include "llvm/Frontend/OpenMP/OMPKinds.def"
584 }
585
586 if (!Fn) {
587 // Create a new declaration if we need one.
588 switch (FnID) {
589#define OMP_RTL(Enum, Str, ...) \
590 case Enum: \
591 Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M); \
592 break;
593#include "llvm/Frontend/OpenMP/OMPKinds.def"
594 }
595
596 // Add information if the runtime function takes a callback function
597 if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) {
598 if (!Fn->hasMetadata(LLVMContext::MD_callback)) {
599 LLVMContext &Ctx = Fn->getContext();
600 MDBuilder MDB(Ctx);
601 // Annotate the callback behavior of the runtime function:
602 // - The callback callee is argument number 2 (microtask).
603 // - The first two arguments of the callback callee are unknown (-1).
604 // - All variadic arguments to the runtime function are passed to the
605 // callback callee.
606 Fn->addMetadata(
607 LLVMContext::MD_callback,
609 2, {-1, -1}, /* VarArgsArePassed */ true)}));
610 }
611 }
612
613 LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()
614 << " with type " << *Fn->getFunctionType() << "\n");
615 addAttributes(FnID, *Fn);
616
617 } else {
618 LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()
619 << " with type " << *Fn->getFunctionType() << "\n");
620 }
621
622 assert(Fn && "Failed to create OpenMP runtime function");
623
624 return {FnTy, Fn};
625}
626
629 auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee());
630 assert(Fn && "Failed to create OpenMP runtime function pointer");
631 return Fn;
632}
633
634void OpenMPIRBuilder::initialize() { initializeTypes(M); }
635
638 BasicBlock &EntryBlock = Function->getEntryBlock();
639 Instruction *MoveLocInst = EntryBlock.getFirstNonPHI();
640
641 // Loop over blocks looking for constant allocas, skipping the entry block
642 // as any allocas there are already in the desired location.
643 for (auto Block = std::next(Function->begin(), 1); Block != Function->end();
644 Block++) {
645 for (auto Inst = Block->getReverseIterator()->begin();
646 Inst != Block->getReverseIterator()->end();) {
647 if (auto *AllocaInst = dyn_cast_if_present<llvm::AllocaInst>(Inst)) {
648 Inst++;
649 if (!isa<ConstantData>(AllocaInst->getArraySize()))
650 continue;
651 AllocaInst->moveBeforePreserving(MoveLocInst);
652 } else {
653 Inst++;
654 }
655 }
656 }
657}
658
659void OpenMPIRBuilder::finalize(Function *Fn) {
660 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
661 SmallVector<BasicBlock *, 32> Blocks;
662 SmallVector<OutlineInfo, 16> DeferredOutlines;
663 for (OutlineInfo &OI : OutlineInfos) {
664 // Skip functions that have not been finalized yet; this may happen with
665 // nested function generation.
666 if (Fn && OI.getFunction() != Fn) {
667 DeferredOutlines.push_back(OI);
668 continue;
669 }
670
671 ParallelRegionBlockSet.clear();
672 Blocks.clear();
673 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
674
675 Function *OuterFn = OI.getFunction();
676 CodeExtractorAnalysisCache CEAC(*OuterFn);
677 // If we generate code for the target device, we need to allocate
678 // struct for aggregate params in the device default alloca address space.
679 // OpenMP runtime requires that the params of the extracted functions are
680 // passed as zero address space pointers. This flag ensures that
681 // CodeExtractor generates correct code for extracted functions
682 // which are used by OpenMP runtime.
683 bool ArgsInZeroAddressSpace = Config.isTargetDevice();
684 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
685 /* AggregateArgs */ true,
686 /* BlockFrequencyInfo */ nullptr,
687 /* BranchProbabilityInfo */ nullptr,
688 /* AssumptionCache */ nullptr,
689 /* AllowVarArgs */ true,
690 /* AllowAlloca */ true,
691 /* AllocaBlock*/ OI.OuterAllocaBB,
692 /* Suffix */ ".omp_par", ArgsInZeroAddressSpace);
693
694 LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n");
695 LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()
696 << " Exit: " << OI.ExitBB->getName() << "\n");
697 assert(Extractor.isEligible() &&
698 "Expected OpenMP outlining to be possible!");
699
700 for (auto *V : OI.ExcludeArgsFromAggregate)
701 Extractor.excludeArgFromAggregate(V);
702
703 Function *OutlinedFn = Extractor.extractCodeRegion(CEAC);
704
705 // Forward target-cpu, target-features attributes to the outlined function.
706 auto TargetCpuAttr = OuterFn->getFnAttribute("target-cpu");
707 if (TargetCpuAttr.isStringAttribute())
708 OutlinedFn->addFnAttr(TargetCpuAttr);
709
710 auto TargetFeaturesAttr = OuterFn->getFnAttribute("target-features");
711 if (TargetFeaturesAttr.isStringAttribute())
712 OutlinedFn->addFnAttr(TargetFeaturesAttr);
713
714 LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n");
715 LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n");
716 assert(OutlinedFn->getReturnType()->isVoidTy() &&
717 "OpenMP outlined functions should not return a value!");
718
719 // For compatibility with the clang CG we move the outlined function after
720 // the one with the parallel region.
721 OutlinedFn->removeFromParent();
722 M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn);
723
724 // Remove the artificial entry introduced by the extractor right away, we
725 // made our own entry block after all.
726 {
727 BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock();
728 assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB);
729 assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry);
730 // Move instructions from the to-be-deleted ArtificialEntry to the entry
731 // basic block of the parallel region. CodeExtractor generates
732 // instructions to unwrap the aggregate argument and may sink
733 // allocas/bitcasts for values that are solely used in the outlined region
734 // and do not escape.
735 assert(!ArtificialEntry.empty() &&
736 "Expected instructions to add in the outlined region entry");
737 for (BasicBlock::reverse_iterator It = ArtificialEntry.rbegin(),
738 End = ArtificialEntry.rend();
739 It != End;) {
740 Instruction &I = *It;
741 It++;
742
743 if (I.isTerminator())
744 continue;
745
746 I.moveBeforePreserving(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
747 }
748
749 OI.EntryBB->moveBefore(&ArtificialEntry);
750 ArtificialEntry.eraseFromParent();
751 }
752 assert(&OutlinedFn->getEntryBlock() == OI.EntryBB);
753 assert(OutlinedFn && OutlinedFn->getNumUses() == 1);
754
755 // Run a user callback, e.g. to add attributes.
756 if (OI.PostOutlineCB)
757 OI.PostOutlineCB(*OutlinedFn);
758 }
759
760 // Remove work items that have been completed.
761 OutlineInfos = std::move(DeferredOutlines);
762
763 // The createTarget functions embed user-written code into
764 // the target region, which may inject allocas that need to
765 // be moved to the entry block of our target or risk malformed
766 // optimisations by later passes; this is only relevant for
767 // the device pass, which appears to be a little more delicate
768 // when it comes to optimisations (however, we do not block on
769 // that here, it is up to whoever inserts into the list to do so).
770 // This notably has to occur after the OutlineInfo candidates
771 // have been extracted, so the end product will not be
772 // implicitly adversely affected by any raises unless
773 // intentionally appended to the list.
774 // NOTE: This only does so for ConstantData; it could be extended
775 // to ConstantExprs with further effort, however, they should
776 // largely be folded when they get here. Extending it to runtime
777 // defined/read+writeable allocation sizes would be non-trivial
778 // (we would need to factor in movement of any stores to variables the
779 // allocation size depends on, as well as the usual loads;
780 // otherwise it would yield the wrong result after movement) and
781 // would likely be more suitable as an LLVM optimisation pass.
784
785 EmitMetadataErrorReportFunctionTy &&ErrorReportFn =
786 [](EmitMetadataErrorKind Kind,
787 const TargetRegionEntryInfo &EntryInfo) -> void {
788 errs() << "Error of kind: " << Kind
789 << " when emitting offload entries and metadata during "
790 "OMPIRBuilder finalization \n";
791 };
792
795}
796
798 assert(OutlineInfos.empty() && "There must be no outstanding outlinings");
799}
800
803 auto *GV =
804 new GlobalVariable(M, I32Ty,
805 /* isConstant = */ true, GlobalValue::WeakODRLinkage,
806 ConstantInt::get(I32Ty, Value), Name);
807 GV->setVisibility(GlobalValue::HiddenVisibility);
808
809 return GV;
810}
811
813 uint32_t SrcLocStrSize,
814 IdentFlag LocFlags,
815 unsigned Reserve2Flags) {
816 // Enable "C-mode".
817 LocFlags |= OMP_IDENT_FLAG_KMPC;
818
819 Constant *&Ident =
820 IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}];
821 if (!Ident) {
823 Constant *IdentData[] = {I32Null,
824 ConstantInt::get(Int32, uint32_t(LocFlags)),
825 ConstantInt::get(Int32, Reserve2Flags),
826 ConstantInt::get(Int32, SrcLocStrSize), SrcLocStr};
827 Constant *Initializer =
828 ConstantStruct::get(OpenMPIRBuilder::Ident, IdentData);
829
830 // Look for existing encoding of the location + flags, not needed but
831 // minimizes the difference to the existing solution while we transition.
832 for (GlobalVariable &GV : M.globals())
833 if (GV.getValueType() == OpenMPIRBuilder::Ident && GV.hasInitializer())
834 if (GV.getInitializer() == Initializer)
835 Ident = &GV;
836
837 if (!Ident) {
838 auto *GV = new GlobalVariable(
839 M, OpenMPIRBuilder::Ident,
840 /* isConstant = */ true, GlobalValue::PrivateLinkage, Initializer, "",
843 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
844 GV->setAlignment(Align(8));
845 Ident = GV;
846 }
847 }
848
850}
851
853 uint32_t &SrcLocStrSize) {
854 SrcLocStrSize = LocStr.size();
855 Constant *&SrcLocStr = SrcLocStrMap[LocStr];
856 if (!SrcLocStr) {
857 Constant *Initializer =
859
860 // Look for existing encoding of the location, not needed but minimizes the
861 // difference to the existing solution while we transition.
862 for (GlobalVariable &GV : M.globals())
863 if (GV.isConstant() && GV.hasInitializer() &&
864 GV.getInitializer() == Initializer)
865 return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr);
866
867 SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "",
868 /* AddressSpace */ 0, &M);
869 }
870 return SrcLocStr;
871}
872
874 StringRef FileName,
875 unsigned Line, unsigned Column,
876 uint32_t &SrcLocStrSize) {
877 SmallString<128> Buffer;
878 Buffer.push_back(';');
879 Buffer.append(FileName);
880 Buffer.push_back(';');
881 Buffer.append(FunctionName);
882 Buffer.push_back(';');
883 Buffer.append(std::to_string(Line));
884 Buffer.push_back(';');
885 Buffer.append(std::to_string(Column));
886 Buffer.push_back(';');
887 Buffer.push_back(';');
888 return getOrCreateSrcLocStr(Buffer.str(), SrcLocStrSize);
889}
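// Example of the string built above: for FunctionName "foo", FileName
// "test.c", Line 3 and Column 7, the buffer is ";test.c;foo;3;7;;",
// matching the ";unknown;unknown;0;0;;" default used below.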
890
891Constant *
893 StringRef UnknownLoc = ";unknown;unknown;0;0;;";
894 return getOrCreateSrcLocStr(UnknownLoc, SrcLocStrSize);
895}
896
898 uint32_t &SrcLocStrSize,
899 Function *F) {
900 DILocation *DIL = DL.get();
901 if (!DIL)
902 return getOrCreateDefaultSrcLocStr(SrcLocStrSize);
903 StringRef FileName = M.getName();
904 if (DIFile *DIF = DIL->getFile())
905 if (std::optional<StringRef> Source = DIF->getSource())
906 FileName = *Source;
907 StringRef Function = DIL->getScope()->getSubprogram()->getName();
908 if (Function.empty() && F)
909 Function = F->getName();
910 return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(),
911 DIL->getColumn(), SrcLocStrSize);
912}
913
915 uint32_t &SrcLocStrSize) {
916 return getOrCreateSrcLocStr(Loc.DL, SrcLocStrSize,
917 Loc.IP.getBlock()->getParent());
918}
919
921 return Builder.CreateCall(
922 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident,
923 "omp_global_thread_num");
924}
925
928 bool ForceSimpleCall, bool CheckCancelFlag) {
929 if (!updateToLocation(Loc))
930 return Loc.IP;
931 return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag);
932}
933
936 bool ForceSimpleCall, bool CheckCancelFlag) {
937 // Build call __kmpc_cancel_barrier(loc, thread_id) or
938 // __kmpc_barrier(loc, thread_id);
939
940 IdentFlag BarrierLocFlags;
941 switch (Kind) {
942 case OMPD_for:
943 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR;
944 break;
945 case OMPD_sections:
946 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS;
947 break;
948 case OMPD_single:
949 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE;
950 break;
951 case OMPD_barrier:
952 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL;
953 break;
954 default:
955 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL;
956 break;
957 }
958
959 uint32_t SrcLocStrSize;
960 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
961 Value *Args[] = {
962 getOrCreateIdent(SrcLocStr, SrcLocStrSize, BarrierLocFlags),
963 getOrCreateThreadID(getOrCreateIdent(SrcLocStr, SrcLocStrSize))};
964
965 // If we are in a cancellable parallel region, barriers are cancellation
966 // points.
967 // TODO: Check why we would force simple calls or to ignore the cancel flag.
968 bool UseCancelBarrier =
969 !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel);
970
971 Value *Result =
973 UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier
974 : OMPRTL___kmpc_barrier),
975 Args);
976
977 if (UseCancelBarrier && CheckCancelFlag)
978 emitCancelationCheckImpl(Result, OMPD_parallel);
979
980 return Builder.saveIP();
981}
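// The call emitted above has roughly the following shape in IR (a sketch;
// the cancellable variant instead calls __kmpc_cancel_barrier, whose i32
// result is checked by emitCancelationCheckImpl):
//
//   %gtid = call i32 @__kmpc_global_thread_num(ptr @ident)
//   call void @__kmpc_barrier(ptr @ident, i32 %gtid)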
982
985 Value *IfCondition,
986 omp::Directive CanceledDirective) {
987 if (!updateToLocation(Loc))
988 return Loc.IP;
989
990 // LLVM utilities (e.g. SplitBlockAndInsertIfThenElse) expect blocks with terminators.
991 auto *UI = Builder.CreateUnreachable();
992
993 Instruction *ThenTI = UI, *ElseTI = nullptr;
994 if (IfCondition)
995 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
996 Builder.SetInsertPoint(ThenTI);
997
998 Value *CancelKind = nullptr;
999 switch (CanceledDirective) {
1000#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \
1001 case DirectiveEnum: \
1002 CancelKind = Builder.getInt32(Value); \
1003 break;
1004#include "llvm/Frontend/OpenMP/OMPKinds.def"
1005 default:
1006 llvm_unreachable("Unknown cancel kind!");
1007 }
1008
1009 uint32_t SrcLocStrSize;
1010 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1011 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1012 Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind};
1013 Value *Result = Builder.CreateCall(
1014 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args);
1015 auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) {
1016 if (CanceledDirective == OMPD_parallel) {
1018 Builder.restoreIP(IP);
1020 omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
1021 /* CheckCancelFlag */ false);
1022 }
1023 };
1024
1025 // The actual cancel logic is shared with others, e.g., cancel_barriers.
1026 emitCancelationCheckImpl(Result, CanceledDirective, ExitCB);
1027
1028 // Update the insertion point and remove the terminator we introduced.
1029 Builder.SetInsertPoint(UI->getParent());
1030 UI->eraseFromParent();
1031
1032 return Builder.saveIP();
1033}
1034
1036 const LocationDescription &Loc, InsertPointTy AllocaIP, Value *&Return,
1037 Value *Ident, Value *DeviceID, Value *NumTeams, Value *NumThreads,
1038 Value *HostPtr, ArrayRef<Value *> KernelArgs) {
1039 if (!updateToLocation(Loc))
1040 return Loc.IP;
1041
1042 Builder.restoreIP(AllocaIP);
1043 auto *KernelArgsPtr =
1044 Builder.CreateAlloca(OpenMPIRBuilder::KernelArgs, nullptr, "kernel_args");
1045 Builder.restoreIP(Loc.IP);
1046
1047 for (unsigned I = 0, Size = KernelArgs.size(); I != Size; ++I) {
1048 llvm::Value *Arg =
1049 Builder.CreateStructGEP(OpenMPIRBuilder::KernelArgs, KernelArgsPtr, I);
1051 KernelArgs[I], Arg,
1052 M.getDataLayout().getPrefTypeAlign(KernelArgs[I]->getType()));
1053 }
1054
1055 SmallVector<Value *> OffloadingArgs{Ident, DeviceID, NumTeams,
1056 NumThreads, HostPtr, KernelArgsPtr};
1057
1058 Return = Builder.CreateCall(
1059 getOrCreateRuntimeFunction(M, OMPRTL___tgt_target_kernel),
1060 OffloadingArgs);
1061
1062 return Builder.saveIP();
1063}
1064
1066 const LocationDescription &Loc, Function *OutlinedFn, Value *OutlinedFnID,
1067 EmitFallbackCallbackTy emitTargetCallFallbackCB, TargetKernelArgs &Args,
1068 Value *DeviceID, Value *RTLoc, InsertPointTy AllocaIP) {
1069
1070 if (!updateToLocation(Loc))
1071 return Loc.IP;
1072
1073 Builder.restoreIP(Loc.IP);
1074 // On top of the arrays that were filled up, the target offloading call
1075 // takes as arguments the device id as well as the host pointer. The host
1076 // pointer is used by the runtime library to identify the current target
1077 // region, so it only has to be unique and not necessarily point to
1078 // anything. It could be the pointer to the outlined function that
1079 // implements the target region, but we aren't using that so that the
1080 // compiler doesn't need to keep that, and could therefore inline the host
1081 // function if proven worthwhile during optimization.
1082
1083 // From this point on, we need to have an ID of the target region defined.
1084 assert(OutlinedFnID && "Invalid outlined function ID!");
1085 (void)OutlinedFnID;
1086
1087 // Return value of the runtime offloading call.
1088 Value *Return = nullptr;
1089
1090 // Arguments for the target kernel.
1091 SmallVector<Value *> ArgsVector;
1092 getKernelArgsVector(Args, Builder, ArgsVector);
1093
1094 // The target region is an outlined function launched by the runtime
1095 // via calls to __tgt_target_kernel().
1096 //
1097 // Note that on the host and CPU targets, the runtime implementation of
1098 // these calls simply call the outlined function without forking threads.
1099 // The outlined functions themselves have runtime calls to
1100 // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
1101 // the compiler in emitTeamsCall() and emitParallelCall().
1102 //
1103 // In contrast, on the NVPTX target, the implementation of
1104 // __tgt_target_teams() launches a GPU kernel with the requested number
1105 // of teams and threads so no additional calls to the runtime are required.
1106 // Check the error code and execute the host version if required.
1107 Builder.restoreIP(emitTargetKernel(Builder, AllocaIP, Return, RTLoc, DeviceID,
1108 Args.NumTeams, Args.NumThreads,
1109 OutlinedFnID, ArgsVector));
1110
1111 BasicBlock *OffloadFailedBlock =
1112 BasicBlock::Create(Builder.getContext(), "omp_offload.failed");
1113 BasicBlock *OffloadContBlock =
1114 BasicBlock::Create(Builder.getContext(), "omp_offload.cont");
1115 Value *Failed = Builder.CreateIsNotNull(Return);
1116 Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
1117
1118 auto CurFn = Builder.GetInsertBlock()->getParent();
1119 emitBlock(OffloadFailedBlock, CurFn);
1120 Builder.restoreIP(emitTargetCallFallbackCB(Builder.saveIP()));
1121 emitBranch(OffloadContBlock);
1122 emitBlock(OffloadContBlock, CurFn, /*IsFinished=*/true);
1123 return Builder.saveIP();
1124}
1125
1127 omp::Directive CanceledDirective,
1128 FinalizeCallbackTy ExitCB) {
1129 assert(isLastFinalizationInfoCancellable(CanceledDirective) &&
1130 "Unexpected cancellation!");
1131
1132 // For a cancel barrier we create two new blocks.
1133 BasicBlock *BB = Builder.GetInsertBlock();
1134 BasicBlock *NonCancellationBlock;
1135 if (Builder.GetInsertPoint() == BB->end()) {
1136 // TODO: This branch will not be needed once we moved to the
1137 // OpenMPIRBuilder codegen completely.
1138 NonCancellationBlock = BasicBlock::Create(
1139 BB->getContext(), BB->getName() + ".cont", BB->getParent());
1140 } else {
1141 NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint());
1144 }
1145 BasicBlock *CancellationBlock = BasicBlock::Create(
1146 BB->getContext(), BB->getName() + ".cncl", BB->getParent());
1147
1148 // Jump to them based on the return value.
1149 Value *Cmp = Builder.CreateIsNull(CancelFlag);
1150 Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock,
1151 /* TODO weight */ nullptr, nullptr);
1152
1153 // From the cancellation block we finalize all variables and go to the
1154 // post finalization block that is known to the FiniCB callback.
1155 Builder.SetInsertPoint(CancellationBlock);
1156 if (ExitCB)
1157 ExitCB(Builder.saveIP());
1158 auto &FI = FinalizationStack.back();
1159 FI.FiniCB(Builder.saveIP());
1160
1161 // The continuation block is where code generation continues.
1162 Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
1163}
1164
1165// Callback used to create the OpenMP runtime calls that support the
1166// omp parallel clause for the device.
1167// We need to use this callback to replace the call to the OutlinedFn in
1168// OuterFn with a call to the OpenMP DeviceRTL runtime function (__kmpc_parallel_51).
1170 OpenMPIRBuilder *OMPIRBuilder, Function &OutlinedFn, Function *OuterFn,
1171 BasicBlock *OuterAllocaBB, Value *Ident, Value *IfCondition,
1172 Value *NumThreads, Instruction *PrivTID, AllocaInst *PrivTIDAddr,
1173 Value *ThreadID, const SmallVector<Instruction *, 4> &ToBeDeleted) {
1174 // Add some known attributes.
1175 IRBuilder<> &Builder = OMPIRBuilder->Builder;
1176 OutlinedFn.addParamAttr(0, Attribute::NoAlias);
1177 OutlinedFn.addParamAttr(1, Attribute::NoAlias);
1178 OutlinedFn.addParamAttr(0, Attribute::NoUndef);
1179 OutlinedFn.addParamAttr(1, Attribute::NoUndef);
1180 OutlinedFn.addFnAttr(Attribute::NoUnwind);
1181
1182 assert(OutlinedFn.arg_size() >= 2 &&
1183 "Expected at least tid and bounded tid as arguments");
1184 unsigned NumCapturedVars = OutlinedFn.arg_size() - /* tid & bounded tid */ 2;
1185
1186 CallInst *CI = cast<CallInst>(OutlinedFn.user_back());
1187 assert(CI && "Expected call instruction to outlined function");
1188 CI->getParent()->setName("omp_parallel");
1189
1190 Builder.SetInsertPoint(CI);
1191 Type *PtrTy = OMPIRBuilder->VoidPtr;
1192 Value *NullPtrValue = Constant::getNullValue(PtrTy);
1193
1194 // Add alloca for kernel args
1195 OpenMPIRBuilder ::InsertPointTy CurrentIP = Builder.saveIP();
1196 Builder.SetInsertPoint(OuterAllocaBB, OuterAllocaBB->getFirstInsertionPt());
1197 AllocaInst *ArgsAlloca =
1198 Builder.CreateAlloca(ArrayType::get(PtrTy, NumCapturedVars));
1199 Value *Args = ArgsAlloca;
1200 // Add address space cast if array for storing arguments is not allocated
1201 // in address space 0
1202 if (ArgsAlloca->getAddressSpace())
1203 Args = Builder.CreatePointerCast(ArgsAlloca, PtrTy);
1204 Builder.restoreIP(CurrentIP);
1205
1206 // Store captured vars which are used by kmpc_parallel_51
1207 for (unsigned Idx = 0; Idx < NumCapturedVars; Idx++) {
1208 Value *V = *(CI->arg_begin() + 2 + Idx);
1209 Value *StoreAddress = Builder.CreateConstInBoundsGEP2_64(
1210 ArrayType::get(PtrTy, NumCapturedVars), Args, 0, Idx);
1211 Builder.CreateStore(V, StoreAddress);
1212 }
1213
1214 Value *Cond =
1215 IfCondition ? Builder.CreateSExtOrTrunc(IfCondition, OMPIRBuilder->Int32)
1216 : Builder.getInt32(1);
1217
1218 // Build kmpc_parallel_51 call
1219 Value *Parallel51CallArgs[] = {
1220 /* identifier*/ Ident,
1221 /* global thread num*/ ThreadID,
1222 /* if expression */ Cond,
1223 /* number of threads */ NumThreads ? NumThreads : Builder.getInt32(-1),
1224 /* Proc bind */ Builder.getInt32(-1),
1225 /* outlined function */
1226 Builder.CreateBitCast(&OutlinedFn, OMPIRBuilder->ParallelTaskPtr),
1227 /* wrapper function */ NullPtrValue,
1228 /* arguments of the outlined function */ Args,
1229 /* number of arguments */ Builder.getInt64(NumCapturedVars)};
1230
1231 FunctionCallee RTLFn =
1232 OMPIRBuilder->getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_parallel_51);
1233
1234 Builder.CreateCall(RTLFn, Parallel51CallArgs);
1235
1236 LLVM_DEBUG(dbgs() << "With kmpc_parallel_51 placed: "
1237 << *Builder.GetInsertBlock()->getParent() << "\n");
1238
1239 // Initialize the local TID stack location with the argument value.
1240 Builder.SetInsertPoint(PrivTID);
1241 Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin();
1242 Builder.CreateStore(Builder.CreateLoad(OMPIRBuilder->Int32, OutlinedAI),
1243 PrivTIDAddr);
1244
1245 // Remove redundant call to the outlined function.
1246 CI->eraseFromParent();
1247
1248 for (Instruction *I : ToBeDeleted) {
1249 I->eraseFromParent();
1250 }
1251}
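// The replacement call generated above looks roughly like this (a sketch;
// the argument values correspond one-to-one to the Parallel51CallArgs array):
//
//   call void @__kmpc_parallel_51(ptr %ident, i32 %gtid, i32 %if_cond,
//                                 i32 %num_threads, i32 -1 /* proc_bind */,
//                                 ptr @outlined_fn, ptr null /* wrapper */,
//                                 ptr %args, i64 %num_captured_vars)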
1252
1253// Callback used to create the OpenMP runtime calls that support the
1254// omp parallel clause for the host.
1255// We need to use this callback to replace the call to the OutlinedFn in
1256// OuterFn with a call to the OpenMP host runtime function (__kmpc_fork_call[_if]).
1257static void
1259 Function *OuterFn, Value *Ident, Value *IfCondition,
1260 Instruction *PrivTID, AllocaInst *PrivTIDAddr,
1261 const SmallVector<Instruction *, 4> &ToBeDeleted) {
1262 IRBuilder<> &Builder = OMPIRBuilder->Builder;
1263 FunctionCallee RTLFn;
1264 if (IfCondition) {
1265 RTLFn =
1266 OMPIRBuilder->getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call_if);
1267 } else {
1268 RTLFn =
1269 OMPIRBuilder->getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call);
1270 }
1271 if (auto *F = dyn_cast<Function>(RTLFn.getCallee())) {
1272 if (!F->hasMetadata(LLVMContext::MD_callback)) {
1273 LLVMContext &Ctx = F->getContext();
1274 MDBuilder MDB(Ctx);
1275 // Annotate the callback behavior of the __kmpc_fork_call:
1276 // - The callback callee is argument number 2 (microtask).
1277 // - The first two arguments of the callback callee are unknown (-1).
1278 // - All variadic arguments to the __kmpc_fork_call are passed to the
1279 // callback callee.
1280 F->addMetadata(LLVMContext::MD_callback,
1282 2, {-1, -1},
1283 /* VarArgsArePassed */ true)}));
1284 }
1285 }
1286 // Add some known attributes.
1287 OutlinedFn.addParamAttr(0, Attribute::NoAlias);
1288 OutlinedFn.addParamAttr(1, Attribute::NoAlias);
1289 OutlinedFn.addFnAttr(Attribute::NoUnwind);
1290
1291 assert(OutlinedFn.arg_size() >= 2 &&
1292 "Expected at least tid and bounded tid as arguments");
1293 unsigned NumCapturedVars = OutlinedFn.arg_size() - /* tid & bounded tid */ 2;
1294
1295 CallInst *CI = cast<CallInst>(OutlinedFn.user_back());
1296 CI->getParent()->setName("omp_parallel");
1297 Builder.SetInsertPoint(CI);
1298
1299 // Build call __kmpc_fork_call[_if](Ident, n, microtask, var1, .., varn);
1300 Value *ForkCallArgs[] = {
1301 Ident, Builder.getInt32(NumCapturedVars),
1302 Builder.CreateBitCast(&OutlinedFn, OMPIRBuilder->ParallelTaskPtr)};
1303
1304 SmallVector<Value *, 16> RealArgs;
1305 RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs));
1306 if (IfCondition) {
1307 Value *Cond = Builder.CreateSExtOrTrunc(IfCondition, OMPIRBuilder->Int32);
1308 RealArgs.push_back(Cond);
1309 }
1310 RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end());
1311
1312 // __kmpc_fork_call_if always expects a void ptr as the last argument
1313 // If there are no arguments, pass a null pointer.
1314 auto PtrTy = OMPIRBuilder->VoidPtr;
1315 if (IfCondition && NumCapturedVars == 0) {
1316 Value *NullPtrValue = Constant::getNullValue(PtrTy);
1317 RealArgs.push_back(NullPtrValue);
1318 }
1319 if (IfCondition && RealArgs.back()->getType() != PtrTy)
1320 RealArgs.back() = Builder.CreateBitCast(RealArgs.back(), PtrTy);
1321
1322 Builder.CreateCall(RTLFn, RealArgs);
1323
1324 LLVM_DEBUG(dbgs() << "With fork_call placed: "
1325 << *Builder.GetInsertBlock()->getParent() << "\n");
1326
1327 // Initialize the local TID stack location with the argument value.
1328 Builder.SetInsertPoint(PrivTID);
1329 Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin();
1330 Builder.CreateStore(Builder.CreateLoad(OMPIRBuilder->Int32, OutlinedAI),
1331 PrivTIDAddr);
1332
1333 // Remove redundant call to the outlined function.
1334 CI->eraseFromParent();
1335
1336 for (Instruction *I : ToBeDeleted) {
1337 I->eraseFromParent();
1338 }
1339}
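// The replacement call generated above looks roughly like this for the
// unconditional case (a sketch; the _if variant additionally passes the
// condition and pads the trailing argument with a null pointer if needed):
//
//   call void (ptr, i32, ptr, ...) @__kmpc_fork_call(
//       ptr %ident, i32 <num captured vars>, ptr @outlined_fn, <captured>...)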
1340
1342 const LocationDescription &Loc, InsertPointTy OuterAllocaIP,
1343 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
1344 FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads,
1345 omp::ProcBindKind ProcBind, bool IsCancellable) {
1346 assert(!isConflictIP(Loc.IP, OuterAllocaIP) && "IPs must not be ambiguous");
1347
1348 if (!updateToLocation(Loc))
1349 return Loc.IP;
1350
1351 uint32_t SrcLocStrSize;
1352 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1353 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1354 Value *ThreadID = getOrCreateThreadID(Ident);
1355 // If we generate code for the target device, we need to allocate
1356 // struct for aggregate params in the device default alloca address space.
1357 // OpenMP runtime requires that the params of the extracted functions are
1358 // passed as zero address space pointers. This flag ensures that extracted
1359 // function arguments are declared in zero address space
1360 bool ArgsInZeroAddressSpace = Config.isTargetDevice();
1361
1362 // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads)
1363 // only if we compile for host side.
1364 if (NumThreads && !Config.isTargetDevice()) {
1365 Value *Args[] = {
1366 Ident, ThreadID,
1367 Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)};
1369 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args);
1370 }
1371
1372 if (ProcBind != OMP_PROC_BIND_default) {
1373 // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind)
1374 Value *Args[] = {
1375 Ident, ThreadID,
1376 ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)};
1378 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args);
1379 }
1380
1381 BasicBlock *InsertBB = Builder.GetInsertBlock();
1382 Function *OuterFn = InsertBB->getParent();
1383
1384 // Save the outer alloca block because the insertion iterator may get
1385 // invalidated and we still need this later.
1386 BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock();
1387
1388 // Vector to remember instructions we used only during the modeling but which
1389 // we want to delete at the end.
1390 SmallVector<Instruction *, 4> ToBeDeleted;
1391
1392 // Change the location to the outer alloca insertion point to create and
1393 // initialize the allocas we pass into the parallel region.
1394 Builder.restoreIP(OuterAllocaIP);
1395 AllocaInst *TIDAddrAlloca = Builder.CreateAlloca(Int32, nullptr, "tid.addr");
1396 AllocaInst *ZeroAddrAlloca =
1397 Builder.CreateAlloca(Int32, nullptr, "zero.addr");
1398 Instruction *TIDAddr = TIDAddrAlloca;
1399 Instruction *ZeroAddr = ZeroAddrAlloca;
1400 if (ArgsInZeroAddressSpace && M.getDataLayout().getAllocaAddrSpace() != 0) {
1401 // Add additional casts to enforce pointers in zero address space
1402 TIDAddr = new AddrSpaceCastInst(
1403 TIDAddrAlloca, PointerType ::get(M.getContext(), 0), "tid.addr.ascast");
1404 TIDAddr->insertAfter(TIDAddrAlloca);
1405 ToBeDeleted.push_back(TIDAddr);
1406 ZeroAddr = new AddrSpaceCastInst(ZeroAddrAlloca,
1407 PointerType ::get(M.getContext(), 0),
1408 "zero.addr.ascast");
1409 ZeroAddr->insertAfter(ZeroAddrAlloca);
1410 ToBeDeleted.push_back(ZeroAddr);
1411 }
1412
1413 // We only need TIDAddr and ZeroAddr for modeling purposes to get the
1414 // associated arguments in the outlined function, so we delete them later.
1415 ToBeDeleted.push_back(TIDAddrAlloca);
1416 ToBeDeleted.push_back(ZeroAddrAlloca);
1417
1418 // Create an artificial insertion point that will also ensure the blocks we
1419 // are about to split are not degenerated.
1420 auto *UI = new UnreachableInst(Builder.getContext(), InsertBB);
1421
1422 BasicBlock *EntryBB = UI->getParent();
1423 BasicBlock *PRegEntryBB = EntryBB->splitBasicBlock(UI, "omp.par.entry");
1424 BasicBlock *PRegBodyBB = PRegEntryBB->splitBasicBlock(UI, "omp.par.region");
1425 BasicBlock *PRegPreFiniBB =
1426 PRegBodyBB->splitBasicBlock(UI, "omp.par.pre_finalize");
1427 BasicBlock *PRegExitBB = PRegPreFiniBB->splitBasicBlock(UI, "omp.par.exit");
1428
1429 auto FiniCBWrapper = [&](InsertPointTy IP) {
1430 // Hide "open-ended" blocks from the given FiniCB by setting the right jump
1431 // target to the region exit block.
1432 if (IP.getBlock()->end() == IP.getPoint()) {
1434 Builder.restoreIP(IP);
1435 Instruction *I = Builder.CreateBr(PRegExitBB);
1436 IP = InsertPointTy(I->getParent(), I->getIterator());
1437 }
1438 assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 &&
1439 IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB &&
1440 "Unexpected insertion point for finalization call!");
1441 return FiniCB(IP);
1442 };
1443
1444 FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable});
1445
1446 // Generate the privatization allocas in the block that will become the entry
1447 // of the outlined function.
1448 Builder.SetInsertPoint(PRegEntryBB->getTerminator());
1449 InsertPointTy InnerAllocaIP = Builder.saveIP();
1450
1451 AllocaInst *PrivTIDAddr =
1452 Builder.CreateAlloca(Int32, nullptr, "tid.addr.local");
1453 Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid");
1454
1455 // Add some fake uses for OpenMP provided arguments.
1456 ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use"));
1457 Instruction *ZeroAddrUse =
1458 Builder.CreateLoad(Int32, ZeroAddr, "zero.addr.use");
1459 ToBeDeleted.push_back(ZeroAddrUse);
1460
1461 // EntryBB
1462 // |
1463 // V
1464 // PRegionEntryBB <- Privatization allocas are placed here.
1465 // |
1466 // V
1467 // PRegionBodyBB <- BodyGen is invoked here.
1468 // |
1469 // V
1470 // PRegPreFiniBB <- The block we will start finalization from.
1471 // |
1472 // V
1473 // PRegionExitBB <- A common exit to simplify block collection.
1474 //
1475
1476 LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n");
1477
1478 // Let the caller create the body.
1479 assert(BodyGenCB && "Expected body generation callback!");
1480 InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin());
1481 BodyGenCB(InnerAllocaIP, CodeGenIP);
1482
1483 LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n");
1484
1485 OutlineInfo OI;
1486 if (Config.isTargetDevice()) {
1487 // Generate OpenMP target specific runtime call
1488 OI.PostOutlineCB = [=, ToBeDeletedVec =
1489 std::move(ToBeDeleted)](Function &OutlinedFn) {
1490 targetParallelCallback(this, OutlinedFn, OuterFn, OuterAllocaBlock, Ident,
1491 IfCondition, NumThreads, PrivTID, PrivTIDAddr,
1492 ThreadID, ToBeDeletedVec);
1493 };
1494 } else {
1495 // Generate OpenMP host runtime call
1496 OI.PostOutlineCB = [=, ToBeDeletedVec =
1497 std::move(ToBeDeleted)](Function &OutlinedFn) {
1498 hostParallelCallback(this, OutlinedFn, OuterFn, Ident, IfCondition,
1499 PrivTID, PrivTIDAddr, ToBeDeletedVec);
1500 };
1501 }
1502
1503 OI.OuterAllocaBB = OuterAllocaBlock;
1504 OI.EntryBB = PRegEntryBB;
1505 OI.ExitBB = PRegExitBB;
1506
1507 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
1509 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
1510
1511 // Ensure a single exit node for the outlined region by creating one.
1512 // We might have multiple incoming edges to the exit now due to finalizations,
1513 // e.g., cancel calls that cause the control flow to leave the region.
1514 BasicBlock *PRegOutlinedExitBB = PRegExitBB;
1515 PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt());
1516 PRegOutlinedExitBB->setName("omp.par.outlined.exit");
1517 Blocks.push_back(PRegOutlinedExitBB);
1518
1519 CodeExtractorAnalysisCache CEAC(*OuterFn);
1520 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
1521 /* AggregateArgs */ false,
1522 /* BlockFrequencyInfo */ nullptr,
1523 /* BranchProbabilityInfo */ nullptr,
1524 /* AssumptionCache */ nullptr,
1525 /* AllowVarArgs */ true,
1526 /* AllowAlloca */ true,
1527 /* AllocationBlock */ OuterAllocaBlock,
1528 /* Suffix */ ".omp_par", ArgsInZeroAddressSpace);
1529
1530 // Find inputs to, outputs from the code region.
1531 BasicBlock *CommonExit = nullptr;
1532 SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands;
1533 Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
1534 Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands);
1535
1536 LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n");
1537
1538 FunctionCallee TIDRTLFn =
1539 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num);
1540
1541 auto PrivHelper = [&](Value &V) {
1542 if (&V == TIDAddr || &V == ZeroAddr) {
1543 OI.ExcludeArgsFromAggregate.push_back(&V);
1544 return;
1545 }
1546
1548 for (Use &U : V.uses())
1549 if (auto *UserI = dyn_cast<Instruction>(U.getUser()))
1550 if (ParallelRegionBlockSet.count(UserI->getParent()))
1551 Uses.insert(&U);
1552
1553 // __kmpc_fork_call expects extra arguments as pointers. If the input
1554 // already has a pointer type, everything is fine. Otherwise, store the
1555 // value onto stack and load it back inside the to-be-outlined region. This
1556 // will ensure only the pointer will be passed to the function.
1557 // FIXME: if there are more than 15 trailing arguments, they must be
1558 // additionally packed in a struct.
1559 Value *Inner = &V;
1560 if (!V.getType()->isPointerTy()) {
1562 LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n");
1563
1564 Builder.restoreIP(OuterAllocaIP);
1565 Value *Ptr =
1566 Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded");
1567
1568 // Store to stack at end of the block that currently branches to the entry
1569 // block of the to-be-outlined region.
1570 Builder.SetInsertPoint(InsertBB,
1571 InsertBB->getTerminator()->getIterator());
1572 Builder.CreateStore(&V, Ptr);
1573
1574 // Load back next to allocations in the to-be-outlined region.
1575 Builder.restoreIP(InnerAllocaIP);
1576 Inner = Builder.CreateLoad(V.getType(), Ptr);
1577 }
1578
1579 Value *ReplacementValue = nullptr;
1580 CallInst *CI = dyn_cast<CallInst>(&V);
1581 if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) {
1582 ReplacementValue = PrivTID;
1583 } else {
1584 Builder.restoreIP(
1585 PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue));
1586 InnerAllocaIP = {
1587 InnerAllocaIP.getBlock(),
1588 InnerAllocaIP.getBlock()->getTerminator()->getIterator()};
1589
1590 assert(ReplacementValue &&
1591 "Expected copy/create callback to set replacement value!");
1592 if (ReplacementValue == &V)
1593 return;
1594 }
1595
1596 for (Use *UPtr : Uses)
1597 UPtr->set(ReplacementValue);
1598 };
1599
1600 // Reset the inner alloca insertion as it will be used for loading the values
1601 // wrapped into pointers before passing them into the to-be-outlined region.
1602 // Configure it to insert immediately after the fake use of zero address so
1603 // that they are available in the generated body and so that the
1604 // OpenMP-related values (thread ID and zero address pointers) remain leading
1605 // in the argument list.
1606 InnerAllocaIP = IRBuilder<>::InsertPoint(
1607 ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator());
1608
1609 // Reset the outer alloca insertion point to the entry of the relevant block
1610 // in case it was invalidated.
1611 OuterAllocaIP = IRBuilder<>::InsertPoint(
1612 OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt());
1613
1614 for (Value *Input : Inputs) {
1615 LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n");
1616 PrivHelper(*Input);
1617 }
1618 LLVM_DEBUG({
1619 for (Value *Output : Outputs)
1620 LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n");
1621 });
1622 assert(Outputs.empty() &&
1623 "OpenMP outlining should not produce live-out values!");
1624
1625 LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n");
1626 LLVM_DEBUG({
1627 for (auto *BB : Blocks)
1628 dbgs() << " PBR: " << BB->getName() << "\n";
1629 });
1630
1631 // Adjust the finalization stack, verify the adjustment, and call the
1632 // finalize function one last time to finalize values between the pre-fini
1633 // block and the exit block if we left the parallel region "the normal way".
1634 auto FiniInfo = FinalizationStack.pop_back_val();
1635 (void)FiniInfo;
1636 assert(FiniInfo.DK == OMPD_parallel &&
1637 "Unexpected finalization stack state!");
1638
1639 Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator();
1640
1641 InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator());
1642 FiniCB(PreFiniIP);
1643
1644 // Register the outlined info.
1645 addOutlineInfo(std::move(OI));
1646
1647 InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end());
1648 UI->eraseFromParent();
1649
1650 return AfterIP;
1651}
1652
1653 void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) {
1654 // Build call void __kmpc_flush(ident_t *loc)
1655 uint32_t SrcLocStrSize;
1656 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1657 Value *Args[] = {getOrCreateIdent(SrcLocStr, SrcLocStrSize)};
1658
1659 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args);
1660}
1661
1662 void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) {
1663 if (!updateToLocation(Loc))
1664 return;
1665 emitFlush(Loc);
1666}
1667
1668 void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) {
1669 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
1670 // global_tid);
1671 uint32_t SrcLocStrSize;
1672 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1673 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1674 Value *Args[] = {Ident, getOrCreateThreadID(Ident)};
1675
1676 // Ignore return result until untied tasks are supported.
1677 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait),
1678 Args);
1679}
1680
1681 void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) {
1682 if (!updateToLocation(Loc))
1683 return;
1684 emitTaskwaitImpl(Loc);
1685}
1686
1687 void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) {
1688 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
1689 uint32_t SrcLocStrSize;
1690 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1691 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1692 Constant *I32Null = ConstantInt::getNullValue(Int32);
1693 Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null};
1694
1695 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield),
1696 Args);
1697}
1698
1699 void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) {
1700 if (!updateToLocation(Loc))
1701 return;
1702 emitTaskyieldImpl(Loc);
1703}
1704
1705 OpenMPIRBuilder::InsertPointTy
1706 OpenMPIRBuilder::createTask(const LocationDescription &Loc,
1707 InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB,
1708 bool Tied, Value *Final, Value *IfCondition,
1709 SmallVector<DependData> Dependencies) {
1710
1711 if (!updateToLocation(Loc))
1712 return InsertPointTy();
1713
1714 uint32_t SrcLocStrSize;
1715 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1716 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1717 // The current basic block is split into four basic blocks. After outlining,
1718 // they will be mapped as follows:
1719 // ```
1720 // def current_fn() {
1721 // current_basic_block:
1722 // br label %task.exit
1723 // task.exit:
1724 // ; instructions after task
1725 // }
1726 // def outlined_fn() {
1727 // task.alloca:
1728 // br label %task.body
1729 // task.body:
1730 // ret void
1731 // }
1732 // ```
1733 BasicBlock *TaskExitBB = splitBB(Builder, /*CreateBranch=*/true, "task.exit");
1734 BasicBlock *TaskBodyBB = splitBB(Builder, /*CreateBranch=*/true, "task.body");
1735 BasicBlock *TaskAllocaBB =
1736 splitBB(Builder, /*CreateBranch=*/true, "task.alloca");
1737
1738 InsertPointTy TaskAllocaIP =
1739 InsertPointTy(TaskAllocaBB, TaskAllocaBB->begin());
1740 InsertPointTy TaskBodyIP = InsertPointTy(TaskBodyBB, TaskBodyBB->begin());
1741 BodyGenCB(TaskAllocaIP, TaskBodyIP);
1742
1743 OutlineInfo OI;
1744 OI.EntryBB = TaskAllocaBB;
1745 OI.OuterAllocaBB = AllocaIP.getBlock();
1746 OI.ExitBB = TaskExitBB;
1747
1748 // Add the thread ID argument.
1749 std::stack<Instruction *> ToBeDeleted;
1750 OI.ExcludeArgsFromAggregate.push_back(createFakeIntVal(
1751 Builder, AllocaIP, ToBeDeleted, TaskAllocaIP, "global.tid", false));
1752
1753 OI.PostOutlineCB = [this, Ident, Tied, Final, IfCondition, Dependencies,
1754 TaskAllocaBB, ToBeDeleted](Function &OutlinedFn) mutable {
1755 // Replace the stale call instruction with the appropriate runtime call.
1756 assert(OutlinedFn.getNumUses() == 1 &&
1757 "there must be a single user for the outlined function");
1758 CallInst *StaleCI = cast<CallInst>(OutlinedFn.user_back());
1759
1760 // HasShareds is true if any variables are captured in the outlined region,
1761 // false otherwise.
1762 bool HasShareds = StaleCI->arg_size() > 1;
1763 Builder.SetInsertPoint(StaleCI);
1764
1765 // Gather the arguments for emitting the runtime call for
1766 // @__kmpc_omp_task_alloc
1767 Function *TaskAllocFn =
1768 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_alloc);
1769
1770 // Arguments - `loc_ref` (Ident) and `gtid` (ThreadID) for the task
1771 // allocation call.
1772 Value *ThreadID = getOrCreateThreadID(Ident);
1773
1774 // Argument - `flags`
1775 // Task is tied iff (Flags & 1) == 1.
1776 // Task is untied iff (Flags & 1) == 0.
1777 // Task is final iff (Flags & 2) == 2.
1778 // Task is not final iff (Flags & 2) == 0.
1779 // TODO: Handle the other flags.
1780 Value *Flags = Builder.getInt32(Tied);
1781 if (Final) {
1782 Value *FinalFlag =
1783 Builder.CreateSelect(Final, Builder.getInt32(2), Builder.getInt32(0));
1784 Flags = Builder.CreateOr(FinalFlag, Flags);
1785 }
1786
1787 // Argument - `sizeof_kmp_task_t` (TaskSize)
1788 // TaskSize refers to the size in bytes of the kmp_task_t data structure,
1789 // including private variables accessed in the task.
1790 // TODO: add kmp_task_t_with_privates (privates)
1791 Value *TaskSize = Builder.getInt64(
1793
1794 // Argument - `sizeof_shareds` (SharedsSize)
1795 // SharedsSize refers to the shareds array size in the kmp_task_t data
1796 // structure.
1797 Value *SharedsSize = Builder.getInt64(0);
1798 if (HasShareds) {
1799 AllocaInst *ArgStructAlloca =
1800 dyn_cast<AllocaInst>(StaleCI->getArgOperand(1));
1801 assert(ArgStructAlloca &&
1802 "Unable to find the alloca instruction corresponding to arguments "
1803 "for extracted function");
1804 StructType *ArgStructType =
1805 dyn_cast<StructType>(ArgStructAlloca->getAllocatedType());
1806 assert(ArgStructType && "Unable to find struct type corresponding to "
1807 "arguments for extracted function");
1808 SharedsSize =
1809 M.getDataLayout().getTypeStoreSize(ArgStructType);
1810 }
1811 // Emit the @__kmpc_omp_task_alloc runtime call
1812 // The runtime call returns a pointer to an area where the task captured
1813 // variables must be copied before the task is run (TaskData)
1814 CallInst *TaskData = Builder.CreateCall(
1815 TaskAllocFn, {/*loc_ref=*/Ident, /*gtid=*/ThreadID, /*flags=*/Flags,
1816 /*sizeof_task=*/TaskSize, /*sizeof_shared=*/SharedsSize,
1817 /*task_func=*/&OutlinedFn});
1818
1819 // Copy the arguments for outlined function
1820 if (HasShareds) {
1821 Value *Shareds = StaleCI->getArgOperand(1);
1822 Align Alignment = TaskData->getPointerAlignment(M.getDataLayout());
1823 Value *TaskShareds = Builder.CreateLoad(VoidPtr, TaskData);
1824 Builder.CreateMemCpy(TaskShareds, Alignment, Shareds, Alignment,
1825 SharedsSize);
1826 }
1827
1828 Value *DepArray = nullptr;
1829 if (Dependencies.size()) {
1830 InsertPointTy OldIP = Builder.saveIP();
1831 Builder.SetInsertPoint(
1832 &OldIP.getBlock()->getParent()->getEntryBlock().back());
1833
1834 Type *DepArrayTy = ArrayType::get(DependInfo, Dependencies.size());
1835 DepArray = Builder.CreateAlloca(DepArrayTy, nullptr, ".dep.arr.addr");
1836
1837 unsigned P = 0;
1838 for (const DependData &Dep : Dependencies) {
1839 Value *Base =
1840 Builder.CreateConstInBoundsGEP2_64(DepArrayTy, DepArray, 0, P);
1841 // Store the pointer to the variable
1842 Value *Addr = Builder.CreateStructGEP(
1843 DependInfo, Base,
1844 static_cast<unsigned int>(RTLDependInfoFields::BaseAddr));
1845 Value *DepValPtr =
1846 Builder.CreatePtrToInt(Dep.DepVal, Builder.getInt64Ty());
1847 Builder.CreateStore(DepValPtr, Addr);
1848 // Store the size of the variable
1849 Value *Size = Builder.CreateStructGEP(
1850 DependInfo, Base,
1851 static_cast<unsigned int>(RTLDependInfoFields::Len));
1852 Builder.CreateStore(Builder.getInt64(M.getDataLayout().getTypeStoreSize(
1853 Dep.DepValueType)),
1854 Size);
1855 // Store the dependency kind
1856 Value *Flags = Builder.CreateStructGEP(
1857 DependInfo, Base,
1858 static_cast<unsigned int>(RTLDependInfoFields::Flags));
1859 Builder.CreateStore(
1860 ConstantInt::get(Builder.getInt8Ty(),
1861 static_cast<unsigned int>(Dep.DepKind)),
1862 Flags);
1863 ++P;
1864 }
1865
1866 Builder.restoreIP(OldIP);
1867 }
1868
1869 // In the presence of the `if` clause, the following IR is generated:
1870 // ...
1871 // %data = call @__kmpc_omp_task_alloc(...)
1872 // br i1 %if_condition, label %then, label %else
1873 // then:
1874 // call @__kmpc_omp_task(...)
1875 // br label %exit
1876 // else:
1877 // ;; Wait for resolution of dependencies, if any, before
1878 // ;; beginning the task
1879 // call @__kmpc_omp_wait_deps(...)
1880 // call @__kmpc_omp_task_begin_if0(...)
1881 // call @outlined_fn(...)
1882 // call @__kmpc_omp_task_complete_if0(...)
1883 // br label %exit
1884 // exit:
1885 // ...
1886 if (IfCondition) {
1887 // `SplitBlockAndInsertIfThenElse` requires the block to have a
1888 // terminator.
1889 splitBB(Builder, /*CreateBranch=*/true, "if.end");
1890 Instruction *IfTerminator =
1891 Builder.GetInsertPoint()->getParent()->getTerminator();
1892 Instruction *ThenTI = IfTerminator, *ElseTI = nullptr;
1893 Builder.SetInsertPoint(IfTerminator);
1894 SplitBlockAndInsertIfThenElse(IfCondition, IfTerminator, &ThenTI,
1895 &ElseTI);
1896 Builder.SetInsertPoint(ElseTI);
1897
1898 if (Dependencies.size()) {
1899 Function *TaskWaitFn =
1900 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_wait_deps);
1901 Builder.CreateCall(
1902 TaskWaitFn,
1903 {Ident, ThreadID, Builder.getInt32(Dependencies.size()), DepArray,
1904 ConstantInt::get(Builder.getInt32Ty(), 0),
1905 ConstantInt::getNullValue(Builder.getPtrTy())});
1906 }
1907 Function *TaskBeginFn =
1908 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_begin_if0);
1909 Function *TaskCompleteFn =
1910 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_complete_if0);
1911 Builder.CreateCall(TaskBeginFn, {Ident, ThreadID, TaskData});
1912 CallInst *CI = nullptr;
1913 if (HasShareds)
1914 CI = Builder.CreateCall(&OutlinedFn, {ThreadID, TaskData});
1915 else
1916 CI = Builder.CreateCall(&OutlinedFn, {ThreadID});
1917 CI->setDebugLoc(StaleCI->getDebugLoc());
1918 Builder.CreateCall(TaskCompleteFn, {Ident, ThreadID, TaskData});
1919 Builder.SetInsertPoint(ThenTI);
1920 }
1921
1922 if (Dependencies.size()) {
1923 Function *TaskFn =
1924 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_with_deps);
1925 Builder.CreateCall(
1926 TaskFn,
1927 {Ident, ThreadID, TaskData, Builder.getInt32(Dependencies.size()),
1928 DepArray, ConstantInt::get(Builder.getInt32Ty(), 0),
1929 ConstantInt::getNullValue(Builder.getPtrTy())});
1930
1931 } else {
1932 // Emit the @__kmpc_omp_task runtime call to spawn the task
1933 Function *TaskFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task);
1934 Builder.CreateCall(TaskFn, {Ident, ThreadID, TaskData});
1935 }
1936
1937 StaleCI->eraseFromParent();
1938
1939 Builder.SetInsertPoint(TaskAllocaBB, TaskAllocaBB->begin());
1940 if (HasShareds) {
1941 LoadInst *Shareds = Builder.CreateLoad(VoidPtr, OutlinedFn.getArg(1));
1942 OutlinedFn.getArg(1)->replaceUsesWithIf(
1943 Shareds, [Shareds](Use &U) { return U.getUser() != Shareds; });
1944 }
1945
1946 while (!ToBeDeleted.empty()) {
1947 ToBeDeleted.top()->eraseFromParent();
1948 ToBeDeleted.pop();
1949 }
1950 };
1951
1952 addOutlineInfo(std::move(OI));
1953 Builder.SetInsertPoint(TaskExitBB, TaskExitBB->begin());
1954
1955 return Builder.saveIP();
1956}
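// For a simple tied task with no dependencies and no `if` clause, the
// post-outlining rewrite above boils down to roughly the following runtime
// call sequence (a sketch; value names are illustrative, not verbatim output):
//
//   %task = call ptr @__kmpc_omp_task_alloc(ptr @loc, i32 %gtid, i32 1,
//                                           i64 %task_size, i64 %shareds_size,
//                                           ptr @outlined.fn)
//   ; captured variables, if any, are memcpy'd into the shareds area of %task
//   call i32 @__kmpc_omp_task(ptr @loc, i32 %gtid, ptr %task)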
1957
1958 OpenMPIRBuilder::InsertPointTy
1959 OpenMPIRBuilder::createTaskgroup(const LocationDescription &Loc,
1960 InsertPointTy AllocaIP,
1961 BodyGenCallbackTy BodyGenCB) {
1962 if (!updateToLocation(Loc))
1963 return InsertPointTy();
1964
1965 uint32_t SrcLocStrSize;
1966 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1967 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1968 Value *ThreadID = getOrCreateThreadID(Ident);
1969
1970 // Emit the @__kmpc_taskgroup runtime call to start the taskgroup
1971 Function *TaskgroupFn =
1972 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_taskgroup);
1973 Builder.CreateCall(TaskgroupFn, {Ident, ThreadID});
1974
1975 BasicBlock *TaskgroupExitBB = splitBB(Builder, true, "taskgroup.exit");
1976 BodyGenCB(AllocaIP, Builder.saveIP());
1977
1978 Builder.SetInsertPoint(TaskgroupExitBB);
1979 // Emit the @__kmpc_end_taskgroup runtime call to end the taskgroup
1980 Function *EndTaskgroupFn =
1981 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_taskgroup);
1982 Builder.CreateCall(EndTaskgroupFn, {Ident, ThreadID});
1983
1984 return Builder.saveIP();
1985}
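// The taskgroup construct therefore brackets the body with two runtime calls;
// roughly (illustrative):
//
//   call void @__kmpc_taskgroup(ptr @loc, i32 %gtid)
//   ; ... body emitted by BodyGenCB, typically spawning tasks ...
//   call void @__kmpc_end_taskgroup(ptr @loc, i32 %gtid)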
1986
1987 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections(
1988 const LocationDescription &Loc, InsertPointTy AllocaIP,
1989 ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB,
1990 FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) {
1991 assert(!isConflictIP(AllocaIP, Loc.IP) && "Dedicated IP allocas required");
1992
1993 if (!updateToLocation(Loc))
1994 return Loc.IP;
1995
1996 auto FiniCBWrapper = [&](InsertPointTy IP) {
1997 if (IP.getBlock()->end() != IP.getPoint())
1998 return FiniCB(IP);
1999 // This must be done, otherwise any nested constructs using FinalizeOMPRegion
2000 // will fail because that function requires the finalization basic block to
2001 // have a terminator, which is already removed by EmitOMPRegionBody.
2002 // IP is currently at the cancellation block.
2003 // We need to backtrack to the condition block to fetch
2004 // the exit block and create a branch from the cancellation
2005 // block to the exit block.
2007 Builder.restoreIP(IP);
2008 auto *CaseBB = IP.getBlock()->getSinglePredecessor();
2009 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
2010 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
2011 Instruction *I = Builder.CreateBr(ExitBB);
2012 IP = InsertPointTy(I->getParent(), I->getIterator());
2013 return FiniCB(IP);
2014 };
2015
2016 FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable});
2017
2018 // Each section is emitted as a switch case
2019 // Each finalization callback is handled from clang.EmitOMPSectionDirective()
2020 // -> OMP.createSection() which generates the IR for each section
2021 // Iterate through all sections and emit a switch construct:
2022 // switch (IV) {
2023 // case 0:
2024 // <SectionStmt[0]>;
2025 // break;
2026 // ...
2027 // case <NumSection> - 1:
2028 // <SectionStmt[<NumSection> - 1]>;
2029 // break;
2030 // }
2031 // ...
2032 // section_loop.after:
2033 // <FiniCB>;
2034 auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) {
2035 Builder.restoreIP(CodeGenIP);
2036 BasicBlock *Continue =
2037 splitBBWithSuffix(Builder, /*CreateBranch=*/false, ".sections.after");
2038 Function *CurFn = Continue->getParent();
2039 SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, Continue);
2040
2041 unsigned CaseNumber = 0;
2042 for (auto SectionCB : SectionCBs) {
2043 BasicBlock *CaseBB = BasicBlock::Create(
2044 M.getContext(), "omp_section_loop.body.case", CurFn, Continue);
2045 SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB);
2046 Builder.SetInsertPoint(CaseBB);
2047 BranchInst *CaseEndBr = Builder.CreateBr(Continue);
2048 SectionCB(InsertPointTy(),
2049 {CaseEndBr->getParent(), CaseEndBr->getIterator()});
2050 CaseNumber++;
2051 }
2052 // Remove the existing terminator from the body BB since there can be no
2053 // terminators after a switch/case.
2054 };
2055 // Loop body ends here
2056 // LowerBound, UpperBound, and Stride for createCanonicalLoop
2057 Type *I32Ty = Type::getInt32Ty(M.getContext());
2058 Value *LB = ConstantInt::get(I32Ty, 0);
2059 Value *UB = ConstantInt::get(I32Ty, SectionCBs.size());
2060 Value *ST = ConstantInt::get(I32Ty, 1);
2061 CanonicalLoopInfo *LoopInfo = createCanonicalLoop(
2062 Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop");
2063 InsertPointTy AfterIP =
2064 applyStaticWorkshareLoop(Loc.DL, LoopInfo, AllocaIP, !IsNowait);
2065
2066 // Apply the finalization callback in LoopAfterBB
2067 auto FiniInfo = FinalizationStack.pop_back_val();
2068 assert(FiniInfo.DK == OMPD_sections &&
2069 "Unexpected finalization stack state!");
2070 if (FinalizeCallbackTy &CB = FiniInfo.FiniCB) {
2071 Builder.restoreIP(AfterIP);
2072 BasicBlock *FiniBB =
2073 splitBBWithSuffix(Builder, /*CreateBranch=*/true, "sections.fini");
2074 CB(Builder.saveIP());
2075 AfterIP = {FiniBB, FiniBB->begin()};
2076 }
2077
2078 return AfterIP;
2079}
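// Sections are thus lowered as a statically scheduled canonical loop over the
// section index whose body dispatches on that index; a sketch for two
// sections (block names are illustrative):
//
//   switch i32 %iv, label %omp_section_loop.body.sections.after [
//     i32 0, label %omp_section_loop.body.case
//     i32 1, label %omp_section_loop.body.case1
//   ]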
2080
2081 OpenMPIRBuilder::InsertPointTy
2082 OpenMPIRBuilder::createSection(const LocationDescription &Loc,
2083 BodyGenCallbackTy BodyGenCB,
2084 FinalizeCallbackTy FiniCB) {
2085 if (!updateToLocation(Loc))
2086 return Loc.IP;
2087
2088 auto FiniCBWrapper = [&](InsertPointTy IP) {
2089 if (IP.getBlock()->end() != IP.getPoint())
2090 return FiniCB(IP);
2091 // This must be done, otherwise any nested constructs using FinalizeOMPRegion
2092 // will fail because that function requires the finalization basic block to
2093 // have a terminator, which is already removed by EmitOMPRegionBody.
2094 // IP is currently at the cancellation block.
2095 // We need to backtrack to the condition block to fetch
2096 // the exit block and create a branch from the cancellation
2097 // block to the exit block.
2099 Builder.restoreIP(IP);
2100 auto *CaseBB = Loc.IP.getBlock();
2101 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
2102 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
2103 Instruction *I = Builder.CreateBr(ExitBB);
2104 IP = InsertPointTy(I->getParent(), I->getIterator());
2105 return FiniCB(IP);
2106 };
2107
2108 Directive OMPD = Directive::OMPD_sections;
2109 // Since we are using Finalization Callback here, HasFinalize
2110 // and IsCancellable have to be true
2111 return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper,
2112 /*Conditional*/ false, /*hasFinalize*/ true,
2113 /*IsCancellable*/ true);
2114}
2115
2116/// Create a function with a unique name and a "void (i8*, i8*)" signature in
2117/// the given module and return it.
2118 static Function *getFreshReductionFunc(Module &M) {
2119 Type *VoidTy = Type::getVoidTy(M.getContext());
2120 Type *Int8PtrTy = PointerType::getUnqual(M.getContext());
2121 auto *FuncTy =
2122 FunctionType::get(VoidTy, {Int8PtrTy, Int8PtrTy}, /* IsVarArg */ false);
2123 return Function::Create(FuncTy, GlobalVariable::InternalLinkage,
2124 M.getDataLayout().getDefaultGlobalsAddressSpace(),
2125 ".omp.reduction.func", &M);
2126}
2127
2128 OpenMPIRBuilder::InsertPointTy
2129 OpenMPIRBuilder::createReductions(const LocationDescription &Loc,
2130 InsertPointTy AllocaIP,
2131 ArrayRef<ReductionInfo> ReductionInfos,
2132 ArrayRef<bool> IsByRef, bool IsNoWait) {
2133 assert(ReductionInfos.size() == IsByRef.size());
2134 for (const ReductionInfo &RI : ReductionInfos) {
2135 (void)RI;
2136 assert(RI.Variable && "expected non-null variable");
2137 assert(RI.PrivateVariable && "expected non-null private variable");
2138 assert(RI.ReductionGen && "expected non-null reduction generator callback");
2139 assert(RI.Variable->getType() == RI.PrivateVariable->getType() &&
2140 "expected variables and their private equivalents to have the same "
2141 "type");
2142 assert(RI.Variable->getType()->isPointerTy() &&
2143 "expected variables to be pointers");
2144 }
2145
2146 if (!updateToLocation(Loc))
2147 return InsertPointTy();
2148
2149 BasicBlock *InsertBlock = Loc.IP.getBlock();
2150 BasicBlock *ContinuationBlock =
2151 InsertBlock->splitBasicBlock(Loc.IP.getPoint(), "reduce.finalize");
2152 InsertBlock->getTerminator()->eraseFromParent();
2153
2154 // Create and populate array of type-erased pointers to private reduction
2155 // values.
2156 unsigned NumReductions = ReductionInfos.size();
2157 Type *RedArrayTy = ArrayType::get(Builder.getPtrTy(), NumReductions);
2158 Builder.restoreIP(AllocaIP);
2159 Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array");
2160
2161 Builder.SetInsertPoint(InsertBlock, InsertBlock->end());
2162
2163 for (auto En : enumerate(ReductionInfos)) {
2164 unsigned Index = En.index();
2165 const ReductionInfo &RI = En.value();
2166 Value *RedArrayElemPtr = Builder.CreateConstInBoundsGEP2_64(
2167 RedArrayTy, RedArray, 0, Index, "red.array.elem." + Twine(Index));
2168 Builder.CreateStore(RI.PrivateVariable, RedArrayElemPtr);
2169 }
2170
2171 // Emit a call to the runtime function that orchestrates the reduction.
2172 // Declare the reduction function in the process.
2173 Function *Func = Builder.GetInsertBlock()->getParent();
2174 Module *Module = Func->getParent();
2175 uint32_t SrcLocStrSize;
2176 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
2177 bool CanGenerateAtomic =
2178 llvm::all_of(ReductionInfos, [](const ReductionInfo &RI) {
2179 return RI.AtomicReductionGen;
2180 });
2181 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize,
2182 CanGenerateAtomic
2183 ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE
2184 : IdentFlag(0));
2185 Value *ThreadId = getOrCreateThreadID(Ident);
2186 Constant *NumVariables = Builder.getInt32(NumReductions);
2187 const DataLayout &DL = Module->getDataLayout();
2188 unsigned RedArrayByteSize = DL.getTypeStoreSize(RedArrayTy);
2189 Constant *RedArraySize = Builder.getInt64(RedArrayByteSize);
2190 Function *ReductionFunc = getFreshReductionFunc(*Module);
2191 Value *Lock = getOMPCriticalRegionLock(".reduction");
2192 Function *ReduceFunc = getOrCreateRuntimeFunctionPtr(
2193 IsNoWait ? RuntimeFunction::OMPRTL___kmpc_reduce_nowait
2194 : RuntimeFunction::OMPRTL___kmpc_reduce);
2195 CallInst *ReduceCall =
2196 Builder.CreateCall(ReduceFunc,
2197 {Ident, ThreadId, NumVariables, RedArraySize, RedArray,
2198 ReductionFunc, Lock},
2199 "reduce");
2200
2201 // Create final reduction entry blocks for the atomic and non-atomic case.
2202 // Emit IR that dispatches control flow to one of the blocks based on the
2203 // reduction supporting the atomic mode.
2204 BasicBlock *NonAtomicRedBlock =
2205 BasicBlock::Create(Module->getContext(), "reduce.switch.nonatomic", Func);
2206 BasicBlock *AtomicRedBlock =
2207 BasicBlock::Create(Module->getContext(), "reduce.switch.atomic", Func);
2208 SwitchInst *Switch =
2209 Builder.CreateSwitch(ReduceCall, ContinuationBlock, /* NumCases */ 2);
2210 Switch->addCase(Builder.getInt32(1), NonAtomicRedBlock);
2211 Switch->addCase(Builder.getInt32(2), AtomicRedBlock);
2212
2213 // Populate the non-atomic reduction using the elementwise reduction function.
2214 // This loads the elements from the global and private variables and reduces
2215 // them before storing back the result to the global variable.
2216 Builder.SetInsertPoint(NonAtomicRedBlock);
2217 for (auto En : enumerate(ReductionInfos)) {
2218 const ReductionInfo &RI = En.value();
2219 Type *ValueType = RI.ElementType;
2220 // We have one less load for the by-ref case because that load is now inside
2221 // the reduction region.
2222 Value *RedValue = nullptr;
2223 if (!IsByRef[En.index()]) {
2224 RedValue = Builder.CreateLoad(ValueType, RI.Variable,
2225 "red.value." + Twine(En.index()));
2226 }
2227 Value *PrivateRedValue =
2229 "red.private.value." + Twine(En.index()));
2230 Value *Reduced;
2231 if (IsByRef[En.index()]) {
2232 Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), RI.Variable,
2233 PrivateRedValue, Reduced));
2234 } else {
2235 Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), RedValue,
2236 PrivateRedValue, Reduced));
2237 }
2238 if (!Builder.GetInsertBlock())
2239 return InsertPointTy();
2240 // for by-ref case, the load is inside of the reduction region
2241 if (!IsByRef[En.index()])
2242 Builder.CreateStore(Reduced, RI.Variable);
2243 }
2244 Function *EndReduceFunc = getOrCreateRuntimeFunctionPtr(
2245 IsNoWait ? RuntimeFunction::OMPRTL___kmpc_end_reduce_nowait
2246 : RuntimeFunction::OMPRTL___kmpc_end_reduce);
2247 Builder.CreateCall(EndReduceFunc, {Ident, ThreadId, Lock});
2248 Builder.CreateBr(ContinuationBlock);
2249
2250 // Populate the atomic reduction using the atomic elementwise reduction
2251 // function. There are no loads/stores here because they will be happening
2252 // inside the atomic elementwise reduction.
2253 Builder.SetInsertPoint(AtomicRedBlock);
2254 if (CanGenerateAtomic && llvm::none_of(IsByRef, [](bool P) { return P; })) {
2255 for (const ReductionInfo &RI : ReductionInfos) {
2256 Builder.restoreIP(RI.AtomicReductionGen(Builder.saveIP(), RI.ElementType,
2257 RI.Variable, RI.PrivateVariable));
2258 if (!Builder.GetInsertBlock())
2259 return InsertPointTy();
2260 }
2261 Builder.CreateBr(ContinuationBlock);
2262 } else {
2263 Builder.CreateUnreachable();
2264 }
2265
2266 // Populate the outlined reduction function using the elementwise reduction
2267 // function. Partial values are extracted from the type-erased array of
2268 // pointers to private variables.
2269 BasicBlock *ReductionFuncBlock =
2270 BasicBlock::Create(Module->getContext(), "", ReductionFunc);
2271 Builder.SetInsertPoint(ReductionFuncBlock);
2272 Value *LHSArrayPtr = ReductionFunc->getArg(0);
2273 Value *RHSArrayPtr = ReductionFunc->getArg(1);
2274
2275 for (auto En : enumerate(ReductionInfos)) {
2276 const ReductionInfo &RI = En.value();
2277 Value *LHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
2278 RedArrayTy, LHSArrayPtr, 0, En.index());
2279 Value *LHSI8Ptr = Builder.CreateLoad(Builder.getPtrTy(), LHSI8PtrPtr);
2280 Value *LHSPtr = Builder.CreateBitCast(LHSI8Ptr, RI.Variable->getType());
2281 Value *LHS = Builder.CreateLoad(RI.ElementType, LHSPtr);
2282 Value *RHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
2283 RedArrayTy, RHSArrayPtr, 0, En.index());
2284 Value *RHSI8Ptr = Builder.CreateLoad(Builder.getPtrTy(), RHSI8PtrPtr);
2285 Value *RHSPtr =
2286 Builder.CreateBitCast(RHSI8Ptr, RI.PrivateVariable->getType());
2287 Value *RHS = Builder.CreateLoad(RI.ElementType, RHSPtr);
2288 Value *Reduced;
2289 Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced));
2290 if (!Builder.GetInsertBlock())
2291 return InsertPointTy();
2292 // store is inside of the reduction region when using by-ref
2293 if (!IsByRef[En.index()])
2294 Builder.CreateStore(Reduced, LHSPtr);
2295 }
2296 Builder.CreateRetVoid();
2297
2298 Builder.SetInsertPoint(ContinuationBlock);
2299 return Builder.saveIP();
2300}
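// Sketch of the dispatch emitted around the reduction runtime call above
// (illustrative; the nowait variants are used when IsNoWait is set):
//
//   %r = call i32 @__kmpc_reduce(ptr @loc, i32 %tid, i32 %num_vars, i64 %size,
//                                ptr %red.array, ptr @.omp.reduction.func,
//                                ptr %lock)
//   switch i32 %r, label %reduce.finalize [
//     i32 1, label %reduce.switch.nonatomic ; elementwise + __kmpc_end_reduce
//     i32 2, label %reduce.switch.atomic    ; AtomicReductionGen callbacks
//   ]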
2301
2302 OpenMPIRBuilder::InsertPointTy
2303 OpenMPIRBuilder::createMaster(const LocationDescription &Loc,
2304 BodyGenCallbackTy BodyGenCB,
2305 FinalizeCallbackTy FiniCB) {
2306
2307 if (!updateToLocation(Loc))
2308 return Loc.IP;
2309
2310 Directive OMPD = Directive::OMPD_master;
2311 uint32_t SrcLocStrSize;
2312 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
2313 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2314 Value *ThreadId = getOrCreateThreadID(Ident);
2315 Value *Args[] = {Ident, ThreadId};
2316
2317 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master);
2318 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
2319
2320 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master);
2321 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
2322
2323 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
2324 /*Conditional*/ true, /*hasFinalize*/ true);
2325}
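// The inlined region produced by EmitOMPInlinedRegion is guarded by the entry
// call's return value; roughly (illustrative block and value names):
//
//   %res = call i32 @__kmpc_master(ptr @loc, i32 %tid)
//   %guard = icmp ne i32 %res, 0
//   br i1 %guard, label %region.body, label %region.end
//   ; region.body runs BodyGenCB and calls @__kmpc_end_master before rejoining
//   ; region.end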
2326
2327 OpenMPIRBuilder::InsertPointTy
2328 OpenMPIRBuilder::createMasked(const LocationDescription &Loc,
2329 BodyGenCallbackTy BodyGenCB,
2330 FinalizeCallbackTy FiniCB, Value *Filter) {
2331 if (!updateToLocation(Loc))
2332 return Loc.IP;
2333
2334 Directive OMPD = Directive::OMPD_masked;
2335 uint32_t SrcLocStrSize;
2336 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
2337 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2338 Value *ThreadId = getOrCreateThreadID(Ident);
2339 Value *Args[] = {Ident, ThreadId, Filter};
2340 Value *ArgsEnd[] = {Ident, ThreadId};
2341
2342 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked);
2343 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
2344
2345 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked);
2346 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd);
2347
2348 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
2349 /*Conditional*/ true, /*hasFinalize*/ true);
2350}
2351
2352 CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
2353 DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
2354 BasicBlock *PostInsertBefore, const Twine &Name) {
2355 Module *M = F->getParent();
2356 LLVMContext &Ctx = M->getContext();
2357 Type *IndVarTy = TripCount->getType();
2358
2359 // Create the basic block structure.
2360 BasicBlock *Preheader =
2361 BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore);
2362 BasicBlock *Header =
2363 BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore);
2364 BasicBlock *Cond =
2365 BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore);
2366 BasicBlock *Body =
2367 BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore);
2368 BasicBlock *Latch =
2369 BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore);
2370 BasicBlock *Exit =
2371 BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore);
2372 BasicBlock *After =
2373 BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore);
2374
2375 // Use specified DebugLoc for new instructions.
2376 Builder.SetCurrentDebugLocation(DL);
2377
2378 Builder.SetInsertPoint(Preheader);
2379 Builder.CreateBr(Header);
2380
2381 Builder.SetInsertPoint(Header);
2382 PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv");
2383 IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader);
2384 Builder.CreateBr(Cond);
2385
2386 Builder.SetInsertPoint(Cond);
2387 Value *Cmp =
2388 Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp");
2389 Builder.CreateCondBr(Cmp, Body, Exit);
2390
2391 Builder.SetInsertPoint(Body);
2392 Builder.CreateBr(Latch);
2393
2394 Builder.SetInsertPoint(Latch);
2395 Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1),
2396 "omp_" + Name + ".next", /*HasNUW=*/true);
2397 Builder.CreateBr(Header);
2398 IndVarPHI->addIncoming(Next, Latch);
2399
2400 Builder.SetInsertPoint(Exit);
2401 Builder.CreateBr(After);
2402
2403 // Remember and return the canonical control flow.
2404 LoopInfos.emplace_front();
2405 CanonicalLoopInfo *CL = &LoopInfos.front();
2406
2407 CL->Header = Header;
2408 CL->Cond = Cond;
2409 CL->Latch = Latch;
2410 CL->Exit = Exit;
2411
2412#ifndef NDEBUG
2413 CL->assertOK();
2414#endif
2415 return CL;
2416}
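// The skeleton created above wires up the following control flow, with the
// body block left empty for the caller to fill in:
//
//   preheader -> header -> cond --(iv ult tripcount)--> body -> inc -> header
//                            `--(otherwise)-----------> exit -> after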
2417
2418 CanonicalLoopInfo *
2419 OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc,
2420 LoopBodyGenCallbackTy BodyGenCB,
2421 Value *TripCount, const Twine &Name) {
2422 BasicBlock *BB = Loc.IP.getBlock();
2423 BasicBlock *NextBB = BB->getNextNode();
2424
2425 CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(),
2426 NextBB, NextBB, Name);
2427 BasicBlock *After = CL->getAfter();
2428
2429 // If location is not set, don't connect the loop.
2430 if (updateToLocation(Loc)) {
2431 // Split the loop at the insertion point: Branch to the preheader and move
2432 // every following instruction to after the loop (the After BB). Also, the
2433 // new successor is the loop's after block.
2434 spliceBB(Builder, After, /*CreateBranch=*/false);
2435 Builder.CreateBr(CL->getPreheader());
2436 }
2437
2438 // Emit the body content. We do it after connecting the loop to the CFG to
2439 // avoid that the callback encounters degenerate BBs.
2440 BodyGenCB(CL->getBodyIP(), CL->getIndVar());
2441
2442#ifndef NDEBUG
2443 CL->assertOK();
2444#endif
2445 return CL;
2446}
2447
2448 CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop(
2449 const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
2450 Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
2451 InsertPointTy ComputeIP, const Twine &Name) {
2452
2453 // Consider the following difficulties (assuming 8-bit signed integers):
2454 // * Adding \p Step to the loop counter which passes \p Stop may overflow:
2455 // DO I = 1, 100, 50
2456 // * A \p Step of INT_MIN cannot be normalized to a positive direction:
2457 // DO I = 100, 0, -128
2458
2459 // Start, Stop and Step must be of the same integer type.
2460 auto *IndVarTy = cast<IntegerType>(Start->getType());
2461 assert(IndVarTy == Stop->getType() && "Stop type mismatch");
2462 assert(IndVarTy == Step->getType() && "Step type mismatch");
2463
2464 LocationDescription ComputeLoc =
2465 ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
2466 updateToLocation(ComputeLoc);
2467
2468 ConstantInt *Zero = ConstantInt::get(IndVarTy, 0);
2469 ConstantInt *One = ConstantInt::get(IndVarTy, 1);
2470
2471 // Like Step, but always positive.
2472 Value *Incr = Step;
2473
2474 // Distance between Start and Stop; always positive.
2475 Value *Span;
2476
2477 // Condition checking whether no iterations are executed at all, e.g. because
2478 // UB < LB.
2479 Value *ZeroCmp;
2480
2481 if (IsSigned) {
2482 // Ensure that increment is positive. If not, negate and invert LB and UB.
2483 Value *IsNeg = Builder.CreateICmpSLT(Step, Zero);
2484 Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step);
2485 Value *LB = Builder.CreateSelect(IsNeg, Stop, Start);
2486 Value *UB = Builder.CreateSelect(IsNeg, Start, Stop);
2487 Span = Builder.CreateSub(UB, LB, "", false, true);
2488 ZeroCmp = Builder.CreateICmp(
2489 InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB);
2490 } else {
2491 Span = Builder.CreateSub(Stop, Start, "", true);
2492 ZeroCmp = Builder.CreateICmp(
2493 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start);
2494 }
2495
2496 Value *CountIfLooping;
2497 if (InclusiveStop) {
2498 CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One);
2499 } else {
2500 // Avoid incrementing past stop since it could overflow.
2501 Value *CountIfTwo = Builder.CreateAdd(
2502 Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One);
2503 Value *OneCmp = Builder.CreateICmp(CmpInst::ICMP_ULE, Span, Incr);
2504 CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo);
2505 }
2506 Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping,
2507 "omp_" + Name + ".tripcount");
2508
2509 auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) {
2510 Builder.restoreIP(CodeGenIP);
2511 Value *Span = Builder.CreateMul(IV, Step);
2512 Value *IndVar = Builder.CreateAdd(Span, Start);
2513 BodyGenCB(Builder.saveIP(), IndVar);
2514 };
2515 LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP();
2516 return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name);
2517}
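// Worked example of the trip count logic above (unsigned, InclusiveStop is
// false): for Start=1, Stop=100, Step=50 the span is 99, CountIfTwo is
// (99 - 1) / 50 + 1 = 2, and since 99 > 50 the select keeps 2; the zero-trip
// check fails because 100 <= 1 is false, so the trip count is 2 and the body
// callback sees IV * 50 + 1, i.e. the values 1 and 51.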
2518
2519// Returns an LLVM function to call for initializing loop bounds using OpenMP
2520// static scheduling depending on `type`. Only i32 and i64 are supported by the
2521// runtime. Always interpret integers as unsigned similarly to
2522// CanonicalLoopInfo.
2523 static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M,
2524 OpenMPIRBuilder &OMPBuilder) {
2525 unsigned Bitwidth = Ty->getIntegerBitWidth();
2526 if (Bitwidth == 32)
2527 return OMPBuilder.getOrCreateRuntimeFunction(
2528 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u);
2529 if (Bitwidth == 64)
2530 return OMPBuilder.getOrCreateRuntimeFunction(
2531 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u);
2532 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
2533}
2534
2535 OpenMPIRBuilder::InsertPointTy
2536 OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
2537 InsertPointTy AllocaIP,
2538 bool NeedsBarrier) {
2539 assert(CLI->isValid() && "Requires a valid canonical loop");
2540 assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) &&
2541 "Require dedicated allocate IP");
2542
2543 // Set up the source location value for OpenMP runtime.
2544 Builder.restoreIP(CLI->getPreheaderIP());
2545 Builder.SetCurrentDebugLocation(DL);
2546
2547 uint32_t SrcLocStrSize;
2548 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
2549 Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2550
2551 // Declare useful OpenMP runtime functions.
2552 Value *IV = CLI->getIndVar();
2553 Type *IVTy = IV->getType();
2554 FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this);
2555 FunctionCallee StaticFini =
2556 getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);
2557
2558 // Allocate space for computed loop bounds as expected by the "init" function.
2559 Builder.restoreIP(AllocaIP);
2560 Type *I32Type = Type::getInt32Ty(M.getContext());
2561 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
2562 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
2563 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
2564 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");
2565
2566 // At the end of the preheader, prepare for calling the "init" function by
2567 // storing the current loop bounds into the allocated space. A canonical loop
2568 // always iterates from 0 to trip-count with step 1. Note that "init" expects
2569 // and produces an inclusive upper bound.
2570 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
2571 Constant *Zero = ConstantInt::get(IVTy, 0);
2572 Constant *One = ConstantInt::get(IVTy, 1);
2573 Builder.CreateStore(Zero, PLowerBound);
2574 Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One);
2575 Builder.CreateStore(UpperBound, PUpperBound);
2576 Builder.CreateStore(One, PStride);
2577
2578 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
2579
2580 Constant *SchedulingType = ConstantInt::get(
2581 I32Type, static_cast<int>(OMPScheduleType::UnorderedStatic));
2582
2583 // Call the "init" function and update the trip count of the loop with the
2584 // value it produced.
2585 Builder.CreateCall(StaticInit,
2586 {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound,
2587 PUpperBound, PStride, One, Zero});
2588 Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound);
2589 Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound);
2590 Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound);
2591 Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One);
2592 CLI->setTripCount(TripCount);
2593
2594 // Update all uses of the induction variable except the one in the condition
2595 // block that compares it with the actual upper bound, and the increment in
2596 // the latch block.
2597
2598 CLI->mapIndVar([&](Instruction *OldIV) -> Value * {
2599 Builder.SetInsertPoint(CLI->getBody(),
2600 CLI->getBody()->getFirstInsertionPt());
2601 Builder.SetCurrentDebugLocation(DL);
2602 return Builder.CreateAdd(OldIV, LowerBound);
2603 });
2604
2605 // In the "exit" block, call the "fini" function.
2606 Builder.SetInsertPoint(CLI->getExit(),
2607 CLI->getExit()->getTerminator()->getIterator());
2608 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
2609
2610 // Add the barrier if requested.
2611 if (NeedsBarrier)
2612 createBarrier(LocationDescription(Builder.saveIP(), DL),
2613 omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
2614 /* CheckCancelFlag */ false);
2615
2616 InsertPointTy AfterIP = CLI->getAfterIP();
2617 CLI->invalidate();
2618
2619 return AfterIP;
2620}
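// After this transformation the preheader roughly performs the following
// (illustrative, for a 32-bit induction variable):
//
//   call void @__kmpc_for_static_init_4u(ptr @loc, i32 %tid, i32 %schedtype,
//                                        ptr %p.lastiter, ptr %p.lowerbound,
//                                        ptr %p.upperbound, ptr %p.stride,
//                                        i32 1, i32 0)
//   ; the loop then runs for ub - lb + 1 iterations with the IV offset by lb;
//   ; the exit block calls @__kmpc_for_static_fini and, if requested, emits a
//   ; barrier.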
2621
2622OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyStaticChunkedWorkshareLoop(
2623 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
2624 bool NeedsBarrier, Value *ChunkSize) {
2625 assert(CLI->isValid() && "Requires a valid canonical loop");
2626 assert(ChunkSize && "Chunk size is required");
2627
2628 LLVMContext &Ctx = CLI->getFunction()->getContext();
2629 Value *IV = CLI->getIndVar();
2630 Value *OrigTripCount = CLI->getTripCount();
2631 Type *IVTy = IV->getType();
2632 assert(IVTy->getIntegerBitWidth() <= 64 &&
2633 "Max supported tripcount bitwidth is 64 bits");
2634 Type *InternalIVTy = IVTy->getIntegerBitWidth() <= 32 ? Type::getInt32Ty(Ctx)
2635 : Type::getInt64Ty(Ctx);
2636 Type *I32Type = Type::getInt32Ty(M.getContext());
2637 Constant *Zero = ConstantInt::get(InternalIVTy, 0);
2638 Constant *One = ConstantInt::get(InternalIVTy, 1);
2639
2640 // Declare useful OpenMP runtime functions.
2641 FunctionCallee StaticInit =
2642 getKmpcForStaticInitForType(InternalIVTy, M, *this);
2643 FunctionCallee StaticFini =
2644 getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);
2645
2646 // Allocate space for computed loop bounds as expected by the "init" function.
2647 Builder.restoreIP(AllocaIP);
2649 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
2650 Value *PLowerBound =
2651 Builder.CreateAlloca(InternalIVTy, nullptr, "p.lowerbound");
2652 Value *PUpperBound =
2653 Builder.CreateAlloca(InternalIVTy, nullptr, "p.upperbound");
2654 Value *PStride = Builder.CreateAlloca(InternalIVTy, nullptr, "p.stride");
2655
2656 // Set up the source location value for the OpenMP runtime.
2657 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
2658 Builder.SetCurrentDebugLocation(DL);
2659
2660 // TODO: Detect overflow in ubsan or max-out with current tripcount.
2661 Value *CastedChunkSize =
2662 Builder.CreateZExtOrTrunc(ChunkSize, InternalIVTy, "chunksize");
2663 Value *CastedTripCount =
2664 Builder.CreateZExt(OrigTripCount, InternalIVTy, "tripcount");
2665
2666 Constant *SchedulingType = ConstantInt::get(
2667 I32Type, static_cast<int>(OMPScheduleType::UnorderedStaticChunked));
2668 Builder.CreateStore(Zero, PLowerBound);
2669 Value *OrigUpperBound = Builder.CreateSub(CastedTripCount, One);
2670 Builder.CreateStore(OrigUpperBound, PUpperBound);
2671 Builder.CreateStore(One, PStride);
2672
2673 // Call the "init" function and update the trip count of the loop with the
2674 // value it produced.
2675 uint32_t SrcLocStrSize;
2676 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
2677 Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2678 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
2679 Builder.CreateCall(StaticInit,
2680 {/*loc=*/SrcLoc, /*global_tid=*/ThreadNum,
2681 /*schedtype=*/SchedulingType, /*plastiter=*/PLastIter,
2682 /*plower=*/PLowerBound, /*pupper=*/PUpperBound,
2683 /*pstride=*/PStride, /*incr=*/One,
2684 /*chunk=*/CastedChunkSize});
2685
2686 // Load values written by the "init" function.
2687 Value *FirstChunkStart =
2688 Builder.CreateLoad(InternalIVTy, PLowerBound, "omp_firstchunk.lb");
2689 Value *FirstChunkStop =
2690 Builder.CreateLoad(InternalIVTy, PUpperBound, "omp_firstchunk.ub");
2691 Value *FirstChunkEnd = Builder.CreateAdd(FirstChunkStop, One);
2692 Value *ChunkRange =
2693 Builder.CreateSub(FirstChunkEnd, FirstChunkStart, "omp_chunk.range");
2694 Value *NextChunkStride =
2695 Builder.CreateLoad(InternalIVTy, PStride, "omp_dispatch.stride");
2696
2697 // Create outer "dispatch" loop for enumerating the chunks.
2698 BasicBlock *DispatchEnter = splitBB(Builder, true);
2699 Value *DispatchCounter;
2700 CanonicalLoopInfo *DispatchCLI = createCanonicalLoop(
2701 {Builder.saveIP(), DL},
2702 [&](InsertPointTy BodyIP, Value *Counter) { DispatchCounter = Counter; },
2703 FirstChunkStart, CastedTripCount, NextChunkStride,
2704 /*IsSigned=*/false, /*InclusiveStop=*/false, /*ComputeIP=*/{},
2705 "dispatch");
2706
2707 // Remember the BasicBlocks of the dispatch loop we need, then invalidate to
2708 // not have to preserve the canonical invariant.
2709 BasicBlock *DispatchBody = DispatchCLI->getBody();
2710 BasicBlock *DispatchLatch = DispatchCLI->getLatch();
2711 BasicBlock *DispatchExit = DispatchCLI->getExit();
2712 BasicBlock *DispatchAfter = DispatchCLI->getAfter();
2713 DispatchCLI->invalidate();
2714
2715 // Rewire the original loop to become the chunk loop inside the dispatch loop.
2716 redirectTo(DispatchAfter, CLI->getAfter(), DL);
2717 redirectTo(CLI->getExit(), DispatchLatch, DL);
2718 redirectTo(DispatchBody, DispatchEnter, DL);
2719
2720 // Prepare the prolog of the chunk loop.
2721 Builder.restoreIP(CLI->getPreheaderIP());
2722 Builder.SetCurrentDebugLocation(DL);
2723
2724 // Compute the number of iterations of the chunk loop.
2726 Value *ChunkEnd = Builder.CreateAdd(DispatchCounter, ChunkRange);
2727 Value *IsLastChunk =
2728 Builder.CreateICmpUGE(ChunkEnd, CastedTripCount, "omp_chunk.is_last");
2729 Value *CountUntilOrigTripCount =
2730 Builder.CreateSub(CastedTripCount, DispatchCounter);
2731 Value *ChunkTripCount = Builder.CreateSelect(
2732 IsLastChunk, CountUntilOrigTripCount, ChunkRange, "omp_chunk.tripcount");
2733 Value *BackcastedChunkTC =
2734 Builder.CreateTrunc(ChunkTripCount, IVTy, "omp_chunk.tripcount.trunc");
2735 CLI->setTripCount(BackcastedChunkTC);
2736
2737 // Update all uses of the induction variable except the one in the condition
2738 // block that compares it with the actual upper bound, and the increment in
2739 // the latch block.
2740 Value *BackcastedDispatchCounter =
2741 Builder.CreateTrunc(DispatchCounter, IVTy, "omp_dispatch.iv.trunc");
2742 CLI->mapIndVar([&](Instruction *) -> Value * {
2743 Builder.restoreIP(CLI->getBodyIP());
2744 return Builder.CreateAdd(IV, BackcastedDispatchCounter);
2745 });
2746
2747 // In the "exit" block, call the "fini" function.
2748 Builder.SetInsertPoint(DispatchExit, DispatchExit->getFirstInsertionPt());
2749 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
2750
2751 // Add the barrier if requested.
2752 if (NeedsBarrier)
2753 createBarrier(LocationDescription(Builder.saveIP(), DL), OMPD_for,
2754 /*ForceSimpleCall=*/false, /*CheckCancelFlag=*/false);
2755
2756#ifndef NDEBUG
2757 // Even though we currently do not support applying additional methods to it,
2758 // the chunk loop should remain a canonical loop.
2759 CLI->assertOK();
2760#endif
2761
2762 return {DispatchAfter, DispatchAfter->getFirstInsertionPt()};
2763}
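// The net effect is a two-level loop nest, sketched here in pseudo code:
//
//   for (counter = firstchunk.lb; counter < tripcount; counter += stride)
//     // original loop, now running min(chunk.range, tripcount - counter)
//     // iterations with its induction variable offset by counter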
2764
2765// Returns an LLVM function to call for executing an OpenMP static worksharing
2766// for loop depending on `type`. Only i32 and i64 are supported by the runtime.
2767// Always interpret integers as unsigned similarly to CanonicalLoopInfo.
2768static FunctionCallee
2769 getKmpcForStaticLoopForType(Type *Ty, OpenMPIRBuilder *OMPBuilder,
2770 WorksharingLoopType LoopType) {
2771 unsigned Bitwidth = Ty->getIntegerBitWidth();
2772 Module &M = OMPBuilder->M;
2773 switch (LoopType) {
2774 case WorksharingLoopType::ForStaticLoop:
2775 if (Bitwidth == 32)
2776 return OMPBuilder->getOrCreateRuntimeFunction(
2777 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_loop_4u);
2778 if (Bitwidth == 64)
2779 return OMPBuilder->getOrCreateRuntimeFunction(
2780 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_loop_8u);
2781 break;
2782 case WorksharingLoopType::DistributeStaticLoop:
2783 if (Bitwidth == 32)
2784 return OMPBuilder->getOrCreateRuntimeFunction(
2785 M, omp::RuntimeFunction::OMPRTL___kmpc_distribute_static_loop_4u);
2786 if (Bitwidth == 64)
2787 return OMPBuilder->getOrCreateRuntimeFunction(
2788 M, omp::RuntimeFunction::OMPRTL___kmpc_distribute_static_loop_8u);
2789 break;
2790 case WorksharingLoopType::DistributeForStaticLoop:
2791 if (Bitwidth == 32)
2792 return OMPBuilder->getOrCreateRuntimeFunction(
2793 M, omp::RuntimeFunction::OMPRTL___kmpc_distribute_for_static_loop_4u);
2794 if (Bitwidth == 64)
2795 return OMPBuilder->getOrCreateRuntimeFunction(
2796 M, omp::RuntimeFunction::OMPRTL___kmpc_distribute_for_static_loop_8u);
2797 break;
2798 }
2799 if (Bitwidth != 32 && Bitwidth != 64) {
2800 llvm_unreachable("Unknown OpenMP loop iterator bitwidth");
2801 }
2802 llvm_unreachable("Unknown type of OpenMP worksharing loop");
2803}
2804
2805 // Inserts a call to the proper OpenMP device RTL function which handles
2806 // loop worksharing.
2807 static void createTargetLoopWorkshareCall(
2808 OpenMPIRBuilder *OMPBuilder, WorksharingLoopType LoopType,
2809 BasicBlock *InsertBlock, Value *Ident, Value *LoopBodyArg,
2810 Type *ParallelTaskPtr, Value *TripCount, Function &LoopBodyFn) {
2811 Type *TripCountTy = TripCount->getType();
2812 Module &M = OMPBuilder->M;
2813 IRBuilder<> &Builder = OMPBuilder->Builder;
2814 FunctionCallee RTLFn =
2815 getKmpcForStaticLoopForType(TripCountTy, OMPBuilder, LoopType);
2816 SmallVector<Value *, 8> RealArgs;
2817 RealArgs.push_back(Ident);
2818 RealArgs.push_back(Builder.CreateBitCast(&LoopBodyFn, ParallelTaskPtr));
2819 RealArgs.push_back(LoopBodyArg);
2820 RealArgs.push_back(TripCount);
2821 if (LoopType == WorksharingLoopType::DistributeStaticLoop) {
2822 RealArgs.push_back(ConstantInt::get(TripCountTy, 0));
2823 Builder.CreateCall(RTLFn, RealArgs);
2824 return;
2825 }
2826 FunctionCallee RTLNumThreads = OMPBuilder->getOrCreateRuntimeFunction(
2827 M, omp::RuntimeFunction::OMPRTL_omp_get_num_threads);
2828 Builder.restoreIP({InsertBlock, std::prev(InsertBlock->end())});
2829 Value *NumThreads = Builder.CreateCall(RTLNumThreads, {});
2830
2831 RealArgs.push_back(
2832 Builder.CreateZExtOrTrunc(NumThreads, TripCountTy, "num.threads.cast"));
2833 RealArgs.push_back(ConstantInt::get(TripCountTy, 0));
2834 if (LoopType == WorksharingLoopType::DistributeForStaticLoop) {
2835 RealArgs.push_back(ConstantInt::get(TripCountTy, 0));
2836 }
2837
2838 Builder.CreateCall(RTLFn, RealArgs);
2839}
2840
2841static void
2842 workshareLoopTargetCallback(OpenMPIRBuilder *OMPIRBuilder,
2843 CanonicalLoopInfo *CLI, Value *Ident,
2844 Function &OutlinedFn, Type *ParallelTaskPtr,
2845 const SmallVector<Instruction *, 4> &ToBeDeleted,
2846 WorksharingLoopType LoopType) {
2847 IRBuilder<> &Builder = OMPIRBuilder->Builder;
2848 BasicBlock *Preheader = CLI->getPreheader();
2849 Value *TripCount = CLI->getTripCount();
2850
2851 // After loop body outlining, the loop body contains only the setup of the
2852 // loop body argument structure and the call to the outlined loop body
2853 // function. First, we need to move the setup of the loop body arguments
2854 // into the loop preheader.
2855 Preheader->splice(std::prev(Preheader->end()), CLI->getBody(),
2856 CLI->getBody()->begin(), std::prev(CLI->getBody()->end()));
2857
2858 // The next step is to remove the whole loop. We do not need it anymore.
2859 // That's why we make an unconditional branch from the loop preheader to the
2860 // loop exit block.
2861 Builder.restoreIP({Preheader, Preheader->end()});
2862 Preheader->getTerminator()->eraseFromParent();
2863 Builder.CreateBr(CLI->getExit());
2864
2865 // Delete dead loop blocks
2866 OpenMPIRBuilder::OutlineInfo CleanUpInfo;
2867 SmallPtrSet<BasicBlock *, 32> RegionBlockSet;
2868 SmallVector<BasicBlock *, 32> BlocksToBeRemoved;
2869 CleanUpInfo.EntryBB = CLI->getHeader();
2870 CleanUpInfo.ExitBB = CLI->getExit();
2871 CleanUpInfo.collectBlocks(RegionBlockSet, BlocksToBeRemoved);
2872 DeleteDeadBlocks(BlocksToBeRemoved);
2873
2874 // Find the instruction which corresponds to the loop body argument structure
2875 // and remove the call to the loop body function.
2876 Value *LoopBodyArg;
2877 User *OutlinedFnUser = OutlinedFn.getUniqueUndroppableUser();
2878 assert(OutlinedFnUser &&
2879 "Expected unique undroppable user of outlined function");
2880 CallInst *OutlinedFnCallInstruction = dyn_cast<CallInst>(OutlinedFnUser);
2881 assert(OutlinedFnCallInstruction && "Expected outlined function call");
2882 assert((OutlinedFnCallInstruction->getParent() == Preheader) &&
2883 "Expected outlined function call to be located in loop preheader");
2884 // Check in case no argument structure has been passed.
2885 if (OutlinedFnCallInstruction->arg_size() > 1)
2886 LoopBodyArg = OutlinedFnCallInstruction->getArgOperand(1);
2887 else
2888 LoopBodyArg = Constant::getNullValue(Builder.getPtrTy());
2889 OutlinedFnCallInstruction->eraseFromParent();
2890
2891 createTargetLoopWorkshareCall(OMPIRBuilder, LoopType, Preheader, Ident,
2892 LoopBodyArg, ParallelTaskPtr, TripCount,
2893 OutlinedFn);
2894
2895 for (auto &ToBeDeletedItem : ToBeDeleted)
2896 ToBeDeletedItem->eraseFromParent();
2897 CLI->invalidate();
2898}
2899
2900 OpenMPIRBuilder::InsertPointTy
2901 OpenMPIRBuilder::applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI,
2902 InsertPointTy AllocaIP,
2903 WorksharingLoopType LoopType) {
2904 uint32_t SrcLocStrSize;
2905 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
2906 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2907
2908 OutlineInfo OI;
2909 OI.OuterAllocaBB = CLI->getPreheader();
2910 Function *OuterFn = CLI->getPreheader()->getParent();
2911
2912 // Instructions which need to be deleted at the end of code generation
2913 SmallVector<Instruction *, 4> ToBeDeleted;
2914
2915 OI.OuterAllocaBB = AllocaIP.getBlock();
2916
2917 // Mark the loop body as the region which needs to be extracted
2918 OI.EntryBB = CLI->getBody();
2919 OI.ExitBB = CLI->getLatch()->splitBasicBlock(CLI->getLatch()->begin(),
2920 "omp.prelatch", true);
2921
2922 // Prepare loop body for extraction
2923 Builder.restoreIP({CLI->getPreheader(), CLI->getPreheader()->begin()});
2924
2925 // Insert a new loop counter variable which will be used only in the loop
2926 // body.
2927 AllocaInst *NewLoopCnt = Builder.CreateAlloca(CLI->getIndVarType(), 0, "");
2928 Instruction *NewLoopCntLoad =
2929 Builder.CreateLoad(CLI->getIndVarType(), NewLoopCnt);
2930 // The new loop counter instructions are redundant in the loop preheader once
2931 // code generation for the workshare loop is finished. That's why we mark them
2932 // as ready for deletion.
2933 ToBeDeleted.push_back(NewLoopCntLoad);
2934 ToBeDeleted.push_back(NewLoopCnt);
2935
2936 // Analyze the loop body region. Find all input variables which are used
2937 // inside the loop body region.
2938 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
2939 SmallVector<BasicBlock *, 32> Blocks;
2940 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
2941 SmallVector<BasicBlock *, 32> BlocksT(ParallelRegionBlockSet.begin(),
2942 ParallelRegionBlockSet.end());
2943
2944 CodeExtractorAnalysisCache CEAC(*OuterFn);
2945 CodeExtractor Extractor(Blocks,
2946 /* DominatorTree */ nullptr,
2947 /* AggregateArgs */ true,
2948 /* BlockFrequencyInfo */ nullptr,
2949 /* BranchProbabilityInfo */ nullptr,
2950 /* AssumptionCache */ nullptr,
2951 /* AllowVarArgs */ true,
2952 /* AllowAlloca */ true,
2953 /* AllocationBlock */ CLI->getPreheader(),
2954 /* Suffix */ ".omp_wsloop",
2955 /* AggrArgsIn0AddrSpace */ true);
2956
2957 BasicBlock *CommonExit = nullptr;
2958 SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands;
2959
2960 // Find allocas outside the loop body region which are used inside the loop
2961 // body.
2962 Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
2963
2964 // We need to model the loop body region as the function f(cnt, loop_arg).
2965 // That's why we replace the loop induction variable with the new counter
2966 // which will be one of the loop body function arguments.
2967 SmallVector<User *> Users(CLI->getIndVar()->user_begin(),
2968 CLI->getIndVar()->user_end());
2969 for (auto Use : Users) {
2970 if (Instruction *Inst = dyn_cast<Instruction>(Use)) {
2971 if (ParallelRegionBlockSet.count(Inst->getParent())) {
2972 Inst->replaceUsesOfWith(CLI->getIndVar(), NewLoopCntLoad);
2973 }
2974 }
2975 }
2976 // Make sure that the loop counter variable is not merged into the loop body
2977 // function argument structure and that it is passed as a separate variable.
2978 OI.ExcludeArgsFromAggregate.push_back(NewLoopCntLoad);
2979
2980 // The PostOutline callback is invoked when the loop body function has been
2981 // outlined and the loop body replaced by a call to the outlined function. We
2982 // need to add a call to the OpenMP device RTL in the loop preheader. The
2983 // OpenMP device RTL function will handle the loop control logic.
2984 //
2985 OI.PostOutlineCB = [=, ToBeDeletedVec =
2986 std::move(ToBeDeleted)](Function &OutlinedFn) {
2987 workshareLoopTargetCallback(this, CLI, Ident, OutlinedFn, ParallelTaskPtr,
2988 ToBeDeletedVec, LoopType);
2989 };
2990 addOutlineInfo(std::move(OI));
2991 return CLI->getAfterIP();
2992}
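// On the device path the loop control is delegated to the OpenMP device
// runtime; once the PostOutlineCB has run, the preheader roughly contains
// (illustrative, for the plain `for` case with a 32-bit trip count):
//
//   %nt = call i32 @omp_get_num_threads()
//   call void @__kmpc_for_static_loop_4u(ptr @ident, ptr @outlined.body,
//                                        ptr %loop.args, i32 %tripcount,
//                                        i32 %nt, i32 0)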
2993
2994 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyWorkshareLoop(
2995 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
2996 bool NeedsBarrier, omp::ScheduleKind SchedKind, Value *ChunkSize,
2997 bool HasSimdModifier, bool HasMonotonicModifier,
2998 bool HasNonmonotonicModifier, bool HasOrderedClause,
2999 WorksharingLoopType LoopType) {
3000 if (Config.isTargetDevice())
3001 return applyWorkshareLoopTarget(DL, CLI, AllocaIP, LoopType);
3002 OMPScheduleType EffectiveScheduleType = computeOpenMPScheduleType(
3003 SchedKind, ChunkSize, HasSimdModifier, HasMonotonicModifier,
3004 HasNonmonotonicModifier, HasOrderedClause);
3005
3006 bool IsOrdered = (EffectiveScheduleType & OMPScheduleType::ModifierOrdered) ==
3007 OMPScheduleType::ModifierOrdered;
3008 switch (EffectiveScheduleType & ~OMPScheduleType::ModifierMask) {
3009 case OMPScheduleType::BaseStatic:
3010 assert(!ChunkSize && "No chunk size with static-chunked schedule");
3011 if (IsOrdered)
3012 return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType,
3013 NeedsBarrier, ChunkSize);
3014 // FIXME: Monotonicity ignored?
3015 return applyStaticWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier);
3016
3017 case OMPScheduleType::BaseStaticChunked:
3018 if (IsOrdered)
3019 return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType,
3020 NeedsBarrier, ChunkSize);
3021 // FIXME: Monotonicity ignored?
3022 return applyStaticChunkedWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier,
3023 ChunkSize);
3024
3025 case OMPScheduleType::BaseRuntime:
3026 case OMPScheduleType::BaseAuto:
3027 case OMPScheduleType::BaseGreedy:
3028 case OMPScheduleType::BaseBalanced:
3029 case OMPScheduleType::BaseSteal:
3030 case OMPScheduleType::BaseGuidedSimd:
3031 case OMPScheduleType::BaseRuntimeSimd:
3032 assert(!ChunkSize &&
3033 "schedule type does not support user-defined chunk sizes");
3034 [[fallthrough]];
3035 case OMPScheduleType::BaseDynamicChunked:
3036 case OMPScheduleType::BaseGuidedChunked:
3037 case OMPScheduleType::BaseGuidedIterativeChunked:
3038 case OMPScheduleType::BaseGuidedAnalyticalChunked:
3039 case OMPScheduleType::BaseStaticBalancedChunked:
3040 return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType,
3041 NeedsBarrier, ChunkSize);
3042
3043 default:
3044 llvm_unreachable("Unknown/unimplemented schedule kind");
3045 }
3046}
3047
3048/// Returns an LLVM function to call for initializing loop bounds using OpenMP
3049/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
3050/// the runtime. Always interpret integers as unsigned similarly to
3051/// CanonicalLoopInfo.
3052static FunctionCallee
3053 getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
3054 unsigned Bitwidth = Ty->getIntegerBitWidth();
3055 if (Bitwidth == 32)
3056 return OMPBuilder.getOrCreateRuntimeFunction(
3057 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u);
3058 if (Bitwidth == 64)
3059 return OMPBuilder.getOrCreateRuntimeFunction(
3060 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u);
3061 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
3062}
3063
3064 /// Returns an LLVM function to call for fetching the next chunk of a loop using
3065 /// OpenMP dynamic scheduling, depending on `Ty`. Only i32 and i64 are supported
3066 /// by the runtime. Always interpret integers as unsigned similarly to
3067/// CanonicalLoopInfo.
3068static FunctionCallee
3069 getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
3070 unsigned Bitwidth = Ty->getIntegerBitWidth();
3071 if (Bitwidth == 32)
3072 return OMPBuilder.getOrCreateRuntimeFunction(
3073 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u);
3074 if (Bitwidth == 64)
3075 return OMPBuilder.getOrCreateRuntimeFunction(
3076 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u);
3077 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
3078}
3079
3080 /// Returns an LLVM function to call for finalizing the dynamic loop,
3081 /// depending on `Ty`. Only i32 and i64 are supported by the runtime. Always
3082/// interpret integers as unsigned similarly to CanonicalLoopInfo.
3083static FunctionCallee
3084 getKmpcForDynamicFiniForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
3085 unsigned Bitwidth = Ty->getIntegerBitWidth();
3086 if (Bitwidth == 32)
3087 return OMPBuilder.getOrCreateRuntimeFunction(
3088 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_fini_4u);
3089 if (Bitwidth == 64)
3090 return OMPBuilder.getOrCreateRuntimeFunction(
3091 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_fini_8u);
3092 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
3093}
3094
3095OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
3096 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
3097 OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk) {
3098 assert(CLI->isValid() && "Requires a valid canonical loop");
3099 assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) &&
3100 "Require dedicated allocate IP");
3102 "Require valid schedule type");
3103
3104 bool Ordered = (SchedType & OMPScheduleType::ModifierOrdered) ==
3105 OMPScheduleType::ModifierOrdered;
3106
3107 // Set up the source location value for OpenMP runtime.
3108 Builder.SetCurrentDebugLocation(DL);
3109
3110 uint32_t SrcLocStrSize;
3111 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
3112 Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3113
3114 // Declare useful OpenMP runtime functions.
3115 Value *IV = CLI->getIndVar();
3116 Type *IVTy = IV->getType();
3117 FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this);
3118 FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this);
3119
3120 // Allocate space for computed loop bounds as expected by the "init" function.
3121 Builder.restoreIP(AllocaIP);
3122 Type *I32Type = Type::getInt32Ty(M.getContext());
3123 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
3124 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
3125 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
3126 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");
3127
3128 // At the end of the preheader, prepare for calling the "init" function by
3129 // storing the current loop bounds into the allocated space. A canonical loop
3130 // always iterates from 0 to trip-count with step 1. Note that "init" expects
3131 // and produces an inclusive upper bound.
3132 BasicBlock *PreHeader = CLI->getPreheader();
3133 Builder.SetInsertPoint(PreHeader->getTerminator());
3134 Constant *One = ConstantInt::get(IVTy, 1);
3135 Builder.CreateStore(One, PLowerBound);
3136 Value *UpperBound = CLI->getTripCount();
3137 Builder.CreateStore(UpperBound, PUpperBound);
3138 Builder.CreateStore(One, PStride);
3139
3140 BasicBlock *Header = CLI->getHeader();
3141 BasicBlock *Exit = CLI->getExit();
3142 BasicBlock *Cond = CLI->getCond();
3143 BasicBlock *Latch = CLI->getLatch();
3144 InsertPointTy AfterIP = CLI->getAfterIP();
3145
3146 // The CLI will be "broken" in the code below, as the loop is no longer
3147 // a valid canonical loop.
3148
3149 if (!Chunk)
3150 Chunk = One;
3151
3152 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
3153
3154 Constant *SchedulingType =
3155 ConstantInt::get(I32Type, static_cast<int>(SchedType));
3156
3157 // Call the "init" function.
3158 Builder.CreateCall(DynamicInit,
3159 {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One,
3160 UpperBound, /* step */ One, Chunk});
3161
3162 // An outer loop around the existing one.
3163 BasicBlock *OuterCond = BasicBlock::Create(
3164 PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond",
3165 PreHeader->getParent());
3166 // The "next" call returns a 32-bit flag, so compare it against an i32 zero.
3167 Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt());
3168 Value *Res =
3169 Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter,
3170 PLowerBound, PUpperBound, PStride});
3171 Constant *Zero32 = ConstantInt::get(I32Type, 0);
3172 Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32);
3173 Value *LowerBound =
3174 Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb");
3175 Builder.CreateCondBr(MoreWork, Header, Exit);
3176
3177 // Change PHI-node in loop header to use outer cond rather than preheader,
3178 // and set IV to the LowerBound.
3179 Instruction *Phi = &Header->front();
3180 auto *PI = cast<PHINode>(Phi);
3181 PI->setIncomingBlock(0, OuterCond);
3182 PI->setIncomingValue(0, LowerBound);
3183
3184 // Then set the pre-header to jump to the OuterCond
3185 Instruction *Term = PreHeader->getTerminator();
3186 auto *Br = cast<BranchInst>(Term);
3187 Br->setSuccessor(0, OuterCond);
3188
3189 // Modify the inner condition:
3190 // * Use the UpperBound returned from the DynamicNext call.
3191 // * Jump to the outer loop once an inner chunk is done.
3192 Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt());
3193 UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub");
3194 Instruction *Comp = &*Builder.GetInsertPoint();
3195 auto *CI = cast<CmpInst>(Comp);
3196 CI->setOperand(1, UpperBound);
3197 // Redirect the inner exit to branch to outer condition.
3198 Instruction *Branch = &Cond->back();
3199 auto *BI = cast<BranchInst>(Branch);
3200 assert(BI->getSuccessor(1) == Exit);
3201 BI->setSuccessor(1, OuterCond);
3202
3203 // Call the "fini" function if "ordered" is present in wsloop directive.
3204 if (Ordered) {
3205 Builder.SetInsertPoint(&Latch->back());
3206 FunctionCallee DynamicFini = getKmpcForDynamicFiniForType(IVTy, M, *this);
3207 Builder.CreateCall(DynamicFini, {SrcLoc, ThreadNum});
3208 }
3209
3210 // Add the barrier if requested.
3211 if (NeedsBarrier) {
3212 Builder.SetInsertPoint(&Exit->back());
3213 createBarrier(LocationDescription(Builder.saveIP(), DL),
3214 omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
3215 /* CheckCancelFlag */ false);
3216 }
3217
3218 CLI->invalidate();
3219 return AfterIP;
3220}
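// Illustrative sketch (editorial aside, not part of OMPIRBuilder): the control
// flow emitted above for a dynamically scheduled loop with a 32-bit induction
// variable, written as plain C++. __kmpc_dispatch_init_4u and
// __kmpc_dispatch_next_4u are the runtime entries selected by
// getKmpcForDynamicInitForType/getKmpcForDynamicNextForType; their signatures
// are simplified here and resolving them requires linking against libomp.
#include <cstdint>

extern "C" {
void __kmpc_dispatch_init_4u(void *Loc, int32_t Gtid, int32_t Sched,
                             uint32_t Lb, uint32_t Ub, int32_t St,
                             uint32_t Chunk);
int32_t __kmpc_dispatch_next_4u(void *Loc, int32_t Gtid, int32_t *LastIter,
                                uint32_t *Lb, uint32_t *Ub, int32_t *St);
}

void dynamicWorkshareLoop(void *Loc, int32_t Gtid, int32_t Sched,
                          uint32_t TripCount, uint32_t Chunk,
                          void (*Body)(uint32_t)) {
  // Preheader: pass a 1-based, inclusive range, as noted above ("init"
  // expects and produces an inclusive upper bound).
  __kmpc_dispatch_init_4u(Loc, Gtid, Sched, /*Lb=*/1, /*Ub=*/TripCount,
                          /*St=*/1, Chunk);

  int32_t LastIter = 0, St = 1;
  uint32_t Lb = 1, Ub = TripCount;
  // ".outer.cond": fetch the next chunk until the runtime reports no work.
  while (__kmpc_dispatch_next_4u(Loc, Gtid, &LastIter, &Lb, &Ub, &St)) {
    // Inner loop: the 0-based induction variable starts at Lb - 1 and the
    // original exit comparison is redirected to the chunk's upper bound.
    for (uint32_t IV = Lb - 1; IV < Ub; ++IV)
      Body(IV);
  }
}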
3221
3222/// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is,
3223/// after this \p OldTarget will be orphaned.
3224 static void redirectAllPredecessorsTo(BasicBlock *OldTarget,
3225 BasicBlock *NewTarget, DebugLoc DL) {
3226 for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget)))
3227 redirectTo(Pred, NewTarget, DL);
3228}
3229
3230 /// Determine which blocks in \p BBs are still referenced from outside the set
3231 /// and erase the remaining, now unreachable, blocks from their function.
3232 static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) {
3233 SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()};
3234 auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) {
3235 for (Use &U : BB->uses()) {
3236 auto *UseInst = dyn_cast<Instruction>(U.getUser());
3237 if (!UseInst)
3238 continue;
3239 if (BBsToErase.count(UseInst->getParent()))
3240 continue;
3241 return true;
3242 }
3243 return false;
3244 };
3245
3246 while (true) {
3247 bool Changed = false;
3248 for (BasicBlock *BB : make_early_inc_range(BBsToErase)) {
3249 if (HasRemainingUses(BB)) {
3250 BBsToErase.erase(BB);
3251 Changed = true;
3252 }
3253 }
3254 if (!Changed)
3255 break;
3256 }
3257
3258 SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end());
3259 DeleteDeadBlocks(BBVec);
3260}
3261
3262 CanonicalLoopInfo *
3263 OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
3264 InsertPointTy ComputeIP) {
3265 assert(Loops.size() >= 1 && "At least one loop required");
3266 size_t NumLoops = Loops.size();
3267
3268 // Nothing to do if there is already just one loop.
3269 if (NumLoops == 1)
3270 return Loops.front();
3271
3272 CanonicalLoopInfo *Outermost = Loops.front();
3273 CanonicalLoopInfo *Innermost = Loops.back();
3274 BasicBlock *OrigPreheader = Outermost->getPreheader();
3275 BasicBlock *OrigAfter = Outermost->getAfter();
3276 Function *F = OrigPreheader->getParent();
3277
3278 // Loop control blocks that may become orphaned later.
3279 SmallVector<BasicBlock *, 12> OldControlBBs;
3280 OldControlBBs.reserve(6 * Loops.size());
3281 for (CanonicalLoopInfo *Loop : Loops)
3282 Loop->collectControlBlocks(OldControlBBs);
3283
3284 // Setup the IRBuilder for inserting the trip count computation.
3286 if (ComputeIP.isSet())
3287 Builder.restoreIP(ComputeIP);
3288 else
3289 Builder.restoreIP(Outermost->getPreheaderIP());
3290
3291 // Derive the collapsed loop's trip count.
3292 // TODO: Find common/largest indvar type.
3293 Value *CollapsedTripCount = nullptr;
3294 for (CanonicalLoopInfo *L : Loops) {
3295 assert(L->isValid() &&
3296 "All loops to collapse must be valid canonical loops");
3297 Value *OrigTripCount = L->getTripCount();
3298 if (!CollapsedTripCount) {
3299 CollapsedTripCount = OrigTripCount;
3300 continue;
3301 }
3302
3303 // TODO: Enable UndefinedSanitizer to diagnose an overflow here.
3304 CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount,
3305 {}, /*HasNUW=*/true);
3306 }
3307
3308 // Create the collapsed loop control flow.
3309 CanonicalLoopInfo *Result =
3310 createLoopSkeleton(DL, CollapsedTripCount, F,
3311 OrigPreheader->getNextNode(), OrigAfter, "collapsed");
3312
3313 // Build the collapsed loop body code.
3314 // Start with deriving the input loop induction variables from the collapsed
3315 // one, using a divmod scheme. To preserve the original loops' order, the
3316 // innermost loop uses the least significant bits.
3317 Builder.restoreIP(Result->getBodyIP());
3318
3319 Value *Leftover = Result->getIndVar();
3320 SmallVector<Value *> NewIndVars;
3321 NewIndVars.resize(NumLoops);
3322 for (int i = NumLoops - 1; i >= 1; --i) {
3323 Value *OrigTripCount = Loops[i]->getTripCount();
3324
3325 Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount);
3326 NewIndVars[i] = NewIndVar;
3327
3328 Leftover = Builder.CreateUDiv(Leftover, OrigTripCount);
3329 }
3330 // Outermost loop gets all the remaining bits.
3331 NewIndVars[0] = Leftover;
3332
3333 // Construct the loop body control flow.
3334 // We progressively construct the branch structure following the direction of
3335 // control flow: the leading in-between code, the loop nest body, the trailing
3336 // in-between code, and finally the edge rejoining the collapsed loop's latch.
3337 // ContinueBlock and ContinuePred keep track of the source(s) of the next edge.
3338 // If ContinueBlock is set, continue with that block. If ContinuePred is set,
3339 // use its predecessors as sources.
3340 BasicBlock *ContinueBlock = Result->getBody();
3341 BasicBlock *ContinuePred = nullptr;
3342 auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest,
3343 BasicBlock *NextSrc) {
3344 if (ContinueBlock)
3345 redirectTo(ContinueBlock, Dest, DL);
3346 else
3347 redirectAllPredecessorsTo(ContinuePred, Dest, DL);
3348
3349 ContinueBlock = nullptr;
3350 ContinuePred = NextSrc;
3351 };
3352
3353 // The code before the nested loop of each level.
3354 // Because we are sinking it into the nest, it will be executed more often
3355 // than in the original loop nest. More sophisticated schemes could keep track
3356 // of what the in-between code is and instantiate it only once per thread.
3357 for (size_t i = 0; i < NumLoops - 1; ++i)
3358 ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader());
3359
3360 // Connect the loop nest body.
3361 ContinueWith(Innermost->getBody(), Innermost->getLatch());
3362
3363 // The code after the nested loop at each level.
3364 for (size_t i = NumLoops - 1; i > 0; --i)
3365 ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch());
3366
3367 // Connect the finished loop to the collapsed loop latch.
3368 ContinueWith(Result->getLatch(), nullptr);
3369
3370 // Replace the input loops with the new collapsed loop.
3371 redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL);
3372 redirectTo(Result->getAfter(), Outermost->getAfter(), DL);
3373
3374 // Replace the input loop indvars with the derived ones.
3375 for (size_t i = 0; i < NumLoops; ++i)
3376 Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]);
3377
3378 // Remove unused parts of the input loops.
3379 removeUnusedBlocksFromParent(OldControlBBs);
3380
3381 for (CanonicalLoopInfo *L : Loops)
3382 L->invalidate();
3383
3384#ifndef NDEBUG
3385 Result->assertOK();
3386#endif
3387 return Result;
3388}
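// Illustrative sketch (editorial aside, not part of OMPIRBuilder): the divmod
// scheme described above, shown on a plain nest of three loops with trip
// counts TC0 (outermost) to TC2 (innermost). The collapsed loop runs
// TC0 * TC1 * TC2 times and the original induction variables are recovered
// from the collapsed one, the innermost loop owning the least significant
// "digits".
#include <cstdint>

void collapsedNest(uint64_t TC0, uint64_t TC1, uint64_t TC2,
                   void (*Body)(uint64_t, uint64_t, uint64_t)) {
  uint64_t CollapsedTripCount = TC0 * TC1 * TC2; // the NUW CreateMul chain
  for (uint64_t IV = 0; IV < CollapsedTripCount; ++IV) {
    uint64_t Leftover = IV;
    uint64_t I2 = Leftover % TC2; // innermost index (CreateURem)
    Leftover /= TC2;              // CreateUDiv
    uint64_t I1 = Leftover % TC1;
    Leftover /= TC1;
    uint64_t I0 = Leftover;       // outermost index gets the remaining bits
    Body(I0, I1, I2);
  }
}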
3389
3390std::vector<CanonicalLoopInfo *>
3391 OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
3392 ArrayRef<Value *> TileSizes) {
3393 assert(TileSizes.size() == Loops.size() &&
3394 "Must pass as many tile sizes as there are loops");
3395 int NumLoops = Loops.size();
3396 assert(NumLoops >= 1 && "At least one loop to tile required");
3397
3398 CanonicalLoopInfo *OutermostLoop = Loops.front();
3399 CanonicalLoopInfo *InnermostLoop = Loops.back();
3400 Function *F = OutermostLoop->getBody()->getParent();
3401 BasicBlock *InnerEnter = InnermostLoop->getBody();
3402 BasicBlock *InnerLatch = InnermostLoop->getLatch();
3403
3404 // Loop control blocks that may become orphaned later.
3405 SmallVector<BasicBlock *, 12> OldControlBBs;
3406 OldControlBBs.reserve(6 * Loops.size());
3407 for (CanonicalLoopInfo *Loop : Loops)
3408 Loop->collectControlBlocks(OldControlBBs);
3409
3410 // Collect original trip counts and induction variables to be accessible by
3411 // index. Also, the structure of the original loops is not preserved during
3412 // the construction of the tiled loops, so do it before we scavenge the BBs of
3413 // any original CanonicalLoopInfo.
3414 SmallVector<Value *, 4> OrigTripCounts, OrigIndVars;
3415 for (CanonicalLoopInfo *L : Loops) {
3416 assert(L->isValid() && "All input loops must be valid canonical loops");
3417 OrigTripCounts.push_back(L->getTripCount());
3418 OrigIndVars.push_back(L->getIndVar());
3419 }
3420
3421 // Collect the code between loop headers. These may contain SSA definitions
3422 // that are used in the loop nest body. To be usable within the innermost
3423 // body, these BasicBlocks will be sunk into the loop nest body. That is,
3424 // these instructions may be executed more often than before the tiling.
3425 // TODO: It would be sufficient to only sink them into body of the
3426 // corresponding tile loop.
3427 SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode;
3428 for (int i = 0; i < NumLoops - 1; ++i) {
3429 CanonicalLoopInfo *Surrounding = Loops[i];
3430 CanonicalLoopInfo *Nested = Loops[i + 1];
3431
3432 BasicBlock *EnterBB = Surrounding->getBody();
3433 BasicBlock *ExitBB = Nested->getHeader();
3434 InbetweenCode.emplace_back(EnterBB, ExitBB);
3435 }
3436
3437 // Compute the trip counts of the floor loops.
3439 Builder.restoreIP(OutermostLoop->getPreheaderIP());
3440 SmallVector<Value *, 4> FloorCount, FloorRems;
3441 for (int i = 0; i < NumLoops; ++i) {
3442 Value *TileSize = TileSizes[i];
3443 Value *OrigTripCount = OrigTripCounts[i];
3444 Type *IVType = OrigTripCount->getType();
3445
3446 Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize);
3447 Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize);
3448
3449 // 0 if the tile size divides the trip count, 1 otherwise.
3450 // 1 means we need an additional iteration for a partial tile.
3451 //
3452 // Unfortunately we cannot just use the roundup formula
3453 // (tripcount + tilesize - 1) / tilesize
3454 // because the summation might overflow. We do not want to introduce undefined
3455 // behavior where the untiled loop nest did not have any.
3456 Value *FloorTripOverflow =
3457 Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0));
3458
3459 FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType);
3460 FloorTripCount =
3461 Builder.CreateAdd(FloorTripCount, FloorTripOverflow,
3462 "omp_floor" + Twine(i) + ".tripcount", true);
3463
3464 // Remember some values for later use.
3465 FloorCount.push_back(FloorTripCount);
3466 FloorRems.push_back(FloorTripRem);
3467 }
3468
3469 // Generate the new loop nest, from the outermost to the innermost.
3470 std::vector<CanonicalLoopInfo *> Result;
3471 Result.reserve(NumLoops * 2);
3472
3473 // The basic block of the surrounding loop that enters the generated loop
3474 // nest.
3475 BasicBlock *Enter = OutermostLoop->getPreheader();
3476
3477 // The basic block of the surrounding loop where the inner code should
3478 // continue.
3479 BasicBlock *Continue = OutermostLoop->getAfter();
3480
3481 // Where the next loop basic block should be inserted.
3482 BasicBlock *OutroInsertBefore = InnermostLoop->getExit();
3483
3484 auto EmbeddNewLoop =
3485 [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore](
3486 Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * {
3487 CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton(
3488 DL, TripCount, F, InnerEnter, OutroInsertBefore, Name);
3489 redirectTo(Enter, EmbeddedLoop->getPreheader(), DL);
3490 redirectTo(EmbeddedLoop->getAfter(), Continue, DL);
3491
3492 // Setup the position where the next embedded loop connects to this loop.
3493 Enter = EmbeddedLoop->getBody();
3494 Continue = EmbeddedLoop->getLatch();
3495 OutroInsertBefore = EmbeddedLoop->getLatch();
3496 return EmbeddedLoop;
3497 };
3498
3499 auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts,
3500 const Twine &NameBase) {
3501 for (auto P : enumerate(TripCounts)) {
3502 CanonicalLoopInfo *EmbeddedLoop =
3503 EmbeddNewLoop(P.value(), NameBase + Twine(P.index()));
3504 Result.push_back(EmbeddedLoop);
3505 }
3506 };
3507
3508 EmbeddNewLoops(FloorCount, "floor");
3509
3510 // Within the innermost floor loop, emit the code that computes the tile
3511 // sizes.
3512 Builder.restoreIP(Result.back()->getBodyIP());
3513 SmallVector<Value *, 4> TileCounts;
3514 for (int i = 0; i < NumLoops; ++i) {
3515 CanonicalLoopInfo *FloorLoop = Result[i];
3516 Value *TileSize = TileSizes[i];
3517
3518 Value *FloorIsEpilogue =
3519 Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]);
3520 Value *TileTripCount =
3521 Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize);
3522
3523 TileCounts.push_back(TileTripCount);
3524 }
3525
3526 // Create the tile loops.
3527 EmbeddNewLoops(TileCounts, "tile");
3528
3529 // Insert the inbetween code into the body.
3530 BasicBlock *BodyEnter = Enter;
3531 BasicBlock *BodyEntered = nullptr;
3532 for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) {
3533 BasicBlock *EnterBB = P.first;
3534 BasicBlock *ExitBB = P.second;
3535
3536 if (BodyEnter)
3537 redirectTo(BodyEnter, EnterBB, DL);
3538 else
3539 redirectAllPredecessorsTo(BodyEntered, EnterBB, DL);
3540
3541 BodyEnter = nullptr;
3542 BodyEntered = ExitBB;
3543 }
3544
3545 // Append the original loop nest body into the generated loop nest body.
3546 if (BodyEnter)
3547 redirectTo(BodyEnter, InnerEnter, DL);
3548 else
3549 redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL);
3550 redirectAllPredecessorsTo(InnerLatch, Continue, DL);
3551
3552 // Replace the original induction variable with an induction variable computed
3553 // from the tile and floor induction variables.
3554 Builder.restoreIP(Result.back()->getBodyIP());
3555 for (int i = 0; i < NumLoops; ++i) {
3556 CanonicalLoopInfo *FloorLoop = Result[i];
3557 CanonicalLoopInfo *TileLoop = Result[NumLoops + i];
3558 Value *OrigIndVar = OrigIndVars[i];
3559 Value *Size = TileSizes[i];
3560
3561 Value *Scale =
3562 Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true);
3563 Value *Shift =
3564 Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true);
3565 OrigIndVar->replaceAllUsesWith(Shift);
3566 }
3567
3568 // Remove unused parts of the original loops.
3569 removeUnusedBlocksFromParent(OldControlBBs);
3570
3571 for (CanonicalLoopInfo *L : Loops)
3572 L->invalidate();
3573
3574#ifndef NDEBUG
3575 for (CanonicalLoopInfo *GenL : Result)
3576 GenL->assertOK();
3577#endif
3578 return Result;
3579}
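// Illustrative sketch (editorial aside, not part of OMPIRBuilder): the
// floor/tile structure generated above, for a single loop of TripCount
// iterations and tile size TS, written as plain C++. It shows the
// overflow-safe round-up of the floor trip count (udiv plus a
// "remainder != 0" bit instead of (TripCount + TS - 1) / TS) and the
// shortened final tile selected for the partial-tile epilogue.
#include <cstdint>

void tiledLoop(uint64_t TripCount, uint64_t TS, void (*Body)(uint64_t)) {
  uint64_t FloorTripCount = TripCount / TS; // CreateUDiv
  uint64_t FloorTripRem = TripCount % TS;   // CreateURem
  FloorTripCount += (FloorTripRem != 0);    // extra iteration for a partial tile

  for (uint64_t Floor = 0; Floor < FloorTripCount; ++Floor) {
    // The last floor iteration only runs the remainder, if there is one
    // (the Select feeding the tile loop's trip count above).
    uint64_t TileTripCount = (Floor == TripCount / TS) ? FloorTripRem : TS;
    for (uint64_t Tile = 0; Tile < TileTripCount; ++Tile) {
      uint64_t OrigIV = TS * Floor + Tile; // the Scale/Shift computation above
      Body(OrigIV);
    }
  }
}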
3580
3581/// Attach metadata \p Properties to the basic block described by \p BB. If the
3582/// basic block already has metadata, the basic block properties are appended.
3583 static void addBasicBlockMetadata(BasicBlock *BB,
3584 ArrayRef<Metadata *> Properties) {
3585 // Nothing to do if no property to attach.
3586 if (Properties.empty())
3587 return;
3588
3589 LLVMContext &Ctx = BB->getContext();
3590 SmallVector<Metadata *> NewProperties;
3591 NewProperties.push_back(nullptr);
3592
3593 // If the basic block already has metadata, prepend it to the new metadata.
3594 MDNode *Existing = BB->getTerminator()->getMetadata(LLVMContext::MD_loop);
3595 if (Existing)
3596 append_range(NewProperties, drop_begin(Existing->operands(), 1));
3597
3598 append_range(NewProperties, Properties);
3599 MDNode *BasicBlockID = MDNode::getDistinct(Ctx, NewProperties);
3600 BasicBlockID->replaceOperandWith(0, BasicBlockID);
3601
3602 BB->getTerminator()->setMetadata(LLVMContext::MD_loop, BasicBlockID);
3603}
3604
3605/// Attach loop metadata \p Properties to the loop described by \p Loop. If the
3606/// loop already has metadata, the loop properties are appended.
3607 static void addLoopMetadata(CanonicalLoopInfo *Loop,
3608 ArrayRef<Metadata *> Properties) {
3609 assert(Loop->isValid() && "Expecting a valid CanonicalLoopInfo");
3610
3611 // Attach metadata to the loop's latch
3612 BasicBlock *Latch = Loop->getLatch();
3613 assert(Latch && "A valid CanonicalLoopInfo must have a unique latch");
3614 addBasicBlockMetadata(Latch, Properties);
3615}
3616
3617/// Attach llvm.access.group metadata to the memref instructions of \p Block
3618static void addSimdMetadata(BasicBlock *Block, MDNode *AccessGroup,
3619 LoopInfo &LI) {
3620 for (Instruction &I : *Block) {
3621 if (I.mayReadOrWriteMemory()) {
3622 // TODO: This instruction may already have an access group from
3623 // other pragmas e.g. #pragma clang loop vectorize. Append
3624 // so that the existing metadata is not overwritten.
3625 I.setMetadata(LLVMContext::MD_access_group, AccessGroup);
3626 }
3627 }
3628}
3629
3630 void OpenMPIRBuilder::unrollLoopFull(DebugLoc, CanonicalLoopInfo *Loop) {
3631 LLVMContext &Ctx = Builder.getContext();
3632 addLoopMetadata(
3633 Loop, {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
3634 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.full"))});
3635}
3636
3637 void OpenMPIRBuilder::unrollLoopHeuristic(DebugLoc, CanonicalLoopInfo *Loop) {
3638 LLVMContext &Ctx = Builder.getContext();
3639 addLoopMetadata(
3640 Loop, {
3641 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
3642 });
3643}
3644
3645void OpenMPIRBuilder::createIfVersion(CanonicalLoopInfo *CanonicalLoop,
3646 Value *IfCond, ValueToValueMapTy &VMap,
3647 const Twine &NamePrefix) {
3648 Function *F = CanonicalLoop->getFunction();
3649
3650 // Define where if branch should be inserted
3651 Instruction *SplitBefore;
3652 if (Instruction::classof(IfCond)) {
3653 SplitBefore = dyn_cast<Instruction>(IfCond);
3654 } else {
3655 SplitBefore = CanonicalLoop->getPreheader()->getTerminator();
3656 }
3657
3658 // TODO: We should not rely on the pass manager. Currently we use it only for
3659 // getting the llvm::Loop which corresponds to the given CanonicalLoopInfo
3660 // object. We should have a method which returns all blocks between
3661 // CanonicalLoopInfo::getHeader() and CanonicalLoopInfo::getAfter().
3662 FunctionAnalysisManager FAM;
3663 FAM.registerPass([]() { return DominatorTreeAnalysis(); });
3664 FAM.registerPass([]() { return LoopAnalysis(); });
3665 FAM.registerPass([]() { return PassInstrumentationAnalysis(); });
3666
3667 // Get the loop which needs to be cloned
3668 LoopAnalysis LIA;
3669 LoopInfo &&LI = LIA.run(*F, FAM);
3670 Loop *L = LI.getLoopFor(CanonicalLoop->getHeader());
3671
3672 // Create additional blocks for the if statement
3673 BasicBlock *Head = SplitBefore->getParent();
3674 Instruction *HeadOldTerm = Head->getTerminator();
3675 llvm::LLVMContext &C = Head->getContext();
3676 BasicBlock *ThenBlock = BasicBlock::Create(
3677 C, NamePrefix + ".if.then", Head->getParent(), Head->getNextNode());
3678 BasicBlock *ElseBlock = BasicBlock::Create(
3679 C, NamePrefix + ".if.else", Head->getParent(), CanonicalLoop->getExit());
3680
3681 // Create if condition branch.
3682 Builder.SetInsertPoint(HeadOldTerm);
3683 Instruction *BrInstr =
3684 Builder.CreateCondBr(IfCond, ThenBlock, /*ifFalse*/ ElseBlock);
3685 InsertPointTy IP{BrInstr->getParent(), ++BrInstr->getIterator()};
3686 // The "then" block contains the branch to the OpenMP loop that should be vectorized
3687 spliceBB(IP, ThenBlock, false);
3688 ThenBlock->replaceSuccessorsPhiUsesWith(Head, ThenBlock);
3689
3690 Builder.SetInsertPoint(ElseBlock);
3691
3692 // Clone loop for the else branch
3693 SmallVector<BasicBlock *, 8> NewBlocks;
3694
3695 VMap[CanonicalLoop->getPreheader()] = ElseBlock;
3696 for (BasicBlock *Block : L->getBlocks()) {
3697 BasicBlock *NewBB = CloneBasicBlock(Block, VMap, "", F);
3698 NewBB->moveBefore(CanonicalLoop->getExit());
3699 VMap[Block] = NewBB;
3700 NewBlocks.push_back(NewBB);
3701 }
3702 remapInstructionsInBlocks(NewBlocks, VMap);
3703 Builder.CreateBr(NewBlocks.front());
3704}
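// Illustrative sketch (editorial aside, not part of OMPIRBuilder): the effect
// of the if-versioning above in plain C++. The original loop stays on the
// ".if.then" path and later receives the simd metadata, while the ".if.else"
// path runs a clone that applySimd marks with llvm.loop.vectorize.enable set
// to false.
void versionedLoop(bool IfCond, long N, void (*Body)(long)) {
  if (IfCond) {
    // ".if.then": original canonical loop, eligible for vectorization.
    for (long I = 0; I < N; ++I)
      Body(I);
  } else {
    // ".if.else": cloned loop; vectorization is disabled via metadata.
    for (long I = 0; I < N; ++I)
      Body(I);
  }
}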
3705
3706unsigned
3707 OpenMPIRBuilder::getOpenMPDefaultSimdAlign(const Triple &TargetTriple,
3708 const StringMap<bool> &Features) {
3709 if (TargetTriple.isX86()) {
3710 if (Features.lookup("avx512f"))
3711 return 512;
3712 else if (Features.lookup("avx"))
3713 return 256;
3714 return 128;
3715 }
3716 if (TargetTriple.isPPC())
3717 return 128;
3718 if (TargetTriple.isWasm())
3719 return 128;
3720 return 0;
3721}
3722
3723 void OpenMPIRBuilder::applySimd(CanonicalLoopInfo *CanonicalLoop,
3724 MapVector<Value *, Value *> AlignedVars,
3725 Value *IfCond, OrderKind Order,
3726 ConstantInt *Simdlen, ConstantInt *Safelen) {
3727 LLVMContext &Ctx = Builder.getContext();
3728
3729 Function *F = CanonicalLoop->getFunction();
3730
3731 // TODO: We should not rely on the pass manager. Currently we use it only for
3732 // getting the llvm::Loop which corresponds to the given CanonicalLoopInfo
3733 // object. We should have a method which returns all blocks between
3734 // CanonicalLoopInfo::getHeader() and CanonicalLoopInfo::getAfter().
3735 FunctionAnalysisManager FAM;
3736 FAM.registerPass([]() { return DominatorTreeAnalysis(); });
3737 FAM.registerPass([]() { return LoopAnalysis(); });
3738 FAM.registerPass([]() { return PassInstrumentationAnalysis(); });
3739
3740 LoopAnalysis LIA;
3741 LoopInfo &&LI = LIA.run(*F, FAM);
3742
3743 Loop *L = LI.getLoopFor(CanonicalLoop->getHeader());
3744 if (AlignedVars.size()) {
3745 InsertPointTy IP = Builder.saveIP();
3746 Builder.SetInsertPoint(CanonicalLoop->getPreheader()->getTerminator());
3747 for (auto &AlignedItem : AlignedVars) {
3748 Value *AlignedPtr = AlignedItem.first;
3749 Value *Alignment = AlignedItem.second;
3750 Builder.CreateAlignmentAssumption(F->getParent()->getDataLayout(),
3751 AlignedPtr, Alignment);
3752 }
3753 Builder.restoreIP(IP);
3754 }
3755
3756 if (IfCond) {
3757 ValueToValueMapTy VMap;
3758 createIfVersion(CanonicalLoop, IfCond, VMap, "simd");
3759 // Add metadata to the cloned loop which disables vectorization
3760 Value *MappedLatch = VMap.lookup(CanonicalLoop->getLatch());
3761 assert(MappedLatch &&
3762 "Cannot find value which corresponds to original loop latch");
3763 assert(isa<BasicBlock>(MappedLatch) &&
3764 "Cannot cast mapped latch block value to BasicBlock");
3765 BasicBlock *NewLatchBlock = dyn_cast<BasicBlock>(MappedLatch);
3766 ConstantAsMetadata *BoolConst =
3769 NewLatchBlock,
3770 {MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"),
3771 BoolConst})});
3772 }
3773
3774 SmallSet<BasicBlock *, 8> Reachable;
3775
3776 // Get the basic blocks from the loop in which memref instructions
3777 // can be found.
3778 // TODO: Generalize getting all blocks inside a CanonicalizeLoopInfo,
3779 // preferably without running any passes.
3780 for (BasicBlock *Block : L->getBlocks()) {
3781 if (Block == CanonicalLoop->getCond() ||
3782 Block == CanonicalLoop->getHeader())
3783 continue;
3784 Reachable.insert(Block);
3785 }
3786
3787 SmallVector<Metadata *> LoopMDList;
3788
3789 // In the presence of a finite 'safelen', it may be unsafe to mark all
3790 // the memory instructions parallel, because loop-carried
3791 // dependences of 'safelen' iterations are possible.
3792 // If clause order(concurrent) is specified then the memory instructions
3793 // are marked parallel even if 'safelen' is finite.
3794 if ((Safelen == nullptr) || (Order == OrderKind::OMP_ORDER_concurrent)) {
3795 // Add access group metadata to memory-access instructions.
3796 MDNode *AccessGroup = MDNode::getDistinct(Ctx, {});
3797 for (BasicBlock *BB : Reachable)
3798 addSimdMetadata(BB, AccessGroup, LI);
3799 // TODO: If the loop has existing parallel access metadata, have
3800 // to combine two lists.
3801 LoopMDList.push_back(MDNode::get(
3802 Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), AccessGroup}));
3803 }
3804
3805 // Use the above access group metadata to create loop level
3806 // metadata, which should be distinct for each loop.
3807 ConstantAsMetadata *BoolConst =
3808 ConstantAsMetadata::get(ConstantInt::getTrue(Type::getInt1Ty(Ctx)));
3809 LoopMDList.push_back(MDNode::get(
3810 Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"), BoolConst}));
3811
3812 if (Simdlen || Safelen) {
3813 // If both simdlen and safelen clauses are specified, the value of the
3814 // simdlen parameter must be less than or equal to the value of the safelen
3815 // parameter. Therefore, use safelen only in the absence of simdlen.
3816 ConstantInt *VectorizeWidth = Simdlen == nullptr ? Safelen : Simdlen;
3817 LoopMDList.push_back(
3818 MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.width"),
3819 ConstantAsMetadata::get(VectorizeWidth)}));
3820 }
3821
3822 addLoopMetadata(CanonicalLoop, LoopMDList);
3823}
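// Illustrative sketch (editorial aside, not part of OMPIRBuilder): a
// hypothetical helper restating the clause handling above with plain values.
// The width hint for llvm.loop.vectorize.width comes from simdlen when given,
// otherwise from safelen, and memory accesses are only marked parallel when
// there is no finite safelen or when order(concurrent) was specified.
#include <cstdint>
#include <optional>

struct SimdClauses {
  std::optional<uint64_t> Simdlen;
  std::optional<uint64_t> Safelen;
  bool OrderConcurrent = false;
};

// Value used for llvm.loop.vectorize.width, if any clause supplies one.
std::optional<uint64_t> vectorizeWidth(const SimdClauses &C) {
  return C.Simdlen ? C.Simdlen : C.Safelen;
}

// Whether the loop's memory accesses get llvm.loop.parallel_accesses groups.
bool markAccessesParallel(const SimdClauses &C) {
  return !C.Safelen || C.OrderConcurrent;
}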
3824
3825/// Create the TargetMachine object to query the backend for optimization
3826/// preferences.
3827///
3828/// Ideally, this would be passed from the front-end to the OpenMPBuilder, but
3829/// e.g. Clang does not pass it to its CodeGen layer and creates it only when
3830 /// needed for the LLVM pass pipeline. We use some default options to avoid
3831/// having to pass too many settings from the frontend that probably do not
3832/// matter.
3833///
3834/// Currently, TargetMachine is only used sometimes by the unrollLoopPartial
3835/// method. If we are going to use TargetMachine for more purposes, especially
3836/// those that are sensitive to TargetOptions, RelocModel and CodeModel, it
3837 /// might be worth requiring front-ends to pass on their TargetMachine,
3838 /// or at least cache it between methods. Note that while frontends such as Clang
3839 /// have just a single main TargetMachine per translation unit, "target-cpu" and
3840 /// "target-features" that determine the TargetMachine are per-function and can
3841 /// be overridden using __attribute__((target("OPTIONS"))).
3842static std::unique_ptr<TargetMachine>
3843 createTargetMachine(Function *F, CodeGenOptLevel OptLevel) {
3844 Module *M = F->getParent();
3845
3846 StringRef CPU = F->getFnAttribute("target-cpu").getValueAsString();
3847 StringRef Features = F->getFnAttribute("target-features").getValueAsString();
3848 const std::string &Triple = M->getTargetTriple();
3849
3850 std::string Error;
3851 const Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
3852 if (!TheTarget)
3853 return {};
3854
3855 llvm::TargetOptions Options;
3856 return std::unique_ptr<TargetMachine>(TheTarget->createTargetMachine(
3857 Triple, CPU, Features, Options, /*RelocModel=*/std::nullopt,
3858 /*CodeModel=*/std::nullopt, OptLevel));
3859}
3860
3861 /// Heuristically determine the best-performing unroll factor for \p CLI. This
3862/// depends on the target processor. We are re-using the same heuristics as the
3863/// LoopUnrollPass.
3865 Function *F = CLI->getFunction();
3866
3867 // Assume the user requests the most aggressive unrolling, even if the rest of
3868 // the code is optimized using a lower setting.
3869 CodeGenOptLevel OptLevel = CodeGenOptLevel::Aggressive;
3870 std::unique_ptr<TargetMachine> TM = createTargetMachine(F, OptLevel);
3871
3872 FunctionAnalysisManager FAM;
3873 FAM.registerPass([]() { return TargetLibraryAnalysis(); });
3874 FAM.registerPass([]() { return AssumptionAnalysis(); });
3875 FAM.registerPass([]() { return DominatorTreeAnalysis(); });
3876 FAM.registerPass([]() { return LoopAnalysis(); });
3877 FAM.registerPass([]() { return ScalarEvolutionAnalysis(); });
3878 FAM.registerPass([]() { return PassInstrumentationAnalysis(); });
3879 TargetIRAnalysis TIRA;
3880 if (TM)
3881 TIRA = TargetIRAnalysis(
3882 [&](const Function &F) { return TM->getTargetTransformInfo(F); });
3883 FAM.registerPass([&]() { return TIRA; });
3884
3885 TargetIRAnalysis::Result &&TTI = TIRA.run(*F, FAM);
3886 ScalarEvolutionAnalysis SEA;
3887 ScalarEvolution &&SE = SEA.run(*F, FAM);
3888 DominatorTreeAnalysis DTA;
3889 DominatorTree &&DT = DTA.run(*F, FAM);
3890 LoopAnalysis LIA;
3891 LoopInfo &&LI = LIA.run(*F, FAM);
3892 AssumptionAnalysis ACT;
3893 AssumptionCache &&AC = ACT.run(*F, FAM);
3894 OptimizationRemarkEmitter ORE{F};
3895
3896 Loop *L = LI.getLoopFor(CLI->getHeader());
3897 assert(L && "Expecting CanonicalLoopInfo to be recognized as a loop");
3898
3901 /*BlockFrequencyInfo=*/nullptr,
3902 /*ProfileSummaryInfo=*/nullptr, ORE, static_cast<int>(OptLevel),
3903 /*UserThreshold=*/std::nullopt,
3904 /*UserCount=*/std::nullopt,
3905 /*UserAllowPartial=*/true,
3906 /*UserAllowRuntime=*/true,
3907 /*UserUpperBound=*/std::nullopt,
3908 /*UserFullUnrollMaxCount=*/std::nullopt);
3909
3910 UP.Force = true;
3911
3912 // Account for additional optimizations taking place before the LoopUnrollPass
3913 // would unroll the loop.
3914 UP.Threshold *= UnrollThresholdFactor;
3915 UP.PartialThreshold *= UnrollThresholdFactor;
3916
3917 // Use normal unroll factors even if the rest of the code is optimized for
3918 // size.
3919 UP.OptSizeThreshold = UP.Threshold;
3920 UP.PartialOptSizeThreshold = UP.PartialThreshold;
3921
3922 LLVM_DEBUG(dbgs() << "Unroll heuristic thresholds:\n"
3923 << " Threshold=" << UP.Threshold << "\n"
3924 << " PartialThreshold=" << UP.PartialThreshold << "\n"
3925 << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n"
3926 << " PartialOptSizeThreshold="
3927 << UP.PartialOptSizeThreshold << "\n");
3928
3929 // Disable peeling.
3932 /*UserAllowPeeling=*/false,
3933 /*UserAllowProfileBasedPeeling=*/false,
3934 /*UnrollingSpecficValues=*/false);
3935
3936 SmallPtrSet<const Value *, 32> EphValues;
3937 CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
3938
3939 // Assume that reads and writes to stack variables can be eliminated by
3940 // Mem2Reg, SROA or LICM. That is, don't count them towards the loop body's
3941 // size.
3942 for (BasicBlock *BB : L->blocks()) {
3943 for (Instruction &I : *BB) {
3944 Value *Ptr;
3945 if (auto *Load = dyn_cast<LoadInst>(&I)) {
3946 Ptr = Load->getPointerOperand();
3947 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
3948 Ptr = Store->getPointerOperand();
3949 } else
3950 continue;
3951
3952 Ptr = Ptr->stripPointerCasts();
3953
3954 if (auto *Alloca = dyn_cast<AllocaInst>(Ptr)) {
3955 if (Alloca->getParent() == &F->getEntryBlock())
3956 EphValues.insert(&I);
3957 }
3958 }
3959 }
3960
3961 UnrollCostEstimator UCE(L, TTI, EphValues, UP.BEInsns);
3962
3963 // Loop is not unrollable if the loop contains certain instructions.
3964 if (!UCE.canUnroll()) {
3965 LLVM_DEBUG(dbgs() << "Loop not considered unrollable\n");
3966 return 1;
3967 }
3968
3969 LLVM_DEBUG(dbgs() << "Estimated loop size is " << UCE.getRolledLoopSize()
3970 << "\n");
3971
3972 // TODO: Determine trip count of \p CLI if constant, computeUnrollCount might
3973 // be able to use it.
3974 int TripCount = 0;
3975 int MaxTripCount = 0;
3976 bool MaxOrZero = false;
3977 unsigned TripMultiple = 0;
3978
3979 bool UseUpperBound = false;
3980 computeUnrollCount(L, TTI, DT, &LI, &AC, SE, EphValues, &ORE, TripCount,
3981 MaxTripCount, MaxOrZero, TripMultiple, UCE, UP, PP,
3982 UseUpperBound);
3983 unsigned Factor = UP.Count;
3984 LLVM_DEBUG(dbgs() << "Suggesting unroll factor of " << Factor << "\n");
3985
3986 // Returning 1 signals that the loop should not be unrolled.
3987 if (Factor == 0)
3988 return 1;
3989 return Factor;
3990}
3991
3992 void OpenMPIRBuilder::unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop,
3993 int32_t Factor,
3994 CanonicalLoopInfo **UnrolledCLI) {
3995 assert(Factor >= 0 && "Unroll factor must not be negative");
3996
3997 Function *F = Loop->getFunction();
3998 LLVMContext &Ctx = F->getContext();
3999
4000 // If the unrolled loop is not used for another loop-associated directive, it
4001 // is sufficient to add metadata for the LoopUnrollPass.
4002 if (!UnrolledCLI) {
4003 SmallVector<Metadata *, 2> LoopMetadata;
4004 LoopMetadata.push_back(
4005 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")));
4006
4007 if (Factor >= 1) {
4008 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get(
4009 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor)));
4010 LoopMetadata.push_back(MDNode::get(
4011 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst}));
4012 }
4013
4014 addLoopMetadata(Loop, LoopMetadata);
4015 return;
4016 }
4017
4018 // Heuristically determine the unroll factor.
4019 if (Factor == 0)
4020 Factor = computeHeuristicUnrollFactor(Loop);
4021
4022 // No change required with unroll factor 1.
4023 if (Factor == 1) {
4024 *UnrolledCLI = Loop;
4025 return;
4026 }
4027
4028 assert(Factor >= 2 &&
4029 "unrolling only makes sense with a factor of 2 or larger");
4030
4031 Type *IndVarTy = Loop->getIndVarType();
4032
4033 // Apply partial unrolling by tiling the loop by the unroll-factor, then fully
4034 // unroll the inner loop.
4035 Value *FactorVal =
4036 ConstantInt::get(IndVarTy, APInt(IndVarTy->getIntegerBitWidth(), Factor,
4037 /*isSigned=*/false));
4038 std::vector<CanonicalLoopInfo *> LoopNest =
4039 tileLoops(DL, {Loop}, {FactorVal});
4040 assert(LoopNest.size() == 2 && "Expect 2 loops after tiling");
4041 *UnrolledCLI = LoopNest[0];
4042 CanonicalLoopInfo *InnerLoop = LoopNest[1];
4043
4044 // LoopUnrollPass can only fully unroll loops with constant trip count.
4045 // Unroll by the unroll factor with a fallback epilog for the remainder
4046 // iterations if necessary.
4047 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get(
4048 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor)));
4049 addLoopMetadata(
4050 InnerLoop,
4051 {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
4052 MDNode::get(
4053 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})});
4054
4055#ifndef NDEBUG
4056 (*UnrolledCLI)->assertOK();
4057#endif
4058}
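// Illustrative sketch (editorial aside, not part of OMPIRBuilder): partial
// unrolling via tiling, as performed above with Factor as the tile size. The
// outer "floor" loop stays rolled and is returned through *UnrolledCLI, while
// the inner "tile" loop has a small trip count and is tagged with
// llvm.loop.unroll.count so the LoopUnrollPass replaces it by up to Factor
// copies of the body. In plain C++ the result is equivalent to:
#include <cstdint>

void partiallyUnrolledLoop(uint64_t TripCount, void (*Body)(uint64_t)) {
  const uint64_t Factor = 4; // sample unroll factor
  uint64_t Floors = TripCount / Factor + (TripCount % Factor != 0);
  for (uint64_t F = 0; F < Floors; ++F) {
    uint64_t Tile = (F == TripCount / Factor) ? TripCount % Factor : Factor;
    // After full unrolling of this inner loop, the short last tile acts as
    // the epilogue for the remainder iterations.
    for (uint64_t T = 0; T < Tile; ++T)
      Body(F * Factor + T);
  }
}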
4059
4060 OpenMPIRBuilder::InsertPointTy
4061 OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc,
4062 llvm::Value *BufSize, llvm::Value *CpyBuf,
4063 llvm::Value *CpyFn, llvm::Value *DidIt) {
4064 if (!updateToLocation(Loc))
4065 return Loc.IP;
4066
4067 uint32_t SrcLocStrSize;
4068 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4069 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4070 Value *ThreadId = getOrCreateThreadID(Ident);
4071
4072 llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt);
4073
4074 Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD};
4075
4076 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate);
4077 Builder.CreateCall(Fn, Args);
4078
4079 return Builder.saveIP();
4080}
4081
4082 OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSingle(
4083 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
4084 FinalizeCallbackTy FiniCB, bool IsNowait, ArrayRef<llvm::Value *> CPVars,
4085 ArrayRef<llvm::Function *> CPFuncs) {
4086
4087 if (!updateToLocation(Loc))
4088 return Loc.IP;
4089
4090 // If needed allocate and initialize `DidIt` with 0.
4091 // DidIt: flag variable: 1=single thread; 0=not single thread.
4092 llvm::Value *DidIt = nullptr;
4093 if (!CPVars.empty()) {
4094 DidIt = Builder.CreateAlloca(llvm::Type::getInt32Ty(Builder.getContext()));
4095 Builder.CreateStore(Builder.getInt32(0), DidIt);
4096 }
4097
4098 Directive OMPD = Directive::OMPD_single;
4099 uint32_t SrcLocStrSize;
4100 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4101 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4102 Value *ThreadId = getOrCreateThreadID(Ident);
4103 Value *Args[] = {Ident, ThreadId};
4104
4105 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single);
4106 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
4107
4108 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single);
4109 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
4110
4111 auto FiniCBWrapper = [&](InsertPointTy IP) {
4112 FiniCB(IP);
4113
4114 // The thread that executes the single region must set `DidIt` to 1.
4115 // This is used by __kmpc_copyprivate, to know if the caller is the
4116 // single thread or not.
4117 if (DidIt)
4118 Builder.CreateStore(Builder.getInt32(1), DidIt);
4119 };
4120
4121 // generates the following:
4122 // if (__kmpc_single()) {
4123 // .... single region ...
4124 // __kmpc_end_single
4125 // }
4126 // __kmpc_copyprivate
4127 // __kmpc_barrier
4128
4129 EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCBWrapper,
4130 /*Conditional*/ true,
4131 /*hasFinalize*/ true);
4132
4133 if (DidIt) {
4134 for (size_t I = 0, E = CPVars.size(); I < E; ++I)
4135 // NOTE BufSize is currently unused, so just pass 0.
4136 createCopyPrivate(LocationDescription(Builder.saveIP(), Loc.DL),
4137 /*BufSize=*/ConstantInt::get(Int64, 0), CPVars[I],
4138 CPFuncs[I], DidIt);
4139 // NOTE __kmpc_copyprivate already inserts a barrier
4140 } else if (!IsNowait)
4141 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
4142 omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
4143 /* CheckCancelFlag */ false);
4144 return Builder.saveIP();
4145}
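// Illustrative sketch (editorial aside, not part of OMPIRBuilder): the
// copyprivate handshake built above, in C-like form. __kmpc_single,
// __kmpc_end_single and __kmpc_copyprivate are the runtime entries used by
// this function; their signatures are simplified, and copyHelper stands in
// for the frontend-provided CPFuncs[I]. Linking requires the OpenMP runtime.
#include <cstdint>

extern "C" {
int32_t __kmpc_single(void *Loc, int32_t Gtid);
void __kmpc_end_single(void *Loc, int32_t Gtid);
void __kmpc_copyprivate(void *Loc, int32_t Gtid, uint64_t BufSize,
                        void *CpyData, void (*CpyFn)(void *, void *),
                        int32_t DidIt);
}

static void copyHelper(void *Dst, void *Src) {
  // Stand-in for the generated copy function: copy *Src into *Dst.
  (void)Dst;
  (void)Src;
}

void singleWithCopyprivate(void *Loc, int32_t Gtid, void *CPVar,
                           void (*Body)()) {
  int32_t DidIt = 0; // flag: 1 = this thread executed the single region
  if (__kmpc_single(Loc, Gtid)) {
    Body();    // single region body
    DidIt = 1; // set by the finalization wrapper above
    __kmpc_end_single(Loc, Gtid);
  }
  // Every thread calls copyprivate; the runtime broadcasts from the thread
  // whose DidIt flag is 1 and also acts as the closing barrier.
  __kmpc_copyprivate(Loc, Gtid, /*BufSize=*/0, CPVar, copyHelper, DidIt);
}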
4146
4148 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
4149 FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) {
4150
4151 if (!updateToLocation(Loc))
4152 return Loc.IP;
4153
4154 Directive OMPD = Directive::OMPD_critical;
4155 uint32_t SrcLocStrSize;
4156 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4157 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4158 Value *ThreadId = getOrCreateThreadID(Ident);
4159 Value *LockVar = getOMPCriticalRegionLock(CriticalName);
4160 Value *Args[] = {Ident, ThreadId, LockVar};
4161
4162 SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args));
4163 Function *RTFn = nullptr;
4164 if (HintInst) {
4165 // Add Hint to entry Args and create call
4166 EnterArgs.push_back(HintInst);
4167 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint);
4168 } else {
4169 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical);
4170 }
4171 Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs);
4172
4173 Function *ExitRTLFn =
4174 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical);
4175 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
4176
4177 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
4178 /*Conditional*/ false, /*hasFinalize*/ true);
4179}
4180
4183 InsertPointTy AllocaIP, unsigned NumLoops,
4184 ArrayRef<llvm::Value *> StoreValues,
4185 const Twine &Name, bool IsDependSource) {
4186 assert(
4187 llvm::all_of(StoreValues,
4188 [](Value *SV) { return SV->getType()->isIntegerTy(64); }) &&
4189 "OpenMP runtime requires depend vec with i64 type");
4190
4191 if (!updateToLocation(Loc))
4192 return Loc.IP;
4193
4194 // Allocate space for vector and generate alloc instruction.
4195 auto *ArrI64Ty = ArrayType::get(Int64, NumLoops);
4196 Builder.restoreIP(AllocaIP);
4197 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI64Ty, nullptr, Name);
4198 ArgsBase->setAlignment(Align(8));
4199 Builder.restoreIP(Loc.IP);
4200
4201 // Store the index value with offset in depend vector.
4202 for (unsigned I = 0; I < NumLoops; ++I) {
4203 Value *DependAddrGEPIter = Builder.CreateInBoundsGEP(
4204 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(I)});
4205 StoreInst *STInst = Builder.CreateStore(StoreValues[I], DependAddrGEPIter);
4206 STInst->setAlignment(Align(8));
4207 }
4208
4209 Value *DependBaseAddrGEP = Builder.CreateInBoundsGEP(
4210 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(0)});
4211
4212 uint32_t SrcLocStrSize;
4213 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4214 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4215 Value *ThreadId = getOrCreateThreadID(Ident);
4216 Value *Args[] = {Ident, ThreadId, DependBaseAddrGEP};
4217
4218 Function *RTLFn = nullptr;
4219 if (IsDependSource)
4220 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_post);
4221 else
4222 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_wait);
4223 Builder.CreateCall(RTLFn, Args);
4224
4225 return Builder.saveIP();
4226}
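// Illustrative sketch (editorial aside, not part of OMPIRBuilder): what the
// doacross lowering above amounts to for a two-deep loop nest, in C-like
// form. The sink/source iteration vector is materialized as an i64 array on
// the stack and handed to __kmpc_doacross_wait / __kmpc_doacross_post, the
// runtime entries used above; their signatures are simplified here.
#include <cstdint>

extern "C" {
void __kmpc_doacross_wait(void *Loc, int32_t Gtid, int64_t *Vec);
void __kmpc_doacross_post(void *Loc, int32_t Gtid, int64_t *Vec);
}

void orderedDependIteration(void *Loc, int32_t Gtid, int64_t I, int64_t J) {
  // "#pragma omp ordered depend(sink: i-1, j)": wait until that iteration
  // has been posted.
  int64_t SinkVec[2] = {I - 1, J};
  __kmpc_doacross_wait(Loc, Gtid, SinkVec);

  // ... cross-iteration dependent part of the loop body ...

  // "#pragma omp ordered depend(source)": publish this iteration.
  int64_t SourceVec[2] = {I, J};
  __kmpc_doacross_post(Loc, Gtid, SourceVec);
}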
4227
4229 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
4230 FinalizeCallbackTy FiniCB, bool IsThreads) {
4231 if (!updateToLocation(Loc))
4232 return Loc.IP;
4233
4234 Directive OMPD = Directive::OMPD_ordered;
4235 Instruction *EntryCall = nullptr;
4236 Instruction *ExitCall = nullptr;
4237
4238 if (IsThreads) {
4239 uint32_t SrcLocStrSize;
4240 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4241 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4242 Value *ThreadId = getOrCreateThreadID(Ident);
4243 Value *Args[] = {Ident, ThreadId};
4244
4245 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_ordered);
4246 EntryCall = Builder.CreateCall(EntryRTLFn, Args);
4247
4248 Function *ExitRTLFn =
4249 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_ordered);
4250 ExitCall = Builder.CreateCall(ExitRTLFn, Args);
4251 }
4252
4253 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
4254 /*Conditional*/ false, /*hasFinalize*/ true);
4255}
4256
4257OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion(
4258 Directive OMPD, Instruction *EntryCall, Instruction *ExitCall,
4259 BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional,
4260 bool HasFinalize, bool IsCancellable) {
4261
4262 if (HasFinalize)
4263 FinalizationStack.push_back({FiniCB, OMPD, IsCancellable});
4264
4265 // Create inlined region's entry and body blocks, in preparation
4266 // for conditional creation
4267 BasicBlock *EntryBB = Builder.GetInsertBlock();
4268 Instruction *SplitPos = EntryBB->getTerminator();
4269 if (!isa_and_nonnull<BranchInst>(SplitPos))
4270 SplitPos = new UnreachableInst(Builder.getContext(), EntryBB);
4271 BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end");
4272 BasicBlock *FiniBB =
4273 EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize");
4274
4276 emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional);
4277
4278 // generate body
4279 BodyGenCB(/* AllocaIP */ InsertPointTy(),
4280 /* CodeGenIP */ Builder.saveIP());
4281
4282 // emit exit call and do any needed finalization.
4283 auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt());
4284 assert(FiniBB->getTerminator()->getNumSuccessors() == 1 &&
4285 FiniBB->getTerminator()->getSuccessor(0) == ExitBB &&
4286 "Unexpected control flow graph state!!");
4287 emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize);
4288 assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB &&
4289 "Unexpected Control Flow State!");
4291
4292 // If we are skipping the region of a non conditional, remove the exit
4293 // block, and clear the builder's insertion point.
4294 assert(SplitPos->getParent() == ExitBB &&
4295 "Unexpected Insertion point location!");
4296 auto merged = MergeBlockIntoPredecessor(ExitBB);
4297 BasicBlock *ExitPredBB = SplitPos->getParent();
4298 auto InsertBB = merged ? ExitPredBB : ExitBB;
4299 if (!isa_and_nonnull<BranchInst>(SplitPos))
4300 SplitPos->eraseFromParent();
4301 Builder.SetInsertPoint(InsertBB);
4302
4303 return Builder.saveIP();
4304}
4305
4306OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry(
4307 Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) {
4308 // If there is nothing to do, return the current insertion point.
4309 if (!Conditional || !EntryCall)
4310 return Builder.saveIP();
4311
4312 BasicBlock *EntryBB = Builder.GetInsertBlock();
4313 Value *CallBool = Builder.CreateIsNotNull(EntryCall);
4314 auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body");
4315 auto *UI = new UnreachableInst(Builder.getContext(), ThenBB);
4316
4317 // Emit thenBB and set the Builder's insertion point there for
4318 // body generation next. Place the block after the current block.
4319 Function *CurFn = EntryBB->getParent();
4320 CurFn->insert(std::next(EntryBB->getIterator()), ThenBB);
4321
4322 // Move Entry branch to end of ThenBB, and replace with conditional
4323 // branch (If-stmt)
4324 Instruction *EntryBBTI = EntryBB->getTerminator();
4325 Builder.CreateCondBr(CallBool, ThenBB, ExitBB);
4326 EntryBBTI->removeFromParent();
4328 Builder.Insert(EntryBBTI);
4329 UI->eraseFromParent();
4330 Builder.SetInsertPoint(ThenBB->getTerminator());
4331
4332 // return an insertion point to ExitBB.
4333 return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt());
4334}
4335
4336OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit(
4337 omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall,
4338 bool HasFinalize) {
4339
4340 Builder.restoreIP(FinIP);
4341
4342 // If there is finalization to do, emit it before the exit call
4343 if (HasFinalize) {
4344 assert(!FinalizationStack.empty() &&
4345 "Unexpected finalization stack state!");
4346
4347 FinalizationInfo Fi = FinalizationStack.pop_back_val();
4348 assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!");
4349
4350 Fi.FiniCB(FinIP);
4351
4352 BasicBlock *FiniBB = FinIP.getBlock();
4353 Instruction *FiniBBTI = FiniBB->getTerminator();
4354
4355 // set Builder IP for call creation
4356 Builder.SetInsertPoint(FiniBBTI);
4357 }
4358
4359 if (!ExitCall)
4360 return Builder.saveIP();
4361
4362 // Place the exit call as the last instruction before the finalization block terminator.
4363 ExitCall->removeFromParent();
4364 Builder.Insert(ExitCall);
4365
4366 return IRBuilder<>::InsertPoint(ExitCall->getParent(),
4367 ExitCall->getIterator());
4368}
4369
4371 InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr,
4372 llvm::IntegerType *IntPtrTy, bool BranchtoEnd) {
4373 if (!IP.isSet())
4374 return IP;
4375
4377
4378 // creates the following CFG structure
4379 // OMP_Entry : (MasterAddr != PrivateAddr)?
4380 // F T
4381 // | \
4382 // | copyin.not.master
4383 // | /
4384 // v /
4385 // copyin.not.master.end
4386 // |
4387 // v
4388 // OMP.Entry.Next
4389
4390 BasicBlock *OMP_Entry = IP.getBlock();
4391 Function *CurFn = OMP_Entry->getParent();
4392 BasicBlock *CopyBegin =
4393 BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn);
4394 BasicBlock *CopyEnd = nullptr;
4395
4396 // If entry block is terminated, split to preserve the branch to following
4397 // basic block (i.e. OMP.Entry.Next), otherwise, leave everything as is.
4398 if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) {
4399 CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(),
4400 "copyin.not.master.end");
4401 OMP_Entry->getTerminator()->eraseFromParent();
4402 } else {
4403 CopyEnd =
4404 BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn);
4405 }
4406
4407 Builder.SetInsertPoint(OMP_Entry);
4408 Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy);
4409 Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy);
4410 Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr);
4411 Builder.CreateCondBr(cmp, CopyBegin, CopyEnd);
4412
4413 Builder.SetInsertPoint(CopyBegin);
4414 if (BranchtoEnd)
4415 Builder.CreateBr(CopyEnd);
4416
4417 return Builder.saveIP();
4418}
4419
4420 CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc,
4421 Value *Size, Value *Allocator,
4422 std::string Name) {
4424 updateToLocation(Loc);
4425
4426 uint32_t SrcLocStrSize;
4427 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4428 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4429 Value *ThreadId = getOrCreateThreadID(Ident);
4430 Value *Args[] = {ThreadId, Size, Allocator};
4431
4432 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc);
4433
4434 return Builder.CreateCall(Fn, Args, Name);
4435}
4436
4437 CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc,
4438 Value *Addr, Value *Allocator,
4439 std::string Name) {
4441 updateToLocation(Loc);
4442
4443 uint32_t SrcLocStrSize;
4444 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4445 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4446 Value *ThreadId = getOrCreateThreadID(Ident);
4447 Value *Args[] = {ThreadId, Addr, Allocator};
4448 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free);
4449 return Builder.CreateCall(Fn, Args, Name);
4450}
4451
4452 CallInst *OpenMPIRBuilder::createOMPInteropInit(
4453 const LocationDescription &Loc, Value *InteropVar,
4454 omp::OMPInteropType InteropType, Value *Device, Value *NumDependences,
4455 Value *DependenceAddress, bool HaveNowaitClause) {
4457 updateToLocation(Loc);
4458
4459 uint32_t SrcLocStrSize;
4460 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4461 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4462 Value *ThreadId = getOrCreateThreadID(Ident);
4463 if (Device == nullptr)
4464 Device = ConstantInt::get(Int32, -1);
4465 Constant *InteropTypeVal = ConstantInt::get(Int32, (int)InteropType);
4466 if (NumDependences == nullptr) {
4467 NumDependences = ConstantInt::get(Int32, 0);
4468 PointerType *PointerTypeVar = PointerType::getUnqual(M.getContext());
4469 DependenceAddress = ConstantPointerNull::get(PointerTypeVar);
4470 }
4471 Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause);
4472 Value *Args[] = {
4473 Ident, ThreadId, InteropVar, InteropTypeVal,
4474 Device, NumDependences, DependenceAddress, HaveNowaitClauseVal};
4475
4476 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_init);
4477
4478 return Builder.CreateCall(Fn, Args);
4479}
4480
4482 const LocationDescription &Loc, Value *InteropVar, Value *Device,
4483 Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause) {
4485 updateToLocation(Loc);
4486
4487 uint32_t SrcLocStrSize;
4488 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
4489 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
4490 Value *ThreadId = getOrCreateThreadID(Ident);
4491 if (Device == nullptr)
4492 Device = ConstantInt::get(Int32, -1);
4493 if (NumDependences == nullptr) {
4494 NumDependences = ConstantInt::get(Int32, 0);
4495 PointerType *PointerTypeVar =