Bug Summary

File: build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include/llvm/IR/IRBuilder.h
Warning: line 187, column 10
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name OMPIRBuilder.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -resource-dir /usr/lib/llvm-15/lib/clang/15.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Frontend/OpenMP -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Frontend/OpenMP -I include -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-15/lib/clang/15.0.0/include -internal-isystem /usr/local/include -internal-isystem 
/usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-04-20-140412-16051-1 -x c++ /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp

/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp

1//===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file implements the OpenMPIRBuilder class, which is used as a
11/// convenient way to create LLVM instructions for OpenMP directives.
12///
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
16#include "llvm/ADT/SmallSet.h"
17#include "llvm/ADT/StringRef.h"
18#include "llvm/Analysis/AssumptionCache.h"
19#include "llvm/Analysis/CodeMetrics.h"
20#include "llvm/Analysis/LoopInfo.h"
21#include "llvm/Analysis/OptimizationRemarkEmitter.h"
22#include "llvm/Analysis/ScalarEvolution.h"
23#include "llvm/Analysis/TargetLibraryInfo.h"
24#include "llvm/IR/CFG.h"
25#include "llvm/IR/Constants.h"
26#include "llvm/IR/DebugInfoMetadata.h"
27#include "llvm/IR/GlobalVariable.h"
28#include "llvm/IR/IRBuilder.h"
29#include "llvm/IR/MDBuilder.h"
30#include "llvm/IR/PassManager.h"
31#include "llvm/IR/Value.h"
32#include "llvm/MC/TargetRegistry.h"
33#include "llvm/Support/CommandLine.h"
34#include "llvm/Target/TargetMachine.h"
35#include "llvm/Target/TargetOptions.h"
36#include "llvm/Transforms/Utils/BasicBlockUtils.h"
37#include "llvm/Transforms/Utils/CodeExtractor.h"
38#include "llvm/Transforms/Utils/LoopPeel.h"
39#include "llvm/Transforms/Utils/UnrollLoop.h"
40
41#include <cstdint>
42
#define DEBUG_TYPE "openmp-ir-builder"
44
45using namespace llvm;
46using namespace omp;
47
48static cl::opt<bool>
49 OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden,
50 cl::desc("Use optimistic attributes describing "
51 "'as-if' properties of runtime calls."),
52 cl::init(false));
53
54static cl::opt<double> UnrollThresholdFactor(
55 "openmp-ir-builder-unroll-threshold-factor", cl::Hidden,
56 cl::desc("Factor for the unroll threshold to account for code "
57 "simplifications still taking place"),
58 cl::init(1.5));
59
60#ifndef NDEBUG
61/// Return whether IP1 and IP2 are ambiguous, i.e. that inserting instructions
62/// at position IP1 may change the meaning of IP2 or vice-versa. This is because
63/// an InsertPoint stores the instruction before something is inserted. For
64/// instance, if both point to the same instruction, two IRBuilders alternating
65/// creating instruction will cause the instructions to be interleaved.
66static bool isConflictIP(IRBuilder<>::InsertPoint IP1,
67 IRBuilder<>::InsertPoint IP2) {
68 if (!IP1.isSet() || !IP2.isSet())
69 return false;
70 return IP1.getBlock() == IP2.getBlock() && IP1.getPoint() == IP2.getPoint();
71}
72
73static bool isValidWorkshareLoopScheduleType(OMPScheduleType SchedType) {
74 // Valid ordered/unordered and base algorithm combinations.
75 switch (SchedType & ~OMPScheduleType::MonotonicityMask) {
76 case OMPScheduleType::UnorderedStaticChunked:
77 case OMPScheduleType::UnorderedStatic:
78 case OMPScheduleType::UnorderedDynamicChunked:
79 case OMPScheduleType::UnorderedGuidedChunked:
80 case OMPScheduleType::UnorderedRuntime:
81 case OMPScheduleType::UnorderedAuto:
82 case OMPScheduleType::UnorderedTrapezoidal:
83 case OMPScheduleType::UnorderedGreedy:
84 case OMPScheduleType::UnorderedBalanced:
85 case OMPScheduleType::UnorderedGuidedIterativeChunked:
86 case OMPScheduleType::UnorderedGuidedAnalyticalChunked:
87 case OMPScheduleType::UnorderedSteal:
88 case OMPScheduleType::UnorderedStaticBalancedChunked:
89 case OMPScheduleType::UnorderedGuidedSimd:
90 case OMPScheduleType::UnorderedRuntimeSimd:
91 case OMPScheduleType::OrderedStaticChunked:
92 case OMPScheduleType::OrderedStatic:
93 case OMPScheduleType::OrderedDynamicChunked:
94 case OMPScheduleType::OrderedGuidedChunked:
95 case OMPScheduleType::OrderedRuntime:
96 case OMPScheduleType::OrderedAuto:
97 case OMPScheduleType::OrderdTrapezoidal:
98 case OMPScheduleType::NomergeUnorderedStaticChunked:
99 case OMPScheduleType::NomergeUnorderedStatic:
100 case OMPScheduleType::NomergeUnorderedDynamicChunked:
101 case OMPScheduleType::NomergeUnorderedGuidedChunked:
102 case OMPScheduleType::NomergeUnorderedRuntime:
103 case OMPScheduleType::NomergeUnorderedAuto:
104 case OMPScheduleType::NomergeUnorderedTrapezoidal:
105 case OMPScheduleType::NomergeUnorderedGreedy:
106 case OMPScheduleType::NomergeUnorderedBalanced:
107 case OMPScheduleType::NomergeUnorderedGuidedIterativeChunked:
108 case OMPScheduleType::NomergeUnorderedGuidedAnalyticalChunked:
109 case OMPScheduleType::NomergeUnorderedSteal:
110 case OMPScheduleType::NomergeOrderedStaticChunked:
111 case OMPScheduleType::NomergeOrderedStatic:
112 case OMPScheduleType::NomergeOrderedDynamicChunked:
113 case OMPScheduleType::NomergeOrderedGuidedChunked:
114 case OMPScheduleType::NomergeOrderedRuntime:
115 case OMPScheduleType::NomergeOrderedAuto:
116 case OMPScheduleType::NomergeOrderedTrapezoidal:
117 break;
118 default:
119 return false;
120 }
121
122 // Must not set both monotonicity modifiers at the same time.
123 OMPScheduleType MonotonicityFlags =
124 SchedType & OMPScheduleType::MonotonicityMask;
125 if (MonotonicityFlags == OMPScheduleType::MonotonicityMask)
126 return false;
127
128 return true;
129}
130#endif
131
132/// Determine which scheduling algorithm to use, determined from schedule clause
133/// arguments.
134static OMPScheduleType
135getOpenMPBaseScheduleType(llvm::omp::ScheduleKind ClauseKind, bool HasChunks,
136 bool HasSimdModifier) {
137 // Currently, the default schedule it static.
138 switch (ClauseKind) {
139 case OMP_SCHEDULE_Default:
140 case OMP_SCHEDULE_Static:
141 return HasChunks ? OMPScheduleType::BaseStaticChunked
142 : OMPScheduleType::BaseStatic;
143 case OMP_SCHEDULE_Dynamic:
144 return OMPScheduleType::BaseDynamicChunked;
145 case OMP_SCHEDULE_Guided:
146 return HasSimdModifier ? OMPScheduleType::BaseGuidedSimd
147 : OMPScheduleType::BaseGuidedChunked;
148 case OMP_SCHEDULE_Auto:
149 return llvm::omp::OMPScheduleType::BaseAuto;
150 case OMP_SCHEDULE_Runtime:
151 return HasSimdModifier ? OMPScheduleType::BaseRuntimeSimd
152 : OMPScheduleType::BaseRuntime;
153 }
154 llvm_unreachable("unhandled schedule clause argument")::llvm::llvm_unreachable_internal("unhandled schedule clause argument"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 154)
;
155}
156
157/// Adds ordering modifier flags to schedule type.
158static OMPScheduleType
159getOpenMPOrderingScheduleType(OMPScheduleType BaseScheduleType,
160 bool HasOrderedClause) {
161 assert((BaseScheduleType & OMPScheduleType::ModifierMask) ==(static_cast <bool> ((BaseScheduleType & OMPScheduleType
::ModifierMask) == OMPScheduleType::None && "Must not have ordering nor monotonicity flags already set"
) ? void (0) : __assert_fail ("(BaseScheduleType & OMPScheduleType::ModifierMask) == OMPScheduleType::None && \"Must not have ordering nor monotonicity flags already set\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 163, __extension__
__PRETTY_FUNCTION__))
162 OMPScheduleType::None &&(static_cast <bool> ((BaseScheduleType & OMPScheduleType
::ModifierMask) == OMPScheduleType::None && "Must not have ordering nor monotonicity flags already set"
) ? void (0) : __assert_fail ("(BaseScheduleType & OMPScheduleType::ModifierMask) == OMPScheduleType::None && \"Must not have ordering nor monotonicity flags already set\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 163, __extension__
__PRETTY_FUNCTION__))
163 "Must not have ordering nor monotonicity flags already set")(static_cast <bool> ((BaseScheduleType & OMPScheduleType
::ModifierMask) == OMPScheduleType::None && "Must not have ordering nor monotonicity flags already set"
) ? void (0) : __assert_fail ("(BaseScheduleType & OMPScheduleType::ModifierMask) == OMPScheduleType::None && \"Must not have ordering nor monotonicity flags already set\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 163, __extension__
__PRETTY_FUNCTION__))
;
164
165 OMPScheduleType OrderingModifier = HasOrderedClause
166 ? OMPScheduleType::ModifierOrdered
167 : OMPScheduleType::ModifierUnordered;
168 OMPScheduleType OrderingScheduleType = BaseScheduleType | OrderingModifier;
169
170 // Unsupported combinations
171 if (OrderingScheduleType ==
172 (OMPScheduleType::BaseGuidedSimd | OMPScheduleType::ModifierOrdered))
173 return OMPScheduleType::OrderedGuidedChunked;
174 else if (OrderingScheduleType == (OMPScheduleType::BaseRuntimeSimd |
175 OMPScheduleType::ModifierOrdered))
176 return OMPScheduleType::OrderedRuntime;
177
178 return OrderingScheduleType;
179}
180
181/// Adds monotonicity modifier flags to schedule type.
182static OMPScheduleType
183getOpenMPMonotonicityScheduleType(OMPScheduleType ScheduleType,
184 bool HasSimdModifier, bool HasMonotonic,
185 bool HasNonmonotonic, bool HasOrderedClause) {
186 assert((ScheduleType & OMPScheduleType::MonotonicityMask) ==(static_cast <bool> ((ScheduleType & OMPScheduleType
::MonotonicityMask) == OMPScheduleType::None && "Must not have monotonicity flags already set"
) ? void (0) : __assert_fail ("(ScheduleType & OMPScheduleType::MonotonicityMask) == OMPScheduleType::None && \"Must not have monotonicity flags already set\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 188, __extension__
__PRETTY_FUNCTION__))
187 OMPScheduleType::None &&(static_cast <bool> ((ScheduleType & OMPScheduleType
::MonotonicityMask) == OMPScheduleType::None && "Must not have monotonicity flags already set"
) ? void (0) : __assert_fail ("(ScheduleType & OMPScheduleType::MonotonicityMask) == OMPScheduleType::None && \"Must not have monotonicity flags already set\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 188, __extension__
__PRETTY_FUNCTION__))
188 "Must not have monotonicity flags already set")(static_cast <bool> ((ScheduleType & OMPScheduleType
::MonotonicityMask) == OMPScheduleType::None && "Must not have monotonicity flags already set"
) ? void (0) : __assert_fail ("(ScheduleType & OMPScheduleType::MonotonicityMask) == OMPScheduleType::None && \"Must not have monotonicity flags already set\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 188, __extension__
__PRETTY_FUNCTION__))
;
189 assert((!HasMonotonic || !HasNonmonotonic) &&(static_cast <bool> ((!HasMonotonic || !HasNonmonotonic
) && "Monotonic and Nonmonotonic are contradicting each other"
) ? void (0) : __assert_fail ("(!HasMonotonic || !HasNonmonotonic) && \"Monotonic and Nonmonotonic are contradicting each other\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 190, __extension__
__PRETTY_FUNCTION__))
190 "Monotonic and Nonmonotonic are contradicting each other")(static_cast <bool> ((!HasMonotonic || !HasNonmonotonic
) && "Monotonic and Nonmonotonic are contradicting each other"
) ? void (0) : __assert_fail ("(!HasMonotonic || !HasNonmonotonic) && \"Monotonic and Nonmonotonic are contradicting each other\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 190, __extension__
__PRETTY_FUNCTION__))
;
191
192 if (HasMonotonic) {
193 return ScheduleType | OMPScheduleType::ModifierMonotonic;
194 } else if (HasNonmonotonic) {
195 return ScheduleType | OMPScheduleType::ModifierNonmonotonic;
196 } else {
197 // OpenMP 5.1, 2.11.4 Worksharing-Loop Construct, Description.
198 // If the static schedule kind is specified or if the ordered clause is
199 // specified, and if the nonmonotonic modifier is not specified, the
200 // effect is as if the monotonic modifier is specified. Otherwise, unless
201 // the monotonic modifier is specified, the effect is as if the
202 // nonmonotonic modifier is specified.
203 OMPScheduleType BaseScheduleType =
204 ScheduleType & ~OMPScheduleType::ModifierMask;
205 if ((BaseScheduleType == OMPScheduleType::BaseStatic) ||
206 (BaseScheduleType == OMPScheduleType::BaseStaticChunked) ||
207 HasOrderedClause) {
208 // The monotonic is used by default in openmp runtime library, so no need
209 // to set it.
210 return ScheduleType;
211 } else {
212 return ScheduleType | OMPScheduleType::ModifierNonmonotonic;
213 }
214 }
215}
216
217/// Determine the schedule type using schedule and ordering clause arguments.
218static OMPScheduleType
219computeOpenMPScheduleType(ScheduleKind ClauseKind, bool HasChunks,
220 bool HasSimdModifier, bool HasMonotonicModifier,
221 bool HasNonmonotonicModifier, bool HasOrderedClause) {
222 OMPScheduleType BaseSchedule =
223 getOpenMPBaseScheduleType(ClauseKind, HasChunks, HasSimdModifier);
224 OMPScheduleType OrderedSchedule =
225 getOpenMPOrderingScheduleType(BaseSchedule, HasOrderedClause);
226 OMPScheduleType Result = getOpenMPMonotonicityScheduleType(
227 OrderedSchedule, HasSimdModifier, HasMonotonicModifier,
228 HasNonmonotonicModifier, HasOrderedClause);
229
230 assert(isValidWorkshareLoopScheduleType(Result))(static_cast <bool> (isValidWorkshareLoopScheduleType(Result
)) ? void (0) : __assert_fail ("isValidWorkshareLoopScheduleType(Result)"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 230, __extension__
__PRETTY_FUNCTION__))
;
231 return Result;
232}
233
234/// Make \p Source branch to \p Target.
235///
236/// Handles two situations:
237/// * \p Source already has an unconditional branch.
238/// * \p Source is a degenerate block (no terminator because the BB is
239/// the current head of the IR construction).
240static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) {
241 if (Instruction *Term = Source->getTerminator()) {
242 auto *Br = cast<BranchInst>(Term);
243 assert(!Br->isConditional() &&(static_cast <bool> (!Br->isConditional() &&
"BB's terminator must be an unconditional branch (or degenerate)"
) ? void (0) : __assert_fail ("!Br->isConditional() && \"BB's terminator must be an unconditional branch (or degenerate)\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 244, __extension__
__PRETTY_FUNCTION__))
244 "BB's terminator must be an unconditional branch (or degenerate)")(static_cast <bool> (!Br->isConditional() &&
"BB's terminator must be an unconditional branch (or degenerate)"
) ? void (0) : __assert_fail ("!Br->isConditional() && \"BB's terminator must be an unconditional branch (or degenerate)\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 244, __extension__
__PRETTY_FUNCTION__))
;
245 BasicBlock *Succ = Br->getSuccessor(0);
246 Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true);
247 Br->setSuccessor(0, Target);
248 return;
249 }
250
251 auto *NewBr = BranchInst::Create(Target, Source);
252 NewBr->setDebugLoc(DL);
253}
254
255/// Move the instruction after an InsertPoint to the beginning of another
256/// BasicBlock.
257///
258/// The instructions after \p IP are moved to the beginning of \p New which must
259/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
260/// \p New will be added such that there is no semantic change. Otherwise, the
261/// \p IP insert block remains degenerate and it is up to the caller to insert a
262/// terminator.
263static void spliceBB(OpenMPIRBuilder::InsertPointTy IP, BasicBlock *New,
264 bool CreateBranch) {
265 assert(New->getFirstInsertionPt() == New->begin() &&(static_cast <bool> (New->getFirstInsertionPt() == New
->begin() && "Target BB must not have PHI nodes") ?
void (0) : __assert_fail ("New->getFirstInsertionPt() == New->begin() && \"Target BB must not have PHI nodes\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 266, __extension__
__PRETTY_FUNCTION__))
266 "Target BB must not have PHI nodes")(static_cast <bool> (New->getFirstInsertionPt() == New
->begin() && "Target BB must not have PHI nodes") ?
void (0) : __assert_fail ("New->getFirstInsertionPt() == New->begin() && \"Target BB must not have PHI nodes\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 266, __extension__
__PRETTY_FUNCTION__))
;
267
268 // Move instructions to new block.
269 BasicBlock *Old = IP.getBlock();
270 New->getInstList().splice(New->begin(), Old->getInstList(), IP.getPoint(),
271 Old->end());
272
273 if (CreateBranch)
274 BranchInst::Create(New, Old);
275}
276
277/// Splice a BasicBlock at an IRBuilder's current insertion point. Its new
278/// insert location will stick to after the instruction before the insertion
279/// point (instead of moving with the instruction the InsertPoint stores
280/// internally).
281static void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch) {
282 DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
283 BasicBlock *Old = Builder.GetInsertBlock();
284
285 spliceBB(Builder.saveIP(), New, CreateBranch);
286 if (CreateBranch)
287 Builder.SetInsertPoint(Old->getTerminator());
288 else
289 Builder.SetInsertPoint(Old);
290
291 // SetInsertPoint also updates the Builder's debug location, but we want to
292 // keep the one the Builder was configured to use.
293 Builder.SetCurrentDebugLocation(DebugLoc);
294}
295
296/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
297/// (missing the terminator).
298///
299/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
300/// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch
301/// is true, a branch to the new successor will new created such that
302/// semantically there is no change; otherwise the block of the insertion point
303/// remains degenerate and it is the caller's responsibility to insert a
304/// terminator. Returns the new successor block.
305static BasicBlock *splitBB(OpenMPIRBuilder::InsertPointTy IP, bool CreateBranch,
306 llvm::Twine Name = {}) {
307 BasicBlock *Old = IP.getBlock();
308 BasicBlock *New = BasicBlock::Create(
309 Old->getContext(), Name.isTriviallyEmpty() ? Old->getName() : Name,
310 Old->getParent(), Old->getNextNode());
311 spliceBB(IP, New, CreateBranch);
312 New->replaceSuccessorsPhiUsesWith(Old, New);
313 return New;
314}
315
316/// Split a BasicBlock at \p Builder's insertion point, even if the block is
317/// degenerate (missing the terminator). Its new insert location will stick to
318/// after the instruction before the insertion point (instead of moving with the
319/// instruction the InsertPoint stores internally).
320static BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch,
321 llvm::Twine Name = {}) {
322 DebugLoc DebugLoc = Builder.getCurrentDebugLocation();
323 BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, Name);
324 if (CreateBranch)
325 Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator());
326 else
327 Builder.SetInsertPoint(Builder.GetInsertBlock());
328 // SetInsertPoint also updates the Builder's debug location, but we want to
329 // keep the one the Builder was configured to use.
330 Builder.SetCurrentDebugLocation(DebugLoc);
331 return New;
332}
333
334void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
335 LLVMContext &Ctx = Fn.getContext();
336
337 // Get the function's current attributes.
338 auto Attrs = Fn.getAttributes();
339 auto FnAttrs = Attrs.getFnAttrs();
340 auto RetAttrs = Attrs.getRetAttrs();
341 SmallVector<AttributeSet, 4> ArgAttrs;
342 for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo)
343 ArgAttrs.emplace_back(Attrs.getParamAttrs(ArgNo));
344
345#define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
346#include "llvm/Frontend/OpenMP/OMPKinds.def"
347
348 // Add attributes to the function declaration.
349 switch (FnID) {
350#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets) \
351 case Enum: \
352 FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet); \
353 RetAttrs = RetAttrs.addAttributes(Ctx, RetAttrSet); \
354 for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo) \
355 ArgAttrs[ArgNo] = \
356 ArgAttrs[ArgNo].addAttributes(Ctx, ArgAttrSets[ArgNo]); \
357 Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs)); \
358 break;
359#include "llvm/Frontend/OpenMP/OMPKinds.def"
360 default:
361 // Attributes are optional.
362 break;
363 }
364}
365
366FunctionCallee
367OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) {
368 FunctionType *FnTy = nullptr;
369 Function *Fn = nullptr;
370
371 // Try to find the declation in the module first.
372 switch (FnID) {
373#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) \
374 case Enum: \
375 FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__}, \
376 IsVarArg); \
377 Fn = M.getFunction(Str); \
378 break;
379#include "llvm/Frontend/OpenMP/OMPKinds.def"
380 }
381
382 if (!Fn) {
383 // Create a new declaration if we need one.
384 switch (FnID) {
385#define OMP_RTL(Enum, Str, ...) \
386 case Enum: \
387 Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M); \
388 break;
389#include "llvm/Frontend/OpenMP/OMPKinds.def"
390 }
391
392 // Add information if the runtime function takes a callback function
393 if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) {
394 if (!Fn->hasMetadata(LLVMContext::MD_callback)) {
395 LLVMContext &Ctx = Fn->getContext();
396 MDBuilder MDB(Ctx);
397 // Annotate the callback behavior of the runtime function:
398 // - The callback callee is argument number 2 (microtask).
399 // - The first two arguments of the callback callee are unknown (-1).
400 // - All variadic arguments to the runtime function are passed to the
401 // callback callee.
402 Fn->addMetadata(
403 LLVMContext::MD_callback,
404 *MDNode::get(Ctx, {MDB.createCallbackEncoding(
405 2, {-1, -1}, /* VarArgsArePassed */ true)}));
406 }
407 }
408
409 LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Created OpenMP runtime function "
<< Fn->getName() << " with type " << *Fn
->getFunctionType() << "\n"; } } while (false)
410 << " with type " << *Fn->getFunctionType() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Created OpenMP runtime function "
<< Fn->getName() << " with type " << *Fn
->getFunctionType() << "\n"; } } while (false)
;
411 addAttributes(FnID, *Fn);
412
413 } else {
414 LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Found OpenMP runtime function "
<< Fn->getName() << " with type " << *Fn
->getFunctionType() << "\n"; } } while (false)
415 << " with type " << *Fn->getFunctionType() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Found OpenMP runtime function "
<< Fn->getName() << " with type " << *Fn
->getFunctionType() << "\n"; } } while (false)
;
416 }
417
418 assert(Fn && "Failed to create OpenMP runtime function")(static_cast <bool> (Fn && "Failed to create OpenMP runtime function"
) ? void (0) : __assert_fail ("Fn && \"Failed to create OpenMP runtime function\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 418, __extension__
__PRETTY_FUNCTION__))
;
419
420 // Cast the function to the expected type if necessary
421 Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo());
422 return {FnTy, C};
423}
424
425Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) {
426 FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID);
427 auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee());
428 assert(Fn && "Failed to create OpenMP runtime function pointer")(static_cast <bool> (Fn && "Failed to create OpenMP runtime function pointer"
) ? void (0) : __assert_fail ("Fn && \"Failed to create OpenMP runtime function pointer\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 428, __extension__
__PRETTY_FUNCTION__))
;
429 return Fn;
430}
431
432void OpenMPIRBuilder::initialize() { initializeTypes(M); }
433
434void OpenMPIRBuilder::finalize(Function *Fn) {
435 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
436 SmallVector<BasicBlock *, 32> Blocks;
437 SmallVector<OutlineInfo, 16> DeferredOutlines;
438 for (OutlineInfo &OI : OutlineInfos) {
439 // Skip functions that have not finalized yet; may happen with nested
440 // function generation.
441 if (Fn && OI.getFunction() != Fn) {
442 DeferredOutlines.push_back(OI);
443 continue;
444 }
445
446 ParallelRegionBlockSet.clear();
447 Blocks.clear();
448 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
449
450 Function *OuterFn = OI.getFunction();
451 CodeExtractorAnalysisCache CEAC(*OuterFn);
452 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
453 /* AggregateArgs */ true,
454 /* BlockFrequencyInfo */ nullptr,
455 /* BranchProbabilityInfo */ nullptr,
456 /* AssumptionCache */ nullptr,
457 /* AllowVarArgs */ true,
458 /* AllowAlloca */ true,
459 /* AllocaBlock*/ OI.OuterAllocaBB,
460 /* Suffix */ ".omp_par");
461
462 LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Before outlining: "
<< *OuterFn << "\n"; } } while (false)
;
463 LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Entry " << OI
.EntryBB->getName() << " Exit: " << OI.ExitBB->
getName() << "\n"; } } while (false)
464 << " Exit: " << OI.ExitBB->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Entry " << OI
.EntryBB->getName() << " Exit: " << OI.ExitBB->
getName() << "\n"; } } while (false)
;
465 assert(Extractor.isEligible() &&(static_cast <bool> (Extractor.isEligible() && "Expected OpenMP outlining to be possible!"
) ? void (0) : __assert_fail ("Extractor.isEligible() && \"Expected OpenMP outlining to be possible!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 466, __extension__
__PRETTY_FUNCTION__))
466 "Expected OpenMP outlining to be possible!")(static_cast <bool> (Extractor.isEligible() && "Expected OpenMP outlining to be possible!"
) ? void (0) : __assert_fail ("Extractor.isEligible() && \"Expected OpenMP outlining to be possible!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 466, __extension__
__PRETTY_FUNCTION__))
;
467
468 for (auto *V : OI.ExcludeArgsFromAggregate)
469 Extractor.excludeArgFromAggregate(V);
470
471 Function *OutlinedFn = Extractor.extractCodeRegion(CEAC);
472
473 LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "After outlining: "
<< *OuterFn << "\n"; } } while (false)
;
474 LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << " Outlined function: "
<< *OutlinedFn << "\n"; } } while (false)
;
475 assert(OutlinedFn->getReturnType()->isVoidTy() &&(static_cast <bool> (OutlinedFn->getReturnType()->
isVoidTy() && "OpenMP outlined functions should not return a value!"
) ? void (0) : __assert_fail ("OutlinedFn->getReturnType()->isVoidTy() && \"OpenMP outlined functions should not return a value!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 476, __extension__
__PRETTY_FUNCTION__))
476 "OpenMP outlined functions should not return a value!")(static_cast <bool> (OutlinedFn->getReturnType()->
isVoidTy() && "OpenMP outlined functions should not return a value!"
) ? void (0) : __assert_fail ("OutlinedFn->getReturnType()->isVoidTy() && \"OpenMP outlined functions should not return a value!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 476, __extension__
__PRETTY_FUNCTION__))
;
477
478 // For compability with the clang CG we move the outlined function after the
479 // one with the parallel region.
480 OutlinedFn->removeFromParent();
481 M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn);
482
483 // Remove the artificial entry introduced by the extractor right away, we
484 // made our own entry block after all.
485 {
486 BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock();
487 assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB)(static_cast <bool> (ArtificialEntry.getUniqueSuccessor
() == OI.EntryBB) ? void (0) : __assert_fail ("ArtificialEntry.getUniqueSuccessor() == OI.EntryBB"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 487, __extension__
__PRETTY_FUNCTION__))
;
488 assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry)(static_cast <bool> (OI.EntryBB->getUniquePredecessor
() == &ArtificialEntry) ? void (0) : __assert_fail ("OI.EntryBB->getUniquePredecessor() == &ArtificialEntry"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 488, __extension__
__PRETTY_FUNCTION__))
;
489 // Move instructions from the to-be-deleted ArtificialEntry to the entry
490 // basic block of the parallel region. CodeExtractor generates
491 // instructions to unwrap the aggregate argument and may sink
492 // allocas/bitcasts for values that are solely used in the outlined region
493 // and do not escape.
494 assert(!ArtificialEntry.empty() &&(static_cast <bool> (!ArtificialEntry.empty() &&
"Expected instructions to add in the outlined region entry")
? void (0) : __assert_fail ("!ArtificialEntry.empty() && \"Expected instructions to add in the outlined region entry\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 495, __extension__
__PRETTY_FUNCTION__))
495 "Expected instructions to add in the outlined region entry")(static_cast <bool> (!ArtificialEntry.empty() &&
"Expected instructions to add in the outlined region entry")
? void (0) : __assert_fail ("!ArtificialEntry.empty() && \"Expected instructions to add in the outlined region entry\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 495, __extension__
__PRETTY_FUNCTION__))
;
496 for (BasicBlock::reverse_iterator It = ArtificialEntry.rbegin(),
497 End = ArtificialEntry.rend();
498 It != End;) {
499 Instruction &I = *It;
500 It++;
501
502 if (I.isTerminator())
503 continue;
504
505 I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
506 }
507
508 OI.EntryBB->moveBefore(&ArtificialEntry);
509 ArtificialEntry.eraseFromParent();
510 }
511 assert(&OutlinedFn->getEntryBlock() == OI.EntryBB)(static_cast <bool> (&OutlinedFn->getEntryBlock(
) == OI.EntryBB) ? void (0) : __assert_fail ("&OutlinedFn->getEntryBlock() == OI.EntryBB"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 511, __extension__
__PRETTY_FUNCTION__))
;
512 assert(OutlinedFn && OutlinedFn->getNumUses() == 1)(static_cast <bool> (OutlinedFn && OutlinedFn->
getNumUses() == 1) ? void (0) : __assert_fail ("OutlinedFn && OutlinedFn->getNumUses() == 1"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 512, __extension__
__PRETTY_FUNCTION__))
;
513
514 // Run a user callback, e.g. to add attributes.
515 if (OI.PostOutlineCB)
516 OI.PostOutlineCB(*OutlinedFn);
517 }
518
519 // Remove work items that have been completed.
520 OutlineInfos = std::move(DeferredOutlines);
521}
522
523OpenMPIRBuilder::~OpenMPIRBuilder() {
524 assert(OutlineInfos.empty() && "There must be no outstanding outlinings")(static_cast <bool> (OutlineInfos.empty() && "There must be no outstanding outlinings"
) ? void (0) : __assert_fail ("OutlineInfos.empty() && \"There must be no outstanding outlinings\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 524, __extension__
__PRETTY_FUNCTION__))
;
525}
526
527GlobalValue *OpenMPIRBuilder::createGlobalFlag(unsigned Value, StringRef Name) {
528 IntegerType *I32Ty = Type::getInt32Ty(M.getContext());
529 auto *GV =
530 new GlobalVariable(M, I32Ty,
531 /* isConstant = */ true, GlobalValue::WeakODRLinkage,
532 ConstantInt::get(I32Ty, Value), Name);
533 GV->setVisibility(GlobalValue::HiddenVisibility);
534
535 return GV;
536}
537
538Constant *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
539 uint32_t SrcLocStrSize,
540 IdentFlag LocFlags,
541 unsigned Reserve2Flags) {
542 // Enable "C-mode".
543 LocFlags |= OMP_IDENT_FLAG_KMPC;
544
545 Constant *&Ident =
546 IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}];
547 if (!Ident) {
548 Constant *I32Null = ConstantInt::getNullValue(Int32);
549 Constant *IdentData[] = {I32Null,
550 ConstantInt::get(Int32, uint32_t(LocFlags)),
551 ConstantInt::get(Int32, Reserve2Flags),
552 ConstantInt::get(Int32, SrcLocStrSize), SrcLocStr};
553 Constant *Initializer =
554 ConstantStruct::get(OpenMPIRBuilder::Ident, IdentData);
555
556 // Look for existing encoding of the location + flags, not needed but
557 // minimizes the difference to the existing solution while we transition.
558 for (GlobalVariable &GV : M.getGlobalList())
559 if (GV.getValueType() == OpenMPIRBuilder::Ident && GV.hasInitializer())
560 if (GV.getInitializer() == Initializer)
561 Ident = &GV;
562
563 if (!Ident) {
564 auto *GV = new GlobalVariable(
565 M, OpenMPIRBuilder::Ident,
566 /* isConstant = */ true, GlobalValue::PrivateLinkage, Initializer, "",
567 nullptr, GlobalValue::NotThreadLocal,
568 M.getDataLayout().getDefaultGlobalsAddressSpace());
569 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
570 GV->setAlignment(Align(8));
571 Ident = GV;
572 }
573 }
574
575 return ConstantExpr::getPointerBitCastOrAddrSpaceCast(Ident, IdentPtr);
576}
577
578Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr,
579 uint32_t &SrcLocStrSize) {
580 SrcLocStrSize = LocStr.size();
581 Constant *&SrcLocStr = SrcLocStrMap[LocStr];
582 if (!SrcLocStr) {
583 Constant *Initializer =
584 ConstantDataArray::getString(M.getContext(), LocStr);
585
586 // Look for existing encoding of the location, not needed but minimizes the
587 // difference to the existing solution while we transition.
588 for (GlobalVariable &GV : M.getGlobalList())
589 if (GV.isConstant() && GV.hasInitializer() &&
590 GV.getInitializer() == Initializer)
591 return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr);
592
593 SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "",
594 /* AddressSpace */ 0, &M);
595 }
596 return SrcLocStr;
597}
598
599Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName,
600 StringRef FileName,
601 unsigned Line, unsigned Column,
602 uint32_t &SrcLocStrSize) {
603 SmallString<128> Buffer;
604 Buffer.push_back(';');
605 Buffer.append(FileName);
606 Buffer.push_back(';');
607 Buffer.append(FunctionName);
608 Buffer.push_back(';');
609 Buffer.append(std::to_string(Line));
610 Buffer.push_back(';');
611 Buffer.append(std::to_string(Column));
612 Buffer.push_back(';');
613 Buffer.push_back(';');
614 return getOrCreateSrcLocStr(Buffer.str(), SrcLocStrSize);
615}
616
617Constant *
618OpenMPIRBuilder::getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize) {
619 StringRef UnknownLoc = ";unknown;unknown;0;0;;";
620 return getOrCreateSrcLocStr(UnknownLoc, SrcLocStrSize);
621}
622
623Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL,
624 uint32_t &SrcLocStrSize,
625 Function *F) {
626 DILocation *DIL = DL.get();
627 if (!DIL)
628 return getOrCreateDefaultSrcLocStr(SrcLocStrSize);
629 StringRef FileName = M.getName();
630 if (DIFile *DIF = DIL->getFile())
631 if (Optional<StringRef> Source = DIF->getSource())
632 FileName = *Source;
633 StringRef Function = DIL->getScope()->getSubprogram()->getName();
634 if (Function.empty() && F)
635 Function = F->getName();
636 return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(),
637 DIL->getColumn(), SrcLocStrSize);
638}
639
640Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc,
641 uint32_t &SrcLocStrSize) {
642 return getOrCreateSrcLocStr(Loc.DL, SrcLocStrSize,
643 Loc.IP.getBlock()->getParent());
644}
645
646Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) {
647 return Builder.CreateCall(
648 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident,
649 "omp_global_thread_num");
650}
651
652OpenMPIRBuilder::InsertPointTy
653OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK,
654 bool ForceSimpleCall, bool CheckCancelFlag) {
655 if (!updateToLocation(Loc))
656 return Loc.IP;
657 return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag);
658}
659
660OpenMPIRBuilder::InsertPointTy
661OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind,
662 bool ForceSimpleCall, bool CheckCancelFlag) {
663 // Build call __kmpc_cancel_barrier(loc, thread_id) or
664 // __kmpc_barrier(loc, thread_id);
665
666 IdentFlag BarrierLocFlags;
667 switch (Kind) {
668 case OMPD_for:
669 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR;
670 break;
671 case OMPD_sections:
672 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS;
673 break;
674 case OMPD_single:
675 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE;
676 break;
677 case OMPD_barrier:
678 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL;
679 break;
680 default:
681 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL;
682 break;
683 }
684
685 uint32_t SrcLocStrSize;
686 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
687 Value *Args[] = {
688 getOrCreateIdent(SrcLocStr, SrcLocStrSize, BarrierLocFlags),
689 getOrCreateThreadID(getOrCreateIdent(SrcLocStr, SrcLocStrSize))};
690
691 // If we are in a cancellable parallel region, barriers are cancellation
692 // points.
693 // TODO: Check why we would force simple calls or to ignore the cancel flag.
694 bool UseCancelBarrier =
695 !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel);
696
697 Value *Result =
698 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(
699 UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier
700 : OMPRTL___kmpc_barrier),
701 Args);
702
703 if (UseCancelBarrier && CheckCancelFlag)
704 emitCancelationCheckImpl(Result, OMPD_parallel);
705
706 return Builder.saveIP();
707}
708
709OpenMPIRBuilder::InsertPointTy
710OpenMPIRBuilder::createCancel(const LocationDescription &Loc,
711 Value *IfCondition,
712 omp::Directive CanceledDirective) {
713 if (!updateToLocation(Loc))
714 return Loc.IP;
715
716 // LLVM utilities like blocks with terminators.
717 auto *UI = Builder.CreateUnreachable();
718
719 Instruction *ThenTI = UI, *ElseTI = nullptr;
720 if (IfCondition)
721 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
722 Builder.SetInsertPoint(ThenTI);
723
724 Value *CancelKind = nullptr;
725 switch (CanceledDirective) {
726#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \
727 case DirectiveEnum: \
728 CancelKind = Builder.getInt32(Value); \
729 break;
730#include "llvm/Frontend/OpenMP/OMPKinds.def"
731 default:
732 llvm_unreachable("Unknown cancel kind!")::llvm::llvm_unreachable_internal("Unknown cancel kind!", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 732)
;
733 }
734
735 uint32_t SrcLocStrSize;
736 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
737 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
738 Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind};
739 Value *Result = Builder.CreateCall(
740 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args);
741 auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) {
742 if (CanceledDirective == OMPD_parallel) {
743 IRBuilder<>::InsertPointGuard IPG(Builder);
744 Builder.restoreIP(IP);
745 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
746 omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
747 /* CheckCancelFlag */ false);
748 }
749 };
750
751 // The actual cancel logic is shared with others, e.g., cancel_barriers.
752 emitCancelationCheckImpl(Result, CanceledDirective, ExitCB);
753
754 // Update the insertion point and remove the terminator we introduced.
755 Builder.SetInsertPoint(UI->getParent());
756 UI->eraseFromParent();
757
758 return Builder.saveIP();
759}
760
761void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag,
762 omp::Directive CanceledDirective,
763 FinalizeCallbackTy ExitCB) {
764 assert(isLastFinalizationInfoCancellable(CanceledDirective) &&(static_cast <bool> (isLastFinalizationInfoCancellable(
CanceledDirective) && "Unexpected cancellation!") ? void
(0) : __assert_fail ("isLastFinalizationInfoCancellable(CanceledDirective) && \"Unexpected cancellation!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 765, __extension__
__PRETTY_FUNCTION__))
765 "Unexpected cancellation!")(static_cast <bool> (isLastFinalizationInfoCancellable(
CanceledDirective) && "Unexpected cancellation!") ? void
(0) : __assert_fail ("isLastFinalizationInfoCancellable(CanceledDirective) && \"Unexpected cancellation!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 765, __extension__
__PRETTY_FUNCTION__))
;
766
767 // For a cancel barrier we create two new blocks.
768 BasicBlock *BB = Builder.GetInsertBlock();
769 BasicBlock *NonCancellationBlock;
770 if (Builder.GetInsertPoint() == BB->end()) {
771 // TODO: This branch will not be needed once we moved to the
772 // OpenMPIRBuilder codegen completely.
773 NonCancellationBlock = BasicBlock::Create(
774 BB->getContext(), BB->getName() + ".cont", BB->getParent());
775 } else {
776 NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint());
777 BB->getTerminator()->eraseFromParent();
778 Builder.SetInsertPoint(BB);
779 }
780 BasicBlock *CancellationBlock = BasicBlock::Create(
781 BB->getContext(), BB->getName() + ".cncl", BB->getParent());
782
783 // Jump to them based on the return value.
784 Value *Cmp = Builder.CreateIsNull(CancelFlag);
785 Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock,
786 /* TODO weight */ nullptr, nullptr);
787
788 // From the cancellation block we finalize all variables and go to the
789 // post finalization block that is known to the FiniCB callback.
790 Builder.SetInsertPoint(CancellationBlock);
791 if (ExitCB)
792 ExitCB(Builder.saveIP());
793 auto &FI = FinalizationStack.back();
794 FI.FiniCB(Builder.saveIP());
795
796 // The continuation block is where code generation continues.
797 Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
798}
799
800IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
801 const LocationDescription &Loc, InsertPointTy OuterAllocaIP,
802 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
803 FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads,
804 omp::ProcBindKind ProcBind, bool IsCancellable) {
805 assert(!isConflictIP(Loc.IP, OuterAllocaIP) && "IPs must not be ambiguous")(static_cast <bool> (!isConflictIP(Loc.IP, OuterAllocaIP
) && "IPs must not be ambiguous") ? void (0) : __assert_fail
("!isConflictIP(Loc.IP, OuterAllocaIP) && \"IPs must not be ambiguous\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 805, __extension__
__PRETTY_FUNCTION__))
;
806
807 if (!updateToLocation(Loc))
808 return Loc.IP;
809
810 uint32_t SrcLocStrSize;
811 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
812 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
813 Value *ThreadID = getOrCreateThreadID(Ident);
814
815 if (NumThreads) {
816 // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads)
817 Value *Args[] = {
818 Ident, ThreadID,
819 Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)};
820 Builder.CreateCall(
821 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args);
822 }
823
824 if (ProcBind != OMP_PROC_BIND_default) {
825 // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind)
826 Value *Args[] = {
827 Ident, ThreadID,
828 ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)};
829 Builder.CreateCall(
830 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args);
831 }
832
833 BasicBlock *InsertBB = Builder.GetInsertBlock();
834 Function *OuterFn = InsertBB->getParent();
835
836 // Save the outer alloca block because the insertion iterator may get
837 // invalidated and we still need this later.
838 BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock();
839
840 // Vector to remember instructions we used only during the modeling but which
841 // we want to delete at the end.
842 SmallVector<Instruction *, 4> ToBeDeleted;
843
844 // Change the location to the outer alloca insertion point to create and
845 // initialize the allocas we pass into the parallel region.
846 Builder.restoreIP(OuterAllocaIP);
847 AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr");
848 AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr");
849
850 // If there is an if condition we actually use the TIDAddr and ZeroAddr in the
851 // program, otherwise we only need them for modeling purposes to get the
852 // associated arguments in the outlined function. In the former case,
853 // initialize the allocas properly, in the latter case, delete them later.
854 if (IfCondition) {
855 Builder.CreateStore(Constant::getNullValue(Int32), TIDAddr);
856 Builder.CreateStore(Constant::getNullValue(Int32), ZeroAddr);
857 } else {
858 ToBeDeleted.push_back(TIDAddr);
859 ToBeDeleted.push_back(ZeroAddr);
860 }
861
862 // Create an artificial insertion point that will also ensure the blocks we
863 // are about to split are not degenerated.
864 auto *UI = new UnreachableInst(Builder.getContext(), InsertBB);
865
866 Instruction *ThenTI = UI, *ElseTI = nullptr;
867 if (IfCondition)
868 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
869
870 BasicBlock *ThenBB = ThenTI->getParent();
871 BasicBlock *PRegEntryBB = ThenBB->splitBasicBlock(ThenTI, "omp.par.entry");
872 BasicBlock *PRegBodyBB =
873 PRegEntryBB->splitBasicBlock(ThenTI, "omp.par.region");
874 BasicBlock *PRegPreFiniBB =
875 PRegBodyBB->splitBasicBlock(ThenTI, "omp.par.pre_finalize");
876 BasicBlock *PRegExitBB =
877 PRegPreFiniBB->splitBasicBlock(ThenTI, "omp.par.exit");
878
879 auto FiniCBWrapper = [&](InsertPointTy IP) {
880 // Hide "open-ended" blocks from the given FiniCB by setting the right jump
881 // target to the region exit block.
882 if (IP.getBlock()->end() == IP.getPoint()) {
883 IRBuilder<>::InsertPointGuard IPG(Builder);
884 Builder.restoreIP(IP);
885 Instruction *I = Builder.CreateBr(PRegExitBB);
886 IP = InsertPointTy(I->getParent(), I->getIterator());
887 }
888 assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 &&(static_cast <bool> (IP.getBlock()->getTerminator()->
getNumSuccessors() == 1 && IP.getBlock()->getTerminator
()->getSuccessor(0) == PRegExitBB && "Unexpected insertion point for finalization call!"
) ? void (0) : __assert_fail ("IP.getBlock()->getTerminator()->getNumSuccessors() == 1 && IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB && \"Unexpected insertion point for finalization call!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 890, __extension__
__PRETTY_FUNCTION__))
889 IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB &&(static_cast <bool> (IP.getBlock()->getTerminator()->
getNumSuccessors() == 1 && IP.getBlock()->getTerminator
()->getSuccessor(0) == PRegExitBB && "Unexpected insertion point for finalization call!"
) ? void (0) : __assert_fail ("IP.getBlock()->getTerminator()->getNumSuccessors() == 1 && IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB && \"Unexpected insertion point for finalization call!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 890, __extension__
__PRETTY_FUNCTION__))
890 "Unexpected insertion point for finalization call!")(static_cast <bool> (IP.getBlock()->getTerminator()->
getNumSuccessors() == 1 && IP.getBlock()->getTerminator
()->getSuccessor(0) == PRegExitBB && "Unexpected insertion point for finalization call!"
) ? void (0) : __assert_fail ("IP.getBlock()->getTerminator()->getNumSuccessors() == 1 && IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB && \"Unexpected insertion point for finalization call!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 890, __extension__
__PRETTY_FUNCTION__))
;
891 return FiniCB(IP);
892 };
893
894 FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable});
895
896 // Generate the privatization allocas in the block that will become the entry
897 // of the outlined function.
898 Builder.SetInsertPoint(PRegEntryBB->getTerminator());
899 InsertPointTy InnerAllocaIP = Builder.saveIP();
900
901 AllocaInst *PrivTIDAddr =
902 Builder.CreateAlloca(Int32, nullptr, "tid.addr.local");
903 Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid");
904
905 // Add some fake uses for OpenMP provided arguments.
906 ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use"));
907 Instruction *ZeroAddrUse =
908 Builder.CreateLoad(Int32, ZeroAddr, "zero.addr.use");
909 ToBeDeleted.push_back(ZeroAddrUse);
910
911 // ThenBB
912 // |
913 // V
914 // PRegionEntryBB <- Privatization allocas are placed here.
915 // |
916 // V
917 // PRegionBodyBB <- BodeGen is invoked here.
918 // |
919 // V
920 // PRegPreFiniBB <- The block we will start finalization from.
921 // |
922 // V
923 // PRegionExitBB <- A common exit to simplify block collection.
924 //
925
926 LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Before body codegen: "
<< *OuterFn << "\n"; } } while (false)
;
927
928 // Let the caller create the body.
929 assert(BodyGenCB && "Expected body generation callback!")(static_cast <bool> (BodyGenCB && "Expected body generation callback!"
) ? void (0) : __assert_fail ("BodyGenCB && \"Expected body generation callback!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 929, __extension__
__PRETTY_FUNCTION__))
;
930 InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin());
931 BodyGenCB(InnerAllocaIP, CodeGenIP, *PRegPreFiniBB);
932
933 LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "After body codegen: "
<< *OuterFn << "\n"; } } while (false)
;
934
935 FunctionCallee RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call);
936 if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
937 if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
938 llvm::LLVMContext &Ctx = F->getContext();
939 MDBuilder MDB(Ctx);
940 // Annotate the callback behavior of the __kmpc_fork_call:
941 // - The callback callee is argument number 2 (microtask).
942 // - The first two arguments of the callback callee are unknown (-1).
943 // - All variadic arguments to the __kmpc_fork_call are passed to the
944 // callback callee.
945 F->addMetadata(
946 llvm::LLVMContext::MD_callback,
947 *llvm::MDNode::get(
948 Ctx, {MDB.createCallbackEncoding(2, {-1, -1},
949 /* VarArgsArePassed */ true)}));
950 }
951 }
952
953 OutlineInfo OI;
954 OI.PostOutlineCB = [=](Function &OutlinedFn) {
955 // Add some known attributes.
956 OutlinedFn.addParamAttr(0, Attribute::NoAlias);
957 OutlinedFn.addParamAttr(1, Attribute::NoAlias);
958 OutlinedFn.addFnAttr(Attribute::NoUnwind);
959 OutlinedFn.addFnAttr(Attribute::NoRecurse);
960
961 assert(OutlinedFn.arg_size() >= 2 &&(static_cast <bool> (OutlinedFn.arg_size() >= 2 &&
"Expected at least tid and bounded tid as arguments") ? void
(0) : __assert_fail ("OutlinedFn.arg_size() >= 2 && \"Expected at least tid and bounded tid as arguments\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 962, __extension__
__PRETTY_FUNCTION__))
962 "Expected at least tid and bounded tid as arguments")(static_cast <bool> (OutlinedFn.arg_size() >= 2 &&
"Expected at least tid and bounded tid as arguments") ? void
(0) : __assert_fail ("OutlinedFn.arg_size() >= 2 && \"Expected at least tid and bounded tid as arguments\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 962, __extension__
__PRETTY_FUNCTION__))
;
963 unsigned NumCapturedVars =
964 OutlinedFn.arg_size() - /* tid & bounded tid */ 2;
965
966 CallInst *CI = cast<CallInst>(OutlinedFn.user_back());
967 CI->getParent()->setName("omp_parallel");
968 Builder.SetInsertPoint(CI);
969
970 // Build call __kmpc_fork_call(Ident, n, microtask, var1, .., varn);
971 Value *ForkCallArgs[] = {
972 Ident, Builder.getInt32(NumCapturedVars),
973 Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)};
974
975 SmallVector<Value *, 16> RealArgs;
976 RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs));
977 RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end());
978
979 Builder.CreateCall(RTLFn, RealArgs);
980
981 LLVM_DEBUG(dbgs() << "With fork_call placed: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "With fork_call placed: "
<< *Builder.GetInsertBlock()->getParent() << "\n"
; } } while (false)
982 << *Builder.GetInsertBlock()->getParent() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "With fork_call placed: "
<< *Builder.GetInsertBlock()->getParent() << "\n"
; } } while (false)
;
983
984 InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end());
985
986 // Initialize the local TID stack location with the argument value.
987 Builder.SetInsertPoint(PrivTID);
988 Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin();
989 Builder.CreateStore(Builder.CreateLoad(Int32, OutlinedAI), PrivTIDAddr);
990
991 // If no "if" clause was present we do not need the call created during
992 // outlining, otherwise we reuse it in the serialized parallel region.
993 if (!ElseTI) {
994 CI->eraseFromParent();
995 } else {
996
997 // If an "if" clause was present we are now generating the serialized
998 // version into the "else" branch.
999 Builder.SetInsertPoint(ElseTI);
1000
1001 // Build calls __kmpc_serialized_parallel(&Ident, GTid);
1002 Value *SerializedParallelCallArgs[] = {Ident, ThreadID};
1003 Builder.CreateCall(
1004 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_serialized_parallel),
1005 SerializedParallelCallArgs);
1006
1007 // OutlinedFn(&GTid, &zero, CapturedStruct);
1008 CI->removeFromParent();
1009 Builder.Insert(CI);
1010
1011 // __kmpc_end_serialized_parallel(&Ident, GTid);
1012 Value *EndArgs[] = {Ident, ThreadID};
1013 Builder.CreateCall(
1014 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_serialized_parallel),
1015 EndArgs);
1016
1017 LLVM_DEBUG(dbgs() << "With serialized parallel region: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "With serialized parallel region: "
<< *Builder.GetInsertBlock()->getParent() << "\n"
; } } while (false)
1018 << *Builder.GetInsertBlock()->getParent() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "With serialized parallel region: "
<< *Builder.GetInsertBlock()->getParent() << "\n"
; } } while (false)
;
1019 }
1020
1021 for (Instruction *I : ToBeDeleted)
1022 I->eraseFromParent();
1023 };
1024
1025 // Adjust the finalization stack, verify the adjustment, and call the
1026 // finalize function a last time to finalize values between the pre-fini
1027 // block and the exit block if we left the parallel "the normal way".
1028 auto FiniInfo = FinalizationStack.pop_back_val();
1029 (void)FiniInfo;
1030 assert(FiniInfo.DK == OMPD_parallel &&(static_cast <bool> (FiniInfo.DK == OMPD_parallel &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("FiniInfo.DK == OMPD_parallel && \"Unexpected finalization stack state!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1031, __extension__
__PRETTY_FUNCTION__))
1031 "Unexpected finalization stack state!")(static_cast <bool> (FiniInfo.DK == OMPD_parallel &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("FiniInfo.DK == OMPD_parallel && \"Unexpected finalization stack state!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1031, __extension__
__PRETTY_FUNCTION__))
;
1032
1033 Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator();
1034
1035 InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator());
1036 FiniCB(PreFiniIP);
1037
1038 OI.OuterAllocaBB = OuterAllocaBlock;
1039 OI.EntryBB = PRegEntryBB;
1040 OI.ExitBB = PRegExitBB;
1041
1042 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
1043 SmallVector<BasicBlock *, 32> Blocks;
1044 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
1045
1046 // Ensure a single exit node for the outlined region by creating one.
1047 // We might have multiple incoming edges to the exit now due to finalizations,
1048 // e.g., cancel calls that cause the control flow to leave the region.
1049 BasicBlock *PRegOutlinedExitBB = PRegExitBB;
1050 PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt());
1051 PRegOutlinedExitBB->setName("omp.par.outlined.exit");
1052 Blocks.push_back(PRegOutlinedExitBB);
1053
1054 CodeExtractorAnalysisCache CEAC(*OuterFn);
1055 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
1056 /* AggregateArgs */ false,
1057 /* BlockFrequencyInfo */ nullptr,
1058 /* BranchProbabilityInfo */ nullptr,
1059 /* AssumptionCache */ nullptr,
1060 /* AllowVarArgs */ true,
1061 /* AllowAlloca */ true,
1062 /* AllocationBlock */ OuterAllocaBlock,
1063 /* Suffix */ ".omp_par");
1064
1065 // Find inputs to, outputs from the code region.
1066 BasicBlock *CommonExit = nullptr;
1067 SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands;
1068 Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
1069 Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands);
1070
1071 LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Before privatization: "
<< *OuterFn << "\n"; } } while (false)
;
1072
1073 FunctionCallee TIDRTLFn =
1074 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num);
1075
1076 auto PrivHelper = [&](Value &V) {
1077 if (&V == TIDAddr || &V == ZeroAddr) {
1078 OI.ExcludeArgsFromAggregate.push_back(&V);
1079 return;
1080 }
1081
1082 SetVector<Use *> Uses;
1083 for (Use &U : V.uses())
1084 if (auto *UserI = dyn_cast<Instruction>(U.getUser()))
1085 if (ParallelRegionBlockSet.count(UserI->getParent()))
1086 Uses.insert(&U);
1087
1088 // __kmpc_fork_call expects extra arguments as pointers. If the input
1089 // already has a pointer type, everything is fine. Otherwise, store the
1090 // value onto stack and load it back inside the to-be-outlined region. This
1091 // will ensure only the pointer will be passed to the function.
1092 // FIXME: if there are more than 15 trailing arguments, they must be
1093 // additionally packed in a struct.
1094 Value *Inner = &V;
1095 if (!V.getType()->isPointerTy()) {
1096 IRBuilder<>::InsertPointGuard Guard(Builder);
1097 LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { llvm::dbgs() << "Forwarding input as pointer: "
<< V << "\n"; } } while (false)
;
1098
1099 Builder.restoreIP(OuterAllocaIP);
1100 Value *Ptr =
1101 Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded");
1102
1103 // Store to stack at end of the block that currently branches to the entry
1104 // block of the to-be-outlined region.
1105 Builder.SetInsertPoint(InsertBB,
1106 InsertBB->getTerminator()->getIterator());
1107 Builder.CreateStore(&V, Ptr);
1108
1109 // Load back next to allocations in the to-be-outlined region.
1110 Builder.restoreIP(InnerAllocaIP);
1111 Inner = Builder.CreateLoad(V.getType(), Ptr);
1112 }
1113
1114 Value *ReplacementValue = nullptr;
1115 CallInst *CI = dyn_cast<CallInst>(&V);
1116 if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) {
1117 ReplacementValue = PrivTID;
1118 } else {
1119 Builder.restoreIP(
1120 PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue));
1121 assert(ReplacementValue &&(static_cast <bool> (ReplacementValue && "Expected copy/create callback to set replacement value!"
) ? void (0) : __assert_fail ("ReplacementValue && \"Expected copy/create callback to set replacement value!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1122, __extension__
__PRETTY_FUNCTION__))
1122 "Expected copy/create callback to set replacement value!")(static_cast <bool> (ReplacementValue && "Expected copy/create callback to set replacement value!"
) ? void (0) : __assert_fail ("ReplacementValue && \"Expected copy/create callback to set replacement value!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1122, __extension__
__PRETTY_FUNCTION__))
;
1123 if (ReplacementValue == &V)
1124 return;
1125 }
1126
1127 for (Use *UPtr : Uses)
1128 UPtr->set(ReplacementValue);
1129 };
1130
1131 // Reset the inner alloca insertion as it will be used for loading the values
1132 // wrapped into pointers before passing them into the to-be-outlined region.
1133 // Configure it to insert immediately after the fake use of zero address so
1134 // that they are available in the generated body and so that the
1135 // OpenMP-related values (thread ID and zero address pointers) remain leading
1136 // in the argument list.
1137 InnerAllocaIP = IRBuilder<>::InsertPoint(
1138 ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator());
1139
1140 // Reset the outer alloca insertion point to the entry of the relevant block
1141 // in case it was invalidated.
1142 OuterAllocaIP = IRBuilder<>::InsertPoint(
1143 OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt());
1144
1145 for (Value *Input : Inputs) {
1146 LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Captured input: " <<
*Input << "\n"; } } while (false)
;
1147 PrivHelper(*Input);
1148 }
1149 LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (Value *Output : Outputs) do {
if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(
"openmp-ir-builder")) { dbgs() << "Captured output: " <<
*Output << "\n"; } } while (false); }; } } while (false
)
1150 for (Value *Output : Outputs)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (Value *Output : Outputs) do {
if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(
"openmp-ir-builder")) { dbgs() << "Captured output: " <<
*Output << "\n"; } } while (false); }; } } while (false
)
1151 LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (Value *Output : Outputs) do {
if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(
"openmp-ir-builder")) { dbgs() << "Captured output: " <<
*Output << "\n"; } } while (false); }; } } while (false
)
1152 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (Value *Output : Outputs) do {
if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(
"openmp-ir-builder")) { dbgs() << "Captured output: " <<
*Output << "\n"; } } while (false); }; } } while (false
)
;
1153 assert(Outputs.empty() &&(static_cast <bool> (Outputs.empty() && "OpenMP outlining should not produce live-out values!"
) ? void (0) : __assert_fail ("Outputs.empty() && \"OpenMP outlining should not produce live-out values!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1154, __extension__
__PRETTY_FUNCTION__))
1154 "OpenMP outlining should not produce live-out values!")(static_cast <bool> (Outputs.empty() && "OpenMP outlining should not produce live-out values!"
) ? void (0) : __assert_fail ("Outputs.empty() && \"OpenMP outlining should not produce live-out values!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1154, __extension__
__PRETTY_FUNCTION__))
;
1155
1156 LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "After privatization: "
<< *OuterFn << "\n"; } } while (false)
;
1157 LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() <<
" PBR: " << BB->getName() << "\n"; }; } } while
(false)
1158 for (auto *BB : Blocks)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() <<
" PBR: " << BB->getName() << "\n"; }; } } while
(false)
1159 dbgs() << " PBR: " << BB->getName() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() <<
" PBR: " << BB->getName() << "\n"; }; } } while
(false)
1160 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() <<
" PBR: " << BB->getName() << "\n"; }; } } while
(false)
;
1161
1162 // Register the outlined info.
1163 addOutlineInfo(std::move(OI));
1164
1165 InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end());
1166 UI->eraseFromParent();
1167
1168 return AfterIP;
1169}
1170
1171void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) {
1172 // Build call void __kmpc_flush(ident_t *loc)
1173 uint32_t SrcLocStrSize;
1174 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1175 Value *Args[] = {getOrCreateIdent(SrcLocStr, SrcLocStrSize)};
1176
1177 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args);
1178}
1179
1180void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) {
1181 if (!updateToLocation(Loc))
1182 return;
1183 emitFlush(Loc);
1184}
1185
1186void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) {
1187 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
1188 // global_tid);
1189 uint32_t SrcLocStrSize;
1190 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1191 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1192 Value *Args[] = {Ident, getOrCreateThreadID(Ident)};
1193
1194 // Ignore return result until untied tasks are supported.
1195 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait),
1196 Args);
1197}
1198
1199void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) {
1200 if (!updateToLocation(Loc))
1201 return;
1202 emitTaskwaitImpl(Loc);
1203}
1204
1205void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) {
1206 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
1207 uint32_t SrcLocStrSize;
1208 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1209 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1210 Constant *I32Null = ConstantInt::getNullValue(Int32);
1211 Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null};
1212
1213 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield),
1214 Args);
1215}
1216
1217void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) {
1218 if (!updateToLocation(Loc))
1219 return;
1220 emitTaskyieldImpl(Loc);
1221}
1222
1223OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections(
1224 const LocationDescription &Loc, InsertPointTy AllocaIP,
1225 ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB,
1226 FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) {
1227 assert(!isConflictIP(AllocaIP, Loc.IP) && "Dedicated IP allocas required")(static_cast <bool> (!isConflictIP(AllocaIP, Loc.IP) &&
"Dedicated IP allocas required") ? void (0) : __assert_fail (
"!isConflictIP(AllocaIP, Loc.IP) && \"Dedicated IP allocas required\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1227, __extension__
__PRETTY_FUNCTION__))
;
1228
1229 if (!updateToLocation(Loc))
1230 return Loc.IP;
1231
1232 auto FiniCBWrapper = [&](InsertPointTy IP) {
1233 if (IP.getBlock()->end() != IP.getPoint())
1234 return FiniCB(IP);
1235 // This must be done otherwise any nested constructs using FinalizeOMPRegion
1236 // will fail because that function requires the Finalization Basic Block to
1237 // have a terminator, which is already removed by EmitOMPRegionBody.
1238 // IP is currently at cancelation block.
1239 // We need to backtrack to the condition block to fetch
1240 // the exit block and create a branch from cancelation
1241 // to exit block.
1242 IRBuilder<>::InsertPointGuard IPG(Builder);
1243 Builder.restoreIP(IP);
1244 auto *CaseBB = IP.getBlock()->getSinglePredecessor();
1245 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
1246 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
1247 Instruction *I = Builder.CreateBr(ExitBB);
1248 IP = InsertPointTy(I->getParent(), I->getIterator());
1249 return FiniCB(IP);
1250 };
1251
1252 FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable});
1253
1254 // Each section is emitted as a switch case
1255 // Each finalization callback is handled from clang.EmitOMPSectionDirective()
1256 // -> OMP.createSection() which generates the IR for each section
1257 // Iterate through all sections and emit a switch construct:
1258 // switch (IV) {
1259 // case 0:
1260 // <SectionStmt[0]>;
1261 // break;
1262 // ...
1263 // case <NumSection> - 1:
1264 // <SectionStmt[<NumSection> - 1]>;
1265 // break;
1266 // }
1267 // ...
1268 // section_loop.after:
1269 // <FiniCB>;
1270 auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) {
1271 auto *CurFn = CodeGenIP.getBlock()->getParent();
1272 auto *ForIncBB = CodeGenIP.getBlock()->getSingleSuccessor();
1273 auto *ForExitBB = CodeGenIP.getBlock()
1274 ->getSinglePredecessor()
1275 ->getTerminator()
1276 ->getSuccessor(1);
1277 SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, ForIncBB);
1278 Builder.restoreIP(CodeGenIP);
1279 unsigned CaseNumber = 0;
1280 for (auto SectionCB : SectionCBs) {
1281 auto *CaseBB = BasicBlock::Create(M.getContext(),
1282 "omp_section_loop.body.case", CurFn);
1283 SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB);
1284 Builder.SetInsertPoint(CaseBB);
1285 SectionCB(InsertPointTy(), Builder.saveIP(), *ForExitBB);
1286 CaseNumber++;
1287 }
1288 // remove the existing terminator from body BB since there can be no
1289 // terminators after switch/case
1290 CodeGenIP.getBlock()->getTerminator()->eraseFromParent();
1291 };
1292 // Loop body ends here
1293 // LowerBound, UpperBound, and STride for createCanonicalLoop
1294 Type *I32Ty = Type::getInt32Ty(M.getContext());
1295 Value *LB = ConstantInt::get(I32Ty, 0);
1296 Value *UB = ConstantInt::get(I32Ty, SectionCBs.size());
1297 Value *ST = ConstantInt::get(I32Ty, 1);
1298 llvm::CanonicalLoopInfo *LoopInfo = createCanonicalLoop(
1299 Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop");
1300 Builder.SetInsertPoint(AllocaIP.getBlock()->getTerminator());
1301 AllocaIP = Builder.saveIP();
1302 InsertPointTy AfterIP =
1303 applyStaticWorkshareLoop(Loc.DL, LoopInfo, AllocaIP, !IsNowait);
1304 BasicBlock *LoopAfterBB = AfterIP.getBlock();
1305 Instruction *SplitPos = LoopAfterBB->getTerminator();
1306 if (!isa_and_nonnull<BranchInst>(SplitPos))
1307 SplitPos = new UnreachableInst(Builder.getContext(), LoopAfterBB);
1308 // ExitBB after LoopAfterBB because LoopAfterBB is used for FinalizationCB,
1309 // which requires a BB with branch
1310 BasicBlock *ExitBB =
1311 LoopAfterBB->splitBasicBlock(SplitPos, "omp_sections.end");
1312 SplitPos->eraseFromParent();
1313
1314 // Apply the finalization callback in LoopAfterBB
1315 auto FiniInfo = FinalizationStack.pop_back_val();
1316 assert(FiniInfo.DK == OMPD_sections &&(static_cast <bool> (FiniInfo.DK == OMPD_sections &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("FiniInfo.DK == OMPD_sections && \"Unexpected finalization stack state!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1317, __extension__
__PRETTY_FUNCTION__))
1317 "Unexpected finalization stack state!")(static_cast <bool> (FiniInfo.DK == OMPD_sections &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("FiniInfo.DK == OMPD_sections && \"Unexpected finalization stack state!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1317, __extension__
__PRETTY_FUNCTION__))
;
1318 Builder.SetInsertPoint(LoopAfterBB->getTerminator());
1319 FiniInfo.FiniCB(Builder.saveIP());
1320 Builder.SetInsertPoint(ExitBB);
1321
1322 return Builder.saveIP();
1323}
1324
1325OpenMPIRBuilder::InsertPointTy
1326OpenMPIRBuilder::createSection(const LocationDescription &Loc,
1327 BodyGenCallbackTy BodyGenCB,
1328 FinalizeCallbackTy FiniCB) {
1329 if (!updateToLocation(Loc))
1330 return Loc.IP;
1331
1332 auto FiniCBWrapper = [&](InsertPointTy IP) {
1333 if (IP.getBlock()->end() != IP.getPoint())
1334 return FiniCB(IP);
1335 // This must be done otherwise any nested constructs using FinalizeOMPRegion
1336 // will fail because that function requires the Finalization Basic Block to
1337 // have a terminator, which is already removed by EmitOMPRegionBody.
1338 // IP is currently at cancelation block.
1339 // We need to backtrack to the condition block to fetch
1340 // the exit block and create a branch from cancelation
1341 // to exit block.
1342 IRBuilder<>::InsertPointGuard IPG(Builder);
1343 Builder.restoreIP(IP);
1344 auto *CaseBB = Loc.IP.getBlock();
1345 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
1346 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
1347 Instruction *I = Builder.CreateBr(ExitBB);
1348 IP = InsertPointTy(I->getParent(), I->getIterator());
1349 return FiniCB(IP);
1350 };
1351
1352 Directive OMPD = Directive::OMPD_sections;
1353 // Since we are using Finalization Callback here, HasFinalize
1354 // and IsCancellable have to be true
1355 return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper,
1356 /*Conditional*/ false, /*hasFinalize*/ true,
1357 /*IsCancellable*/ true);
1358}
1359
1360/// Create a function with a unique name and a "void (i8*, i8*)" signature in
1361/// the given module and return it.
1362Function *getFreshReductionFunc(Module &M) {
1363 Type *VoidTy = Type::getVoidTy(M.getContext());
1364 Type *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
1365 auto *FuncTy =
1366 FunctionType::get(VoidTy, {Int8PtrTy, Int8PtrTy}, /* IsVarArg */ false);
1367 return Function::Create(FuncTy, GlobalVariable::InternalLinkage,
1368 M.getDataLayout().getDefaultGlobalsAddressSpace(),
1369 ".omp.reduction.func", &M);
1370}
1371
1372OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createReductions(
1373 const LocationDescription &Loc, InsertPointTy AllocaIP,
1374 ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait) {
1375 for (const ReductionInfo &RI : ReductionInfos) {
1376 (void)RI;
1377 assert(RI.Variable && "expected non-null variable")(static_cast <bool> (RI.Variable && "expected non-null variable"
) ? void (0) : __assert_fail ("RI.Variable && \"expected non-null variable\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1377, __extension__
__PRETTY_FUNCTION__))
;
1378 assert(RI.PrivateVariable && "expected non-null private variable")(static_cast <bool> (RI.PrivateVariable && "expected non-null private variable"
) ? void (0) : __assert_fail ("RI.PrivateVariable && \"expected non-null private variable\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1378, __extension__
__PRETTY_FUNCTION__))
;
1379 assert(RI.ReductionGen && "expected non-null reduction generator callback")(static_cast <bool> (RI.ReductionGen && "expected non-null reduction generator callback"
) ? void (0) : __assert_fail ("RI.ReductionGen && \"expected non-null reduction generator callback\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1379, __extension__
__PRETTY_FUNCTION__))
;
1380 assert(RI.Variable->getType() == RI.PrivateVariable->getType() &&(static_cast <bool> (RI.Variable->getType() == RI.PrivateVariable
->getType() && "expected variables and their private equivalents to have the same "
"type") ? void (0) : __assert_fail ("RI.Variable->getType() == RI.PrivateVariable->getType() && \"expected variables and their private equivalents to have the same \" \"type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1382, __extension__
__PRETTY_FUNCTION__))
1381 "expected variables and their private equivalents to have the same "(static_cast <bool> (RI.Variable->getType() == RI.PrivateVariable
->getType() && "expected variables and their private equivalents to have the same "
"type") ? void (0) : __assert_fail ("RI.Variable->getType() == RI.PrivateVariable->getType() && \"expected variables and their private equivalents to have the same \" \"type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1382, __extension__
__PRETTY_FUNCTION__))
1382 "type")(static_cast <bool> (RI.Variable->getType() == RI.PrivateVariable
->getType() && "expected variables and their private equivalents to have the same "
"type") ? void (0) : __assert_fail ("RI.Variable->getType() == RI.PrivateVariable->getType() && \"expected variables and their private equivalents to have the same \" \"type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1382, __extension__
__PRETTY_FUNCTION__))
;
1383 assert(RI.Variable->getType()->isPointerTy() &&(static_cast <bool> (RI.Variable->getType()->isPointerTy
() && "expected variables to be pointers") ? void (0)
: __assert_fail ("RI.Variable->getType()->isPointerTy() && \"expected variables to be pointers\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1384, __extension__
__PRETTY_FUNCTION__))
1384 "expected variables to be pointers")(static_cast <bool> (RI.Variable->getType()->isPointerTy
() && "expected variables to be pointers") ? void (0)
: __assert_fail ("RI.Variable->getType()->isPointerTy() && \"expected variables to be pointers\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1384, __extension__
__PRETTY_FUNCTION__))
;
1385 }
1386
1387 if (!updateToLocation(Loc))
1388 return InsertPointTy();
1389
1390 BasicBlock *InsertBlock = Loc.IP.getBlock();
1391 BasicBlock *ContinuationBlock =
1392 InsertBlock->splitBasicBlock(Loc.IP.getPoint(), "reduce.finalize");
1393 InsertBlock->getTerminator()->eraseFromParent();
1394
1395 // Create and populate array of type-erased pointers to private reduction
1396 // values.
1397 unsigned NumReductions = ReductionInfos.size();
1398 Type *RedArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumReductions);
1399 Builder.restoreIP(AllocaIP);
1400 Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array");
1401
1402 Builder.SetInsertPoint(InsertBlock, InsertBlock->end());
1403
1404 for (auto En : enumerate(ReductionInfos)) {
1405 unsigned Index = En.index();
1406 const ReductionInfo &RI = En.value();
1407 Value *RedArrayElemPtr = Builder.CreateConstInBoundsGEP2_64(
1408 RedArrayTy, RedArray, 0, Index, "red.array.elem." + Twine(Index));
1409 Value *Casted =
1410 Builder.CreateBitCast(RI.PrivateVariable, Builder.getInt8PtrTy(),
1411 "private.red.var." + Twine(Index) + ".casted");
1412 Builder.CreateStore(Casted, RedArrayElemPtr);
1413 }
1414
1415 // Emit a call to the runtime function that orchestrates the reduction.
1416 // Declare the reduction function in the process.
1417 Function *Func = Builder.GetInsertBlock()->getParent();
1418 Module *Module = Func->getParent();
1419 Value *RedArrayPtr =
1420 Builder.CreateBitCast(RedArray, Builder.getInt8PtrTy(), "red.array.ptr");
1421 uint32_t SrcLocStrSize;
1422 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1423 bool CanGenerateAtomic =
1424 llvm::all_of(ReductionInfos, [](const ReductionInfo &RI) {
1425 return RI.AtomicReductionGen;
1426 });
1427 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize,
1428 CanGenerateAtomic
1429 ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE
1430 : IdentFlag(0));
1431 Value *ThreadId = getOrCreateThreadID(Ident);
1432 Constant *NumVariables = Builder.getInt32(NumReductions);
1433 const DataLayout &DL = Module->getDataLayout();
1434 unsigned RedArrayByteSize = DL.getTypeStoreSize(RedArrayTy);
1435 Constant *RedArraySize = Builder.getInt64(RedArrayByteSize);
1436 Function *ReductionFunc = getFreshReductionFunc(*Module);
1437 Value *Lock = getOMPCriticalRegionLock(".reduction");
1438 Function *ReduceFunc = getOrCreateRuntimeFunctionPtr(
1439 IsNoWait ? RuntimeFunction::OMPRTL___kmpc_reduce_nowait
1440 : RuntimeFunction::OMPRTL___kmpc_reduce);
1441 CallInst *ReduceCall =
1442 Builder.CreateCall(ReduceFunc,
1443 {Ident, ThreadId, NumVariables, RedArraySize,
1444 RedArrayPtr, ReductionFunc, Lock},
1445 "reduce");
1446
1447 // Create final reduction entry blocks for the atomic and non-atomic case.
1448 // Emit IR that dispatches control flow to one of the blocks based on the
1449 // reduction supporting the atomic mode.
1450 BasicBlock *NonAtomicRedBlock =
1451 BasicBlock::Create(Module->getContext(), "reduce.switch.nonatomic", Func);
1452 BasicBlock *AtomicRedBlock =
1453 BasicBlock::Create(Module->getContext(), "reduce.switch.atomic", Func);
1454 SwitchInst *Switch =
1455 Builder.CreateSwitch(ReduceCall, ContinuationBlock, /* NumCases */ 2);
1456 Switch->addCase(Builder.getInt32(1), NonAtomicRedBlock);
1457 Switch->addCase(Builder.getInt32(2), AtomicRedBlock);
1458
1459 // Populate the non-atomic reduction using the elementwise reduction function.
1460 // This loads the elements from the global and private variables and reduces
1461 // them before storing back the result to the global variable.
1462 Builder.SetInsertPoint(NonAtomicRedBlock);
1463 for (auto En : enumerate(ReductionInfos)) {
1464 const ReductionInfo &RI = En.value();
1465 Type *ValueType = RI.ElementType;
1466 Value *RedValue = Builder.CreateLoad(ValueType, RI.Variable,
1467 "red.value." + Twine(En.index()));
1468 Value *PrivateRedValue =
1469 Builder.CreateLoad(ValueType, RI.PrivateVariable,
1470 "red.private.value." + Twine(En.index()));
1471 Value *Reduced;
1472 Builder.restoreIP(
1473 RI.ReductionGen(Builder.saveIP(), RedValue, PrivateRedValue, Reduced));
1474 if (!Builder.GetInsertBlock())
1475 return InsertPointTy();
1476 Builder.CreateStore(Reduced, RI.Variable);
1477 }
1478 Function *EndReduceFunc = getOrCreateRuntimeFunctionPtr(
1479 IsNoWait ? RuntimeFunction::OMPRTL___kmpc_end_reduce_nowait
1480 : RuntimeFunction::OMPRTL___kmpc_end_reduce);
1481 Builder.CreateCall(EndReduceFunc, {Ident, ThreadId, Lock});
1482 Builder.CreateBr(ContinuationBlock);
1483
1484 // Populate the atomic reduction using the atomic elementwise reduction
1485 // function. There are no loads/stores here because they will be happening
1486 // inside the atomic elementwise reduction.
1487 Builder.SetInsertPoint(AtomicRedBlock);
1488 if (CanGenerateAtomic) {
1489 for (const ReductionInfo &RI : ReductionInfos) {
1490 Builder.restoreIP(RI.AtomicReductionGen(Builder.saveIP(), RI.ElementType,
1491 RI.Variable, RI.PrivateVariable));
1492 if (!Builder.GetInsertBlock())
1493 return InsertPointTy();
1494 }
1495 Builder.CreateBr(ContinuationBlock);
1496 } else {
1497 Builder.CreateUnreachable();
1498 }
1499
1500 // Populate the outlined reduction function using the elementwise reduction
1501 // function. Partial values are extracted from the type-erased array of
1502 // pointers to private variables.
1503 BasicBlock *ReductionFuncBlock =
1504 BasicBlock::Create(Module->getContext(), "", ReductionFunc);
1505 Builder.SetInsertPoint(ReductionFuncBlock);
1506 Value *LHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(0),
1507 RedArrayTy->getPointerTo());
1508 Value *RHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(1),
1509 RedArrayTy->getPointerTo());
1510 for (auto En : enumerate(ReductionInfos)) {
1511 const ReductionInfo &RI = En.value();
1512 Value *LHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
1513 RedArrayTy, LHSArrayPtr, 0, En.index());
1514 Value *LHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), LHSI8PtrPtr);
1515 Value *LHSPtr = Builder.CreateBitCast(LHSI8Ptr, RI.Variable->getType());
1516 Value *LHS = Builder.CreateLoad(RI.ElementType, LHSPtr);
1517 Value *RHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
1518 RedArrayTy, RHSArrayPtr, 0, En.index());
1519 Value *RHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), RHSI8PtrPtr);
1520 Value *RHSPtr =
1521 Builder.CreateBitCast(RHSI8Ptr, RI.PrivateVariable->getType());
1522 Value *RHS = Builder.CreateLoad(RI.ElementType, RHSPtr);
1523 Value *Reduced;
1524 Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced));
1525 if (!Builder.GetInsertBlock())
1526 return InsertPointTy();
1527 Builder.CreateStore(Reduced, LHSPtr);
1528 }
1529 Builder.CreateRetVoid();
1530
1531 Builder.SetInsertPoint(ContinuationBlock);
1532 return Builder.saveIP();
1533}
1534
1535OpenMPIRBuilder::InsertPointTy
1536OpenMPIRBuilder::createMaster(const LocationDescription &Loc,
1537 BodyGenCallbackTy BodyGenCB,
1538 FinalizeCallbackTy FiniCB) {
1539
1540 if (!updateToLocation(Loc))
1541 return Loc.IP;
1542
1543 Directive OMPD = Directive::OMPD_master;
1544 uint32_t SrcLocStrSize;
1545 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1546 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1547 Value *ThreadId = getOrCreateThreadID(Ident);
1548 Value *Args[] = {Ident, ThreadId};
1549
1550 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master);
1551 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1552
1553 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master);
1554 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
1555
1556 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1557 /*Conditional*/ true, /*hasFinalize*/ true);
1558}
1559
1560OpenMPIRBuilder::InsertPointTy
1561OpenMPIRBuilder::createMasked(const LocationDescription &Loc,
1562 BodyGenCallbackTy BodyGenCB,
1563 FinalizeCallbackTy FiniCB, Value *Filter) {
1564 if (!updateToLocation(Loc))
1565 return Loc.IP;
1566
1567 Directive OMPD = Directive::OMPD_masked;
1568 uint32_t SrcLocStrSize;
1569 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
1570 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1571 Value *ThreadId = getOrCreateThreadID(Ident);
1572 Value *Args[] = {Ident, ThreadId, Filter};
1573 Value *ArgsEnd[] = {Ident, ThreadId};
1574
1575 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked);
1576 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1577
1578 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked);
1579 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd);
1580
1581 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1582 /*Conditional*/ true, /*hasFinalize*/ true);
1583}
1584
1585CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
1586 DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
1587 BasicBlock *PostInsertBefore, const Twine &Name) {
1588 Module *M = F->getParent();
1589 LLVMContext &Ctx = M->getContext();
1590 Type *IndVarTy = TripCount->getType();
1591
1592 // Create the basic block structure.
1593 BasicBlock *Preheader =
1594 BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore);
1595 BasicBlock *Header =
1596 BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore);
1597 BasicBlock *Cond =
1598 BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore);
1599 BasicBlock *Body =
1600 BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore);
1601 BasicBlock *Latch =
1602 BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore);
1603 BasicBlock *Exit =
1604 BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore);
1605 BasicBlock *After =
1606 BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore);
1607
1608 // Use specified DebugLoc for new instructions.
1609 Builder.SetCurrentDebugLocation(DL);
1610
1611 Builder.SetInsertPoint(Preheader);
1612 Builder.CreateBr(Header);
1613
1614 Builder.SetInsertPoint(Header);
1615 PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv");
1616 IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader);
1617 Builder.CreateBr(Cond);
1618
1619 Builder.SetInsertPoint(Cond);
1620 Value *Cmp =
1621 Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp");
1622 Builder.CreateCondBr(Cmp, Body, Exit);
1623
1624 Builder.SetInsertPoint(Body);
1625 Builder.CreateBr(Latch);
1626
1627 Builder.SetInsertPoint(Latch);
1628 Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1),
1629 "omp_" + Name + ".next", /*HasNUW=*/true);
1630 Builder.CreateBr(Header);
1631 IndVarPHI->addIncoming(Next, Latch);
1632
1633 Builder.SetInsertPoint(Exit);
1634 Builder.CreateBr(After);
1635
1636 // Remember and return the canonical control flow.
1637 LoopInfos.emplace_front();
1638 CanonicalLoopInfo *CL = &LoopInfos.front();
1639
1640 CL->Header = Header;
1641 CL->Cond = Cond;
1642 CL->Latch = Latch;
1643 CL->Exit = Exit;
1644
1645#ifndef NDEBUG
1646 CL->assertOK();
1647#endif
1648 return CL;
1649}
1650
1651CanonicalLoopInfo *
1652OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc,
1653 LoopBodyGenCallbackTy BodyGenCB,
1654 Value *TripCount, const Twine &Name) {
1655 BasicBlock *BB = Loc.IP.getBlock();
1656 BasicBlock *NextBB = BB->getNextNode();
1657
1658 CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(),
1659 NextBB, NextBB, Name);
1660 BasicBlock *After = CL->getAfter();
1661
1662 // If location is not set, don't connect the loop.
1663 if (updateToLocation(Loc)) {
1664 // Split the loop at the insertion point: Branch to the preheader and move
1665 // every following instruction to after the loop (the After BB). Also, the
1666 // new successor is the loop's after block.
1667 spliceBB(Builder, After, /*CreateBranch=*/false);
1668 Builder.CreateBr(CL->getPreheader());
1669 }
1670
1671 // Emit the body content. We do it after connecting the loop to the CFG to
1672 // avoid that the callback encounters degenerate BBs.
1673 BodyGenCB(CL->getBodyIP(), CL->getIndVar());
1674
1675#ifndef NDEBUG
1676 CL->assertOK();
1677#endif
1678 return CL;
1679}
1680
1681CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop(
1682 const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
1683 Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
1684 InsertPointTy ComputeIP, const Twine &Name) {
1685
1686 // Consider the following difficulties (assuming 8-bit signed integers):
1687 // * Adding \p Step to the loop counter which passes \p Stop may overflow:
1688 // DO I = 1, 100, 50
1689 /// * A \p Step of INT_MIN cannot not be normalized to a positive direction:
1690 // DO I = 100, 0, -128
1691
1692 // Start, Stop and Step must be of the same integer type.
1693 auto *IndVarTy = cast<IntegerType>(Start->getType());
1694 assert(IndVarTy == Stop->getType() && "Stop type mismatch")(static_cast <bool> (IndVarTy == Stop->getType() &&
"Stop type mismatch") ? void (0) : __assert_fail ("IndVarTy == Stop->getType() && \"Stop type mismatch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1694, __extension__
__PRETTY_FUNCTION__))
;
1695 assert(IndVarTy == Step->getType() && "Step type mismatch")(static_cast <bool> (IndVarTy == Step->getType() &&
"Step type mismatch") ? void (0) : __assert_fail ("IndVarTy == Step->getType() && \"Step type mismatch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1695, __extension__
__PRETTY_FUNCTION__))
;
1696
1697 LocationDescription ComputeLoc =
1698 ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
1699 updateToLocation(ComputeLoc);
1700
1701 ConstantInt *Zero = ConstantInt::get(IndVarTy, 0);
1702 ConstantInt *One = ConstantInt::get(IndVarTy, 1);
1703
1704 // Like Step, but always positive.
1705 Value *Incr = Step;
1706
1707 // Distance between Start and Stop; always positive.
1708 Value *Span;
1709
1710 // Condition whether there are no iterations are executed at all, e.g. because
1711 // UB < LB.
1712 Value *ZeroCmp;
1713
1714 if (IsSigned) {
1715 // Ensure that increment is positive. If not, negate and invert LB and UB.
1716 Value *IsNeg = Builder.CreateICmpSLT(Step, Zero);
1717 Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step);
1718 Value *LB = Builder.CreateSelect(IsNeg, Stop, Start);
1719 Value *UB = Builder.CreateSelect(IsNeg, Start, Stop);
1720 Span = Builder.CreateSub(UB, LB, "", false, true);
1721 ZeroCmp = Builder.CreateICmp(
1722 InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB);
1723 } else {
1724 Span = Builder.CreateSub(Stop, Start, "", true);
1725 ZeroCmp = Builder.CreateICmp(
1726 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start);
1727 }
1728
1729 Value *CountIfLooping;
1730 if (InclusiveStop) {
1731 CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One);
1732 } else {
1733 // Avoid incrementing past stop since it could overflow.
1734 Value *CountIfTwo = Builder.CreateAdd(
1735 Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One);
1736 Value *OneCmp = Builder.CreateICmp(
1737 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr);
1738 CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo);
1739 }
1740 Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping,
1741 "omp_" + Name + ".tripcount");
1742
1743 auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) {
1744 Builder.restoreIP(CodeGenIP);
1745 Value *Span = Builder.CreateMul(IV, Step);
1746 Value *IndVar = Builder.CreateAdd(Span, Start);
1747 BodyGenCB(Builder.saveIP(), IndVar);
1748 };
1749 LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP();
1750 return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name);
1751}
1752
1753// Returns an LLVM function to call for initializing loop bounds using OpenMP
1754// static scheduling depending on `type`. Only i32 and i64 are supported by the
1755// runtime. Always interpret integers as unsigned similarly to
1756// CanonicalLoopInfo.
1757static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M,
1758 OpenMPIRBuilder &OMPBuilder) {
1759 unsigned Bitwidth = Ty->getIntegerBitWidth();
1760 if (Bitwidth == 32)
1761 return OMPBuilder.getOrCreateRuntimeFunction(
1762 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u);
1763 if (Bitwidth == 64)
1764 return OMPBuilder.getOrCreateRuntimeFunction(
1765 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u);
1766 llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1766)
;
1767}
1768
// Lower a canonical loop to an OpenMP worksharing loop with an unordered
// static (no chunk) schedule: each thread computes its own contiguous
// iteration range via __kmpc_for_static_init_* and the loop is rewritten to
// iterate over that range only. Returns the insertion point after the loop;
// the passed CanonicalLoopInfo is invalidated.
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                          InsertPointTy AllocaIP,
                                          bool NeedsBarrier) {
  assert(CLI->isValid() && "Requires a valid canonical loop");
  assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) &&
         "Require dedicated allocate IP");

  // Set up the source location value for OpenMP runtime.
  Builder.restoreIP(CLI->getPreheaderIP());
  Builder.SetCurrentDebugLocation(DL);

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
  Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);

  // Declare useful OpenMP runtime functions.
  Value *IV = CLI->getIndVar();
  Type *IVTy = IV->getType();
  // The "init" entry point is bitwidth-specific; "fini" is not.
  FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this);
  FunctionCallee StaticFini =
      getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);

  // Allocate space for computed loop bounds as expected by the "init" function.
  // The runtime communicates the per-thread bounds back through these slots.
  Builder.restoreIP(AllocaIP);
  Type *I32Type = Type::getInt32Ty(M.getContext());
  Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
  Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
  Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
  Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");

  // At the end of the preheader, prepare for calling the "init" function by
  // storing the current loop bounds into the allocated space. A canonical loop
  // always iterates from 0 to trip-count with step 1. Note that "init" expects
  // and produces an inclusive upper bound.
  Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
  Constant *Zero = ConstantInt::get(IVTy, 0);
  Constant *One = ConstantInt::get(IVTy, 1);
  Builder.CreateStore(Zero, PLowerBound);
  // Inclusive upper bound = trip count - 1.
  Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One);
  Builder.CreateStore(UpperBound, PUpperBound);
  Builder.CreateStore(One, PStride);

  Value *ThreadNum = getOrCreateThreadID(SrcLoc);

  Constant *SchedulingType = ConstantInt::get(
      I32Type, static_cast<int>(OMPScheduleType::UnorderedStatic));

  // Call the "init" function and update the trip count of the loop with the
  // value it produced. Last two arguments are incr=1 and chunk=0 (no chunk
  // for the plain static schedule).
  Builder.CreateCall(StaticInit,
                     {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound,
                      PUpperBound, PStride, One, Zero});
  Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound);
  Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound);
  // Per-thread trip count = (inclusive UB - LB) + 1.
  Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound);
  Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One);
  CLI->setTripCount(TripCount);

  // Update all uses of the induction variable except the one in the condition
  // block that compares it with the actual upper bound, and the increment in
  // the latch block.

  CLI->mapIndVar([&](Instruction *OldIV) -> Value * {
    // Shift the 0-based canonical IV by this thread's lower bound so the body
    // sees the original logical iteration number.
    Builder.SetInsertPoint(CLI->getBody(),
                           CLI->getBody()->getFirstInsertionPt());
    Builder.SetCurrentDebugLocation(DL);
    return Builder.CreateAdd(OldIV, LowerBound);
  });

  // In the "exit" block, call the "fini" function.
  Builder.SetInsertPoint(CLI->getExit(),
                         CLI->getExit()->getTerminator()->getIterator());
  Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});

  // Add the barrier if requested.
  if (NeedsBarrier)
    createBarrier(LocationDescription(Builder.saveIP(), DL),
                  omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
                  /* CheckCancelFlag */ false);

  // Capture the after-IP before invalidating: the loop no longer satisfies
  // the canonical-loop invariants after the rewrites above.
  InsertPointTy AfterIP = CLI->getAfterIP();
  CLI->invalidate();

  return AfterIP;
}
1855
1856OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyStaticChunkedWorkshareLoop(
1857 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
1858 bool NeedsBarrier, Value *ChunkSize) {
1859 assert(CLI->isValid() && "Requires a valid canonical loop")(static_cast <bool> (CLI->isValid() && "Requires a valid canonical loop"
) ? void (0) : __assert_fail ("CLI->isValid() && \"Requires a valid canonical loop\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1859, __extension__
__PRETTY_FUNCTION__))
;
1860 assert(ChunkSize && "Chunk size is required")(static_cast <bool> (ChunkSize && "Chunk size is required"
) ? void (0) : __assert_fail ("ChunkSize && \"Chunk size is required\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1860, __extension__
__PRETTY_FUNCTION__))
;
1861
1862 LLVMContext &Ctx = CLI->getFunction()->getContext();
1863 Value *IV = CLI->getIndVar();
1864 Value *OrigTripCount = CLI->getTripCount();
1865 Type *IVTy = IV->getType();
1866 assert(IVTy->getIntegerBitWidth() <= 64 &&(static_cast <bool> (IVTy->getIntegerBitWidth() <=
64 && "Max supported tripcount bitwidth is 64 bits")
? void (0) : __assert_fail ("IVTy->getIntegerBitWidth() <= 64 && \"Max supported tripcount bitwidth is 64 bits\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1867, __extension__
__PRETTY_FUNCTION__))
1867 "Max supported tripcount bitwidth is 64 bits")(static_cast <bool> (IVTy->getIntegerBitWidth() <=
64 && "Max supported tripcount bitwidth is 64 bits")
? void (0) : __assert_fail ("IVTy->getIntegerBitWidth() <= 64 && \"Max supported tripcount bitwidth is 64 bits\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1867, __extension__
__PRETTY_FUNCTION__))
;
1868 Type *InternalIVTy = IVTy->getIntegerBitWidth() <= 32 ? Type::getInt32Ty(Ctx)
1869 : Type::getInt64Ty(Ctx);
1870 Type *I32Type = Type::getInt32Ty(M.getContext());
1871 Constant *Zero = ConstantInt::get(InternalIVTy, 0);
1872 Constant *One = ConstantInt::get(InternalIVTy, 1);
1873
1874 // Declare useful OpenMP runtime functions.
1875 FunctionCallee StaticInit =
1876 getKmpcForStaticInitForType(InternalIVTy, M, *this);
1877 FunctionCallee StaticFini =
1878 getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);
1879
1880 // Allocate space for computed loop bounds as expected by the "init" function.
1881 Builder.restoreIP(AllocaIP);
1882 Builder.SetCurrentDebugLocation(DL);
1883 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
1884 Value *PLowerBound =
1885 Builder.CreateAlloca(InternalIVTy, nullptr, "p.lowerbound");
1886 Value *PUpperBound =
1887 Builder.CreateAlloca(InternalIVTy, nullptr, "p.upperbound");
1888 Value *PStride = Builder.CreateAlloca(InternalIVTy, nullptr, "p.stride");
1889
1890 // Set up the source location value for the OpenMP runtime.
1891 Builder.restoreIP(CLI->getPreheaderIP());
1892 Builder.SetCurrentDebugLocation(DL);
1893
1894 // TODO: Detect overflow in ubsan or max-out with current tripcount.
1895 Value *CastedChunkSize =
1896 Builder.CreateZExtOrTrunc(ChunkSize, InternalIVTy, "chunksize");
1897 Value *CastedTripCount =
1898 Builder.CreateZExt(OrigTripCount, InternalIVTy, "tripcount");
1899
1900 Constant *SchedulingType = ConstantInt::get(
1901 I32Type, static_cast<int>(OMPScheduleType::UnorderedStaticChunked));
1902 Builder.CreateStore(Zero, PLowerBound);
1903 Value *OrigUpperBound = Builder.CreateSub(CastedTripCount, One);
1904 Builder.CreateStore(OrigUpperBound, PUpperBound);
1905 Builder.CreateStore(One, PStride);
1906
1907 // Call the "init" function and update the trip count of the loop with the
1908 // value it produced.
1909 uint32_t SrcLocStrSize;
1910 Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
1911 Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
1912 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
1913 Builder.CreateCall(StaticInit,
1914 {/*loc=*/SrcLoc, /*global_tid=*/ThreadNum,
1915 /*schedtype=*/SchedulingType, /*plastiter=*/PLastIter,
1916 /*plower=*/PLowerBound, /*pupper=*/PUpperBound,
1917 /*pstride=*/PStride, /*incr=*/One,
1918 /*chunk=*/CastedChunkSize});
1919
1920 // Load values written by the "init" function.
1921 Value *FirstChunkStart =
1922 Builder.CreateLoad(InternalIVTy, PLowerBound, "omp_firstchunk.lb");
1923 Value *FirstChunkStop =
1924 Builder.CreateLoad(InternalIVTy, PUpperBound, "omp_firstchunk.ub");
1925 Value *FirstChunkEnd = Builder.CreateAdd(FirstChunkStop, One);
1926 Value *ChunkRange =
1927 Builder.CreateSub(FirstChunkEnd, FirstChunkStart, "omp_chunk.range");
1928 Value *NextChunkStride =
1929 Builder.CreateLoad(InternalIVTy, PStride, "omp_dispatch.stride");
1930
1931 // Create outer "dispatch" loop for enumerating the chunks.
1932 BasicBlock *DispatchEnter = splitBB(Builder, true);
1933 Value *DispatchCounter;
1934 CanonicalLoopInfo *DispatchCLI = createCanonicalLoop(
1935 {Builder.saveIP(), DL},
1936 [&](InsertPointTy BodyIP, Value *Counter) { DispatchCounter = Counter; },
1937 FirstChunkStart, CastedTripCount, NextChunkStride,
1938 /*IsSigned=*/false, /*InclusiveStop=*/false, /*ComputeIP=*/{},
1939 "dispatch");
1940
1941 // Remember the BasicBlocks of the dispatch loop we need, then invalidate to
1942 // not have to preserve the canonical invariant.
1943 BasicBlock *DispatchBody = DispatchCLI->getBody();
1944 BasicBlock *DispatchLatch = DispatchCLI->getLatch();
1945 BasicBlock *DispatchExit = DispatchCLI->getExit();
1946 BasicBlock *DispatchAfter = DispatchCLI->getAfter();
1947 DispatchCLI->invalidate();
1948
1949 // Rewire the original loop to become the chunk loop inside the dispatch loop.
1950 redirectTo(DispatchAfter, CLI->getAfter(), DL);
1951 redirectTo(CLI->getExit(), DispatchLatch, DL);
1952 redirectTo(DispatchBody, DispatchEnter, DL);
1953
1954 // Prepare the prolog of the chunk loop.
1955 Builder.restoreIP(CLI->getPreheaderIP());
1956 Builder.SetCurrentDebugLocation(DL);
1957
1958 // Compute the number of iterations of the chunk loop.
1959 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
1960 Value *ChunkEnd = Builder.CreateAdd(DispatchCounter, ChunkRange);
1961 Value *IsLastChunk =
1962 Builder.CreateICmpUGE(ChunkEnd, CastedTripCount, "omp_chunk.is_last");
1963 Value *CountUntilOrigTripCount =
1964 Builder.CreateSub(CastedTripCount, DispatchCounter);
1965 Value *ChunkTripCount = Builder.CreateSelect(
1966 IsLastChunk, CountUntilOrigTripCount, ChunkRange, "omp_chunk.tripcount");
1967 Value *BackcastedChunkTC =
1968 Builder.CreateTrunc(ChunkTripCount, IVTy, "omp_chunk.tripcount.trunc");
1969 CLI->setTripCount(BackcastedChunkTC);
1970
1971 // Update all uses of the induction variable except the one in the condition
1972 // block that compares it with the actual upper bound, and the increment in
1973 // the latch block.
1974 Value *BackcastedDispatchCounter =
1975 Builder.CreateTrunc(DispatchCounter, IVTy, "omp_dispatch.iv.trunc");
1976 CLI->mapIndVar([&](Instruction *) -> Value * {
1977 Builder.restoreIP(CLI->getBodyIP());
1978 return Builder.CreateAdd(IV, BackcastedDispatchCounter);
1979 });
1980
1981 // In the "exit" block, call the "fini" function.
1982 Builder.SetInsertPoint(DispatchExit, DispatchExit->getFirstInsertionPt());
1983 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
1984
1985 // Add the barrier if requested.
1986 if (NeedsBarrier)
1987 createBarrier(LocationDescription(Builder.saveIP(), DL), OMPD_for,
1988 /*ForceSimpleCall=*/false, /*CheckCancelFlag=*/false);
1989
1990#ifndef NDEBUG
1991 // Even though we currently do not support applying additional methods to it,
1992 // the chunk loop should remain a canonical loop.
1993 CLI->assertOK();
1994#endif
1995
1996 return {DispatchAfter, DispatchAfter->getFirstInsertionPt()};
1997}
1998
// Dispatcher that picks the concrete worksharing-loop lowering (static,
// static-chunked, or dynamic) from the schedule clause and its modifiers.
// Ordered schedules are always routed through the dynamic lowering.
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyWorkshareLoop(
    DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
    bool NeedsBarrier, llvm::omp::ScheduleKind SchedKind,
    llvm::Value *ChunkSize, bool HasSimdModifier, bool HasMonotonicModifier,
    bool HasNonmonotonicModifier, bool HasOrderedClause) {
  // Fold clause + modifiers into a single runtime schedule-type value.
  OMPScheduleType EffectiveScheduleType = computeOpenMPScheduleType(
      SchedKind, ChunkSize, HasSimdModifier, HasMonotonicModifier,
      HasNonmonotonicModifier, HasOrderedClause);

  bool IsOrdered = (EffectiveScheduleType & OMPScheduleType::ModifierOrdered) ==
                   OMPScheduleType::ModifierOrdered;
  // Switch on the base schedule with all modifier bits masked out.
  switch (EffectiveScheduleType & ~OMPScheduleType::ModifierMask) {
  case OMPScheduleType::BaseStatic:
    assert(!ChunkSize && "No chunk size with static-chunked schedule");
    if (IsOrdered)
      return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType,
                                       NeedsBarrier, ChunkSize);
    // FIXME: Monotonicity ignored?
    return applyStaticWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier);

  case OMPScheduleType::BaseStaticChunked:
    if (IsOrdered)
      return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType,
                                       NeedsBarrier, ChunkSize);
    // FIXME: Monotonicity ignored?
    return applyStaticChunkedWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier,
                                           ChunkSize);

  // Schedules without a user-specified chunk size fall through to the
  // dynamic lowering with ChunkSize == nullptr.
  case OMPScheduleType::BaseRuntime:
  case OMPScheduleType::BaseAuto:
  case OMPScheduleType::BaseGreedy:
  case OMPScheduleType::BaseBalanced:
  case OMPScheduleType::BaseSteal:
  case OMPScheduleType::BaseGuidedSimd:
  case OMPScheduleType::BaseRuntimeSimd:
    assert(!ChunkSize &&
           "schedule type does not support user-defined chunk sizes");
    LLVM_FALLTHROUGH;
  case OMPScheduleType::BaseDynamicChunked:
  case OMPScheduleType::BaseGuidedChunked:
  case OMPScheduleType::BaseGuidedIterativeChunked:
  case OMPScheduleType::BaseGuidedAnalyticalChunked:
  case OMPScheduleType::BaseStaticBalancedChunked:
    return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType,
                                     NeedsBarrier, ChunkSize);

  default:
    llvm_unreachable("Unknown/unimplemented schedule kind");
  }
}
2049
2050/// Returns an LLVM function to call for initializing loop bounds using OpenMP
2051/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
2052/// the runtime. Always interpret integers as unsigned similarly to
2053/// CanonicalLoopInfo.
2054static FunctionCallee
2055getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
2056 unsigned Bitwidth = Ty->getIntegerBitWidth();
2057 if (Bitwidth == 32)
2058 return OMPBuilder.getOrCreateRuntimeFunction(
2059 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u);
2060 if (Bitwidth == 64)
2061 return OMPBuilder.getOrCreateRuntimeFunction(
2062 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u);
2063 llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2063)
;
2064}
2065
2066/// Returns an LLVM function to call for updating the next loop using OpenMP
2067/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
2068/// the runtime. Always interpret integers as unsigned similarly to
2069/// CanonicalLoopInfo.
2070static FunctionCallee
2071getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
2072 unsigned Bitwidth = Ty->getIntegerBitWidth();
2073 if (Bitwidth == 32)
2074 return OMPBuilder.getOrCreateRuntimeFunction(
2075 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u);
2076 if (Bitwidth == 64)
2077 return OMPBuilder.getOrCreateRuntimeFunction(
2078 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u);
2079 llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2079)
;
2080}
2081
2082/// Returns an LLVM function to call for finalizing the dynamic loop using
2083/// depending on `type`. Only i32 and i64 are supported by the runtime. Always
2084/// interpret integers as unsigned similarly to CanonicalLoopInfo.
2085static FunctionCallee
2086getKmpcForDynamicFiniForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
2087 unsigned Bitwidth = Ty->getIntegerBitWidth();
2088 if (Bitwidth == 32)
2089 return OMPBuilder.getOrCreateRuntimeFunction(
2090 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_fini_4u);
2091 if (Bitwidth == 64)
2092 return OMPBuilder.getOrCreateRuntimeFunction(
2093 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_fini_8u);
2094 llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2094)
;
2095}
2096
// Lower a canonical loop to an OpenMP worksharing loop with a dynamic-family
// schedule: an outer loop repeatedly calls __kmpc_dispatch_next_* to fetch
// the next chunk's bounds and re-enters the (rewired) original loop for each
// chunk. The passed CanonicalLoopInfo is invalidated; returns the insertion
// point after the loop.
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
    DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
    OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk) {
  assert(CLI->isValid() && "Requires a valid canonical loop");
  assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) &&
         "Require dedicated allocate IP");
  assert(isValidWorkshareLoopScheduleType(SchedType) &&
         "Require valid schedule type");

  bool Ordered = (SchedType & OMPScheduleType::ModifierOrdered) ==
                 OMPScheduleType::ModifierOrdered;

  // Set up the source location value for OpenMP runtime.
  Builder.SetCurrentDebugLocation(DL);

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
  Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);

  // Declare useful OpenMP runtime functions.
  Value *IV = CLI->getIndVar();
  Type *IVTy = IV->getType();
  FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this);
  FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this);

  // Allocate space for computed loop bounds as expected by the "init" function.
  Builder.restoreIP(AllocaIP);
  Type *I32Type = Type::getInt32Ty(M.getContext());
  Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
  Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
  Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
  Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");

  // At the end of the preheader, prepare for calling the "init" function by
  // storing the current loop bounds into the allocated space. A canonical loop
  // always iterates from 0 to trip-count with step 1. Note that "init" expects
  // and produces an inclusive upper bound.
  BasicBlock *PreHeader = CLI->getPreheader();
  Builder.SetInsertPoint(PreHeader->getTerminator());
  Constant *One = ConstantInt::get(IVTy, 1);
  // The dynamic dispatch protocol here uses 1-based bounds (lower bound 1,
  // upper bound == trip count); LowerBound is shifted back below.
  Builder.CreateStore(One, PLowerBound);
  Value *UpperBound = CLI->getTripCount();
  Builder.CreateStore(UpperBound, PUpperBound);
  Builder.CreateStore(One, PStride);

  BasicBlock *Header = CLI->getHeader();
  BasicBlock *Exit = CLI->getExit();
  BasicBlock *Cond = CLI->getCond();
  BasicBlock *Latch = CLI->getLatch();
  InsertPointTy AfterIP = CLI->getAfterIP();

  // The CLI will be "broken" in the code below, as the loop is no longer
  // a valid canonical loop.

  // The runtime treats a null/absent chunk as chunk size 1.
  if (!Chunk)
    Chunk = One;

  Value *ThreadNum = getOrCreateThreadID(SrcLoc);

  Constant *SchedulingType =
      ConstantInt::get(I32Type, static_cast<int>(SchedType));

  // Call the "init" function.
  Builder.CreateCall(DynamicInit,
                     {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One,
                      UpperBound, /* step */ One, Chunk});

  // An outer loop around the existing one.
  BasicBlock *OuterCond = BasicBlock::Create(
      PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond",
      PreHeader->getParent());
  // This needs to be 32-bit always, so can't use the IVTy Zero above.
  Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt());
  // "next" returns nonzero while more chunks remain and fills in the bound
  // slots for the next chunk.
  Value *Res =
      Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter,
                                       PLowerBound, PUpperBound, PStride});
  Constant *Zero32 = ConstantInt::get(I32Type, 0);
  Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32);
  // Shift the 1-based lower bound back to the loop's 0-based IV domain.
  Value *LowerBound =
      Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb");
  Builder.CreateCondBr(MoreWork, Header, Exit);

  // Change PHI-node in loop header to use outer cond rather than preheader,
  // and set IV to the LowerBound.
  Instruction *Phi = &Header->front();
  auto *PI = cast<PHINode>(Phi);
  PI->setIncomingBlock(0, OuterCond);
  PI->setIncomingValue(0, LowerBound);

  // Then set the pre-header to jump to the OuterCond
  Instruction *Term = PreHeader->getTerminator();
  auto *Br = cast<BranchInst>(Term);
  Br->setSuccessor(0, OuterCond);

  // Modify the inner condition:
  // * Use the UpperBound returned from the DynamicNext call.
  // * jump to the loop outer loop when done with one of the inner loops.
  Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt());
  UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub");
  Instruction *Comp = &*Builder.GetInsertPoint();
  auto *CI = cast<CmpInst>(Comp);
  CI->setOperand(1, UpperBound);
  // Redirect the inner exit to branch to outer condition.
  Instruction *Branch = &Cond->back();
  auto *BI = cast<BranchInst>(Branch);
  assert(BI->getSuccessor(1) == Exit);
  BI->setSuccessor(1, OuterCond);

  // Call the "fini" function if "ordered" is present in wsloop directive.
  if (Ordered) {
    Builder.SetInsertPoint(&Latch->back());
    FunctionCallee DynamicFini = getKmpcForDynamicFiniForType(IVTy, M, *this);
    Builder.CreateCall(DynamicFini, {SrcLoc, ThreadNum});
  }

  // Add the barrier if requested.
  if (NeedsBarrier) {
    Builder.SetInsertPoint(&Exit->back());
    createBarrier(LocationDescription(Builder.saveIP(), DL),
                  omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
                  /* CheckCancelFlag */ false);
  }

  CLI->invalidate();
  return AfterIP;
}
2223
2224/// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is,
2225/// after this \p OldTarget will be orphaned.
2226static void redirectAllPredecessorsTo(BasicBlock *OldTarget,
2227 BasicBlock *NewTarget, DebugLoc DL) {
2228 for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget)))
2229 redirectTo(Pred, NewTarget, DL);
2230}
2231
2232/// Determine which blocks in \p BBs are reachable from outside and remove the
2233/// ones that are not reachable from the function.
2234static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) {
2235 SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()};
2236 auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) {
2237 for (Use &U : BB->uses()) {
2238 auto *UseInst = dyn_cast<Instruction>(U.getUser());
2239 if (!UseInst)
2240 continue;
2241 if (BBsToErase.count(UseInst->getParent()))
2242 continue;
2243 return true;
2244 }
2245 return false;
2246 };
2247
2248 while (true) {
2249 bool Changed = false;
2250 for (BasicBlock *BB : make_early_inc_range(BBsToErase)) {
2251 if (HasRemainingUses(BB)) {
2252 BBsToErase.erase(BB);
2253 Changed = true;
2254 }
2255 }
2256 if (!Changed)
2257 break;
2258 }
2259
2260 SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end());
2261 DeleteDeadBlocks(BBVec);
2262}
2263
2264CanonicalLoopInfo *
2265OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
2266 InsertPointTy ComputeIP) {
2267 assert(Loops.size() >= 1 && "At least one loop required")(static_cast <bool> (Loops.size() >= 1 && "At least one loop required"
) ? void (0) : __assert_fail ("Loops.size() >= 1 && \"At least one loop required\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2267, __extension__
__PRETTY_FUNCTION__))
;
2268 size_t NumLoops = Loops.size();
2269
2270 // Nothing to do if there is already just one loop.
2271 if (NumLoops == 1)
2272 return Loops.front();
2273
2274 CanonicalLoopInfo *Outermost = Loops.front();
2275 CanonicalLoopInfo *Innermost = Loops.back();
2276 BasicBlock *OrigPreheader = Outermost->getPreheader();
2277 BasicBlock *OrigAfter = Outermost->getAfter();
2278 Function *F = OrigPreheader->getParent();
2279
2280 // Loop control blocks that may become orphaned later.
2281 SmallVector<BasicBlock *, 12> OldControlBBs;
2282 OldControlBBs.reserve(6 * Loops.size());
2283 for (CanonicalLoopInfo *Loop : Loops)
2284 Loop->collectControlBlocks(OldControlBBs);
2285
2286 // Setup the IRBuilder for inserting the trip count computation.
2287 Builder.SetCurrentDebugLocation(DL);
2288 if (ComputeIP.isSet())
2289 Builder.restoreIP(ComputeIP);
2290 else
2291 Builder.restoreIP(Outermost->getPreheaderIP());
2292
2293 // Derive the collapsed' loop trip count.
2294 // TODO: Find common/largest indvar type.
2295 Value *CollapsedTripCount = nullptr;
2296 for (CanonicalLoopInfo *L : Loops) {
2297 assert(L->isValid() &&(static_cast <bool> (L->isValid() && "All loops to collapse must be valid canonical loops"
) ? void (0) : __assert_fail ("L->isValid() && \"All loops to collapse must be valid canonical loops\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2298, __extension__
__PRETTY_FUNCTION__))
2298 "All loops to collapse must be valid canonical loops")(static_cast <bool> (L->isValid() && "All loops to collapse must be valid canonical loops"
) ? void (0) : __assert_fail ("L->isValid() && \"All loops to collapse must be valid canonical loops\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2298, __extension__
__PRETTY_FUNCTION__))
;
2299 Value *OrigTripCount = L->getTripCount();
2300 if (!CollapsedTripCount) {
2301 CollapsedTripCount = OrigTripCount;
2302 continue;
2303 }
2304
2305 // TODO: Enable UndefinedSanitizer to diagnose an overflow here.
2306 CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount,
2307 {}, /*HasNUW=*/true);
2308 }
2309
2310 // Create the collapsed loop control flow.
2311 CanonicalLoopInfo *Result =
2312 createLoopSkeleton(DL, CollapsedTripCount, F,
2313 OrigPreheader->getNextNode(), OrigAfter, "collapsed");
2314
2315 // Build the collapsed loop body code.
2316 // Start with deriving the input loop induction variables from the collapsed
2317 // one, using a divmod scheme. To preserve the original loops' order, the
2318 // innermost loop use the least significant bits.
2319 Builder.restoreIP(Result->getBodyIP());
2320
2321 Value *Leftover = Result->getIndVar();
2322 SmallVector<Value *> NewIndVars;
2323 NewIndVars.resize(NumLoops);
2324 for (int i = NumLoops - 1; i >= 1; --i) {
2325 Value *OrigTripCount = Loops[i]->getTripCount();
2326
2327 Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount);
2328 NewIndVars[i] = NewIndVar;
2329
2330 Leftover = Builder.CreateUDiv(Leftover, OrigTripCount);
2331 }
2332 // Outermost loop gets all the remaining bits.
2333 NewIndVars[0] = Leftover;
2334
2335 // Construct the loop body control flow.
2336 // We progressively construct the branch structure following in direction of
2337 // the control flow, from the leading in-between code, the loop nest body, the
2338 // trailing in-between code, and rejoining the collapsed loop's latch.
2339 // ContinueBlock and ContinuePred keep track of the source(s) of next edge. If
2340 // the ContinueBlock is set, continue with that block. If ContinuePred, use
2341 // its predecessors as sources.
2342 BasicBlock *ContinueBlock = Result->getBody();
2343 BasicBlock *ContinuePred = nullptr;
2344 auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest,
2345 BasicBlock *NextSrc) {
2346 if (ContinueBlock)
2347 redirectTo(ContinueBlock, Dest, DL);
2348 else
2349 redirectAllPredecessorsTo(ContinuePred, Dest, DL);
2350
2351 ContinueBlock = nullptr;
2352 ContinuePred = NextSrc;
2353 };
2354
2355 // The code before the nested loop of each level.
2356 // Because we are sinking it into the nest, it will be executed more often
2357 // that the original loop. More sophisticated schemes could keep track of what
2358 // the in-between code is and instantiate it only once per thread.
2359 for (size_t i = 0; i < NumLoops - 1; ++i)
2360 ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader());
2361
2362 // Connect the loop nest body.
2363 ContinueWith(Innermost->getBody(), Innermost->getLatch());
2364
2365 // The code after the nested loop at each level.
2366 for (size_t i = NumLoops - 1; i > 0; --i)
2367 ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch());
2368
2369 // Connect the finished loop to the collapsed loop latch.
2370 ContinueWith(Result->getLatch(), nullptr);
2371
2372 // Replace the input loops with the new collapsed loop.
2373 redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL);
2374 redirectTo(Result->getAfter(), Outermost->getAfter(), DL);
2375
2376 // Replace the input loop indvars with the derived ones.
2377 for (size_t i = 0; i < NumLoops; ++i)
2378 Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]);
2379
2380 // Remove unused parts of the input loops.
2381 removeUnusedBlocksFromParent(OldControlBBs);
2382
2383 for (CanonicalLoopInfo *L : Loops)
2384 L->invalidate();
2385
2386#ifndef NDEBUG
2387 Result->assertOK();
2388#endif
2389 return Result;
2390}
2391
// NOTE(review): Returns the generated loops with the "floor" loops at indices
// [0, NumLoops) followed by the "tile" loops at [NumLoops, 2*NumLoops), as
// established by Result.reserve(NumLoops * 2) and the Result[NumLoops + i]
// indexing below. All input CanonicalLoopInfos are invalidated before
// returning.
2392std::vector<CanonicalLoopInfo *>
2393OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
2394 ArrayRef<Value *> TileSizes) {
2395 assert(TileSizes.size() == Loops.size() &&(static_cast <bool> (TileSizes.size() == Loops.size() &&
"Must pass as many tile sizes as there are loops") ? void (0
) : __assert_fail ("TileSizes.size() == Loops.size() && \"Must pass as many tile sizes as there are loops\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2396, __extension__
__PRETTY_FUNCTION__))
2396 "Must pass as many tile sizes as there are loops")(static_cast <bool> (TileSizes.size() == Loops.size() &&
"Must pass as many tile sizes as there are loops") ? void (0
) : __assert_fail ("TileSizes.size() == Loops.size() && \"Must pass as many tile sizes as there are loops\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2396, __extension__
__PRETTY_FUNCTION__))
;
2397 int NumLoops = Loops.size();
2398 assert(NumLoops >= 1 && "At least one loop to tile required")(static_cast <bool> (NumLoops >= 1 && "At least one loop to tile required"
) ? void (0) : __assert_fail ("NumLoops >= 1 && \"At least one loop to tile required\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2398, __extension__
__PRETTY_FUNCTION__))
;
2399
2400 CanonicalLoopInfo *OutermostLoop = Loops.front();
2401 CanonicalLoopInfo *InnermostLoop = Loops.back();
2402 Function *F = OutermostLoop->getBody()->getParent();
2403 BasicBlock *InnerEnter = InnermostLoop->getBody();
2404 BasicBlock *InnerLatch = InnermostLoop->getLatch();
2405
2406 // Loop control blocks that may become orphaned later.
2407 SmallVector<BasicBlock *, 12> OldControlBBs;
2408 OldControlBBs.reserve(6 * Loops.size());
2409 for (CanonicalLoopInfo *Loop : Loops)
2410 Loop->collectControlBlocks(OldControlBBs);
2411
2412 // Collect original trip counts and induction variable to be accessible by
2413 // index. Also, the structure of the original loops is not preserved during
2414 // the construction of the tiled loops, so do it before we scavenge the BBs of
2415 // any original CanonicalLoopInfo.
2416 SmallVector<Value *, 4> OrigTripCounts, OrigIndVars;
2417 for (CanonicalLoopInfo *L : Loops) {
2418 assert(L->isValid() && "All input loops must be valid canonical loops")(static_cast <bool> (L->isValid() && "All input loops must be valid canonical loops"
) ? void (0) : __assert_fail ("L->isValid() && \"All input loops must be valid canonical loops\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2418, __extension__
__PRETTY_FUNCTION__))
;
2419 OrigTripCounts.push_back(L->getTripCount());
2420 OrigIndVars.push_back(L->getIndVar());
2421 }
2422
2423 // Collect the code between loop headers. These may contain SSA definitions
2424 // that are used in the loop nest body. To be usable with in the innermost
2425 // body, these BasicBlocks will be sunk into the loop nest body. That is,
2426 // these instructions may be executed more often than before the tiling.
2427 // TODO: It would be sufficient to only sink them into body of the
2428 // corresponding tile loop.
2429 SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode;
2430 for (int i = 0; i < NumLoops - 1; ++i) {
2431 CanonicalLoopInfo *Surrounding = Loops[i];
2432 CanonicalLoopInfo *Nested = Loops[i + 1];
2433
2434 BasicBlock *EnterBB = Surrounding->getBody();
2435 BasicBlock *ExitBB = Nested->getHeader();
2436 InbetweenCode.emplace_back(EnterBB, ExitBB);
2437 }
2438
2439 // Compute the trip counts of the floor loops.
2440 Builder.SetCurrentDebugLocation(DL);
2441 Builder.restoreIP(OutermostLoop->getPreheaderIP());
2442 SmallVector<Value *, 4> FloorCount, FloorRems;
2443 for (int i = 0; i < NumLoops; ++i) {
2444 Value *TileSize = TileSizes[i];
2445 Value *OrigTripCount = OrigTripCounts[i];
2446 Type *IVType = OrigTripCount->getType();
2447
2448 Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize);
2449 Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize);
2450
2451 // 0 if tripcount divides the tilesize, 1 otherwise.
2452 // 1 means we need an additional iteration for a partial tile.
2453 //
2454 // Unfortunately we cannot just use the roundup-formula
2455 // (tripcount + tilesize - 1)/tilesize
2456 // because the summation might overflow. We do not want introduce undefined
2457 // behavior when the untiled loop nest did not.
2458 Value *FloorTripOverflow =
2459 Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0));
2460
2461 FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType);
2462 FloorTripCount =
2463 Builder.CreateAdd(FloorTripCount, FloorTripOverflow,
2464 "omp_floor" + Twine(i) + ".tripcount", true);
2465
2466 // Remember some values for later use.
2467 FloorCount.push_back(FloorTripCount);
2468 FloorRems.push_back(FloorTripRem);
2469 }
2470
2471 // Generate the new loop nest, from the outermost to the innermost.
2472 std::vector<CanonicalLoopInfo *> Result;
2473 Result.reserve(NumLoops * 2);
2474
2475 // The basic block of the surrounding loop that enters the nest generated
2476 // loop.
2477 BasicBlock *Enter = OutermostLoop->getPreheader();
2478
2479 // The basic block of the surrounding loop where the inner code should
2480 // continue.
2481 BasicBlock *Continue = OutermostLoop->getAfter();
2482
2483 // Where the next loop basic block should be inserted.
2484 BasicBlock *OutroInsertBefore = InnermostLoop->getExit();
2485
// Creates one new loop at the current nest position. It mutates the captured
// Enter/Continue/OutroInsertBefore references, so each successive call embeds
// its loop inside the one created by the previous call.
2486 auto EmbeddNewLoop =
2487 [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore](
2488 Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * {
2489 CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton(
2490 DL, TripCount, F, InnerEnter, OutroInsertBefore, Name);
2491 redirectTo(Enter, EmbeddedLoop->getPreheader(), DL);
2492 redirectTo(EmbeddedLoop->getAfter(), Continue, DL);
2493
2494 // Setup the position where the next embedded loop connects to this loop.
2495 Enter = EmbeddedLoop->getBody();
2496 Continue = EmbeddedLoop->getLatch();
2497 OutroInsertBefore = EmbeddedLoop->getLatch();
2498 return EmbeddedLoop;
2499 };
2500
2501 auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts,
2502 const Twine &NameBase) {
2503 for (auto P : enumerate(TripCounts)) {
2504 CanonicalLoopInfo *EmbeddedLoop =
2505 EmbeddNewLoop(P.value(), NameBase + Twine(P.index()));
2506 Result.push_back(EmbeddedLoop);
2507 }
2508 };
2509
2510 EmbeddNewLoops(FloorCount, "floor");
2511
2512 // Within the innermost floor loop, emit the code that computes the tile
2513 // sizes.
2514 Builder.SetInsertPoint(Enter->getTerminator());
2515 SmallVector<Value *, 4> TileCounts;
2516 for (int i = 0; i < NumLoops; ++i) {
2517 CanonicalLoopInfo *FloorLoop = Result[i];
2518 Value *TileSize = TileSizes[i];
2519
2520 Value *FloorIsEpilogue =
2521 Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]);
2522 Value *TileTripCount =
2523 Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize);
2524
2525 TileCounts.push_back(TileTripCount);
2526 }
2527
2528 // Create the tile loops.
2529 EmbeddNewLoops(TileCounts, "tile");
2530
2531 // Insert the inbetween code into the body.
2532 BasicBlock *BodyEnter = Enter;
2533 BasicBlock *BodyEntered = nullptr;
2534 for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) {
2535 BasicBlock *EnterBB = P.first;
2536 BasicBlock *ExitBB = P.second;
2537
2538 if (BodyEnter)
2539 redirectTo(BodyEnter, EnterBB, DL);
2540 else
2541 redirectAllPredecessorsTo(BodyEntered, EnterBB, DL);
2542
2543 BodyEnter = nullptr;
2544 BodyEntered = ExitBB;
2545 }
2546
2547 // Append the original loop nest body into the generated loop nest body.
2548 if (BodyEnter)
2549 redirectTo(BodyEnter, InnerEnter, DL);
2550 else
2551 redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL);
2552 redirectAllPredecessorsTo(InnerLatch, Continue, DL);
2553
2554 // Replace the original induction variable with an induction variable computed
2555 // from the tile and floor induction variables.
2556 Builder.restoreIP(Result.back()->getBodyIP());
2557 for (int i = 0; i < NumLoops; ++i) {
2558 CanonicalLoopInfo *FloorLoop = Result[i];
2559 CanonicalLoopInfo *TileLoop = Result[NumLoops + i];
2560 Value *OrigIndVar = OrigIndVars[i];
2561 Value *Size = TileSizes[i];
2562
2563 Value *Scale =
2564 Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true);
2565 Value *Shift =
2566 Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true);
2567 OrigIndVar->replaceAllUsesWith(Shift);
2568 }
2569
2570 // Remove unused parts of the original loops.
2571 removeUnusedBlocksFromParent(OldControlBBs);
2572
2573 for (CanonicalLoopInfo *L : Loops)
2574 L->invalidate();
2575
2576#ifndef NDEBUG
2577 for (CanonicalLoopInfo *GenL : Result)
2578 GenL->assertOK();
2579#endif
2580 return Result;
2581}
2582
2583/// Attach loop metadata \p Properties to the loop described by \p Loop. If the
2584/// loop already has metadata, the loop properties are appended.
2585static void addLoopMetadata(CanonicalLoopInfo *Loop,
2586 ArrayRef<Metadata *> Properties) {
2587 assert(Loop->isValid() && "Expecting a valid CanonicalLoopInfo")(static_cast <bool> (Loop->isValid() && "Expecting a valid CanonicalLoopInfo"
) ? void (0) : __assert_fail ("Loop->isValid() && \"Expecting a valid CanonicalLoopInfo\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2587, __extension__
__PRETTY_FUNCTION__))
;
2588
2589 // Nothing to do if no property to attach.
2590 if (Properties.empty())
2591 return;
2592
2593 LLVMContext &Ctx = Loop->getFunction()->getContext();
2594 SmallVector<Metadata *> NewLoopProperties;
2595 NewLoopProperties.push_back(nullptr);
2596
2597 // If the loop already has metadata, prepend it to the new metadata.
2598 BasicBlock *Latch = Loop->getLatch();
2599 assert(Latch && "A valid CanonicalLoopInfo must have a unique latch")(static_cast <bool> (Latch && "A valid CanonicalLoopInfo must have a unique latch"
) ? void (0) : __assert_fail ("Latch && \"A valid CanonicalLoopInfo must have a unique latch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2599, __extension__
__PRETTY_FUNCTION__))
;
2600 MDNode *Existing = Latch->getTerminator()->getMetadata(LLVMContext::MD_loop);
2601 if (Existing)
2602 append_range(NewLoopProperties, drop_begin(Existing->operands(), 1));
2603
2604 append_range(NewLoopProperties, Properties);
2605 MDNode *LoopID = MDNode::getDistinct(Ctx, NewLoopProperties);
2606 LoopID->replaceOperandWith(0, LoopID);
2607
2608 Latch->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopID);
2609}
2610
2611/// Attach llvm.access.group metadata to the memref instructions of \p Block
2612static void addSimdMetadata(BasicBlock *Block, MDNode *AccessGroup,
2613 LoopInfo &LI) {
2614 for (Instruction &I : *Block) {
2615 if (I.mayReadOrWriteMemory()) {
2616 // TODO: This instruction may already have access group from
2617 // other pragmas e.g. #pragma clang loop vectorize. Append
2618 // so that the existing metadata is not overwritten.
2619 I.setMetadata(LLVMContext::MD_access_group, AccessGroup);
2620 }
2621 }
2622}
2623
2624void OpenMPIRBuilder::unrollLoopFull(DebugLoc, CanonicalLoopInfo *Loop) {
2625 LLVMContext &Ctx = Builder.getContext();
2626 addLoopMetadata(
2627 Loop, {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
2628 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.full"))});
2629}
2630
2631void OpenMPIRBuilder::unrollLoopHeuristic(DebugLoc, CanonicalLoopInfo *Loop) {
2632 LLVMContext &Ctx = Builder.getContext();
2633 addLoopMetadata(
2634 Loop, {
2635 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
2636 });
2637}
2638
2639void OpenMPIRBuilder::applySimd(DebugLoc, CanonicalLoopInfo *CanonicalLoop) {
2640 LLVMContext &Ctx = Builder.getContext();
2641
2642 Function *F = CanonicalLoop->getFunction();
2643
2644 FunctionAnalysisManager FAM;
2645 FAM.registerPass([]() { return DominatorTreeAnalysis(); });
2646 FAM.registerPass([]() { return LoopAnalysis(); });
2647 FAM.registerPass([]() { return PassInstrumentationAnalysis(); });
2648
2649 LoopAnalysis LIA;
2650 LoopInfo &&LI = LIA.run(*F, FAM);
2651
2652 Loop *L = LI.getLoopFor(CanonicalLoop->getHeader());
2653
2654 SmallSet<BasicBlock *, 8> Reachable;
2655
2656 // Get the basic blocks from the loop in which memref instructions
2657 // can be found.
2658 // TODO: Generalize getting all blocks inside a CanonicalizeLoopInfo,
2659 // preferably without running any passes.
2660 for (BasicBlock *Block : L->getBlocks()) {
2661 if (Block == CanonicalLoop->getCond() ||
2662 Block == CanonicalLoop->getHeader())
2663 continue;
2664 Reachable.insert(Block);
2665 }
2666
2667 // Add access group metadata to memory-access instructions.
2668 MDNode *AccessGroup = MDNode::getDistinct(Ctx, {});
2669 for (BasicBlock *BB : Reachable)
2670 addSimdMetadata(BB, AccessGroup, LI);
2671
2672 // Use the above access group metadata to create loop level
2673 // metadata, which should be distinct for each loop.
2674 ConstantAsMetadata *BoolConst =
2675 ConstantAsMetadata::get(ConstantInt::getTrue(Type::getInt1Ty(Ctx)));
2676 // TODO: If the loop has existing parallel access metadata, have
2677 // to combine two lists.
2678 addLoopMetadata(
2679 CanonicalLoop,
2680 {MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"),
2681 AccessGroup}),
2682 MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"),
2683 BoolConst})});
2684}
2685
2686/// Create the TargetMachine object to query the backend for optimization
2687/// preferences.
2688///
2689/// Ideally, this would be passed from the front-end to the OpenMPBuilder, but
2690/// e.g. Clang does not pass it to its CodeGen layer and creates it only when
2691/// needed for the LLVM pass pipeline. We use some default options to avoid
2692/// having to pass too many settings from the frontend that probably do not
2693/// matter.
2694///
2695/// Currently, TargetMachine is only used sometimes by the unrollLoopPartial
2696/// method. If we are going to use TargetMachine for more purposes, especially
2697/// those that are sensitive to TargetOptions, RelocModel and CodeModel, it
2698/// might be worth requiring front-ends to pass on their TargetMachine,
2699/// or at least cache it between methods. Note that while frontends such as Clang
2700/// have just a single main TargetMachine per translation unit, "target-cpu" and
2701/// "target-features" that determine the TargetMachine are per-function and can
2702/// be overridden using __attribute__((target("OPTIONS"))).
2703static std::unique_ptr<TargetMachine>
2704createTargetMachine(Function *F, CodeGenOpt::Level OptLevel) {
2705 Module *M = F->getParent();
2706
2707 StringRef CPU = F->getFnAttribute("target-cpu").getValueAsString();
2708 StringRef Features = F->getFnAttribute("target-features").getValueAsString();
2709 const std::string &Triple = M->getTargetTriple();
2710
2711 std::string Error;
2712 const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
2713 if (!TheTarget)
2714 return {};
2715
2716 llvm::TargetOptions Options;
2717 return std::unique_ptr<TargetMachine>(TheTarget->createTargetMachine(
2718 Triple, CPU, Features, Options, /*RelocModel=*/None, /*CodeModel=*/None,
2719 OptLevel));
2720}
2721
2722/// Heuristically determine the best-performant unroll factor for \p CLI. This
2723/// depends on the target processor. We are re-using the same heuristics as the
2724/// LoopUnrollPass.
2725static int32_t computeHeuristicUnrollFactor(CanonicalLoopInfo *CLI) {
2726 Function *F = CLI->getFunction();
2727
2728 // Assume the user requests the most aggressive unrolling, even if the rest of
2729 // the code is optimized using a lower setting.
2730 CodeGenOpt::Level OptLevel = CodeGenOpt::Aggressive;
2731 std::unique_ptr<TargetMachine> TM = createTargetMachine(F, OptLevel);
2732
2733 FunctionAnalysisManager FAM;
2734 FAM.registerPass([]() { return TargetLibraryAnalysis(); });
2735 FAM.registerPass([]() { return AssumptionAnalysis(); });
2736 FAM.registerPass([]() { return DominatorTreeAnalysis(); });
2737 FAM.registerPass([]() { return LoopAnalysis(); });
2738 FAM.registerPass([]() { return ScalarEvolutionAnalysis(); });
2739 FAM.registerPass([]() { return PassInstrumentationAnalysis(); });
2740 TargetIRAnalysis TIRA;
2741 if (TM)
2742 TIRA = TargetIRAnalysis(
2743 [&](const Function &F) { return TM->getTargetTransformInfo(F); });
2744 FAM.registerPass([&]() { return TIRA; });
2745
2746 TargetIRAnalysis::Result &&TTI = TIRA.run(*F, FAM);
2747 ScalarEvolutionAnalysis SEA;
2748 ScalarEvolution &&SE = SEA.run(*F, FAM);
2749 DominatorTreeAnalysis DTA;
2750 DominatorTree &&DT = DTA.run(*F, FAM);
2751 LoopAnalysis LIA;
2752 LoopInfo &&LI = LIA.run(*F, FAM);
2753 AssumptionAnalysis ACT;
2754 AssumptionCache &&AC = ACT.run(*F, FAM);
2755 OptimizationRemarkEmitter ORE{F};
2756
2757 Loop *L = LI.getLoopFor(CLI->getHeader());
2758 assert(L && "Expecting CanonicalLoopInfo to be recognized as a loop")(static_cast <bool> (L && "Expecting CanonicalLoopInfo to be recognized as a loop"
) ? void (0) : __assert_fail ("L && \"Expecting CanonicalLoopInfo to be recognized as a loop\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2758, __extension__
__PRETTY_FUNCTION__))
;
2759
2760 TargetTransformInfo::UnrollingPreferences UP =
2761 gatherUnrollingPreferences(L, SE, TTI,
2762 /*BlockFrequencyInfo=*/nullptr,
2763 /*ProfileSummaryInfo=*/nullptr, ORE, OptLevel,
2764 /*UserThreshold=*/None,
2765 /*UserCount=*/None,
2766 /*UserAllowPartial=*/true,
2767 /*UserAllowRuntime=*/true,
2768 /*UserUpperBound=*/None,
2769 /*UserFullUnrollMaxCount=*/None);
2770
2771 UP.Force = true;
2772
2773 // Account for additional optimizations taking place before the LoopUnrollPass
2774 // would unroll the loop.
2775 UP.Threshold *= UnrollThresholdFactor;
2776 UP.PartialThreshold *= UnrollThresholdFactor;
2777
2778 // Use normal unroll factors even if the rest of the code is optimized for
2779 // size.
2780 UP.OptSizeThreshold = UP.Threshold;
2781 UP.PartialOptSizeThreshold = UP.PartialThreshold;
2782
2783 LLVM_DEBUG(dbgs() << "Unroll heuristic thresholds:\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Unroll heuristic thresholds:\n"
<< " Threshold=" << UP.Threshold << "\n" <<
" PartialThreshold=" << UP.PartialThreshold << "\n"
<< " OptSizeThreshold=" << UP.OptSizeThreshold <<
"\n" << " PartialOptSizeThreshold=" << UP.PartialOptSizeThreshold
<< "\n"; } } while (false)
2784 << " Threshold=" << UP.Threshold << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Unroll heuristic thresholds:\n"
<< " Threshold=" << UP.Threshold << "\n" <<
" PartialThreshold=" << UP.PartialThreshold << "\n"
<< " OptSizeThreshold=" << UP.OptSizeThreshold <<
"\n" << " PartialOptSizeThreshold=" << UP.PartialOptSizeThreshold
<< "\n"; } } while (false)
2785 << " PartialThreshold=" << UP.PartialThreshold << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Unroll heuristic thresholds:\n"
<< " Threshold=" << UP.Threshold << "\n" <<
" PartialThreshold=" << UP.PartialThreshold << "\n"
<< " OptSizeThreshold=" << UP.OptSizeThreshold <<
"\n" << " PartialOptSizeThreshold=" << UP.PartialOptSizeThreshold
<< "\n"; } } while (false)
2786 << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Unroll heuristic thresholds:\n"
<< " Threshold=" << UP.Threshold << "\n" <<
" PartialThreshold=" << UP.PartialThreshold << "\n"
<< " OptSizeThreshold=" << UP.OptSizeThreshold <<
"\n" << " PartialOptSizeThreshold=" << UP.PartialOptSizeThreshold
<< "\n"; } } while (false)
2787 << " PartialOptSizeThreshold="do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Unroll heuristic thresholds:\n"
<< " Threshold=" << UP.Threshold << "\n" <<
" PartialThreshold=" << UP.PartialThreshold << "\n"
<< " OptSizeThreshold=" << UP.OptSizeThreshold <<
"\n" << " PartialOptSizeThreshold=" << UP.PartialOptSizeThreshold
<< "\n"; } } while (false)
2788 << UP.PartialOptSizeThreshold << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Unroll heuristic thresholds:\n"
<< " Threshold=" << UP.Threshold << "\n" <<
" PartialThreshold=" << UP.PartialThreshold << "\n"
<< " OptSizeThreshold=" << UP.OptSizeThreshold <<
"\n" << " PartialOptSizeThreshold=" << UP.PartialOptSizeThreshold
<< "\n"; } } while (false)
;
2789
2790 // Disable peeling.
2791 TargetTransformInfo::PeelingPreferences PP =
2792 gatherPeelingPreferences(L, SE, TTI,
2793 /*UserAllowPeeling=*/false,
2794 /*UserAllowProfileBasedPeeling=*/false,
2795 /*UnrollingSpecficValues=*/false);
2796
2797 SmallPtrSet<const Value *, 32> EphValues;
2798 CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
2799
2800 // Assume that reads and writes to stack variables can be eliminated by
2801 // Mem2Reg, SROA or LICM. That is, don't count them towards the loop body's
2802 // size.
2803 for (BasicBlock *BB : L->blocks()) {
2804 for (Instruction &I : *BB) {
2805 Value *Ptr;
2806 if (auto *Load = dyn_cast<LoadInst>(&I)) {
2807 Ptr = Load->getPointerOperand();
2808 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
2809 Ptr = Store->getPointerOperand();
2810 } else
2811 continue;
2812
2813 Ptr = Ptr->stripPointerCasts();
2814
2815 if (auto *Alloca = dyn_cast<AllocaInst>(Ptr)) {
2816 if (Alloca->getParent() == &F->getEntryBlock())
2817 EphValues.insert(&I);
2818 }
2819 }
2820 }
2821
2822 unsigned NumInlineCandidates;
2823 bool NotDuplicatable;
2824 bool Convergent;
2825 unsigned LoopSize =
2826 ApproximateLoopSize(L, NumInlineCandidates, NotDuplicatable, Convergent,
2827 TTI, EphValues, UP.BEInsns);
2828 LLVM_DEBUG(dbgs() << "Estimated loop size is " << LoopSize << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Estimated loop size is "
<< LoopSize << "\n"; } } while (false)
;
2829
2830 // Loop is not unrollable if the loop contains certain instructions.
2831 if (NotDuplicatable || Convergent) {
2832 LLVM_DEBUG(dbgs() << "Loop not considered unrollable\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Loop not considered unrollable\n"
; } } while (false)
;
2833 return 1;
2834 }
2835
2836 // TODO: Determine trip count of \p CLI if constant, computeUnrollCount might
2837 // be able to use it.
2838 int TripCount = 0;
2839 int MaxTripCount = 0;
2840 bool MaxOrZero = false;
2841 unsigned TripMultiple = 0;
2842
2843 bool UseUpperBound = false;
2844 computeUnrollCount(L, TTI, DT, &LI, SE, EphValues, &ORE, TripCount,
2845 MaxTripCount, MaxOrZero, TripMultiple, LoopSize, UP, PP,
2846 UseUpperBound);
2847 unsigned Factor = UP.Count;
2848 LLVM_DEBUG(dbgs() << "Suggesting unroll factor of " << Factor << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { dbgs() << "Suggesting unroll factor of "
<< Factor << "\n"; } } while (false)
;
2849
2850 // This function returns 1 to signal to not unroll a loop.
2851 if (Factor == 0)
2852 return 1;
2853 return Factor;
2854}
2855
2856void OpenMPIRBuilder::unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop,
2857 int32_t Factor,
2858 CanonicalLoopInfo **UnrolledCLI) {
2859 assert(Factor >= 0 && "Unroll factor must not be negative")(static_cast <bool> (Factor >= 0 && "Unroll factor must not be negative"
) ? void (0) : __assert_fail ("Factor >= 0 && \"Unroll factor must not be negative\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2859, __extension__
__PRETTY_FUNCTION__))
;
2860
2861 Function *F = Loop->getFunction();
2862 LLVMContext &Ctx = F->getContext();
2863
2864 // If the unrolled loop is not used for another loop-associated directive, it
2865 // is sufficient to add metadata for the LoopUnrollPass.
2866 if (!UnrolledCLI) {
2867 SmallVector<Metadata *, 2> LoopMetadata;
2868 LoopMetadata.push_back(
2869 MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")));
2870
2871 if (Factor >= 1) {
2872 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get(
2873 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor)));
2874 LoopMetadata.push_back(MDNode::get(
2875 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst}));
2876 }
2877
2878 addLoopMetadata(Loop, LoopMetadata);
2879 return;
2880 }
2881
2882 // Heuristically determine the unroll factor.
2883 if (Factor == 0)
2884 Factor = computeHeuristicUnrollFactor(Loop);
2885
2886 // No change required with unroll factor 1.
2887 if (Factor == 1) {
2888 *UnrolledCLI = Loop;
2889 return;
2890 }
2891
2892 assert(Factor >= 2 &&(static_cast <bool> (Factor >= 2 && "unrolling only makes sense with a factor of 2 or larger"
) ? void (0) : __assert_fail ("Factor >= 2 && \"unrolling only makes sense with a factor of 2 or larger\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2893, __extension__
__PRETTY_FUNCTION__))
2893 "unrolling only makes sense with a factor of 2 or larger")(static_cast <bool> (Factor >= 2 && "unrolling only makes sense with a factor of 2 or larger"
) ? void (0) : __assert_fail ("Factor >= 2 && \"unrolling only makes sense with a factor of 2 or larger\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2893, __extension__
__PRETTY_FUNCTION__))
;
2894
2895 Type *IndVarTy = Loop->getIndVarType();
2896
2897 // Apply partial unrolling by tiling the loop by the unroll-factor, then fully
2898 // unroll the inner loop.
2899 Value *FactorVal =
2900 ConstantInt::get(IndVarTy, APInt(IndVarTy->getIntegerBitWidth(), Factor,
2901 /*isSigned=*/false));
2902 std::vector<CanonicalLoopInfo *> LoopNest =
2903 tileLoops(DL, {Loop}, {FactorVal});
2904 assert(LoopNest.size() == 2 && "Expect 2 loops after tiling")(static_cast <bool> (LoopNest.size() == 2 && "Expect 2 loops after tiling"
) ? void (0) : __assert_fail ("LoopNest.size() == 2 && \"Expect 2 loops after tiling\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2904, __extension__
__PRETTY_FUNCTION__))
;
2905 *UnrolledCLI = LoopNest[0];
2906 CanonicalLoopInfo *InnerLoop = LoopNest[1];
2907
2908 // LoopUnrollPass can only fully unroll loops with constant trip count.
2909 // Unroll by the unroll factor with a fallback epilog for the remainder
2910 // iterations if necessary.
2911 ConstantAsMetadata *FactorConst = ConstantAsMetadata::get(
2912 ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor)));
2913 addLoopMetadata(
2914 InnerLoop,
2915 {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")),
2916 MDNode::get(
2917 Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})});
2918
2919#ifndef NDEBUG
2920 (*UnrolledCLI)->assertOK();
2921#endif
2922}
2923
2924OpenMPIRBuilder::InsertPointTy
2925OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc,
2926 llvm::Value *BufSize, llvm::Value *CpyBuf,
2927 llvm::Value *CpyFn, llvm::Value *DidIt) {
2928 if (!updateToLocation(Loc))
2929 return Loc.IP;
2930
2931 uint32_t SrcLocStrSize;
2932 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
2933 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2934 Value *ThreadId = getOrCreateThreadID(Ident);
2935
2936 llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt);
2937
2938 Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD};
2939
2940 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate);
2941 Builder.CreateCall(Fn, Args);
2942
2943 return Builder.saveIP();
2944}
2945
2946OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSingle(
2947 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
2948 FinalizeCallbackTy FiniCB, bool IsNowait, llvm::Value *DidIt) {
2949
2950 if (!updateToLocation(Loc))
2951 return Loc.IP;
2952
2953 // If needed (i.e. not null), initialize `DidIt` with 0
2954 if (DidIt) {
2955 Builder.CreateStore(Builder.getInt32(0), DidIt);
2956 }
2957
2958 Directive OMPD = Directive::OMPD_single;
2959 uint32_t SrcLocStrSize;
2960 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
2961 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2962 Value *ThreadId = getOrCreateThreadID(Ident);
2963 Value *Args[] = {Ident, ThreadId};
2964
2965 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single);
2966 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
2967
2968 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single);
2969 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
2970
2971 // generates the following:
2972 // if (__kmpc_single()) {
2973 // .... single region ...
2974 // __kmpc_end_single
2975 // }
2976 // __kmpc_barrier
2977
2978 EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
2979 /*Conditional*/ true,
2980 /*hasFinalize*/ true);
2981 if (!IsNowait)
2982 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
2983 omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
2984 /* CheckCancelFlag */ false);
2985 return Builder.saveIP();
2986}
2987
2988OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical(
2989 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
2990 FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) {
2991
2992 if (!updateToLocation(Loc))
2993 return Loc.IP;
2994
2995 Directive OMPD = Directive::OMPD_critical;
2996 uint32_t SrcLocStrSize;
2997 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
2998 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
2999 Value *ThreadId = getOrCreateThreadID(Ident);
3000 Value *LockVar = getOMPCriticalRegionLock(CriticalName);
3001 Value *Args[] = {Ident, ThreadId, LockVar};
3002
3003 SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args));
3004 Function *RTFn = nullptr;
3005 if (HintInst) {
3006 // Add Hint to entry Args and create call
3007 EnterArgs.push_back(HintInst);
3008 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint);
3009 } else {
3010 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical);
3011 }
3012 Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs);
3013
3014 Function *ExitRTLFn =
3015 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical);
3016 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
3017
3018 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
3019 /*Conditional*/ false, /*hasFinalize*/ true);
3020}
3021
3022OpenMPIRBuilder::InsertPointTy
3023OpenMPIRBuilder::createOrderedDepend(const LocationDescription &Loc,
3024 InsertPointTy AllocaIP, unsigned NumLoops,
3025 ArrayRef<llvm::Value *> StoreValues,
3026 const Twine &Name, bool IsDependSource) {
3027 for (size_t I = 0; I < StoreValues.size(); I++)
3028 assert(StoreValues[I]->getType()->isIntegerTy(64) &&(static_cast <bool> (StoreValues[I]->getType()->isIntegerTy
(64) && "OpenMP runtime requires depend vec with i64 type"
) ? void (0) : __assert_fail ("StoreValues[I]->getType()->isIntegerTy(64) && \"OpenMP runtime requires depend vec with i64 type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3029, __extension__
__PRETTY_FUNCTION__))
3029 "OpenMP runtime requires depend vec with i64 type")(static_cast <bool> (StoreValues[I]->getType()->isIntegerTy
(64) && "OpenMP runtime requires depend vec with i64 type"
) ? void (0) : __assert_fail ("StoreValues[I]->getType()->isIntegerTy(64) && \"OpenMP runtime requires depend vec with i64 type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3029, __extension__
__PRETTY_FUNCTION__))
;
3030
3031 if (!updateToLocation(Loc))
3032 return Loc.IP;
3033
3034 // Allocate space for vector and generate alloc instruction.
3035 auto *ArrI64Ty = ArrayType::get(Int64, NumLoops);
3036 Builder.restoreIP(AllocaIP);
3037 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI64Ty, nullptr, Name);
3038 ArgsBase->setAlignment(Align(8));
3039 Builder.restoreIP(Loc.IP);
3040
3041 // Store the index value with offset in depend vector.
3042 for (unsigned I = 0; I < NumLoops; ++I) {
3043 Value *DependAddrGEPIter = Builder.CreateInBoundsGEP(
3044 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(I)});
3045 StoreInst *STInst = Builder.CreateStore(StoreValues[I], DependAddrGEPIter);
3046 STInst->setAlignment(Align(8));
3047 }
3048
3049 Value *DependBaseAddrGEP = Builder.CreateInBoundsGEP(
3050 ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(0)});
3051
3052 uint32_t SrcLocStrSize;
3053 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3054 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3055 Value *ThreadId = getOrCreateThreadID(Ident);
3056 Value *Args[] = {Ident, ThreadId, DependBaseAddrGEP};
3057
3058 Function *RTLFn = nullptr;
3059 if (IsDependSource)
3060 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_post);
3061 else
3062 RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_wait);
3063 Builder.CreateCall(RTLFn, Args);
3064
3065 return Builder.saveIP();
3066}
3067
3068OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createOrderedThreadsSimd(
3069 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
3070 FinalizeCallbackTy FiniCB, bool IsThreads) {
3071 if (!updateToLocation(Loc))
3072 return Loc.IP;
3073
3074 Directive OMPD = Directive::OMPD_ordered;
3075 Instruction *EntryCall = nullptr;
3076 Instruction *ExitCall = nullptr;
3077
3078 if (IsThreads) {
3079 uint32_t SrcLocStrSize;
3080 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3081 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3082 Value *ThreadId = getOrCreateThreadID(Ident);
3083 Value *Args[] = {Ident, ThreadId};
3084
3085 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_ordered);
3086 EntryCall = Builder.CreateCall(EntryRTLFn, Args);
3087
3088 Function *ExitRTLFn =
3089 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_ordered);
3090 ExitCall = Builder.CreateCall(ExitRTLFn, Args);
3091 }
3092
3093 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
3094 /*Conditional*/ false, /*hasFinalize*/ true);
3095}
3096
3097OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion(
3098 Directive OMPD, Instruction *EntryCall, Instruction *ExitCall,
3099 BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional,
3100 bool HasFinalize, bool IsCancellable) {
3101
3102 if (HasFinalize)
3103 FinalizationStack.push_back({FiniCB, OMPD, IsCancellable});
3104
3105 // Create inlined region's entry and body blocks, in preparation
3106 // for conditional creation
3107 BasicBlock *EntryBB = Builder.GetInsertBlock();
3108 Instruction *SplitPos = EntryBB->getTerminator();
3109 if (!isa_and_nonnull<BranchInst>(SplitPos))
3110 SplitPos = new UnreachableInst(Builder.getContext(), EntryBB);
3111 BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end");
3112 BasicBlock *FiniBB =
3113 EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize");
3114
3115 Builder.SetInsertPoint(EntryBB->getTerminator());
3116 emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional);
3117
3118 // generate body
3119 BodyGenCB(/* AllocaIP */ InsertPointTy(),
3120 /* CodeGenIP */ Builder.saveIP(), *FiniBB);
3121
3122 // If we didn't emit a branch to FiniBB during body generation, it means
3123 // FiniBB is unreachable (e.g. while(1);). stop generating all the
3124 // unreachable blocks, and remove anything we are not going to use.
3125 auto SkipEmittingRegion = FiniBB->hasNPredecessors(0);
3126 if (SkipEmittingRegion) {
3127 FiniBB->eraseFromParent();
3128 ExitCall->eraseFromParent();
3129 // Discard finalization if we have it.
3130 if (HasFinalize) {
3131 assert(!FinalizationStack.empty() &&(static_cast <bool> (!FinalizationStack.empty() &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("!FinalizationStack.empty() && \"Unexpected finalization stack state!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3132, __extension__
__PRETTY_FUNCTION__))
3132 "Unexpected finalization stack state!")(static_cast <bool> (!FinalizationStack.empty() &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("!FinalizationStack.empty() && \"Unexpected finalization stack state!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3132, __extension__
__PRETTY_FUNCTION__))
;
3133 FinalizationStack.pop_back();
3134 }
3135 } else {
3136 // emit exit call and do any needed finalization.
3137 auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt());
3138 assert(FiniBB->getTerminator()->getNumSuccessors() == 1 &&(static_cast <bool> (FiniBB->getTerminator()->getNumSuccessors
() == 1 && FiniBB->getTerminator()->getSuccessor
(0) == ExitBB && "Unexpected control flow graph state!!"
) ? void (0) : __assert_fail ("FiniBB->getTerminator()->getNumSuccessors() == 1 && FiniBB->getTerminator()->getSuccessor(0) == ExitBB && \"Unexpected control flow graph state!!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3140, __extension__
__PRETTY_FUNCTION__))
3139 FiniBB->getTerminator()->getSuccessor(0) == ExitBB &&(static_cast <bool> (FiniBB->getTerminator()->getNumSuccessors
() == 1 && FiniBB->getTerminator()->getSuccessor
(0) == ExitBB && "Unexpected control flow graph state!!"
) ? void (0) : __assert_fail ("FiniBB->getTerminator()->getNumSuccessors() == 1 && FiniBB->getTerminator()->getSuccessor(0) == ExitBB && \"Unexpected control flow graph state!!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3140, __extension__
__PRETTY_FUNCTION__))
3140 "Unexpected control flow graph state!!")(static_cast <bool> (FiniBB->getTerminator()->getNumSuccessors
() == 1 && FiniBB->getTerminator()->getSuccessor
(0) == ExitBB && "Unexpected control flow graph state!!"
) ? void (0) : __assert_fail ("FiniBB->getTerminator()->getNumSuccessors() == 1 && FiniBB->getTerminator()->getSuccessor(0) == ExitBB && \"Unexpected control flow graph state!!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3140, __extension__
__PRETTY_FUNCTION__))
;
3141 emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize);
3142 assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB &&(static_cast <bool> (FiniBB->getUniquePredecessor()->
getUniqueSuccessor() == FiniBB && "Unexpected Control Flow State!"
) ? void (0) : __assert_fail ("FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB && \"Unexpected Control Flow State!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3143, __extension__
__PRETTY_FUNCTION__))
3143 "Unexpected Control Flow State!")(static_cast <bool> (FiniBB->getUniquePredecessor()->
getUniqueSuccessor() == FiniBB && "Unexpected Control Flow State!"
) ? void (0) : __assert_fail ("FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB && \"Unexpected Control Flow State!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3143, __extension__
__PRETTY_FUNCTION__))
;
3144 MergeBlockIntoPredecessor(FiniBB);
3145 }
3146
3147 // If we are skipping the region of a non conditional, remove the exit
3148 // block, and clear the builder's insertion point.
3149 assert(SplitPos->getParent() == ExitBB &&(static_cast <bool> (SplitPos->getParent() == ExitBB
&& "Unexpected Insertion point location!") ? void (0
) : __assert_fail ("SplitPos->getParent() == ExitBB && \"Unexpected Insertion point location!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3150, __extension__
__PRETTY_FUNCTION__))
3150 "Unexpected Insertion point location!")(static_cast <bool> (SplitPos->getParent() == ExitBB
&& "Unexpected Insertion point location!") ? void (0
) : __assert_fail ("SplitPos->getParent() == ExitBB && \"Unexpected Insertion point location!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3150, __extension__
__PRETTY_FUNCTION__))
;
3151 if (!Conditional && SkipEmittingRegion) {
3152 ExitBB->eraseFromParent();
3153 Builder.ClearInsertionPoint();
3154 } else {
3155 auto merged = MergeBlockIntoPredecessor(ExitBB);
3156 BasicBlock *ExitPredBB = SplitPos->getParent();
3157 auto InsertBB = merged ? ExitPredBB : ExitBB;
3158 if (!isa_and_nonnull<BranchInst>(SplitPos))
3159 SplitPos->eraseFromParent();
3160 Builder.SetInsertPoint(InsertBB);
3161 }
3162
3163 return Builder.saveIP();
3164}
3165
// Emits the entry guard for an inlined region: when Conditional is set, the
// region body runs only if EntryCall produced a non-zero value (e.g. the
// result of __kmpc_single). Otherwise nothing is emitted. OMPD is currently
// unused here. The terminator-shuffling below is order-sensitive: a
// placeholder unreachable anchors the insertion point while the original
// entry branch is relocated into the new body block.
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry(
    Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) {
  // if nothing to do, Return current insertion point.
  if (!Conditional || !EntryCall)
    return Builder.saveIP();

  BasicBlock *EntryBB = Builder.GetInsertBlock();
  // Guard condition: EntryCall result != 0.
  Value *CallBool = Builder.CreateIsNotNull(EntryCall);
  auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body");
  // Placeholder terminator so ThenBB is well-formed until the real
  // terminator (the old entry branch) is moved in below.
  auto *UI = new UnreachableInst(Builder.getContext(), ThenBB);

  // Emit thenBB and set the Builder's insertion point there for
  // body generation next. Place the block after the current block.
  Function *CurFn = EntryBB->getParent();
  CurFn->getBasicBlockList().insertAfter(EntryBB->getIterator(), ThenBB);

  // Move Entry branch to end of ThenBB, and replace with conditional
  // branch (If-stmt)
  Instruction *EntryBBTI = EntryBB->getTerminator();
  Builder.CreateCondBr(CallBool, ThenBB, ExitBB);
  EntryBBTI->removeFromParent();
  // Re-insert the old terminator in front of the placeholder, then drop the
  // placeholder — ThenBB now falls through where EntryBB used to.
  Builder.SetInsertPoint(UI);
  Builder.Insert(EntryBBTI);
  UI->eraseFromParent();
  Builder.SetInsertPoint(ThenBB->getTerminator());

  // return an insertion point to ExitBB.
  return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt());
}
3195
3196OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit(
3197 omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall,
3198 bool HasFinalize) {
3199
3200 Builder.restoreIP(FinIP);
3201
3202 // If there is finalization to do, emit it before the exit call
3203 if (HasFinalize) {
3204 assert(!FinalizationStack.empty() &&(static_cast <bool> (!FinalizationStack.empty() &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("!FinalizationStack.empty() && \"Unexpected finalization stack state!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3205, __extension__
__PRETTY_FUNCTION__))
3205 "Unexpected finalization stack state!")(static_cast <bool> (!FinalizationStack.empty() &&
"Unexpected finalization stack state!") ? void (0) : __assert_fail
("!FinalizationStack.empty() && \"Unexpected finalization stack state!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3205, __extension__
__PRETTY_FUNCTION__))
;
3206
3207 FinalizationInfo Fi = FinalizationStack.pop_back_val();
3208 assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!")(static_cast <bool> (Fi.DK == OMPD && "Unexpected Directive for Finalization call!"
) ? void (0) : __assert_fail ("Fi.DK == OMPD && \"Unexpected Directive for Finalization call!\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3208, __extension__
__PRETTY_FUNCTION__))
;
3209
3210 Fi.FiniCB(FinIP);
3211
3212 BasicBlock *FiniBB = FinIP.getBlock();
3213 Instruction *FiniBBTI = FiniBB->getTerminator();
3214
3215 // set Builder IP for call creation
3216 Builder.SetInsertPoint(FiniBBTI);
3217 }
3218
3219 if (!ExitCall)
3220 return Builder.saveIP();
3221
3222 // place the Exitcall as last instruction before Finalization block terminator
3223 ExitCall->removeFromParent();
3224 Builder.Insert(ExitCall);
3225
3226 return IRBuilder<>::InsertPoint(ExitCall->getParent(),
3227 ExitCall->getIterator());
3228}
3229
3230OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks(
3231 InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr,
3232 llvm::IntegerType *IntPtrTy, bool BranchtoEnd) {
3233 if (!IP.isSet())
3234 return IP;
3235
3236 IRBuilder<>::InsertPointGuard IPG(Builder);
3237
3238 // creates the following CFG structure
3239 // OMP_Entry : (MasterAddr != PrivateAddr)?
3240 // F T
3241 // | \
3242 // | copin.not.master
3243 // | /
3244 // v /
3245 // copyin.not.master.end
3246 // |
3247 // v
3248 // OMP.Entry.Next
3249
3250 BasicBlock *OMP_Entry = IP.getBlock();
3251 Function *CurFn = OMP_Entry->getParent();
3252 BasicBlock *CopyBegin =
3253 BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn);
3254 BasicBlock *CopyEnd = nullptr;
3255
3256 // If entry block is terminated, split to preserve the branch to following
3257 // basic block (i.e. OMP.Entry.Next), otherwise, leave everything as is.
3258 if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) {
3259 CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(),
3260 "copyin.not.master.end");
3261 OMP_Entry->getTerminator()->eraseFromParent();
3262 } else {
3263 CopyEnd =
3264 BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn);
3265 }
3266
3267 Builder.SetInsertPoint(OMP_Entry);
3268 Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy);
3269 Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy);
3270 Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr);
3271 Builder.CreateCondBr(cmp, CopyBegin, CopyEnd);
3272
3273 Builder.SetInsertPoint(CopyBegin);
3274 if (BranchtoEnd)
3275 Builder.SetInsertPoint(Builder.CreateBr(CopyEnd));
3276
3277 return Builder.saveIP();
3278}
3279
3280CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc,
3281 Value *Size, Value *Allocator,
3282 std::string Name) {
3283 IRBuilder<>::InsertPointGuard IPG(Builder);
3284 Builder.restoreIP(Loc.IP);
3285
3286 uint32_t SrcLocStrSize;
3287 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3288 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3289 Value *ThreadId = getOrCreateThreadID(Ident);
3290 Value *Args[] = {ThreadId, Size, Allocator};
3291
3292 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc);
3293
3294 return Builder.CreateCall(Fn, Args, Name);
3295}
3296
3297CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc,
3298 Value *Addr, Value *Allocator,
3299 std::string Name) {
3300 IRBuilder<>::InsertPointGuard IPG(Builder);
3301 Builder.restoreIP(Loc.IP);
3302
3303 uint32_t SrcLocStrSize;
3304 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3305 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3306 Value *ThreadId = getOrCreateThreadID(Ident);
3307 Value *Args[] = {ThreadId, Addr, Allocator};
3308 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free);
3309 return Builder.CreateCall(Fn, Args, Name);
3310}
3311
3312CallInst *OpenMPIRBuilder::createOMPInteropInit(
3313 const LocationDescription &Loc, Value *InteropVar,
3314 omp::OMPInteropType InteropType, Value *Device, Value *NumDependences,
3315 Value *DependenceAddress, bool HaveNowaitClause) {
3316 IRBuilder<>::InsertPointGuard IPG(Builder);
3317 Builder.restoreIP(Loc.IP);
3318
3319 uint32_t SrcLocStrSize;
3320 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3321 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3322 Value *ThreadId = getOrCreateThreadID(Ident);
3323 if (Device == nullptr)
3324 Device = ConstantInt::get(Int32, -1);
3325 Constant *InteropTypeVal = ConstantInt::get(Int64, (int)InteropType);
3326 if (NumDependences == nullptr) {
3327 NumDependences = ConstantInt::get(Int32, 0);
3328 PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext());
3329 DependenceAddress = ConstantPointerNull::get(PointerTypeVar);
3330 }
3331 Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause);
3332 Value *Args[] = {
3333 Ident, ThreadId, InteropVar, InteropTypeVal,
3334 Device, NumDependences, DependenceAddress, HaveNowaitClauseVal};
3335
3336 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_init);
3337
3338 return Builder.CreateCall(Fn, Args);
3339}
3340
3341CallInst *OpenMPIRBuilder::createOMPInteropDestroy(
3342 const LocationDescription &Loc, Value *InteropVar, Value *Device,
3343 Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause) {
3344 IRBuilder<>::InsertPointGuard IPG(Builder);
3345 Builder.restoreIP(Loc.IP);
3346
3347 uint32_t SrcLocStrSize;
3348 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3349 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3350 Value *ThreadId = getOrCreateThreadID(Ident);
3351 if (Device == nullptr)
3352 Device = ConstantInt::get(Int32, -1);
3353 if (NumDependences == nullptr) {
3354 NumDependences = ConstantInt::get(Int32, 0);
3355 PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext());
3356 DependenceAddress = ConstantPointerNull::get(PointerTypeVar);
3357 }
3358 Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause);
3359 Value *Args[] = {
3360 Ident, ThreadId, InteropVar, Device,
3361 NumDependences, DependenceAddress, HaveNowaitClauseVal};
3362
3363 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_destroy);
3364
3365 return Builder.CreateCall(Fn, Args);
3366}
3367
3368CallInst *OpenMPIRBuilder::createOMPInteropUse(const LocationDescription &Loc,
3369 Value *InteropVar, Value *Device,
3370 Value *NumDependences,
3371 Value *DependenceAddress,
3372 bool HaveNowaitClause) {
3373 IRBuilder<>::InsertPointGuard IPG(Builder);
3374 Builder.restoreIP(Loc.IP);
3375 uint32_t SrcLocStrSize;
3376 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3377 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3378 Value *ThreadId = getOrCreateThreadID(Ident);
3379 if (Device == nullptr)
3380 Device = ConstantInt::get(Int32, -1);
3381 if (NumDependences == nullptr) {
3382 NumDependences = ConstantInt::get(Int32, 0);
3383 PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext());
3384 DependenceAddress = ConstantPointerNull::get(PointerTypeVar);
3385 }
3386 Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause);
3387 Value *Args[] = {
3388 Ident, ThreadId, InteropVar, Device,
3389 NumDependences, DependenceAddress, HaveNowaitClauseVal};
3390
3391 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_use);
3392
3393 return Builder.CreateCall(Fn, Args);
3394}
3395
3396CallInst *OpenMPIRBuilder::createCachedThreadPrivate(
3397 const LocationDescription &Loc, llvm::Value *Pointer,
3398 llvm::ConstantInt *Size, const llvm::Twine &Name) {
3399 IRBuilder<>::InsertPointGuard IPG(Builder);
3400 Builder.restoreIP(Loc.IP);
3401
3402 uint32_t SrcLocStrSize;
3403 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3404 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3405 Value *ThreadId = getOrCreateThreadID(Ident);
3406 Constant *ThreadPrivateCache =
3407 getOrCreateOMPInternalVariable(Int8PtrPtr, Name);
3408 llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache};
3409
3410 Function *Fn =
3411 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached);
3412
3413 return Builder.CreateCall(Fn, Args);
3414}
3415
3416OpenMPIRBuilder::InsertPointTy
3417OpenMPIRBuilder::createTargetInit(const LocationDescription &Loc, bool IsSPMD,
3418 bool RequiresFullRuntime) {
3419 if (!updateToLocation(Loc))
3420 return Loc.IP;
3421
3422 uint32_t SrcLocStrSize;
3423 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3424 Constant *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3425 ConstantInt *IsSPMDVal = ConstantInt::getSigned(
3426 IntegerType::getInt8Ty(Int8->getContext()),
3427 IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC);
3428 ConstantInt *UseGenericStateMachine =
3429 ConstantInt::getBool(Int32->getContext(), !IsSPMD);
3430 ConstantInt *RequiresFullRuntimeVal =
3431 ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime);
3432
3433 Function *Fn = getOrCreateRuntimeFunctionPtr(
3434 omp::RuntimeFunction::OMPRTL___kmpc_target_init);
3435
3436 CallInst *ThreadKind = Builder.CreateCall(
3437 Fn, {Ident, IsSPMDVal, UseGenericStateMachine, RequiresFullRuntimeVal});
3438
3439 Value *ExecUserCode = Builder.CreateICmpEQ(
3440 ThreadKind, ConstantInt::get(ThreadKind->getType(), -1),
3441 "exec_user_code");
3442
3443 // ThreadKind = __kmpc_target_init(...)
3444 // if (ThreadKind == -1)
3445 // user_code
3446 // else
3447 // return;
3448
3449 auto *UI = Builder.CreateUnreachable();
3450 BasicBlock *CheckBB = UI->getParent();
3451 BasicBlock *UserCodeEntryBB = CheckBB->splitBasicBlock(UI, "user_code.entry");
3452
3453 BasicBlock *WorkerExitBB = BasicBlock::Create(
3454 CheckBB->getContext(), "worker.exit", CheckBB->getParent());
3455 Builder.SetInsertPoint(WorkerExitBB);
3456 Builder.CreateRetVoid();
3457
3458 auto *CheckBBTI = CheckBB->getTerminator();
3459 Builder.SetInsertPoint(CheckBBTI);
3460 Builder.CreateCondBr(ExecUserCode, UI->getParent(), WorkerExitBB);
3461
3462 CheckBBTI->eraseFromParent();
3463 UI->eraseFromParent();
3464
3465 // Continue in the "user_code" block, see diagram above and in
3466 // openmp/libomptarget/deviceRTLs/common/include/target.h .
3467 return InsertPointTy(UserCodeEntryBB, UserCodeEntryBB->getFirstInsertionPt());
3468}
3469
3470void OpenMPIRBuilder::createTargetDeinit(const LocationDescription &Loc,
3471 bool IsSPMD,
3472 bool RequiresFullRuntime) {
3473 if (!updateToLocation(Loc))
3474 return;
3475
3476 uint32_t SrcLocStrSize;
3477 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3478 Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3479 ConstantInt *IsSPMDVal = ConstantInt::getSigned(
3480 IntegerType::getInt8Ty(Int8->getContext()),
3481 IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC);
3482 ConstantInt *RequiresFullRuntimeVal =
3483 ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime);
3484
3485 Function *Fn = getOrCreateRuntimeFunctionPtr(
3486 omp::RuntimeFunction::OMPRTL___kmpc_target_deinit);
3487
3488 Builder.CreateCall(Fn, {Ident, IsSPMDVal, RequiresFullRuntimeVal});
3489}
3490
3491std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts,
3492 StringRef FirstSeparator,
3493 StringRef Separator) {
3494 SmallString<128> Buffer;
3495 llvm::raw_svector_ostream OS(Buffer);
3496 StringRef Sep = FirstSeparator;
3497 for (StringRef Part : Parts) {
3498 OS << Sep << Part;
3499 Sep = Separator;
3500 }
3501 return OS.str().str();
3502}
3503
3504Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable(
3505 llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
3506 // TODO: Replace the twine arg with stringref to get rid of the conversion
3507 // logic. However This is taken from current implementation in clang as is.
3508 // Since this method is used in many places exclusively for OMP internal use
3509 // we will keep it as is for temporarily until we move all users to the
3510 // builder and then, if possible, fix it everywhere in one go.
3511 SmallString<256> Buffer;
3512 llvm::raw_svector_ostream Out(Buffer);
3513 Out << Name;
3514 StringRef RuntimeName = Out.str();
3515 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
3516 if (Elem.second) {
3517 assert(cast<PointerType>(Elem.second->getType())(static_cast <bool> (cast<PointerType>(Elem.second
->getType()) ->isOpaqueOrPointeeTypeMatches(Ty) &&
"OMP internal variable has different type than requested") ?
void (0) : __assert_fail ("cast<PointerType>(Elem.second->getType()) ->isOpaqueOrPointeeTypeMatches(Ty) && \"OMP internal variable has different type than requested\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3519, __extension__
__PRETTY_FUNCTION__))
3518 ->isOpaqueOrPointeeTypeMatches(Ty) &&(static_cast <bool> (cast<PointerType>(Elem.second
->getType()) ->isOpaqueOrPointeeTypeMatches(Ty) &&
"OMP internal variable has different type than requested") ?
void (0) : __assert_fail ("cast<PointerType>(Elem.second->getType()) ->isOpaqueOrPointeeTypeMatches(Ty) && \"OMP internal variable has different type than requested\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3519, __extension__
__PRETTY_FUNCTION__))
3519 "OMP internal variable has different type than requested")(static_cast <bool> (cast<PointerType>(Elem.second
->getType()) ->isOpaqueOrPointeeTypeMatches(Ty) &&
"OMP internal variable has different type than requested") ?
void (0) : __assert_fail ("cast<PointerType>(Elem.second->getType()) ->isOpaqueOrPointeeTypeMatches(Ty) && \"OMP internal variable has different type than requested\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3519, __extension__
__PRETTY_FUNCTION__))
;
3520 } else {
3521 // TODO: investigate the appropriate linkage type used for the global
3522 // variable for possibly changing that to internal or private, or maybe
3523 // create different versions of the function for different OMP internal
3524 // variables.
3525 Elem.second = new llvm::GlobalVariable(
3526 M, Ty, /*IsConstant*/ false, llvm::GlobalValue::CommonLinkage,
3527 llvm::Constant::getNullValue(Ty), Elem.first(),
3528 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
3529 AddressSpace);
3530 }
3531
3532 return Elem.second;
3533}
3534
3535Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) {
3536 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
3537 std::string Name = getNameWithSeparators({Prefix, "var"}, ".", ".");
3538 return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name);
3539}
3540
3541GlobalVariable *
3542OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
3543 std::string VarName) {
3544 llvm::Constant *MaptypesArrayInit =
3545 llvm::ConstantDataArray::get(M.getContext(), Mappings);
3546 auto *MaptypesArrayGlobal = new llvm::GlobalVariable(
3547 M, MaptypesArrayInit->getType(),
3548 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MaptypesArrayInit,
3549 VarName);
3550 MaptypesArrayGlobal->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3551 return MaptypesArrayGlobal;
3552}
3553
3554void OpenMPIRBuilder::createMapperAllocas(const LocationDescription &Loc,
3555 InsertPointTy AllocaIP,
3556 unsigned NumOperands,
3557 struct MapperAllocas &MapperAllocas) {
3558 if (!updateToLocation(Loc))
3559 return;
3560
3561 auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands);
3562 auto *ArrI64Ty = ArrayType::get(Int64, NumOperands);
3563 Builder.restoreIP(AllocaIP);
3564 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI8PtrTy);
3565 AllocaInst *Args = Builder.CreateAlloca(ArrI8PtrTy);
3566 AllocaInst *ArgSizes = Builder.CreateAlloca(ArrI64Ty);
3567 Builder.restoreIP(Loc.IP);
3568 MapperAllocas.ArgsBase = ArgsBase;
3569 MapperAllocas.Args = Args;
3570 MapperAllocas.ArgSizes = ArgSizes;
3571}
3572
3573void OpenMPIRBuilder::emitMapperCall(const LocationDescription &Loc,
3574 Function *MapperFunc, Value *SrcLocInfo,
3575 Value *MaptypesArg, Value *MapnamesArg,
3576 struct MapperAllocas &MapperAllocas,
3577 int64_t DeviceID, unsigned NumOperands) {
3578 if (!updateToLocation(Loc))
3579 return;
3580
3581 auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands);
3582 auto *ArrI64Ty = ArrayType::get(Int64, NumOperands);
3583 Value *ArgsBaseGEP =
3584 Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.ArgsBase,
3585 {Builder.getInt32(0), Builder.getInt32(0)});
3586 Value *ArgsGEP =
3587 Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.Args,
3588 {Builder.getInt32(0), Builder.getInt32(0)});
3589 Value *ArgSizesGEP =
3590 Builder.CreateInBoundsGEP(ArrI64Ty, MapperAllocas.ArgSizes,
3591 {Builder.getInt32(0), Builder.getInt32(0)});
3592 Value *NullPtr = Constant::getNullValue(Int8Ptr->getPointerTo());
3593 Builder.CreateCall(MapperFunc,
3594 {SrcLocInfo, Builder.getInt64(DeviceID),
3595 Builder.getInt32(NumOperands), ArgsBaseGEP, ArgsGEP,
3596 ArgSizesGEP, MaptypesArg, MapnamesArg, NullPtr});
3597}
3598
3599bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic(
3600 const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) {
3601 assert(!(AO == AtomicOrdering::NotAtomic ||(static_cast <bool> (!(AO == AtomicOrdering::NotAtomic ||
AO == llvm::AtomicOrdering::Unordered) && "Unexpected Atomic Ordering."
) ? void (0) : __assert_fail ("!(AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered) && \"Unexpected Atomic Ordering.\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3603, __extension__
__PRETTY_FUNCTION__))
3602 AO == llvm::AtomicOrdering::Unordered) &&(static_cast <bool> (!(AO == AtomicOrdering::NotAtomic ||
AO == llvm::AtomicOrdering::Unordered) && "Unexpected Atomic Ordering."
) ? void (0) : __assert_fail ("!(AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered) && \"Unexpected Atomic Ordering.\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3603, __extension__
__PRETTY_FUNCTION__))
3603 "Unexpected Atomic Ordering.")(static_cast <bool> (!(AO == AtomicOrdering::NotAtomic ||
AO == llvm::AtomicOrdering::Unordered) && "Unexpected Atomic Ordering."
) ? void (0) : __assert_fail ("!(AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered) && \"Unexpected Atomic Ordering.\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3603, __extension__
__PRETTY_FUNCTION__))
;
3604
3605 bool Flush = false;
3606 llvm::AtomicOrdering FlushAO = AtomicOrdering::Monotonic;
3607
3608 switch (AK) {
3609 case Read:
3610 if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease ||
3611 AO == AtomicOrdering::SequentiallyConsistent) {
3612 FlushAO = AtomicOrdering::Acquire;
3613 Flush = true;
3614 }
3615 break;
3616 case Write:
3617 case Compare:
3618 case Update:
3619 if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease ||
3620 AO == AtomicOrdering::SequentiallyConsistent) {
3621 FlushAO = AtomicOrdering::Release;
3622 Flush = true;
3623 }
3624 break;
3625 case Capture:
3626 switch (AO) {
3627 case AtomicOrdering::Acquire:
3628 FlushAO = AtomicOrdering::Acquire;
3629 Flush = true;
3630 break;
3631 case AtomicOrdering::Release:
3632 FlushAO = AtomicOrdering::Release;
3633 Flush = true;
3634 break;
3635 case AtomicOrdering::AcquireRelease:
3636 case AtomicOrdering::SequentiallyConsistent:
3637 FlushAO = AtomicOrdering::AcquireRelease;
3638 Flush = true;
3639 break;
3640 default:
3641 // do nothing - leave silently.
3642 break;
3643 }
3644 }
3645
3646 if (Flush) {
3647 // Currently Flush RT call still doesn't take memory_ordering, so for when
3648 // that happens, this tries to do the resolution of which atomic ordering
3649 // to use with but issue the flush call
3650 // TODO: pass `FlushAO` after memory ordering support is added
3651 (void)FlushAO;
3652 emitFlush(Loc);
3653 }
3654
3655 // for AO == AtomicOrdering::Monotonic and all other case combinations
3656 // do nothing
3657 return Flush;
3658}
3659
3660OpenMPIRBuilder::InsertPointTy
3661OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc,
3662 AtomicOpValue &X, AtomicOpValue &V,
3663 AtomicOrdering AO) {
3664 if (!updateToLocation(Loc))
3665 return Loc.IP;
3666
3667 Type *XTy = X.Var->getType();
3668 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory")(static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3668, __extension__
__PRETTY_FUNCTION__))
;
3669 Type *XElemTy = X.ElemTy;
3670 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||(static_cast <bool> ((XElemTy->isFloatingPointTy() ||
XElemTy->isIntegerTy() || XElemTy->isPointerTy()) &&
"OMP atomic read expected a scalar type") ? void (0) : __assert_fail
("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic read expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3672, __extension__
__PRETTY_FUNCTION__))
3671 XElemTy->isPointerTy()) &&(static_cast <bool> ((XElemTy->isFloatingPointTy() ||
XElemTy->isIntegerTy() || XElemTy->isPointerTy()) &&
"OMP atomic read expected a scalar type") ? void (0) : __assert_fail
("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic read expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3672, __extension__
__PRETTY_FUNCTION__))
3672 "OMP atomic read expected a scalar type")(static_cast <bool> ((XElemTy->isFloatingPointTy() ||
XElemTy->isIntegerTy() || XElemTy->isPointerTy()) &&
"OMP atomic read expected a scalar type") ? void (0) : __assert_fail
("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic read expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3672, __extension__
__PRETTY_FUNCTION__))
;
3673
3674 Value *XRead = nullptr;
3675
3676 if (XElemTy->isIntegerTy()) {
3677 LoadInst *XLD =
3678 Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read");
3679 XLD->setAtomic(AO);
3680 XRead = cast<Value>(XLD);
3681 } else {
3682 // We need to bitcast and perform atomic op as integer
3683 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
3684 IntegerType *IntCastTy =
3685 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
3686 Value *XBCast = Builder.CreateBitCast(
3687 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast");
3688 LoadInst *XLoad =
3689 Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load");
3690 XLoad->setAtomic(AO);
3691 if (XElemTy->isFloatingPointTy()) {
3692 XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast");
3693 } else {
3694 XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast");
3695 }
3696 }
3697 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read);
3698 Builder.CreateStore(XRead, V.Var, V.IsVolatile);
3699 return Builder.saveIP();
3700}
3701
3702OpenMPIRBuilder::InsertPointTy
3703OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc,
3704 AtomicOpValue &X, Value *Expr,
3705 AtomicOrdering AO) {
3706 if (!updateToLocation(Loc))
3707 return Loc.IP;
3708
3709 Type *XTy = X.Var->getType();
3710 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory")(static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3710, __extension__
__PRETTY_FUNCTION__))
;
3711 Type *XElemTy = X.ElemTy;
3712 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||(static_cast <bool> ((XElemTy->isFloatingPointTy() ||
XElemTy->isIntegerTy() || XElemTy->isPointerTy()) &&
"OMP atomic write expected a scalar type") ? void (0) : __assert_fail
("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic write expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3714, __extension__
__PRETTY_FUNCTION__))
3713 XElemTy->isPointerTy()) &&(static_cast <bool> ((XElemTy->isFloatingPointTy() ||
XElemTy->isIntegerTy() || XElemTy->isPointerTy()) &&
"OMP atomic write expected a scalar type") ? void (0) : __assert_fail
("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic write expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3714, __extension__
__PRETTY_FUNCTION__))
3714 "OMP atomic write expected a scalar type")(static_cast <bool> ((XElemTy->isFloatingPointTy() ||
XElemTy->isIntegerTy() || XElemTy->isPointerTy()) &&
"OMP atomic write expected a scalar type") ? void (0) : __assert_fail
("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic write expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3714, __extension__
__PRETTY_FUNCTION__))
;
3715
3716 if (XElemTy->isIntegerTy()) {
3717 StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile);
3718 XSt->setAtomic(AO);
3719 } else {
3720 // We need to bitcast and perform atomic op as integers
3721 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
3722 IntegerType *IntCastTy =
3723 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
3724 Value *XBCast = Builder.CreateBitCast(
3725 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast");
3726 Value *ExprCast =
3727 Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast");
3728 StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile);
3729 XSt->setAtomic(AO);
3730 }
3731
3732 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write);
3733 return Builder.saveIP();
3734}
3735
3736OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate(
3737 const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X,
3738 Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
3739 AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr) {
3740 assert(!isConflictIP(Loc.IP, AllocaIP) && "IPs must not be ambiguous")(static_cast <bool> (!isConflictIP(Loc.IP, AllocaIP) &&
"IPs must not be ambiguous") ? void (0) : __assert_fail ("!isConflictIP(Loc.IP, AllocaIP) && \"IPs must not be ambiguous\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3740, __extension__
__PRETTY_FUNCTION__))
;
1
'?' condition is true
3741 if (!updateToLocation(Loc))
3742 return Loc.IP;
3743
3744 LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp
!= AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::
UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
2
Taking false branch
3
Assuming 'DebugFlag' is false
4
Loop condition is false. Exiting loop
3745 Type *XTy = X.Var->getType();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp
!= AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::
UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3746 assert(XTy->isPointerTy() &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp
!= AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::
UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3747 "OMP Atomic expects a pointer to target memory");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp
!= AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::
UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3748 Type *XElemTy = X.ElemTy;do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp
!= AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::
UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3749 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp
!= AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::
UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3750 XElemTy->isPointerTy()) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp
!= AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::
UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3751 "OMP atomic update expected a scalar type");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp
!= AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::
UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3752 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp
!= AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::
UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3753 (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp
!= AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::
UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3754 "OpenMP atomic does not support LT or GT operations");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp
!= AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::
UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3755 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp
!= AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::
UMin) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
;
3756
3757 emitAtomicUpdate(AllocaIP, X.Var, X.ElemTy, Expr, AO, RMWOp, UpdateOp,
5
Calling 'OpenMPIRBuilder::emitAtomicUpdate'
3758 X.IsVolatile, IsXBinopExpr);
3759 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update);
3760 return Builder.saveIP();
3761}
3762
3763Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2,
3764 AtomicRMWInst::BinOp RMWOp) {
3765 switch (RMWOp) {
3766 case AtomicRMWInst::Add:
3767 return Builder.CreateAdd(Src1, Src2);
3768 case AtomicRMWInst::Sub:
3769 return Builder.CreateSub(Src1, Src2);
3770 case AtomicRMWInst::And:
3771 return Builder.CreateAnd(Src1, Src2);
3772 case AtomicRMWInst::Nand:
3773 return Builder.CreateNeg(Builder.CreateAnd(Src1, Src2));
3774 case AtomicRMWInst::Or:
3775 return Builder.CreateOr(Src1, Src2);
3776 case AtomicRMWInst::Xor:
3777 return Builder.CreateXor(Src1, Src2);
3778 case AtomicRMWInst::Xchg:
3779 case AtomicRMWInst::FAdd:
3780 case AtomicRMWInst::FSub:
3781 case AtomicRMWInst::BAD_BINOP:
3782 case AtomicRMWInst::Max:
3783 case AtomicRMWInst::Min:
3784 case AtomicRMWInst::UMax:
3785 case AtomicRMWInst::UMin:
3786 llvm_unreachable("Unsupported atomic update operation")::llvm::llvm_unreachable_internal("Unsupported atomic update operation"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3786)
;
3787 }
3788 llvm_unreachable("Unsupported atomic update operation")::llvm::llvm_unreachable_internal("Unsupported atomic update operation"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3788)
;
3789}
3790
3791std::pair<Value *, Value *> OpenMPIRBuilder::emitAtomicUpdate(
3792 InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
3793 AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
3794 AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXBinopExpr) {
3795 // TODO: handle the case where XElemTy is not byte-sized or not a power of 2
3796 // or a complex datatype.
3797 bool emitRMWOp = false;
3798 switch (RMWOp) {
6
Control jumps to the 'default' case at line 3810
3799 case AtomicRMWInst::Add:
3800 case AtomicRMWInst::And:
3801 case AtomicRMWInst::Nand:
3802 case AtomicRMWInst::Or:
3803 case AtomicRMWInst::Xor:
3804 case AtomicRMWInst::Xchg:
3805 emitRMWOp = XElemTy;
3806 break;
3807 case AtomicRMWInst::Sub:
3808 emitRMWOp = (IsXBinopExpr && XElemTy);
3809 break;
3810 default:
3811 emitRMWOp = false;
3812 }
3813 emitRMWOp &= XElemTy->isIntegerTy();
7
Calling 'Type::isIntegerTy'
10
Returning from 'Type::isIntegerTy'
3814
3815 std::pair<Value *, Value *> Res;
3816 if (emitRMWOp
10.1
'emitRMWOp' is false
10.1
'emitRMWOp' is false
10.1
'emitRMWOp' is false
) {
11
Taking false branch
3817 Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO);
3818 // not needed except in case of postfix captures. Generate anyway for
3819 // consistency with the else part. Will be removed with any DCE pass.
3820 // AtomicRMWInst::Xchg does not have a coressponding instruction.
3821 if (RMWOp == AtomicRMWInst::Xchg)
3822 Res.second = Res.first;
3823 else
3824 Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp);
3825 } else {
3826 unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace();
12
The object is a 'PointerType'
3827 IntegerType *IntCastTy =
3828 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
3829 Value *XBCast =
3830 Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
3831 LoadInst *OldVal =
3832 Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load");
3833 OldVal->setAtomic(AO);
3834 // CurBB
3835 // | /---\
3836 // ContBB |
3837 // | \---/
3838 // ExitBB
3839 BasicBlock *CurBB = Builder.GetInsertBlock();
3840 Instruction *CurBBTI = CurBB->getTerminator();
3841 CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable();
13
Assuming 'CurBBTI' is null
14
'?' condition is false
3842 BasicBlock *ExitBB =
3843 CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit");
3844 BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(),
3845 X->getName() + ".atomic.cont");
3846 ContBB->getTerminator()->eraseFromParent();
3847 Builder.restoreIP(AllocaIP);
3848 AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy);
3849 NewAtomicAddr->setName(X->getName() + "x.new.val");
3850 Builder.SetInsertPoint(ContBB);
3851 llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2);
3852 PHI->addIncoming(OldVal, CurBB);
3853 IntegerType *NewAtomicCastTy =
3854 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
3855 bool IsIntTy = XElemTy->isIntegerTy();
3856 Value *NewAtomicIntAddr =
3857 (IsIntTy
14.1
'IsIntTy' is false
14.1
'IsIntTy' is false
14.1
'IsIntTy' is false
)
15
'?' condition is false
3858 ? NewAtomicAddr
3859 : Builder.CreateBitCast(NewAtomicAddr,
3860 NewAtomicCastTy->getPointerTo(Addrspace));
3861 Value *OldExprVal = PHI;
3862 if (!IsIntTy
15.1
'IsIntTy' is false
15.1
'IsIntTy' is false
15.1
'IsIntTy' is false
) {
16
Taking true branch
3863 if (XElemTy->isFloatingPointTy()) {
17
Taking true branch
3864 OldExprVal = Builder.CreateBitCast(PHI, XElemTy,
3865 X->getName() + ".atomic.fltCast");
3866 } else {
3867 OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy,
3868 X->getName() + ".atomic.ptrCast");
3869 }
3870 }
3871
3872 Value *Upd = UpdateOp(OldExprVal, Builder);
3873 Builder.CreateStore(Upd, NewAtomicAddr);
3874 LoadInst *DesiredVal = Builder.CreateLoad(IntCastTy, NewAtomicIntAddr);
3875 Value *XAddr =
3876 (IsIntTy
17.1
'IsIntTy' is false
17.1
'IsIntTy' is false
17.1
'IsIntTy' is false
)
18
'?' condition is false
3877 ? X
3878 : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
3879 AtomicOrdering Failure =
3880 llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
3881 AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg(
3882 XAddr, PHI, DesiredVal, llvm::MaybeAlign(), AO, Failure);
3883 Result->setVolatile(VolatileX);
3884 Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0);
3885 Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1);
3886 PHI->addIncoming(PreviousVal, Builder.GetInsertBlock());
3887 Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB);
3888
3889 Res.first = OldExprVal;
3890 Res.second = Upd;
3891
3892 // set Insertion point in exit block
3893 if (UnreachableInst *ExitTI
20.1
'ExitTI' is null
20.1
'ExitTI' is null
20.1
'ExitTI' is null
=
20
'ExitTI' initialized to a null pointer value
21
Taking false branch
3894 dyn_cast<UnreachableInst>(ExitBB->getTerminator())) {
19
Assuming the object is not a 'UnreachableInst'
3895 CurBBTI->eraseFromParent();
3896 Builder.SetInsertPoint(ExitBB);
3897 } else {
3898 Builder.SetInsertPoint(ExitTI);
22
Passing null pointer value via 1st parameter 'I'
23
Calling 'IRBuilderBase::SetInsertPoint'
3899 }
3900 }
3901
3902 return Res;
3903}
3904
3905OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture(
3906 const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X,
3907 AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
3908 AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
3909 bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr) {
3910 if (!updateToLocation(Loc))
3911 return Loc.IP;
3912
3913 LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3916, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3920, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3922, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3914 Type *XTy = X.Var->getType();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3916, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3920, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3922, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3915 assert(XTy->isPointerTy() &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3916, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3920, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3922, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3916 "OMP Atomic expects a pointer to target memory");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3916, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3920, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3922, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3917 Type *XElemTy = X.ElemTy;do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3916, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3920, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3922, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3918 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3916, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3920, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3922, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3919 XElemTy->isPointerTy()) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3916, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3920, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3922, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3920 "OMP atomic capture expected a scalar type");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3916, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3920, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3922, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3921 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3916, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3920, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3922, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3922 "OpenMP atomic does not support LT or GT operations");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3916, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3920, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3922, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
3923 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); (
static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3916, __extension__
__PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast
<bool> ((XElemTy->isFloatingPointTy() || XElemTy->
isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic capture expected a scalar type"
) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic capture expected a scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3920, __extension__
__PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst
::Max) && (RMWOp != AtomicRMWInst::Min) && "OpenMP atomic does not support LT or GT operations"
) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && \"OpenMP atomic does not support LT or GT operations\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3922, __extension__
__PRETTY_FUNCTION__)); }; } } while (false)
;
3924
3925 // If UpdateExpr is 'x' updated with some `expr` not based on 'x',
3926 // 'x' is simply atomically rewritten with 'expr'.
3927 AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg);
3928 std::pair<Value *, Value *> Result =
3929 emitAtomicUpdate(AllocaIP, X.Var, X.ElemTy, Expr, AO, AtomicOp, UpdateOp,
3930 X.IsVolatile, IsXBinopExpr);
3931
3932 Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second);
3933 Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile);
3934
3935 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture);
3936 return Builder.saveIP();
3937}
3938
3939OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCompare(
3940 const LocationDescription &Loc, AtomicOpValue &X, Value *E, Value *D,
3941 AtomicOrdering AO, OMPAtomicCompareOp Op, bool IsXBinopExpr) {
3942 if (!updateToLocation(Loc))
3943 return Loc.IP;
3944
3945 assert(X.Var->getType()->isPointerTy() &&(static_cast <bool> (X.Var->getType()->isPointerTy
() && "OMP atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("X.Var->getType()->isPointerTy() && \"OMP atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3946, __extension__
__PRETTY_FUNCTION__))
3946 "OMP atomic expects a pointer to target memory")(static_cast <bool> (X.Var->getType()->isPointerTy
() && "OMP atomic expects a pointer to target memory"
) ? void (0) : __assert_fail ("X.Var->getType()->isPointerTy() && \"OMP atomic expects a pointer to target memory\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3946, __extension__
__PRETTY_FUNCTION__))
;
3947 assert((X.ElemTy->isIntegerTy() || X.ElemTy->isPointerTy()) &&(static_cast <bool> ((X.ElemTy->isIntegerTy() || X.ElemTy
->isPointerTy()) && "OMP atomic compare expected a integer scalar type"
) ? void (0) : __assert_fail ("(X.ElemTy->isIntegerTy() || X.ElemTy->isPointerTy()) && \"OMP atomic compare expected a integer scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3948, __extension__
__PRETTY_FUNCTION__))
3948 "OMP atomic compare expected a integer scalar type")(static_cast <bool> ((X.ElemTy->isIntegerTy() || X.ElemTy
->isPointerTy()) && "OMP atomic compare expected a integer scalar type"
) ? void (0) : __assert_fail ("(X.ElemTy->isIntegerTy() || X.ElemTy->isPointerTy()) && \"OMP atomic compare expected a integer scalar type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3948, __extension__
__PRETTY_FUNCTION__))
;
3949
3950 if (Op == OMPAtomicCompareOp::EQ) {
3951 AtomicOrdering Failure = AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
3952 // We don't need the result for now.
3953 (void)Builder.CreateAtomicCmpXchg(X.Var, E, D, MaybeAlign(), AO, Failure);
3954 } else {
3955 assert((Op == OMPAtomicCompareOp::MAX || Op == OMPAtomicCompareOp::MIN) &&(static_cast <bool> ((Op == OMPAtomicCompareOp::MAX || Op
== OMPAtomicCompareOp::MIN) && "Op should be either max or min at this point"
) ? void (0) : __assert_fail ("(Op == OMPAtomicCompareOp::MAX || Op == OMPAtomicCompareOp::MIN) && \"Op should be either max or min at this point\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3956, __extension__
__PRETTY_FUNCTION__))
3956 "Op should be either max or min at this point")(static_cast <bool> ((Op == OMPAtomicCompareOp::MAX || Op
== OMPAtomicCompareOp::MIN) && "Op should be either max or min at this point"
) ? void (0) : __assert_fail ("(Op == OMPAtomicCompareOp::MAX || Op == OMPAtomicCompareOp::MIN) && \"Op should be either max or min at this point\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3956, __extension__
__PRETTY_FUNCTION__))
;
3957
3958 // Reverse the ordop as the OpenMP forms are different from LLVM forms.
3959 // Let's take max as example.
3960 // OpenMP form:
3961 // x = x > expr ? expr : x;
3962 // LLVM form:
3963 // *ptr = *ptr > val ? *ptr : val;
3964 // We need to transform to LLVM form.
3965 // x = x <= expr ? x : expr;
3966 AtomicRMWInst::BinOp NewOp;
3967 if (IsXBinopExpr) {
3968 if (X.IsSigned)
3969 NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::Min
3970 : AtomicRMWInst::Max;
3971 else
3972 NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::UMin
3973 : AtomicRMWInst::UMax;
3974 } else {
3975 if (X.IsSigned)
3976 NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::Max
3977 : AtomicRMWInst::Min;
3978 else
3979 NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::UMax
3980 : AtomicRMWInst::UMin;
3981 }
3982 // We dont' need the result for now.
3983 (void)Builder.CreateAtomicRMW(NewOp, X.Var, E, MaybeAlign(), AO);
3984 }
3985
3986 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Compare);
3987
3988 return Builder.saveIP();
3989}
3990
3991GlobalVariable *
3992OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
3993 std::string VarName) {
3994 llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get(
3995 llvm::ArrayType::get(
3996 llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()),
3997 Names);
3998 auto *MapNamesArrayGlobal = new llvm::GlobalVariable(
3999 M, MapNamesArrayInit->getType(),
4000 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MapNamesArrayInit,
4001 VarName);
4002 return MapNamesArrayGlobal;
4003}
4004
4005// Create all simple and struct types exposed by the runtime and remember
4006// the llvm::PointerTypes of them for easy access later.
void OpenMPIRBuilder::initializeTypes(Module &M) {
  LLVMContext &Ctx = M.getContext();
  // Scratch variable reused by every OMP_STRUCT_TYPE expansion below.
  StructType *T;
// Simple types: member is assigned its init value from OMPKinds.def.
#define OMP_TYPE(VarName, InitValue) VarName = InitValue;
// Array types: also cache an unqualified pointer-to-array type.
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
  VarName##Ty = ArrayType::get(ElemTy, ArraySize); \
  VarName##PtrTy = PointerType::getUnqual(VarName##Ty);
// Function types: also cache a pointer-to-function type.
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
  VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg); \
  VarName##Ptr = PointerType::getUnqual(VarName);
// Struct types: look up by name first so an existing definition already in
// the module is reused instead of creating a duplicate.
#define OMP_STRUCT_TYPE(VarName, StructName, ...) \
  T = StructType::getTypeByName(Ctx, StructName); \
  if (!T) \
    T = StructType::create(Ctx, {__VA_ARGS__}, StructName); \
  VarName = T; \
  VarName##Ptr = PointerType::getUnqual(T);
#include "llvm/Frontend/OpenMP/OMPKinds.def"
}
4025
4026void OpenMPIRBuilder::OutlineInfo::collectBlocks(
4027 SmallPtrSetImpl<BasicBlock *> &BlockSet,
4028 SmallVectorImpl<BasicBlock *> &BlockVector) {
4029 SmallVector<BasicBlock *, 32> Worklist;
4030 BlockSet.insert(EntryBB);
4031 BlockSet.insert(ExitBB);
4032
4033 Worklist.push_back(EntryBB);
4034 while (!Worklist.empty()) {
4035 BasicBlock *BB = Worklist.pop_back_val();
4036 BlockVector.push_back(BB);
4037 for (BasicBlock *SuccBB : successors(BB))
4038 if (BlockSet.insert(SuccBB).second)
4039 Worklist.push_back(SuccBB);
4040 }
4041}
4042
4043void CanonicalLoopInfo::collectControlBlocks(
4044 SmallVectorImpl<BasicBlock *> &BBs) {
4045 // We only count those BBs as control block for which we do not need to
4046 // reverse the CFG, i.e. not the loop body which can contain arbitrary control
4047 // flow. For consistency, this also means we do not add the Body block, which
4048 // is just the entry to the body code.
4049 BBs.reserve(BBs.size() + 6);
4050 BBs.append({getPreheader(), Header, Cond, Latch, Exit, getAfter()});
4051}
4052
4053BasicBlock *CanonicalLoopInfo::getPreheader() const {
4054 assert(isValid() && "Requires a valid canonical loop")(static_cast <bool> (isValid() && "Requires a valid canonical loop"
) ? void (0) : __assert_fail ("isValid() && \"Requires a valid canonical loop\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4054, __extension__
__PRETTY_FUNCTION__))
;
4055 for (BasicBlock *Pred : predecessors(Header)) {
4056 if (Pred != Latch)
4057 return Pred;
4058 }
4059 llvm_unreachable("Missing preheader")::llvm::llvm_unreachable_internal("Missing preheader", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 4059)
;
4060}
4061
4062void CanonicalLoopInfo::setTripCount(Value *TripCount) {
4063 assert(isValid() && "Requires a valid canonical loop")(static_cast <bool> (isValid() && "Requires a valid canonical loop"
) ? void (0) : __assert_fail ("isValid() && \"Requires a valid canonical loop\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4063, __extension__
__PRETTY_FUNCTION__))
;
4064
4065 Instruction *CmpI = &getCond()->front();
4066 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount")(static_cast <bool> (isa<CmpInst>(CmpI) &&
"First inst must compare IV with TripCount") ? void (0) : __assert_fail
("isa<CmpInst>(CmpI) && \"First inst must compare IV with TripCount\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4066, __extension__
__PRETTY_FUNCTION__))
;
4067 CmpI->setOperand(1, TripCount);
4068
4069#ifndef NDEBUG
4070 assertOK();
4071#endif
4072}
4073
4074void CanonicalLoopInfo::mapIndVar(
4075 llvm::function_ref<Value *(Instruction *)> Updater) {
4076 assert(isValid() && "Requires a valid canonical loop")(static_cast <bool> (isValid() && "Requires a valid canonical loop"
) ? void (0) : __assert_fail ("isValid() && \"Requires a valid canonical loop\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4076, __extension__
__PRETTY_FUNCTION__))
;
4077
4078 Instruction *OldIV = getIndVar();
4079
4080 // Record all uses excluding those introduced by the updater. Uses by the
4081 // CanonicalLoopInfo itself to keep track of the number of iterations are
4082 // excluded.
4083 SmallVector<Use *> ReplacableUses;
4084 for (Use &U : OldIV->uses()) {
4085 auto *User = dyn_cast<Instruction>(U.getUser());
4086 if (!User)
4087 continue;
4088 if (User->getParent() == getCond())
4089 continue;
4090 if (User->getParent() == getLatch())
4091 continue;
4092 ReplacableUses.push_back(&U);
4093 }
4094
4095 // Run the updater that may introduce new uses
4096 Value *NewIV = Updater(OldIV);
4097
4098 // Replace the old uses with the value returned by the updater.
4099 for (Use *U : ReplacableUses)
4100 U->set(NewIV);
4101
4102#ifndef NDEBUG
4103 assertOK();
4104#endif
4105}
4106
4107void CanonicalLoopInfo::assertOK() const {
4108#ifndef NDEBUG
4109 // No constraints if this object currently does not describe a loop.
4110 if (!isValid())
4111 return;
4112
4113 BasicBlock *Preheader = getPreheader();
4114 BasicBlock *Body = getBody();
4115 BasicBlock *After = getAfter();
4116
4117 // Verify standard control-flow we use for OpenMP loops.
4118 assert(Preheader)(static_cast <bool> (Preheader) ? void (0) : __assert_fail
("Preheader", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4118
, __extension__ __PRETTY_FUNCTION__))
;
4119 assert(isa<BranchInst>(Preheader->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Preheader->
getTerminator()) && "Preheader must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Preheader->getTerminator()) && \"Preheader must terminate with unconditional branch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4120, __extension__
__PRETTY_FUNCTION__))
4120 "Preheader must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Preheader->
getTerminator()) && "Preheader must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Preheader->getTerminator()) && \"Preheader must terminate with unconditional branch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4120, __extension__
__PRETTY_FUNCTION__))
;
4121 assert(Preheader->getSingleSuccessor() == Header &&(static_cast <bool> (Preheader->getSingleSuccessor()
== Header && "Preheader must jump to header") ? void
(0) : __assert_fail ("Preheader->getSingleSuccessor() == Header && \"Preheader must jump to header\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4122, __extension__
__PRETTY_FUNCTION__))
4122 "Preheader must jump to header")(static_cast <bool> (Preheader->getSingleSuccessor()
== Header && "Preheader must jump to header") ? void
(0) : __assert_fail ("Preheader->getSingleSuccessor() == Header && \"Preheader must jump to header\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4122, __extension__
__PRETTY_FUNCTION__))
;
4123
4124 assert(Header)(static_cast <bool> (Header) ? void (0) : __assert_fail
("Header", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4124
, __extension__ __PRETTY_FUNCTION__))
;
4125 assert(isa<BranchInst>(Header->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Header->getTerminator
()) && "Header must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Header->getTerminator()) && \"Header must terminate with unconditional branch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4126, __extension__
__PRETTY_FUNCTION__))
4126 "Header must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Header->getTerminator
()) && "Header must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Header->getTerminator()) && \"Header must terminate with unconditional branch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4126, __extension__
__PRETTY_FUNCTION__))
;
4127 assert(Header->getSingleSuccessor() == Cond &&(static_cast <bool> (Header->getSingleSuccessor() ==
Cond && "Header must jump to exiting block") ? void (
0) : __assert_fail ("Header->getSingleSuccessor() == Cond && \"Header must jump to exiting block\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4128, __extension__
__PRETTY_FUNCTION__))
4128 "Header must jump to exiting block")(static_cast <bool> (Header->getSingleSuccessor() ==
Cond && "Header must jump to exiting block") ? void (
0) : __assert_fail ("Header->getSingleSuccessor() == Cond && \"Header must jump to exiting block\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4128, __extension__
__PRETTY_FUNCTION__))
;
4129
4130 assert(Cond)(static_cast <bool> (Cond) ? void (0) : __assert_fail (
"Cond", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4130, __extension__
__PRETTY_FUNCTION__))
;
4131 assert(Cond->getSinglePredecessor() == Header &&(static_cast <bool> (Cond->getSinglePredecessor() ==
Header && "Exiting block only reachable from header"
) ? void (0) : __assert_fail ("Cond->getSinglePredecessor() == Header && \"Exiting block only reachable from header\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4132, __extension__
__PRETTY_FUNCTION__))
4132 "Exiting block only reachable from header")(static_cast <bool> (Cond->getSinglePredecessor() ==
Header && "Exiting block only reachable from header"
) ? void (0) : __assert_fail ("Cond->getSinglePredecessor() == Header && \"Exiting block only reachable from header\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4132, __extension__
__PRETTY_FUNCTION__))
;
4133
4134 assert(isa<BranchInst>(Cond->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Cond->getTerminator
()) && "Exiting block must terminate with conditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Cond->getTerminator()) && \"Exiting block must terminate with conditional branch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4135, __extension__
__PRETTY_FUNCTION__))
4135 "Exiting block must terminate with conditional branch")(static_cast <bool> (isa<BranchInst>(Cond->getTerminator
()) && "Exiting block must terminate with conditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Cond->getTerminator()) && \"Exiting block must terminate with conditional branch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4135, __extension__
__PRETTY_FUNCTION__))
;
4136 assert(size(successors(Cond)) == 2 &&(static_cast <bool> (size(successors(Cond)) == 2 &&
"Exiting block must have two successors") ? void (0) : __assert_fail
("size(successors(Cond)) == 2 && \"Exiting block must have two successors\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4137, __extension__
__PRETTY_FUNCTION__))
4137 "Exiting block must have two successors")(static_cast <bool> (size(successors(Cond)) == 2 &&
"Exiting block must have two successors") ? void (0) : __assert_fail
("size(successors(Cond)) == 2 && \"Exiting block must have two successors\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4137, __extension__
__PRETTY_FUNCTION__))
;
4138 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body &&(static_cast <bool> (cast<BranchInst>(Cond->getTerminator
())->getSuccessor(0) == Body && "Exiting block's first successor jump to the body"
) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body && \"Exiting block's first successor jump to the body\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4139, __extension__
__PRETTY_FUNCTION__))
4139 "Exiting block's first successor jump to the body")(static_cast <bool> (cast<BranchInst>(Cond->getTerminator
())->getSuccessor(0) == Body && "Exiting block's first successor jump to the body"
) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body && \"Exiting block's first successor jump to the body\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4139, __extension__
__PRETTY_FUNCTION__))
;
4140 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit &&(static_cast <bool> (cast<BranchInst>(Cond->getTerminator
())->getSuccessor(1) == Exit && "Exiting block's second successor must exit the loop"
) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit && \"Exiting block's second successor must exit the loop\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4141, __extension__
__PRETTY_FUNCTION__))
4141 "Exiting block's second successor must exit the loop")(static_cast <bool> (cast<BranchInst>(Cond->getTerminator
())->getSuccessor(1) == Exit && "Exiting block's second successor must exit the loop"
) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit && \"Exiting block's second successor must exit the loop\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4141, __extension__
__PRETTY_FUNCTION__))
;
4142
4143 assert(Body)(static_cast <bool> (Body) ? void (0) : __assert_fail (
"Body", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4143, __extension__
__PRETTY_FUNCTION__))
;
4144 assert(Body->getSinglePredecessor() == Cond &&(static_cast <bool> (Body->getSinglePredecessor() ==
Cond && "Body only reachable from exiting block") ? void
(0) : __assert_fail ("Body->getSinglePredecessor() == Cond && \"Body only reachable from exiting block\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4145, __extension__
__PRETTY_FUNCTION__))
4145 "Body only reachable from exiting block")(static_cast <bool> (Body->getSinglePredecessor() ==
Cond && "Body only reachable from exiting block") ? void
(0) : __assert_fail ("Body->getSinglePredecessor() == Cond && \"Body only reachable from exiting block\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4145, __extension__
__PRETTY_FUNCTION__))
;
4146 assert(!isa<PHINode>(Body->front()))(static_cast <bool> (!isa<PHINode>(Body->front
())) ? void (0) : __assert_fail ("!isa<PHINode>(Body->front())"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4146, __extension__
__PRETTY_FUNCTION__))
;
4147
4148 assert(Latch)(static_cast <bool> (Latch) ? void (0) : __assert_fail (
"Latch", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4148, __extension__
__PRETTY_FUNCTION__))
;
4149 assert(isa<BranchInst>(Latch->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Latch->getTerminator
()) && "Latch must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Latch->getTerminator()) && \"Latch must terminate with unconditional branch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4150, __extension__
__PRETTY_FUNCTION__))
4150 "Latch must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Latch->getTerminator
()) && "Latch must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Latch->getTerminator()) && \"Latch must terminate with unconditional branch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4150, __extension__
__PRETTY_FUNCTION__))
;
4151 assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header")(static_cast <bool> (Latch->getSingleSuccessor() == Header
&& "Latch must jump to header") ? void (0) : __assert_fail
("Latch->getSingleSuccessor() == Header && \"Latch must jump to header\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4151, __extension__
__PRETTY_FUNCTION__))
;
4152 // TODO: To support simple redirecting of the end of the body code that has
4153 // multiple; introduce another auxiliary basic block like preheader and after.
4154 assert(Latch->getSinglePredecessor() != nullptr)(static_cast <bool> (Latch->getSinglePredecessor() !=
nullptr) ? void (0) : __assert_fail ("Latch->getSinglePredecessor() != nullptr"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4154, __extension__
__PRETTY_FUNCTION__))
;
4155 assert(!isa<PHINode>(Latch->front()))(static_cast <bool> (!isa<PHINode>(Latch->front
())) ? void (0) : __assert_fail ("!isa<PHINode>(Latch->front())"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4155, __extension__
__PRETTY_FUNCTION__))
;
4156
4157 assert(Exit)(static_cast <bool> (Exit) ? void (0) : __assert_fail (
"Exit", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4157, __extension__
__PRETTY_FUNCTION__))
;
4158 assert(isa<BranchInst>(Exit->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Exit->getTerminator
()) && "Exit block must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Exit->getTerminator()) && \"Exit block must terminate with unconditional branch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4159, __extension__
__PRETTY_FUNCTION__))
4159 "Exit block must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Exit->getTerminator
()) && "Exit block must terminate with unconditional branch"
) ? void (0) : __assert_fail ("isa<BranchInst>(Exit->getTerminator()) && \"Exit block must terminate with unconditional branch\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4159, __extension__
__PRETTY_FUNCTION__))
;
4160 assert(Exit->getSingleSuccessor() == After &&(static_cast <bool> (Exit->getSingleSuccessor() == After
&& "Exit block must jump to after block") ? void (0)
: __assert_fail ("Exit->getSingleSuccessor() == After && \"Exit block must jump to after block\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4161, __extension__
__PRETTY_FUNCTION__))
4161 "Exit block must jump to after block")(static_cast <bool> (Exit->getSingleSuccessor() == After
&& "Exit block must jump to after block") ? void (0)
: __assert_fail ("Exit->getSingleSuccessor() == After && \"Exit block must jump to after block\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4161, __extension__
__PRETTY_FUNCTION__))
;
4162
4163 assert(After)(static_cast <bool> (After) ? void (0) : __assert_fail (
"After", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4163, __extension__
__PRETTY_FUNCTION__))
;
4164 assert(After->getSinglePredecessor() == Exit &&(static_cast <bool> (After->getSinglePredecessor() ==
Exit && "After block only reachable from exit block"
) ? void (0) : __assert_fail ("After->getSinglePredecessor() == Exit && \"After block only reachable from exit block\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4165, __extension__
__PRETTY_FUNCTION__))
4165 "After block only reachable from exit block")(static_cast <bool> (After->getSinglePredecessor() ==
Exit && "After block only reachable from exit block"
) ? void (0) : __assert_fail ("After->getSinglePredecessor() == Exit && \"After block only reachable from exit block\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4165, __extension__
__PRETTY_FUNCTION__))
;
4166 assert(After->empty() || !isa<PHINode>(After->front()))(static_cast <bool> (After->empty() || !isa<PHINode
>(After->front())) ? void (0) : __assert_fail ("After->empty() || !isa<PHINode>(After->front())"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4166, __extension__
__PRETTY_FUNCTION__))
;
4167
4168 Instruction *IndVar = getIndVar();
4169 assert(IndVar && "Canonical induction variable not found?")(static_cast <bool> (IndVar && "Canonical induction variable not found?"
) ? void (0) : __assert_fail ("IndVar && \"Canonical induction variable not found?\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4169, __extension__
__PRETTY_FUNCTION__))
;
4170 assert(isa<IntegerType>(IndVar->getType()) &&(static_cast <bool> (isa<IntegerType>(IndVar->
getType()) && "Induction variable must be an integer"
) ? void (0) : __assert_fail ("isa<IntegerType>(IndVar->getType()) && \"Induction variable must be an integer\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4171, __extension__
__PRETTY_FUNCTION__))
4171 "Induction variable must be an integer")(static_cast <bool> (isa<IntegerType>(IndVar->
getType()) && "Induction variable must be an integer"
) ? void (0) : __assert_fail ("isa<IntegerType>(IndVar->getType()) && \"Induction variable must be an integer\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4171, __extension__
__PRETTY_FUNCTION__))
;
4172 assert(cast<PHINode>(IndVar)->getParent() == Header &&(static_cast <bool> (cast<PHINode>(IndVar)->getParent
() == Header && "Induction variable must be a PHI in the loop header"
) ? void (0) : __assert_fail ("cast<PHINode>(IndVar)->getParent() == Header && \"Induction variable must be a PHI in the loop header\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4173, __extension__
__PRETTY_FUNCTION__))
4173 "Induction variable must be a PHI in the loop header")(static_cast <bool> (cast<PHINode>(IndVar)->getParent
() == Header && "Induction variable must be a PHI in the loop header"
) ? void (0) : __assert_fail ("cast<PHINode>(IndVar)->getParent() == Header && \"Induction variable must be a PHI in the loop header\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4173, __extension__
__PRETTY_FUNCTION__))
;
4174 assert(cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader)(static_cast <bool> (cast<PHINode>(IndVar)->getIncomingBlock
(0) == Preheader) ? void (0) : __assert_fail ("cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4174, __extension__
__PRETTY_FUNCTION__))
;
4175 assert((static_cast <bool> (cast<ConstantInt>(cast<PHINode
>(IndVar)->getIncomingValue(0))->isZero()) ? void (0
) : __assert_fail ("cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero()"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4176, __extension__
__PRETTY_FUNCTION__))
4176 cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero())(static_cast <bool> (cast<ConstantInt>(cast<PHINode
>(IndVar)->getIncomingValue(0))->isZero()) ? void (0
) : __assert_fail ("cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero()"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4176, __extension__
__PRETTY_FUNCTION__))
;
4177 assert(cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch)(static_cast <bool> (cast<PHINode>(IndVar)->getIncomingBlock
(1) == Latch) ? void (0) : __assert_fail ("cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4177, __extension__
__PRETTY_FUNCTION__))
;
4178
4179 auto *NextIndVar = cast<PHINode>(IndVar)->getIncomingValue(1);
4180 assert(cast<Instruction>(NextIndVar)->getParent() == Latch)(static_cast <bool> (cast<Instruction>(NextIndVar
)->getParent() == Latch) ? void (0) : __assert_fail ("cast<Instruction>(NextIndVar)->getParent() == Latch"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4180, __extension__
__PRETTY_FUNCTION__))
;
4181 assert(cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add)(static_cast <bool> (cast<BinaryOperator>(NextIndVar
)->getOpcode() == BinaryOperator::Add) ? void (0) : __assert_fail
("cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4181, __extension__
__PRETTY_FUNCTION__))
;
4182 assert(cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar)(static_cast <bool> (cast<BinaryOperator>(NextIndVar
)->getOperand(0) == IndVar) ? void (0) : __assert_fail ("cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4182, __extension__
__PRETTY_FUNCTION__))
;
4183 assert(cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1))(static_cast <bool> (cast<ConstantInt>(cast<BinaryOperator
>(NextIndVar)->getOperand(1)) ->isOne()) ? void (0) :
__assert_fail ("cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1)) ->isOne()"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4184, __extension__
__PRETTY_FUNCTION__))
4184 ->isOne())(static_cast <bool> (cast<ConstantInt>(cast<BinaryOperator
>(NextIndVar)->getOperand(1)) ->isOne()) ? void (0) :
__assert_fail ("cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1)) ->isOne()"
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4184, __extension__
__PRETTY_FUNCTION__))
;
4185
4186 Value *TripCount = getTripCount();
4187 assert(TripCount && "Loop trip count not found?")(static_cast <bool> (TripCount && "Loop trip count not found?"
) ? void (0) : __assert_fail ("TripCount && \"Loop trip count not found?\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4187, __extension__
__PRETTY_FUNCTION__))
;
4188 assert(IndVar->getType() == TripCount->getType() &&(static_cast <bool> (IndVar->getType() == TripCount->
getType() && "Trip count and induction variable must have the same type"
) ? void (0) : __assert_fail ("IndVar->getType() == TripCount->getType() && \"Trip count and induction variable must have the same type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4189, __extension__
__PRETTY_FUNCTION__))
4189 "Trip count and induction variable must have the same type")(static_cast <bool> (IndVar->getType() == TripCount->
getType() && "Trip count and induction variable must have the same type"
) ? void (0) : __assert_fail ("IndVar->getType() == TripCount->getType() && \"Trip count and induction variable must have the same type\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4189, __extension__
__PRETTY_FUNCTION__))
;
4190
4191 auto *CmpI = cast<CmpInst>(&Cond->front());
4192 assert(CmpI->getPredicate() == CmpInst::ICMP_ULT &&(static_cast <bool> (CmpI->getPredicate() == CmpInst
::ICMP_ULT && "Exit condition must be a signed less-than comparison"
) ? void (0) : __assert_fail ("CmpI->getPredicate() == CmpInst::ICMP_ULT && \"Exit condition must be a signed less-than comparison\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4193, __extension__
__PRETTY_FUNCTION__))
4193 "Exit condition must be a signed less-than comparison")(static_cast <bool> (CmpI->getPredicate() == CmpInst
::ICMP_ULT && "Exit condition must be a signed less-than comparison"
) ? void (0) : __assert_fail ("CmpI->getPredicate() == CmpInst::ICMP_ULT && \"Exit condition must be a signed less-than comparison\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4193, __extension__
__PRETTY_FUNCTION__))
;
4194 assert(CmpI->getOperand(0) == IndVar &&(static_cast <bool> (CmpI->getOperand(0) == IndVar &&
"Exit condition must compare the induction variable") ? void
(0) : __assert_fail ("CmpI->getOperand(0) == IndVar && \"Exit condition must compare the induction variable\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4195, __extension__
__PRETTY_FUNCTION__))
4195 "Exit condition must compare the induction variable")(static_cast <bool> (CmpI->getOperand(0) == IndVar &&
"Exit condition must compare the induction variable") ? void
(0) : __assert_fail ("CmpI->getOperand(0) == IndVar && \"Exit condition must compare the induction variable\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4195, __extension__
__PRETTY_FUNCTION__))
;
4196 assert(CmpI->getOperand(1) == TripCount &&(static_cast <bool> (CmpI->getOperand(1) == TripCount
&& "Exit condition must compare with the trip count"
) ? void (0) : __assert_fail ("CmpI->getOperand(1) == TripCount && \"Exit condition must compare with the trip count\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4197, __extension__
__PRETTY_FUNCTION__))
4197 "Exit condition must compare with the trip count")(static_cast <bool> (CmpI->getOperand(1) == TripCount
&& "Exit condition must compare with the trip count"
) ? void (0) : __assert_fail ("CmpI->getOperand(1) == TripCount && \"Exit condition must compare with the trip count\""
, "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4197, __extension__
__PRETTY_FUNCTION__))
;
4198#endif
4199}
4200
// Mark this CanonicalLoopInfo as no longer describing a live canonical loop
// by resetting its core block pointers to nullptr.
// NOTE(review): the validation asserts earlier in this file also reference
// Preheader, Body, and After blocks; those members are not reset here --
// confirm that leaving them dangling after invalidation is intentional,
// since the analyzer warning (IRBuilder.h:187, null object pointer) stems
// from code observing a nulled-out block pointer after invalidation.
4201void CanonicalLoopInfo::invalidate() {
4202 Header = nullptr;
4203 Cond = nullptr;
4204 Latch = nullptr;
4205 Exit = nullptr;
4206}

/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include/llvm/IR/Type.h

1//===- llvm/Type.h - Classes for handling data types ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the Type class. For more "Type"
10// stuff, look in DerivedTypes.h.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_TYPE_H
15#define LLVM_IR_TYPE_H
16
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/Support/CBindingWrapping.h"
19#include "llvm/Support/Casting.h"
20#include "llvm/Support/Compiler.h"
21#include "llvm/Support/ErrorHandling.h"
22#include "llvm/Support/TypeSize.h"
23#include <cassert>
24#include <cstdint>
25#include <iterator>
26
27namespace llvm {
28
29class IntegerType;
30struct fltSemantics;
31class LLVMContext;
32class PointerType;
33class raw_ostream;
34class StringRef;
35template <typename PtrType> class SmallPtrSetImpl;
36
37/// The instances of the Type class are immutable: once they are created,
38/// they are never changed. Also note that only one instance of a particular
39/// type is ever created. Thus seeing if two types are equal is a matter of
40/// doing a trivial pointer comparison. To enforce that no two equal instances
41/// are created, Type instances can only be created via static factory methods
42/// in class Type and in derived classes. Once allocated, Types are never
43/// free'd.
44///
45class Type {
46public:
47 //===--------------------------------------------------------------------===//
48 /// Definitions of all of the base types for the Type system. Based on this
49 /// value, you can cast to a class defined in DerivedTypes.h.
50 /// Note: If you add an element to this, you need to add an element to the
51 /// Type::getPrimitiveType function, or else things will break!
52 /// Also update LLVMTypeKind and LLVMGetTypeKind () in the C binding.
53 ///
54 enum TypeID {
55 // PrimitiveTypes
56 HalfTyID = 0, ///< 16-bit floating point type
57 BFloatTyID, ///< 16-bit floating point type (7-bit significand)
58 FloatTyID, ///< 32-bit floating point type
59 DoubleTyID, ///< 64-bit floating point type
60 X86_FP80TyID, ///< 80-bit floating point type (X87)
61 FP128TyID, ///< 128-bit floating point type (112-bit significand)
62 PPC_FP128TyID, ///< 128-bit floating point type (two 64-bits, PowerPC)
63 VoidTyID, ///< type with no size
64 LabelTyID, ///< Labels
65 MetadataTyID, ///< Metadata
66 X86_MMXTyID, ///< MMX vectors (64 bits, X86 specific)
67 X86_AMXTyID, ///< AMX vectors (8192 bits, X86 specific)
68 TokenTyID, ///< Tokens
69
70 // Derived types... see DerivedTypes.h file.
71 IntegerTyID, ///< Arbitrary bit width integers
72 FunctionTyID, ///< Functions
73 PointerTyID, ///< Pointers
74 StructTyID, ///< Structures
75 ArrayTyID, ///< Arrays
76 FixedVectorTyID, ///< Fixed width SIMD vector type
77 ScalableVectorTyID ///< Scalable SIMD vector type
78 };
79
80private:
81 /// This refers to the LLVMContext in which this type was uniqued.
82 LLVMContext &Context;
83
84 TypeID ID : 8; // The current base type of this type.
85 unsigned SubclassData : 24; // Space for subclasses to store data.
86 // Note that this should be synchronized with
87 // MAX_INT_BITS value in IntegerType class.
88
89protected:
90 friend class LLVMContextImpl;
91
92 explicit Type(LLVMContext &C, TypeID tid)
93 : Context(C), ID(tid), SubclassData(0) {}
94 ~Type() = default;
95
96 unsigned getSubclassData() const { return SubclassData; }
97
98 void setSubclassData(unsigned val) {
99 SubclassData = val;
100 // Ensure we don't have any accidental truncation.
101 assert(getSubclassData() == val && "Subclass data too large for field")(static_cast <bool> (getSubclassData() == val &&
"Subclass data too large for field") ? void (0) : __assert_fail
("getSubclassData() == val && \"Subclass data too large for field\""
, "llvm/include/llvm/IR/Type.h", 101, __extension__ __PRETTY_FUNCTION__
))
;
102 }
103
104 /// Keeps track of how many Type*'s there are in the ContainedTys list.
105 unsigned NumContainedTys = 0;
106
107 /// A pointer to the array of Types contained by this Type. For example, this
108 /// includes the arguments of a function type, the elements of a structure,
109 /// the pointee of a pointer, the element type of an array, etc. This pointer
110 /// may be 0 for types that don't contain other types (Integer, Double,
111 /// Float).
112 Type * const *ContainedTys = nullptr;
113
114public:
115 /// Print the current type.
116 /// Omit the type details if \p NoDetails == true.
117 /// E.g., let %st = type { i32, i16 }
118 /// When \p NoDetails is true, we only print %st.
119 /// Put differently, \p NoDetails prints the type as if
120 /// inlined with the operands when printing an instruction.
121 void print(raw_ostream &O, bool IsForDebug = false,
122 bool NoDetails = false) const;
123
124 void dump() const;
125
126 /// Return the LLVMContext in which this type was uniqued.
127 LLVMContext &getContext() const { return Context; }
128
129 //===--------------------------------------------------------------------===//
130 // Accessors for working with types.
131 //
132
133 /// Return the type id for the type. This will return one of the TypeID enum
134 /// elements defined above.
135 TypeID getTypeID() const { return ID; }
136
137 /// Return true if this is 'void'.
138 bool isVoidTy() const { return getTypeID() == VoidTyID; }
139
140 /// Return true if this is 'half', a 16-bit IEEE fp type.
141 bool isHalfTy() const { return getTypeID() == HalfTyID; }
142
143 /// Return true if this is 'bfloat', a 16-bit bfloat type.
144 bool isBFloatTy() const { return getTypeID() == BFloatTyID; }
145
146 /// Return true if this is 'float', a 32-bit IEEE fp type.
147 bool isFloatTy() const { return getTypeID() == FloatTyID; }
148
149 /// Return true if this is 'double', a 64-bit IEEE fp type.
150 bool isDoubleTy() const { return getTypeID() == DoubleTyID; }
151
152 /// Return true if this is x86 long double.
153 bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; }
154
155 /// Return true if this is 'fp128'.
156 bool isFP128Ty() const { return getTypeID() == FP128TyID; }
157
158 /// Return true if this is powerpc long double.
159 bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; }
160
161 /// Return true if this is one of the six floating-point types
162 bool isFloatingPointTy() const {
163 return getTypeID() == HalfTyID || getTypeID() == BFloatTyID ||
164 getTypeID() == FloatTyID || getTypeID() == DoubleTyID ||
165 getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID ||
166 getTypeID() == PPC_FP128TyID;
167 }
168
169 const fltSemantics &getFltSemantics() const;
170
171 /// Return true if this is X86 MMX.
172 bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; }
173
174 /// Return true if this is X86 AMX.
175 bool isX86_AMXTy() const { return getTypeID() == X86_AMXTyID; }
176
177 /// Return true if this is a FP type or a vector of FP.
178 bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }
179
180 /// Return true if this is 'label'.
181 bool isLabelTy() const { return getTypeID() == LabelTyID; }
182
183 /// Return true if this is 'metadata'.
184 bool isMetadataTy() const { return getTypeID() == MetadataTyID; }
185
186 /// Return true if this is 'token'.
187 bool isTokenTy() const { return getTypeID() == TokenTyID; }
188
189 /// True if this is an instance of IntegerType.
190 bool isIntegerTy() const { return getTypeID() == IntegerTyID; }
8
Assuming the condition is false
9
Returning zero, which participates in a condition later
191
192 /// Return true if this is an IntegerType of the given width.
193 bool isIntegerTy(unsigned Bitwidth) const;
194
195 /// Return true if this is an integer type or a vector of integer types.
196 bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }
197
198 /// Return true if this is an integer type or a vector of integer types of
199 /// the given width.
200 bool isIntOrIntVectorTy(unsigned BitWidth) const {
201 return getScalarType()->isIntegerTy(BitWidth);
202 }
203
204 /// Return true if this is an integer type or a pointer type.
205 bool isIntOrPtrTy() const { return isIntegerTy() || isPointerTy(); }
206
207 /// True if this is an instance of FunctionType.
208 bool isFunctionTy() const { return getTypeID() == FunctionTyID; }
209
210 /// True if this is an instance of StructType.
211 bool isStructTy() const { return getTypeID() == StructTyID; }
212
213 /// True if this is an instance of ArrayType.
214 bool isArrayTy() const { return getTypeID() == ArrayTyID; }
215
216 /// True if this is an instance of PointerType.
217 bool isPointerTy() const { return getTypeID() == PointerTyID; }
218
219 /// True if this is an instance of an opaque PointerType.
220 bool isOpaquePointerTy() const;
221
222 /// Return true if this is a pointer type or a vector of pointer types.
223 bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }
224
225 /// True if this is an instance of VectorType.
226 inline bool isVectorTy() const {
227 return getTypeID() == ScalableVectorTyID || getTypeID() == FixedVectorTyID;
228 }
229
230 /// Return true if this type could be converted with a lossless BitCast to
231 /// type 'Ty'. For example, i8* to i32*. BitCasts are valid for types of the
232 /// same size only where no re-interpretation of the bits is done.
233 /// Determine if this type could be losslessly bitcast to Ty
234 bool canLosslesslyBitCastTo(Type *Ty) const;
235
236 /// Return true if this type is empty, that is, it has no elements or all of
237 /// its elements are empty.
238 bool isEmptyTy() const;
239
240 /// Return true if the type is "first class", meaning it is a valid type for a
241 /// Value.
242 bool isFirstClassType() const {
243 return getTypeID() != FunctionTyID && getTypeID() != VoidTyID;
244 }
245
246 /// Return true if the type is a valid type for a register in codegen. This
247 /// includes all first-class types except struct and array types.
248 bool isSingleValueType() const {
249 return isFloatingPointTy() || isX86_MMXTy() || isIntegerTy() ||
250 isPointerTy() || isVectorTy() || isX86_AMXTy();
251 }
252
253 /// Return true if the type is an aggregate type. This means it is valid as
254 /// the first operand of an insertvalue or extractvalue instruction. This
255 /// includes struct and array types, but does not include vector types.
256 bool isAggregateType() const {
257 return getTypeID() == StructTyID || getTypeID() == ArrayTyID;
258 }
259
260 /// Return true if it makes sense to take the size of this type. To get the
261 /// actual size for a particular target, it is reasonable to use the
262 /// DataLayout subsystem to do this.
263 bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const {
264 // If it's a primitive, it is always sized.
265 if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
266 getTypeID() == PointerTyID || getTypeID() == X86_MMXTyID ||
267 getTypeID() == X86_AMXTyID)
268 return true;
269 // If it is not something that can have a size (e.g. a function or label),
270 // it doesn't have a size.
271 if (getTypeID() != StructTyID && getTypeID() != ArrayTyID && !isVectorTy())
272 return false;
273 // Otherwise we have to try harder to decide.
274 return isSizedDerivedType(Visited);
275 }
276
277 /// Return the basic size of this type if it is a primitive type. These are
278 /// fixed by LLVM and are not target-dependent.
279 /// This will return zero if the type does not have a size or is not a
280 /// primitive type.
281 ///
282 /// If this is a scalable vector type, the scalable property will be set and
283 /// the runtime size will be a positive integer multiple of the base size.
284 ///
285 /// Note that this may not reflect the size of memory allocated for an
286 /// instance of the type or the number of bytes that are written when an
287 /// instance of the type is stored to memory. The DataLayout class provides
288 /// additional query functions to provide this information.
289 ///
290 TypeSize getPrimitiveSizeInBits() const LLVM_READONLY__attribute__((__pure__));
291
292 /// If this is a vector type, return the getPrimitiveSizeInBits value for the
293 /// element type. Otherwise return the getPrimitiveSizeInBits value for this
294 /// type.
295 unsigned getScalarSizeInBits() const LLVM_READONLY__attribute__((__pure__));
296
297 /// Return the width of the mantissa of this type. This is only valid on
298 /// floating-point types. If the FP type does not have a stable mantissa (e.g.
299 /// ppc long double), this method returns -1.
300 int getFPMantissaWidth() const;
301
302 /// Return whether the type is IEEE compatible, as defined by the eponymous
303 /// method in APFloat.
304 bool isIEEE() const;
305
306 /// If this is a vector type, return the element type, otherwise return
307 /// 'this'.
308 inline Type *getScalarType() const {
309 if (isVectorTy())
310 return getContainedType(0);
311 return const_cast<Type *>(this);
312 }
313
314 //===--------------------------------------------------------------------===//
315 // Type Iteration support.
316 //
317 using subtype_iterator = Type * const *;
318
319 subtype_iterator subtype_begin() const { return ContainedTys; }
320 subtype_iterator subtype_end() const { return &ContainedTys[NumContainedTys];}
321 ArrayRef<Type*> subtypes() const {
322 return makeArrayRef(subtype_begin(), subtype_end());
323 }
324
325 using subtype_reverse_iterator = std::reverse_iterator<subtype_iterator>;
326
327 subtype_reverse_iterator subtype_rbegin() const {
328 return subtype_reverse_iterator(subtype_end());
329 }
330 subtype_reverse_iterator subtype_rend() const {
331 return subtype_reverse_iterator(subtype_begin());
332 }
333
334 /// This method is used to implement the type iterator (defined at the end of
335 /// the file). For derived types, this returns the types 'contained' in the
336 /// derived type.
337 Type *getContainedType(unsigned i) const {
338 assert(i < NumContainedTys && "Index out of range!")(static_cast <bool> (i < NumContainedTys && "Index out of range!"
) ? void (0) : __assert_fail ("i < NumContainedTys && \"Index out of range!\""
, "llvm/include/llvm/IR/Type.h", 338, __extension__ __PRETTY_FUNCTION__
))
;
339 return ContainedTys[i];
340 }
341
342 /// Return the number of types in the derived type.
343 unsigned getNumContainedTypes() const { return NumContainedTys; }
344
345 //===--------------------------------------------------------------------===//
346 // Helper methods corresponding to subclass methods. This forces a cast to
347 // the specified subclass and calls its accessor. "getArrayNumElements" (for
348 // example) is shorthand for cast<ArrayType>(Ty)->getNumElements(). This is
349 // only intended to cover the core methods that are frequently used, helper
350 // methods should not be added here.
351
352 inline unsigned getIntegerBitWidth() const;
353
354 inline Type *getFunctionParamType(unsigned i) const;
355 inline unsigned getFunctionNumParams() const;
356 inline bool isFunctionVarArg() const;
357
358 inline StringRef getStructName() const;
359 inline unsigned getStructNumElements() const;
360 inline Type *getStructElementType(unsigned N) const;
361
362 inline uint64_t getArrayNumElements() const;
363
364 Type *getArrayElementType() const {
365 assert(getTypeID() == ArrayTyID)(static_cast <bool> (getTypeID() == ArrayTyID) ? void (
0) : __assert_fail ("getTypeID() == ArrayTyID", "llvm/include/llvm/IR/Type.h"
, 365, __extension__ __PRETTY_FUNCTION__))
;
366 return ContainedTys[0];
367 }
368
369 /// This method is deprecated without replacement. Pointer element types are
370 /// not available with opaque pointers.
371 [[deprecated("Deprecated without replacement, see "
372 "https://llvm.org/docs/OpaquePointers.html for context and "
373 "migration instructions")]]
374 Type *getPointerElementType() const {
375 return getNonOpaquePointerElementType();
376 }
377
378 /// Only use this method in code that is not reachable with opaque pointers,
379 /// or part of deprecated methods that will be removed as part of the opaque
380 /// pointers transition.
381 Type *getNonOpaquePointerElementType() const {
382 assert(getTypeID() == PointerTyID)(static_cast <bool> (getTypeID() == PointerTyID) ? void
(0) : __assert_fail ("getTypeID() == PointerTyID", "llvm/include/llvm/IR/Type.h"
, 382, __extension__ __PRETTY_FUNCTION__))
;
383 assert(NumContainedTys &&(static_cast <bool> (NumContainedTys && "Attempting to get element type of opaque pointer"
) ? void (0) : __assert_fail ("NumContainedTys && \"Attempting to get element type of opaque pointer\""
, "llvm/include/llvm/IR/Type.h", 384, __extension__ __PRETTY_FUNCTION__
))
384 "Attempting to get element type of opaque pointer")(static_cast <bool> (NumContainedTys && "Attempting to get element type of opaque pointer"
) ? void (0) : __assert_fail ("NumContainedTys && \"Attempting to get element type of opaque pointer\""
, "llvm/include/llvm/IR/Type.h", 384, __extension__ __PRETTY_FUNCTION__
))
;
385 return ContainedTys[0];
386 }
387
388 /// Given vector type, change the element type,
389 /// whilst keeping the old number of elements.
390 /// For non-vectors simply returns \p EltTy.
391 inline Type *getWithNewType(Type *EltTy) const;
392
393 /// Given an integer or vector type, change the lane bitwidth to NewBitwidth,
394 /// whilst keeping the old number of lanes.
395 inline Type *getWithNewBitWidth(unsigned NewBitWidth) const;
396
397 /// Given scalar/vector integer type, returns a type with elements twice as
398 /// wide as in the original type. For vectors, preserves element count.
399 inline Type *getExtendedType() const;
400
401 /// Get the address space of this pointer or pointer vector type.
402 inline unsigned getPointerAddressSpace() const;
403
404 //===--------------------------------------------------------------------===//
405 // Static members exported by the Type class itself. Useful for getting
406 // instances of Type.
407 //
408
409 /// Return a type based on an identifier.
410 static Type *getPrimitiveType(LLVMContext &C, TypeID IDNumber);
411
412 //===--------------------------------------------------------------------===//
413 // These are the builtin types that are always available.
414 //
415 static Type *getVoidTy(LLVMContext &C);
416 static Type *getLabelTy(LLVMContext &C);
417 static Type *getHalfTy(LLVMContext &C);
418 static Type *getBFloatTy(LLVMContext &C);
419 static Type *getFloatTy(LLVMContext &C);
420 static Type *getDoubleTy(LLVMContext &C);
421 static Type *getMetadataTy(LLVMContext &C);
422 static Type *getX86_FP80Ty(LLVMContext &C);
423 static Type *getFP128Ty(LLVMContext &C);
424 static Type *getPPC_FP128Ty(LLVMContext &C);
425 static Type *getX86_MMXTy(LLVMContext &C);
426 static Type *getX86_AMXTy(LLVMContext &C);
427 static Type *getTokenTy(LLVMContext &C);
428 static IntegerType *getIntNTy(LLVMContext &C, unsigned N);
429 static IntegerType *getInt1Ty(LLVMContext &C);
430 static IntegerType *getInt8Ty(LLVMContext &C);
431 static IntegerType *getInt16Ty(LLVMContext &C);
432 static IntegerType *getInt32Ty(LLVMContext &C);
433 static IntegerType *getInt64Ty(LLVMContext &C);
434 static IntegerType *getInt128Ty(LLVMContext &C);
435 template <typename ScalarTy> static Type *getScalarTy(LLVMContext &C) {
436 int noOfBits = sizeof(ScalarTy) * CHAR_BIT8;
437 if (std::is_integral<ScalarTy>::value) {
438 return (Type*) Type::getIntNTy(C, noOfBits);
439 } else if (std::is_floating_point<ScalarTy>::value) {
440 switch (noOfBits) {
441 case 32:
442 return Type::getFloatTy(C);
443 case 64:
444 return Type::getDoubleTy(C);
445 }
446 }
447 llvm_unreachable("Unsupported type in Type::getScalarTy")::llvm::llvm_unreachable_internal("Unsupported type in Type::getScalarTy"
, "llvm/include/llvm/IR/Type.h", 447)
;
448 }
449 static Type *getFloatingPointTy(LLVMContext &C, const fltSemantics &S);
450
451 //===--------------------------------------------------------------------===//
452 // Convenience methods for getting pointer types with one of the above builtin
453 // types as pointee.
454 //
455 static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0);
456 static PointerType *getBFloatPtrTy(LLVMContext &C, unsigned AS = 0);
457 static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0);
458 static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0);
459 static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
460 static PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0);
461 static PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0);
462 static PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0);
463 static PointerType *getX86_AMXPtrTy(LLVMContext &C, unsigned AS = 0);
464 static PointerType *getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS = 0);
465 static PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0);
466 static PointerType *getInt8PtrTy(LLVMContext &C, unsigned AS = 0);
467 static PointerType *getInt16PtrTy(LLVMContext &C, unsigned AS = 0);
468 static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0);
469 static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0);
470
471 /// Return a pointer to the current type. This is equivalent to
472 /// PointerType::get(Foo, AddrSpace).
473 /// TODO: Remove this after opaque pointer transition is complete.
474 PointerType *getPointerTo(unsigned AddrSpace = 0) const;
475
476private:
477 /// Derived types like structures and arrays are sized iff all of the members
478 /// of the type are sized as well. Since asking for their size is relatively
479 /// uncommon, move this operation out-of-line.
480 bool isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited = nullptr) const;
481};
482
483// Printing of types.
484inline raw_ostream &operator<<(raw_ostream &OS, const Type &T) {
485 T.print(OS);
486 return OS;
487}
488
489// allow isa<PointerType>(x) to work without DerivedTypes.h included.
490template <> struct isa_impl<PointerType, Type> {
491 static inline bool doit(const Type &Ty) {
492 return Ty.getTypeID() == Type::PointerTyID;
493 }
494};
495
496// Create wrappers for C Binding types (see CBindingWrapping.h).
497DEFINE_ISA_CONVERSION_FUNCTIONS(Type, LLVMTypeRef)inline Type *unwrap(LLVMTypeRef P) { return reinterpret_cast<
Type*>(P); } inline LLVMTypeRef wrap(const Type *P) { return
reinterpret_cast<LLVMTypeRef>(const_cast<Type*>(
P)); } template<typename T> inline T *unwrap(LLVMTypeRef
P) { return cast<T>(unwrap(P)); }
498
499/* Specialized opaque type conversions.
500 */
501inline Type **unwrap(LLVMTypeRef* Tys) {
502 return reinterpret_cast<Type**>(Tys);
503}
504
505inline LLVMTypeRef *wrap(Type **Tys) {
506 return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys));
507}
508
509} // end namespace llvm
510
511#endif // LLVM_IR_TYPE_H

/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/ConstantFolder.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugLoc.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/FPEnv.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/GlobalVariable.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/Intrinsics.h"
37#include "llvm/IR/LLVMContext.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/IR/Type.h"
41#include "llvm/IR/Value.h"
42#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/CBindingWrapping.h"
45#include "llvm/Support/Casting.h"
46#include <cassert>
47#include <cstdint>
48#include <functional>
49#include <utility>
50
51namespace llvm {
52
53class APInt;
54class Use;
55
56/// This provides the default implementation of the IRBuilder
57/// 'InsertHelper' method that is called whenever an instruction is created by
58/// IRBuilder and needs to be inserted.
59///
60/// By default, this inserts the instruction at the insertion point.
61class IRBuilderDefaultInserter {
62public:
63 virtual ~IRBuilderDefaultInserter();
64
65 virtual void InsertHelper(Instruction *I, const Twine &Name,
66 BasicBlock *BB,
67 BasicBlock::iterator InsertPt) const {
68 if (BB) BB->getInstList().insert(InsertPt, I);
69 I->setName(Name);
70 }
71};
72
73/// Provides an 'InsertHelper' that calls a user-provided callback after
74/// performing the default insertion.
75class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
76 std::function<void(Instruction *)> Callback;
77
78public:
79 ~IRBuilderCallbackInserter() override;
80
81 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
82 : Callback(std::move(Callback)) {}
83
84 void InsertHelper(Instruction *I, const Twine &Name,
85 BasicBlock *BB,
86 BasicBlock::iterator InsertPt) const override {
87 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
88 Callback(I);
89 }
90};
91
92/// Common base class shared among various IRBuilders.
93class IRBuilderBase {
94 /// Pairs of (metadata kind, MDNode *) that should be added to all newly
95 /// created instructions, like !dbg metadata.
96 SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy;
97
98 /// Add or update the an entry (Kind, MD) to MetadataToCopy, if \p MD is not
99 /// null. If \p MD is null, remove the entry with \p Kind.
100 void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) {
101 if (!MD) {
102 erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) {
103 return KV.first == Kind;
104 });
105 return;
106 }
107
108 for (auto &KV : MetadataToCopy)
109 if (KV.first == Kind) {
110 KV.second = MD;
111 return;
112 }
113
114 MetadataToCopy.emplace_back(Kind, MD);
115 }
116
117protected:
118 BasicBlock *BB;
119 BasicBlock::iterator InsertPt;
120 LLVMContext &Context;
121 const IRBuilderFolder &Folder;
122 const IRBuilderDefaultInserter &Inserter;
123
124 MDNode *DefaultFPMathTag;
125 FastMathFlags FMF;
126
127 bool IsFPConstrained = false;
128 fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict;
129 RoundingMode DefaultConstrainedRounding = RoundingMode::Dynamic;
130
131 ArrayRef<OperandBundleDef> DefaultOperandBundles;
132
133public:
134 IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
135 const IRBuilderDefaultInserter &Inserter, MDNode *FPMathTag,
136 ArrayRef<OperandBundleDef> OpBundles)
137 : Context(context), Folder(Folder), Inserter(Inserter),
138 DefaultFPMathTag(FPMathTag), DefaultOperandBundles(OpBundles) {
139 ClearInsertionPoint();
140 }
141
142 /// Insert and return the specified instruction.
143 template<typename InstTy>
144 InstTy *Insert(InstTy *I, const Twine &Name = "") const {
145 Inserter.InsertHelper(I, Name, BB, InsertPt);
146 AddMetadataToInst(I);
147 return I;
148 }
149
150 /// No-op overload to handle constants.
151 Constant *Insert(Constant *C, const Twine& = "") const {
152 return C;
153 }
154
155 Value *Insert(Value *V, const Twine &Name = "") const {
156 if (Instruction *I = dyn_cast<Instruction>(V))
157 return Insert(I, Name);
158 assert(isa<Constant>(V))(static_cast <bool> (isa<Constant>(V)) ? void (0)
: __assert_fail ("isa<Constant>(V)", "llvm/include/llvm/IR/IRBuilder.h"
, 158, __extension__ __PRETTY_FUNCTION__))
;
159 return V;
160 }
161
162 //===--------------------------------------------------------------------===//
163 // Builder configuration methods
164 //===--------------------------------------------------------------------===//
165
166 /// Clear the insertion point: created instructions will not be
167 /// inserted into a block.
168 void ClearInsertionPoint() {
169 BB = nullptr;
170 InsertPt = BasicBlock::iterator();
171 }
172
173 BasicBlock *GetInsertBlock() const { return BB; }
174 BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
175 LLVMContext &getContext() const { return Context; }
176
177 /// This specifies that created instructions should be appended to the
178 /// end of the specified block.
179 void SetInsertPoint(BasicBlock *TheBB) {
180 BB = TheBB;
181 InsertPt = BB->end();
182 }
183
184 /// This specifies that created instructions should be inserted before
185 /// the specified instruction.
186 void SetInsertPoint(Instruction *I) {
187 BB = I->getParent();
24
Called C++ object pointer is null
188 InsertPt = I->getIterator();
189 assert(InsertPt != BB->end() && "Can't read debug loc from end()")(static_cast <bool> (InsertPt != BB->end() &&
"Can't read debug loc from end()") ? void (0) : __assert_fail
("InsertPt != BB->end() && \"Can't read debug loc from end()\""
, "llvm/include/llvm/IR/IRBuilder.h", 189, __extension__ __PRETTY_FUNCTION__
))
;
190 SetCurrentDebugLocation(I->getDebugLoc());
191 }
192
193 /// This specifies that created instructions should be inserted at the
194 /// specified point.
195 void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
196 BB = TheBB;
197 InsertPt = IP;
198 if (IP != TheBB->end())
199 SetCurrentDebugLocation(IP->getDebugLoc());
200 }
201
202 /// Set location information used by debugging information.
203 void SetCurrentDebugLocation(DebugLoc L) {
204 AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
205 }
206
207 /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
208 /// added to all created instructions. Entries present in MedataDataToCopy but
209 /// not on \p Src will be dropped from MetadataToCopy.
210 void CollectMetadataToCopy(Instruction *Src,
211 ArrayRef<unsigned> MetadataKinds) {
212 for (unsigned K : MetadataKinds)
213 AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
214 }
215
216 /// Get location information used by debugging information.
217 DebugLoc getCurrentDebugLocation() const;
218
219 /// If this builder has a current debug location, set it on the
220 /// specified instruction.
221 void SetInstDebugLocation(Instruction *I) const;
222
223 /// Add all entries in MetadataToCopy to \p I.
224 void AddMetadataToInst(Instruction *I) const {
225 for (auto &KV : MetadataToCopy)
226 I->setMetadata(KV.first, KV.second);
227 }
228
229 /// Get the return type of the current function that we're emitting
230 /// into.
231 Type *getCurrentFunctionReturnType() const;
232
233 /// InsertPoint - A saved insertion point.
234 class InsertPoint {
235 BasicBlock *Block = nullptr;
236 BasicBlock::iterator Point;
237
238 public:
239 /// Creates a new insertion point which doesn't point to anything.
240 InsertPoint() = default;
241
242 /// Creates a new insertion point at the given location.
243 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
244 : Block(InsertBlock), Point(InsertPoint) {}
245
246 /// Returns true if this insert point is set.
247 bool isSet() const { return (Block != nullptr); }
248
249 BasicBlock *getBlock() const { return Block; }
250 BasicBlock::iterator getPoint() const { return Point; }
251 };
252
253 /// Returns the current insert point.
254 InsertPoint saveIP() const {
255 return InsertPoint(GetInsertBlock(), GetInsertPoint());
256 }
257
258 /// Returns the current insert point, clearing it in the process.
259 InsertPoint saveAndClearIP() {
260 InsertPoint IP(GetInsertBlock(), GetInsertPoint());
261 ClearInsertionPoint();
262 return IP;
263 }
264
265 /// Sets the current insert point to a previously-saved location.
266 void restoreIP(InsertPoint IP) {
267 if (IP.isSet())
268 SetInsertPoint(IP.getBlock(), IP.getPoint());
269 else
270 ClearInsertionPoint();
271 }
272
273 /// Get the floating point math metadata being used.
274 MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
275
276 /// Get the flags to be applied to created floating point ops
277 FastMathFlags getFastMathFlags() const { return FMF; }
278
279 FastMathFlags &getFastMathFlags() { return FMF; }
280
281 /// Clear the fast-math flags.
282 void clearFastMathFlags() { FMF.clear(); }
283
284 /// Set the floating point math metadata to be used.
285 void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
286
287 /// Set the fast-math flags to be used with generated fp-math operators
288 void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
289
290 /// Enable/Disable use of constrained floating point math. When
291 /// enabled the CreateF<op>() calls instead create constrained
292 /// floating point intrinsic calls. Fast math flags are unaffected
293 /// by this setting.
294 void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }
295
296 /// Query for the use of constrained floating point math
297 bool getIsFPConstrained() { return IsFPConstrained; }
298
299 /// Set the exception handling to be used with constrained floating point
300 void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
301#ifndef NDEBUG
302 Optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(NewExcept);
303 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!")(static_cast <bool> (ExceptStr.hasValue() && "Garbage strict exception behavior!"
) ? void (0) : __assert_fail ("ExceptStr.hasValue() && \"Garbage strict exception behavior!\""
, "llvm/include/llvm/IR/IRBuilder.h", 303, __extension__ __PRETTY_FUNCTION__
))
;
304#endif
305 DefaultConstrainedExcept = NewExcept;
306 }
307
308 /// Set the rounding mode handling to be used with constrained floating point
309 void setDefaultConstrainedRounding(RoundingMode NewRounding) {
310#ifndef NDEBUG
311 Optional<StringRef> RoundingStr = convertRoundingModeToStr(NewRounding);
312 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!")(static_cast <bool> (RoundingStr.hasValue() && "Garbage strict rounding mode!"
) ? void (0) : __assert_fail ("RoundingStr.hasValue() && \"Garbage strict rounding mode!\""
, "llvm/include/llvm/IR/IRBuilder.h", 312, __extension__ __PRETTY_FUNCTION__
))
;
313#endif
314 DefaultConstrainedRounding = NewRounding;
315 }
316
317 /// Get the exception handling used with constrained floating point
318 fp::ExceptionBehavior getDefaultConstrainedExcept() {
319 return DefaultConstrainedExcept;
320 }
321
322 /// Get the rounding mode handling used with constrained floating point
323 RoundingMode getDefaultConstrainedRounding() {
324 return DefaultConstrainedRounding;
325 }
326
327 void setConstrainedFPFunctionAttr() {
328 assert(BB && "Must have a basic block to set any function attributes!")(static_cast <bool> (BB && "Must have a basic block to set any function attributes!"
) ? void (0) : __assert_fail ("BB && \"Must have a basic block to set any function attributes!\""
, "llvm/include/llvm/IR/IRBuilder.h", 328, __extension__ __PRETTY_FUNCTION__
))
;
329
330 Function *F = BB->getParent();
331 if (!F->hasFnAttribute(Attribute::StrictFP)) {
332 F->addFnAttr(Attribute::StrictFP);
333 }
334 }
335
336 void setConstrainedFPCallAttr(CallBase *I) {
337 I->addFnAttr(Attribute::StrictFP);
338 }
339
340 void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
341 DefaultOperandBundles = OpBundles;
342 }
343
344 //===--------------------------------------------------------------------===//
345 // RAII helpers.
346 //===--------------------------------------------------------------------===//
347
348 // RAII object that stores the current insertion point and restores it
349 // when the object is destroyed. This includes the debug location.
350 class InsertPointGuard {
351 IRBuilderBase &Builder;
352 AssertingVH<BasicBlock> Block;
353 BasicBlock::iterator Point;
354 DebugLoc DbgLoc;
355
356 public:
357 InsertPointGuard(IRBuilderBase &B)
358 : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
359 DbgLoc(B.getCurrentDebugLocation()) {}
360
361 InsertPointGuard(const InsertPointGuard &) = delete;
362 InsertPointGuard &operator=(const InsertPointGuard &) = delete;
363
364 ~InsertPointGuard() {
365 Builder.restoreIP(InsertPoint(Block, Point));
366 Builder.SetCurrentDebugLocation(DbgLoc);
367 }
368 };
369
370 // RAII object that stores the current fast math settings and restores
371 // them when the object is destroyed.
372 class FastMathFlagGuard {
373 IRBuilderBase &Builder;
374 FastMathFlags FMF;
375 MDNode *FPMathTag;
376 bool IsFPConstrained;
377 fp::ExceptionBehavior DefaultConstrainedExcept;
378 RoundingMode DefaultConstrainedRounding;
379
380 public:
381 FastMathFlagGuard(IRBuilderBase &B)
382 : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
383 IsFPConstrained(B.IsFPConstrained),
384 DefaultConstrainedExcept(B.DefaultConstrainedExcept),
385 DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}
386
387 FastMathFlagGuard(const FastMathFlagGuard &) = delete;
388 FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
389
390 ~FastMathFlagGuard() {
391 Builder.FMF = FMF;
392 Builder.DefaultFPMathTag = FPMathTag;
393 Builder.IsFPConstrained = IsFPConstrained;
394 Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
395 Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
396 }
397 };
398
399 // RAII object that stores the current default operand bundles and restores
400 // them when the object is destroyed.
401 class OperandBundlesGuard {
402 IRBuilderBase &Builder;
403 ArrayRef<OperandBundleDef> DefaultOperandBundles;
404
405 public:
406 OperandBundlesGuard(IRBuilderBase &B)
407 : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}
408
409 OperandBundlesGuard(const OperandBundlesGuard &) = delete;
410 OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;
411
412 ~OperandBundlesGuard() {
413 Builder.DefaultOperandBundles = DefaultOperandBundles;
414 }
415 };
416
417
418 //===--------------------------------------------------------------------===//
419 // Miscellaneous creation methods.
420 //===--------------------------------------------------------------------===//
421
422 /// Make a new global variable with initializer type i8*
423 ///
424 /// Make a new global variable with an initializer that has array of i8 type
425 /// filled in with the null terminated string value specified. The new global
426 /// variable will be marked mergable with any others of the same contents. If
427 /// Name is specified, it is the name of the global variable created.
428 ///
429 /// If no module is given via \p M, it is take from the insertion point basic
430 /// block.
431 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
432 unsigned AddressSpace = 0,
433 Module *M = nullptr);
434
435 /// Get a constant value representing either true or false.
436 ConstantInt *getInt1(bool V) {
437 return ConstantInt::get(getInt1Ty(), V);
438 }
439
440 /// Get the constant value for i1 true.
441 ConstantInt *getTrue() {
442 return ConstantInt::getTrue(Context);
443 }
444
445 /// Get the constant value for i1 false.
446 ConstantInt *getFalse() {
447 return ConstantInt::getFalse(Context);
448 }
449
450 /// Get a constant 8-bit value.
451 ConstantInt *getInt8(uint8_t C) {
452 return ConstantInt::get(getInt8Ty(), C);
453 }
454
455 /// Get a constant 16-bit value.
456 ConstantInt *getInt16(uint16_t C) {
457 return ConstantInt::get(getInt16Ty(), C);
458 }
459
460 /// Get a constant 32-bit value.
461 ConstantInt *getInt32(uint32_t C) {
462 return ConstantInt::get(getInt32Ty(), C);
463 }
464
465 /// Get a constant 64-bit value.
466 ConstantInt *getInt64(uint64_t C) {
467 return ConstantInt::get(getInt64Ty(), C);
468 }
469
470 /// Get a constant N-bit value, zero extended or truncated from
471 /// a 64-bit value.
472 ConstantInt *getIntN(unsigned N, uint64_t C) {
473 return ConstantInt::get(getIntNTy(N), C);
474 }
475
476 /// Get a constant integer value.
477 ConstantInt *getInt(const APInt &AI) {
478 return ConstantInt::get(Context, AI);
479 }
480
481 //===--------------------------------------------------------------------===//
482 // Type creation methods
483 //===--------------------------------------------------------------------===//
484
485 /// Fetch the type representing a single bit
486 IntegerType *getInt1Ty() {
487 return Type::getInt1Ty(Context);
488 }
489
490 /// Fetch the type representing an 8-bit integer.
491 IntegerType *getInt8Ty() {
492 return Type::getInt8Ty(Context);
493 }
494
495 /// Fetch the type representing a 16-bit integer.
496 IntegerType *getInt16Ty() {
497 return Type::getInt16Ty(Context);
498 }
499
500 /// Fetch the type representing a 32-bit integer.
501 IntegerType *getInt32Ty() {
502 return Type::getInt32Ty(Context);
503 }
504
505 /// Fetch the type representing a 64-bit integer.
506 IntegerType *getInt64Ty() {
507 return Type::getInt64Ty(Context);
508 }
509
510 /// Fetch the type representing a 128-bit integer.
511 IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
512
513 /// Fetch the type representing an N-bit integer.
514 IntegerType *getIntNTy(unsigned N) {
515 return Type::getIntNTy(Context, N);
516 }
517
518 /// Fetch the type representing a 16-bit floating point value.
519 Type *getHalfTy() {
520 return Type::getHalfTy(Context);
521 }
522
523 /// Fetch the type representing a 16-bit brain floating point value.
524 Type *getBFloatTy() {
525 return Type::getBFloatTy(Context);
526 }
527
528 /// Fetch the type representing a 32-bit floating point value.
529 Type *getFloatTy() {
530 return Type::getFloatTy(Context);
531 }
532
533 /// Fetch the type representing a 64-bit floating point value.
534 Type *getDoubleTy() {
535 return Type::getDoubleTy(Context);
536 }
537
538 /// Fetch the type representing void.
539 Type *getVoidTy() {
540 return Type::getVoidTy(Context);
541 }
542
543 /// Fetch the type representing a pointer to an 8-bit integer value.
544 PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
545 return Type::getInt8PtrTy(Context, AddrSpace);
546 }
547
548 /// Fetch the type representing a pointer to an integer value.
549 IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
550 return DL.getIntPtrType(Context, AddrSpace);
551 }
552
553 //===--------------------------------------------------------------------===//
554 // Intrinsic creation methods
555 //===--------------------------------------------------------------------===//
556
557 /// Create and insert a memset to the specified pointer and the
558 /// specified value.