File: build/source/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
Warning: line 1941, column 20: Called C++ object pointer is null
1 | //===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | /// \file | |||
9 | /// | |||
10 | /// This file implements the OpenMPIRBuilder class, which is used as a | |||
11 | /// convenient way to create LLVM instructions for OpenMP directives. | |||
12 | /// | |||
13 | //===----------------------------------------------------------------------===// | |||
14 | ||||
15 | #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" | |||
16 | #include "llvm/ADT/SmallSet.h" | |||
17 | #include "llvm/ADT/StringRef.h" | |||
18 | #include "llvm/Analysis/AssumptionCache.h" | |||
19 | #include "llvm/Analysis/CodeMetrics.h" | |||
20 | #include "llvm/Analysis/LoopInfo.h" | |||
21 | #include "llvm/Analysis/OptimizationRemarkEmitter.h" | |||
22 | #include "llvm/Analysis/ScalarEvolution.h" | |||
23 | #include "llvm/Analysis/TargetLibraryInfo.h" | |||
24 | #include "llvm/IR/CFG.h" | |||
25 | #include "llvm/IR/Constants.h" | |||
26 | #include "llvm/IR/DebugInfoMetadata.h" | |||
27 | #include "llvm/IR/DerivedTypes.h" | |||
28 | #include "llvm/IR/GlobalVariable.h" | |||
29 | #include "llvm/IR/IRBuilder.h" | |||
30 | #include "llvm/IR/MDBuilder.h" | |||
31 | #include "llvm/IR/PassManager.h" | |||
32 | #include "llvm/IR/Value.h" | |||
33 | #include "llvm/MC/TargetRegistry.h" | |||
34 | #include "llvm/Support/CommandLine.h" | |||
35 | #include "llvm/Target/TargetMachine.h" | |||
36 | #include "llvm/Target/TargetOptions.h" | |||
37 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | |||
38 | #include "llvm/Transforms/Utils/Cloning.h" | |||
39 | #include "llvm/Transforms/Utils/CodeExtractor.h" | |||
40 | #include "llvm/Transforms/Utils/LoopPeel.h" | |||
41 | #include "llvm/Transforms/Utils/UnrollLoop.h" | |||
42 | ||||
43 | #include <cstdint> | |||
44 | #include <optional> | |||
45 | ||||
46 | #define DEBUG_TYPE "openmp-ir-builder" | |||
47 | ||||
48 | using namespace llvm; | |||
49 | using namespace omp; | |||
50 | ||||
51 | static cl::opt<bool> | |||
52 | OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden, | |||
53 | cl::desc("Use optimistic attributes describing " | |||
54 | "'as-if' properties of runtime calls."), | |||
55 | cl::init(false)); | |||
56 | ||||
57 | static cl::opt<double> UnrollThresholdFactor( | |||
58 | "openmp-ir-builder-unroll-threshold-factor", cl::Hidden, | |||
59 | cl::desc("Factor for the unroll threshold to account for code " | |||
60 | "simplifications still taking place"), | |||
61 | cl::init(1.5)); | |||
62 | ||||
63 | #ifndef NDEBUG | |||
64 | /// Return whether IP1 and IP2 are ambiguous, i.e. that inserting instructions | |||
65 | /// at position IP1 may change the meaning of IP2 or vice-versa. This is because | |||
66 | /// an InsertPoint stores the instruction before something is inserted. For | |||
67 | /// instance, if both point to the same instruction, two IRBuilders that | |||
68 | /// alternate creating instructions will cause those instructions to be interleaved. | |||
69 | static bool isConflictIP(IRBuilder<>::InsertPoint IP1, | |||
70 | IRBuilder<>::InsertPoint IP2) { | |||
71 | if (!IP1.isSet() || !IP2.isSet()) | |||
72 | return false; | |||
73 | return IP1.getBlock() == IP2.getBlock() && IP1.getPoint() == IP2.getPoint(); | |||
74 | } | |||
75 | ||||
76 | static bool isValidWorkshareLoopScheduleType(OMPScheduleType SchedType) { | |||
77 | // Valid ordered/unordered and base algorithm combinations. | |||
78 | switch (SchedType & ~OMPScheduleType::MonotonicityMask) { | |||
79 | case OMPScheduleType::UnorderedStaticChunked: | |||
80 | case OMPScheduleType::UnorderedStatic: | |||
81 | case OMPScheduleType::UnorderedDynamicChunked: | |||
82 | case OMPScheduleType::UnorderedGuidedChunked: | |||
83 | case OMPScheduleType::UnorderedRuntime: | |||
84 | case OMPScheduleType::UnorderedAuto: | |||
85 | case OMPScheduleType::UnorderedTrapezoidal: | |||
86 | case OMPScheduleType::UnorderedGreedy: | |||
87 | case OMPScheduleType::UnorderedBalanced: | |||
88 | case OMPScheduleType::UnorderedGuidedIterativeChunked: | |||
89 | case OMPScheduleType::UnorderedGuidedAnalyticalChunked: | |||
90 | case OMPScheduleType::UnorderedSteal: | |||
91 | case OMPScheduleType::UnorderedStaticBalancedChunked: | |||
92 | case OMPScheduleType::UnorderedGuidedSimd: | |||
93 | case OMPScheduleType::UnorderedRuntimeSimd: | |||
94 | case OMPScheduleType::OrderedStaticChunked: | |||
95 | case OMPScheduleType::OrderedStatic: | |||
96 | case OMPScheduleType::OrderedDynamicChunked: | |||
97 | case OMPScheduleType::OrderedGuidedChunked: | |||
98 | case OMPScheduleType::OrderedRuntime: | |||
99 | case OMPScheduleType::OrderedAuto: | |||
100 | case OMPScheduleType::OrderedTrapezoidal: | |||
101 | case OMPScheduleType::NomergeUnorderedStaticChunked: | |||
102 | case OMPScheduleType::NomergeUnorderedStatic: | |||
103 | case OMPScheduleType::NomergeUnorderedDynamicChunked: | |||
104 | case OMPScheduleType::NomergeUnorderedGuidedChunked: | |||
105 | case OMPScheduleType::NomergeUnorderedRuntime: | |||
106 | case OMPScheduleType::NomergeUnorderedAuto: | |||
107 | case OMPScheduleType::NomergeUnorderedTrapezoidal: | |||
108 | case OMPScheduleType::NomergeUnorderedGreedy: | |||
109 | case OMPScheduleType::NomergeUnorderedBalanced: | |||
110 | case OMPScheduleType::NomergeUnorderedGuidedIterativeChunked: | |||
111 | case OMPScheduleType::NomergeUnorderedGuidedAnalyticalChunked: | |||
112 | case OMPScheduleType::NomergeUnorderedSteal: | |||
113 | case OMPScheduleType::NomergeOrderedStaticChunked: | |||
114 | case OMPScheduleType::NomergeOrderedStatic: | |||
115 | case OMPScheduleType::NomergeOrderedDynamicChunked: | |||
116 | case OMPScheduleType::NomergeOrderedGuidedChunked: | |||
117 | case OMPScheduleType::NomergeOrderedRuntime: | |||
118 | case OMPScheduleType::NomergeOrderedAuto: | |||
119 | case OMPScheduleType::NomergeOrderedTrapezoidal: | |||
120 | break; | |||
121 | default: | |||
122 | return false; | |||
123 | } | |||
124 | ||||
125 | // Must not set both monotonicity modifiers at the same time. | |||
126 | OMPScheduleType MonotonicityFlags = | |||
127 | SchedType & OMPScheduleType::MonotonicityMask; | |||
128 | if (MonotonicityFlags == OMPScheduleType::MonotonicityMask) | |||
129 | return false; | |||
130 | ||||
131 | return true; | |||
132 | } | |||
133 | #endif | |||
134 | ||||
135 | /// Determine which scheduling algorithm to use based on the schedule clause | |||
136 | /// arguments. | |||
137 | static OMPScheduleType | |||
138 | getOpenMPBaseScheduleType(llvm::omp::ScheduleKind ClauseKind, bool HasChunks, | |||
139 | bool HasSimdModifier) { | |||
140 | // Currently, the default schedule is static. | |||
141 | switch (ClauseKind) { | |||
142 | case OMP_SCHEDULE_Default: | |||
143 | case OMP_SCHEDULE_Static: | |||
144 | return HasChunks ? OMPScheduleType::BaseStaticChunked | |||
145 | : OMPScheduleType::BaseStatic; | |||
146 | case OMP_SCHEDULE_Dynamic: | |||
147 | return OMPScheduleType::BaseDynamicChunked; | |||
148 | case OMP_SCHEDULE_Guided: | |||
149 | return HasSimdModifier ? OMPScheduleType::BaseGuidedSimd | |||
150 | : OMPScheduleType::BaseGuidedChunked; | |||
151 | case OMP_SCHEDULE_Auto: | |||
152 | return llvm::omp::OMPScheduleType::BaseAuto; | |||
153 | case OMP_SCHEDULE_Runtime: | |||
154 | return HasSimdModifier ? OMPScheduleType::BaseRuntimeSimd | |||
155 | : OMPScheduleType::BaseRuntime; | |||
156 | } | |||
157 | llvm_unreachable("unhandled schedule clause argument"); | |||
158 | } | |||
159 | ||||
160 | /// Adds ordering modifier flags to schedule type. | |||
161 | static OMPScheduleType | |||
162 | getOpenMPOrderingScheduleType(OMPScheduleType BaseScheduleType, | |||
163 | bool HasOrderedClause) { | |||
164 | assert((BaseScheduleType & OMPScheduleType::ModifierMask) == | |||
165 | OMPScheduleType::None && | |||
166 | "Must not have ordering nor monotonicity flags already set"); | |||
167 | ||||
168 | OMPScheduleType OrderingModifier = HasOrderedClause | |||
169 | ? OMPScheduleType::ModifierOrdered | |||
170 | : OMPScheduleType::ModifierUnordered; | |||
171 | OMPScheduleType OrderingScheduleType = BaseScheduleType | OrderingModifier; | |||
172 | ||||
173 | // Unsupported combinations are mapped to supported equivalents. | |||
174 | if (OrderingScheduleType == | |||
175 | (OMPScheduleType::BaseGuidedSimd | OMPScheduleType::ModifierOrdered)) | |||
176 | return OMPScheduleType::OrderedGuidedChunked; | |||
177 | else if (OrderingScheduleType == (OMPScheduleType::BaseRuntimeSimd | | |||
178 | OMPScheduleType::ModifierOrdered)) | |||
179 | return OMPScheduleType::OrderedRuntime; | |||
180 | ||||
181 | return OrderingScheduleType; | |||
182 | } | |||
183 | ||||
184 | /// Adds monotonicity modifier flags to schedule type. | |||
185 | static OMPScheduleType | |||
186 | getOpenMPMonotonicityScheduleType(OMPScheduleType ScheduleType, | |||
187 | bool HasSimdModifier, bool HasMonotonic, | |||
188 | bool HasNonmonotonic, bool HasOrderedClause) { | |||
189 | assert((ScheduleType & OMPScheduleType::MonotonicityMask) == | |||
190 | OMPScheduleType::None && | |||
191 | "Must not have monotonicity flags already set"); | |||
192 | assert((!HasMonotonic || !HasNonmonotonic) && | |||
193 | "Monotonic and Nonmonotonic are contradicting each other"); | |||
194 | ||||
195 | if (HasMonotonic) { | |||
196 | return ScheduleType | OMPScheduleType::ModifierMonotonic; | |||
197 | } else if (HasNonmonotonic) { | |||
198 | return ScheduleType | OMPScheduleType::ModifierNonmonotonic; | |||
199 | } else { | |||
200 | // OpenMP 5.1, 2.11.4 Worksharing-Loop Construct, Description. | |||
201 | // If the static schedule kind is specified or if the ordered clause is | |||
202 | // specified, and if the nonmonotonic modifier is not specified, the | |||
203 | // effect is as if the monotonic modifier is specified. Otherwise, unless | |||
204 | // the monotonic modifier is specified, the effect is as if the | |||
205 | // nonmonotonic modifier is specified. | |||
206 | OMPScheduleType BaseScheduleType = | |||
207 | ScheduleType & ~OMPScheduleType::ModifierMask; | |||
208 | if ((BaseScheduleType == OMPScheduleType::BaseStatic) || | |||
209 | (BaseScheduleType == OMPScheduleType::BaseStaticChunked) || | |||
210 | HasOrderedClause) { | |||
211 | // The monotonic modifier is the default in the OpenMP runtime library, so | |||
212 | // there is no need to set it. | |||
213 | return ScheduleType; | |||
214 | } else { | |||
215 | return ScheduleType | OMPScheduleType::ModifierNonmonotonic; | |||
216 | } | |||
217 | } | |||
218 | } | |||
219 | ||||
220 | /// Determine the schedule type using schedule and ordering clause arguments. | |||
221 | static OMPScheduleType | |||
222 | computeOpenMPScheduleType(ScheduleKind ClauseKind, bool HasChunks, | |||
223 | bool HasSimdModifier, bool HasMonotonicModifier, | |||
224 | bool HasNonmonotonicModifier, bool HasOrderedClause) { | |||
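| // For example, a plain schedule(dynamic) with no ordered clause composes to | |||
| // BaseDynamicChunked | ModifierUnordered | ModifierNonmonotonic, since the | |||
| // nonmonotonic modifier is implied for non-static, non-ordered schedules. | |||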
225 | OMPScheduleType BaseSchedule = | |||
226 | getOpenMPBaseScheduleType(ClauseKind, HasChunks, HasSimdModifier); | |||
227 | OMPScheduleType OrderedSchedule = | |||
228 | getOpenMPOrderingScheduleType(BaseSchedule, HasOrderedClause); | |||
229 | OMPScheduleType Result = getOpenMPMonotonicityScheduleType( | |||
230 | OrderedSchedule, HasSimdModifier, HasMonotonicModifier, | |||
231 | HasNonmonotonicModifier, HasOrderedClause); | |||
232 | ||||
233 | assert(isValidWorkshareLoopScheduleType(Result)); | |||
234 | return Result; | |||
235 | } | |||
236 | ||||
237 | /// Make \p Source branch to \p Target. | |||
238 | /// | |||
239 | /// Handles two situations: | |||
240 | /// * \p Source already has an unconditional branch. | |||
241 | /// * \p Source is a degenerate block (no terminator because the BB is | |||
242 | /// the current head of the IR construction). | |||
243 | static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) { | |||
244 | if (Instruction *Term = Source->getTerminator()) { | |||
245 | auto *Br = cast<BranchInst>(Term); | |||
246 | assert(!Br->isConditional() && | |||
247 | "BB's terminator must be an unconditional branch (or degenerate)"); | |||
248 | BasicBlock *Succ = Br->getSuccessor(0); | |||
249 | Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true); | |||
250 | Br->setSuccessor(0, Target); | |||
251 | return; | |||
252 | } | |||
253 | ||||
254 | auto *NewBr = BranchInst::Create(Target, Source); | |||
255 | NewBr->setDebugLoc(DL); | |||
256 | } | |||
257 | ||||
258 | void llvm::spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New, | |||
259 | bool CreateBranch) { | |||
260 | assert(New->getFirstInsertionPt() == New->begin() && | |||
261 | "Target BB must not have PHI nodes"); | |||
262 | ||||
263 | // Move instructions to new block. | |||
264 | BasicBlock *Old = IP.getBlock(); | |||
265 | New->splice(New->begin(), Old, IP.getPoint(), Old->end()); | |||
266 | ||||
267 | if (CreateBranch) | |||
268 | BranchInst::Create(New, Old); | |||
269 | } | |||
270 | ||||
271 | void llvm::spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch) { | |||
272 | DebugLoc DebugLoc = Builder.getCurrentDebugLocation(); | |||
273 | BasicBlock *Old = Builder.GetInsertBlock(); | |||
274 | ||||
275 | spliceBB(Builder.saveIP(), New, CreateBranch); | |||
276 | if (CreateBranch) | |||
277 | Builder.SetInsertPoint(Old->getTerminator()); | |||
278 | else | |||
279 | Builder.SetInsertPoint(Old); | |||
280 | ||||
281 | // SetInsertPoint also updates the Builder's debug location, but we want to | |||
282 | // keep the one the Builder was configured to use. | |||
283 | Builder.SetCurrentDebugLocation(DebugLoc); | |||
284 | } | |||
285 | ||||
286 | BasicBlock *llvm::splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch, | |||
287 | llvm::Twine Name) { | |||
288 | BasicBlock *Old = IP.getBlock(); | |||
289 | BasicBlock *New = BasicBlock::Create( | |||
290 | Old->getContext(), Name.isTriviallyEmpty() ? Old->getName() : Name, | |||
291 | Old->getParent(), Old->getNextNode()); | |||
292 | spliceBB(IP, New, CreateBranch); | |||
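| // The moved terminator's successors still list Old as an incoming block in | |||
| // their PHI nodes; rewrite those uses to refer to New instead. | |||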
293 | New->replaceSuccessorsPhiUsesWith(Old, New); | |||
294 | return New; | |||
295 | } | |||
296 | ||||
297 | BasicBlock *llvm::splitBB(IRBuilderBase &Builder, bool CreateBranch, | |||
298 | llvm::Twine Name) { | |||
299 | DebugLoc DebugLoc = Builder.getCurrentDebugLocation(); | |||
300 | BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, Name); | |||
301 | if (CreateBranch) | |||
302 | Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator()); | |||
303 | else | |||
304 | Builder.SetInsertPoint(Builder.GetInsertBlock()); | |||
305 | // SetInsertPoint also updates the Builder's debug location, but we want to | |||
306 | // keep the one the Builder was configured to use. | |||
307 | Builder.SetCurrentDebugLocation(DebugLoc); | |||
308 | return New; | |||
309 | } | |||
310 | ||||
311 | BasicBlock *llvm::splitBB(IRBuilder<> &Builder, bool CreateBranch, | |||
312 | llvm::Twine Name) { | |||
313 | DebugLoc DebugLoc = Builder.getCurrentDebugLocation(); | |||
314 | BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, Name); | |||
315 | if (CreateBranch) | |||
316 | Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator()); | |||
317 | else | |||
318 | Builder.SetInsertPoint(Builder.GetInsertBlock()); | |||
319 | // SetInsertPoint also updates the Builder's debug location, but we want to | |||
320 | // keep the one the Builder was configured to use. | |||
321 | Builder.SetCurrentDebugLocation(DebugLoc); | |||
322 | return New; | |||
323 | } | |||
324 | ||||
325 | BasicBlock *llvm::splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch, | |||
326 | llvm::Twine Suffix) { | |||
327 | BasicBlock *Old = Builder.GetInsertBlock(); | |||
328 | return splitBB(Builder, CreateBranch, Old->getName() + Suffix); | |||
329 | } | |||
330 | ||||
331 | void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) { | |||
332 | LLVMContext &Ctx = Fn.getContext(); | |||
333 | Triple T(M.getTargetTriple()); | |||
334 | ||||
335 | // Get the function's current attributes. | |||
336 | auto Attrs = Fn.getAttributes(); | |||
337 | auto FnAttrs = Attrs.getFnAttrs(); | |||
338 | auto RetAttrs = Attrs.getRetAttrs(); | |||
339 | SmallVector<AttributeSet, 4> ArgAttrs; | |||
340 | for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo) | |||
341 | ArgAttrs.emplace_back(Attrs.getParamAttrs(ArgNo)); | |||
342 | ||||
343 | // Add AS to FnAS while taking special care with integer extensions. | |||
344 | auto addAttrSet = [&](AttributeSet &FnAS, const AttributeSet &AS, | |||
345 | bool Param = true) -> void { | |||
346 | bool HasSignExt = AS.hasAttribute(Attribute::SExt); | |||
347 | bool HasZeroExt = AS.hasAttribute(Attribute::ZExt); | |||
348 | if (HasSignExt || HasZeroExt) { | |||
349 | assert(AS.getNumAttributes() == 1 && | |||
350 | "Currently not handling extension attr combined with others."); | |||
351 | if (Param) { | |||
352 | if (auto AK = TargetLibraryInfo::getExtAttrForI32Param(T, HasSignExt)) | |||
353 | FnAS = FnAS.addAttribute(Ctx, AK); | |||
354 | } else | |||
355 | if (auto AK = TargetLibraryInfo::getExtAttrForI32Return(T, HasSignExt)) | |||
356 | FnAS = FnAS.addAttribute(Ctx, AK); | |||
357 | } else { | |||
358 | FnAS = FnAS.addAttributes(Ctx, AS); | |||
359 | } | |||
360 | }; | |||
361 | ||||
362 | #define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet; | |||
363 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | |||
364 | ||||
365 | // Add attributes to the function declaration. | |||
366 | switch (FnID) { | |||
367 | #define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets) \ | |||
368 | case Enum: \ | |||
369 | FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet); \ | |||
370 | addAttrSet(RetAttrs, RetAttrSet, /*Param*/false); \ | |||
371 | for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo) \ | |||
372 | addAttrSet(ArgAttrs[ArgNo], ArgAttrSets[ArgNo]); \ | |||
373 | Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs)); \ | |||
374 | break; | |||
375 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | |||
376 | default: | |||
377 | // Attributes are optional. | |||
378 | break; | |||
379 | } | |||
380 | } | |||
381 | ||||
382 | FunctionCallee | |||
383 | OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) { | |||
384 | FunctionType *FnTy = nullptr; | |||
385 | Function *Fn = nullptr; | |||
386 | ||||
387 | // Try to find the declaration in the module first. | |||
388 | switch (FnID) { | |||
389 | #define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) \ | |||
390 | case Enum: \ | |||
391 | FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__}, \ | |||
392 | IsVarArg); \ | |||
393 | Fn = M.getFunction(Str); \ | |||
394 | break; | |||
395 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | |||
396 | } | |||
397 | ||||
398 | if (!Fn) { | |||
399 | // Create a new declaration if we need one. | |||
400 | switch (FnID) { | |||
401 | #define OMP_RTL(Enum, Str, ...) \ | |||
402 | case Enum: \ | |||
403 | Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M); \ | |||
404 | break; | |||
405 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | |||
406 | } | |||
407 | ||||
408 | // Add information if the runtime function takes a callback function | |||
409 | if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) { | |||
410 | if (!Fn->hasMetadata(LLVMContext::MD_callback)) { | |||
411 | LLVMContext &Ctx = Fn->getContext(); | |||
412 | MDBuilder MDB(Ctx); | |||
413 | // Annotate the callback behavior of the runtime function: | |||
414 | // - The callback callee is argument number 2 (microtask). | |||
415 | // - The first two arguments of the callback callee are unknown (-1). | |||
416 | // - All variadic arguments to the runtime function are passed to the | |||
417 | // callback callee. | |||
418 | Fn->addMetadata( | |||
419 | LLVMContext::MD_callback, | |||
420 | *MDNode::get(Ctx, {MDB.createCallbackEncoding( | |||
421 | 2, {-1, -1}, /* VarArgsArePassed */ true)})); | |||
422 | } | |||
423 | } | |||
424 | ||||
425 | LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Created OpenMP runtime function " << Fn->getName() << " with type " << *Fn ->getFunctionType() << "\n"; } } while (false) | |||
426 | << " with type " << *Fn->getFunctionType() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Created OpenMP runtime function " << Fn->getName() << " with type " << *Fn ->getFunctionType() << "\n"; } } while (false); | |||
427 | addAttributes(FnID, *Fn); | |||
428 | ||||
429 | } else { | |||
430 | LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName() | |||
431 | << " with type " << *Fn->getFunctionType() << "\n"); | |||
432 | } | |||
433 | ||||
434 | assert(Fn && "Failed to create OpenMP runtime function"); | |||
435 | ||||
436 | // Cast the function to the expected type if necessary | |||
437 | Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo()); | |||
438 | return {FnTy, C}; | |||
439 | } | |||
440 | ||||
441 | Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) { | |||
442 | FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID); | |||
443 | auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee()); | |||
444 | assert(Fn && "Failed to create OpenMP runtime function pointer"); | |||
445 | return Fn; | |||
446 | } | |||
447 | ||||
448 | void OpenMPIRBuilder::initialize() { initializeTypes(M); } | |||
449 | ||||
450 | void OpenMPIRBuilder::finalize(Function *Fn) { | |||
451 | SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet; | |||
452 | SmallVector<BasicBlock *, 32> Blocks; | |||
453 | SmallVector<OutlineInfo, 16> DeferredOutlines; | |||
454 | for (OutlineInfo &OI : OutlineInfos) { | |||
455 | // Skip functions that have not been finalized yet; this may happen with | |||
456 | // nested function generation. | |||
457 | if (Fn && OI.getFunction() != Fn) { | |||
458 | DeferredOutlines.push_back(OI); | |||
459 | continue; | |||
460 | } | |||
461 | ||||
462 | ParallelRegionBlockSet.clear(); | |||
463 | Blocks.clear(); | |||
464 | OI.collectBlocks(ParallelRegionBlockSet, Blocks); | |||
465 | ||||
466 | Function *OuterFn = OI.getFunction(); | |||
467 | CodeExtractorAnalysisCache CEAC(*OuterFn); | |||
468 | CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr, | |||
469 | /* AggregateArgs */ true, | |||
470 | /* BlockFrequencyInfo */ nullptr, | |||
471 | /* BranchProbabilityInfo */ nullptr, | |||
472 | /* AssumptionCache */ nullptr, | |||
473 | /* AllowVarArgs */ true, | |||
474 | /* AllowAlloca */ true, | |||
475 | /* AllocaBlock*/ OI.OuterAllocaBB, | |||
476 | /* Suffix */ ".omp_par"); | |||
477 | ||||
478 | LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n"); | |||
479 | LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName() | |||
480 | << " Exit: " << OI.ExitBB->getName() << "\n"); | |||
481 | assert(Extractor.isEligible() && | |||
482 | "Expected OpenMP outlining to be possible!"); | |||
483 | ||||
484 | for (auto *V : OI.ExcludeArgsFromAggregate) | |||
485 | Extractor.excludeArgFromAggregate(V); | |||
486 | ||||
487 | Function *OutlinedFn = Extractor.extractCodeRegion(CEAC); | |||
488 | ||||
489 | LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n"); | |||
490 | LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n"); | |||
491 | assert(OutlinedFn->getReturnType()->isVoidTy() && | |||
492 | "OpenMP outlined functions should not return a value!"); | |||
493 | ||||
494 | // For compatibility with the Clang CG we move the outlined function after | |||
495 | // the one with the parallel region. | |||
496 | OutlinedFn->removeFromParent(); | |||
497 | M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn); | |||
498 | ||||
499 | // Remove the artificial entry introduced by the extractor right away; we | |||
500 | // made our own entry block after all. | |||
501 | { | |||
502 | BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock(); | |||
503 | assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB); | |||
504 | assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry); | |||
505 | // Move instructions from the to-be-deleted ArtificialEntry to the entry | |||
506 | // basic block of the parallel region. CodeExtractor generates | |||
507 | // instructions to unwrap the aggregate argument and may sink | |||
508 | // allocas/bitcasts for values that are solely used in the outlined region | |||
509 | // and do not escape. | |||
510 | assert(!ArtificialEntry.empty() && | |||
511 | "Expected instructions to add in the outlined region entry"); | |||
512 | for (BasicBlock::reverse_iterator It = ArtificialEntry.rbegin(), | |||
513 | End = ArtificialEntry.rend(); | |||
514 | It != End;) { | |||
515 | Instruction &I = *It; | |||
516 | It++; | |||
517 | ||||
518 | if (I.isTerminator()) | |||
519 | continue; | |||
520 | ||||
521 | I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt()); | |||
522 | } | |||
523 | ||||
524 | OI.EntryBB->moveBefore(&ArtificialEntry); | |||
525 | ArtificialEntry.eraseFromParent(); | |||
526 | } | |||
527 | assert(&OutlinedFn->getEntryBlock() == OI.EntryBB); | |||
528 | assert(OutlinedFn && OutlinedFn->getNumUses() == 1); | |||
529 | ||||
530 | // Run a user callback, e.g. to add attributes. | |||
531 | if (OI.PostOutlineCB) | |||
532 | OI.PostOutlineCB(*OutlinedFn); | |||
533 | } | |||
534 | ||||
535 | // Remove work items that have been completed. | |||
536 | OutlineInfos = std::move(DeferredOutlines); | |||
537 | } | |||
538 | ||||
539 | OpenMPIRBuilder::~OpenMPIRBuilder() { | |||
540 | assert(OutlineInfos.empty() && "There must be no outstanding outlinings"); | |||
541 | } | |||
542 | ||||
543 | GlobalValue *OpenMPIRBuilder::createGlobalFlag(unsigned Value, StringRef Name) { | |||
544 | IntegerType *I32Ty = Type::getInt32Ty(M.getContext()); | |||
545 | auto *GV = | |||
546 | new GlobalVariable(M, I32Ty, | |||
547 | /* isConstant = */ true, GlobalValue::WeakODRLinkage, | |||
548 | ConstantInt::get(I32Ty, Value), Name); | |||
549 | GV->setVisibility(GlobalValue::HiddenVisibility); | |||
550 | ||||
551 | return GV; | |||
552 | } | |||
553 | ||||
554 | Constant *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr, | |||
555 | uint32_t SrcLocStrSize, | |||
556 | IdentFlag LocFlags, | |||
557 | unsigned Reserve2Flags) { | |||
558 | // Enable "C-mode". | |||
559 | LocFlags |= OMP_IDENT_FLAG_KMPC; | |||
560 | ||||
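| // Idents are cached per (source location string, flags) pair so that repeated | |||
| // requests for the same location reuse a single global variable. | |||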
561 | Constant *&Ident = | |||
562 | IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}]; | |||
563 | if (!Ident) { | |||
564 | Constant *I32Null = ConstantInt::getNullValue(Int32); | |||
565 | Constant *IdentData[] = {I32Null, | |||
566 | ConstantInt::get(Int32, uint32_t(LocFlags)), | |||
567 | ConstantInt::get(Int32, Reserve2Flags), | |||
568 | ConstantInt::get(Int32, SrcLocStrSize), SrcLocStr}; | |||
569 | Constant *Initializer = | |||
570 | ConstantStruct::get(OpenMPIRBuilder::Ident, IdentData); | |||
571 | ||||
572 | // Look for an existing encoding of the location + flags; not strictly needed, | |||
573 | // but it minimizes the difference to the existing solution while we transition. | |||
574 | for (GlobalVariable &GV : M.globals()) | |||
575 | if (GV.getValueType() == OpenMPIRBuilder::Ident && GV.hasInitializer()) | |||
576 | if (GV.getInitializer() == Initializer) | |||
577 | Ident = &GV; | |||
578 | ||||
579 | if (!Ident) { | |||
580 | auto *GV = new GlobalVariable( | |||
581 | M, OpenMPIRBuilder::Ident, | |||
582 | /* isConstant = */ true, GlobalValue::PrivateLinkage, Initializer, "", | |||
583 | nullptr, GlobalValue::NotThreadLocal, | |||
584 | M.getDataLayout().getDefaultGlobalsAddressSpace()); | |||
585 | GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); | |||
586 | GV->setAlignment(Align(8)); | |||
587 | Ident = GV; | |||
588 | } | |||
589 | } | |||
590 | ||||
591 | return ConstantExpr::getPointerBitCastOrAddrSpaceCast(Ident, IdentPtr); | |||
592 | } | |||
593 | ||||
594 | Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr, | |||
595 | uint32_t &SrcLocStrSize) { | |||
596 | SrcLocStrSize = LocStr.size(); | |||
597 | Constant *&SrcLocStr = SrcLocStrMap[LocStr]; | |||
598 | if (!SrcLocStr) { | |||
599 | Constant *Initializer = | |||
600 | ConstantDataArray::getString(M.getContext(), LocStr); | |||
601 | ||||
602 | // Look for an existing encoding of the location; not strictly needed, but it | |||
603 | // minimizes the difference to the existing solution while we transition. | |||
604 | for (GlobalVariable &GV : M.globals()) | |||
605 | if (GV.isConstant() && GV.hasInitializer() && | |||
606 | GV.getInitializer() == Initializer) | |||
607 | return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr); | |||
608 | ||||
609 | SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "", | |||
610 | /* AddressSpace */ 0, &M); | |||
611 | } | |||
612 | return SrcLocStr; | |||
613 | } | |||
614 | ||||
615 | Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName, | |||
616 | StringRef FileName, | |||
617 | unsigned Line, unsigned Column, | |||
618 | uint32_t &SrcLocStrSize) { | |||
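| // Build the ";file;function;line;column;;" source location string consumed by | |||
| // the OpenMP runtime as part of the ident_t location information. | |||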
619 | SmallString<128> Buffer; | |||
620 | Buffer.push_back(';'); | |||
621 | Buffer.append(FileName); | |||
622 | Buffer.push_back(';'); | |||
623 | Buffer.append(FunctionName); | |||
624 | Buffer.push_back(';'); | |||
625 | Buffer.append(std::to_string(Line)); | |||
626 | Buffer.push_back(';'); | |||
627 | Buffer.append(std::to_string(Column)); | |||
628 | Buffer.push_back(';'); | |||
629 | Buffer.push_back(';'); | |||
630 | return getOrCreateSrcLocStr(Buffer.str(), SrcLocStrSize); | |||
631 | } | |||
632 | ||||
633 | Constant * | |||
634 | OpenMPIRBuilder::getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize) { | |||
635 | StringRef UnknownLoc = ";unknown;unknown;0;0;;"; | |||
636 | return getOrCreateSrcLocStr(UnknownLoc, SrcLocStrSize); | |||
637 | } | |||
638 | ||||
639 | Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL, | |||
640 | uint32_t &SrcLocStrSize, | |||
641 | Function *F) { | |||
642 | DILocation *DIL = DL.get(); | |||
643 | if (!DIL) | |||
644 | return getOrCreateDefaultSrcLocStr(SrcLocStrSize); | |||
645 | StringRef FileName = M.getName(); | |||
646 | if (DIFile *DIF = DIL->getFile()) | |||
647 | if (std::optional<StringRef> Source = DIF->getSource()) | |||
648 | FileName = *Source; | |||
649 | StringRef Function = DIL->getScope()->getSubprogram()->getName(); | |||
650 | if (Function.empty() && F) | |||
651 | Function = F->getName(); | |||
652 | return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(), | |||
653 | DIL->getColumn(), SrcLocStrSize); | |||
654 | } | |||
655 | ||||
656 | Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc, | |||
657 | uint32_t &SrcLocStrSize) { | |||
658 | return getOrCreateSrcLocStr(Loc.DL, SrcLocStrSize, | |||
659 | Loc.IP.getBlock()->getParent()); | |||
660 | } | |||
661 | ||||
662 | Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) { | |||
663 | return Builder.CreateCall( | |||
664 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident, | |||
665 | "omp_global_thread_num"); | |||
666 | } | |||
667 | ||||
668 | OpenMPIRBuilder::InsertPointTy | |||
669 | OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK, | |||
670 | bool ForceSimpleCall, bool CheckCancelFlag) { | |||
671 | if (!updateToLocation(Loc)) | |||
672 | return Loc.IP; | |||
673 | return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag); | |||
674 | } | |||
675 | ||||
676 | OpenMPIRBuilder::InsertPointTy | |||
677 | OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind, | |||
678 | bool ForceSimpleCall, bool CheckCancelFlag) { | |||
679 | // Build call __kmpc_cancel_barrier(loc, thread_id) or | |||
680 | // __kmpc_barrier(loc, thread_id); | |||
681 | ||||
682 | IdentFlag BarrierLocFlags; | |||
683 | switch (Kind) { | |||
684 | case OMPD_for: | |||
685 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR; | |||
686 | break; | |||
687 | case OMPD_sections: | |||
688 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS; | |||
689 | break; | |||
690 | case OMPD_single: | |||
691 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE; | |||
692 | break; | |||
693 | case OMPD_barrier: | |||
694 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL; | |||
695 | break; | |||
696 | default: | |||
697 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL; | |||
698 | break; | |||
699 | } | |||
700 | ||||
701 | uint32_t SrcLocStrSize; | |||
702 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
703 | Value *Args[] = { | |||
704 | getOrCreateIdent(SrcLocStr, SrcLocStrSize, BarrierLocFlags), | |||
705 | getOrCreateThreadID(getOrCreateIdent(SrcLocStr, SrcLocStrSize))}; | |||
706 | ||||
707 | // If we are in a cancellable parallel region, barriers are cancellation | |||
708 | // points. | |||
709 | // TODO: Check why we would force simple calls or ignore the cancel flag. | |||
710 | bool UseCancelBarrier = | |||
711 | !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel); | |||
712 | ||||
713 | Value *Result = | |||
714 | Builder.CreateCall(getOrCreateRuntimeFunctionPtr( | |||
715 | UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier | |||
716 | : OMPRTL___kmpc_barrier), | |||
717 | Args); | |||
718 | ||||
719 | if (UseCancelBarrier && CheckCancelFlag) | |||
720 | emitCancelationCheckImpl(Result, OMPD_parallel); | |||
721 | ||||
722 | return Builder.saveIP(); | |||
723 | } | |||
724 | ||||
725 | OpenMPIRBuilder::InsertPointTy | |||
726 | OpenMPIRBuilder::createCancel(const LocationDescription &Loc, | |||
727 | Value *IfCondition, | |||
728 | omp::Directive CanceledDirective) { | |||
729 | if (!updateToLocation(Loc)) | |||
730 | return Loc.IP; | |||
731 | ||||
732 | // LLVM utilities like blocks with terminators. | |||
733 | auto *UI = Builder.CreateUnreachable(); | |||
734 | ||||
735 | Instruction *ThenTI = UI, *ElseTI = nullptr; | |||
736 | if (IfCondition) | |||
737 | SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI); | |||
738 | Builder.SetInsertPoint(ThenTI); | |||
739 | ||||
740 | Value *CancelKind = nullptr; | |||
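| // Map the canceled directive to the runtime's cancellation kind constant via | |||
| // the OMP_CANCEL_KIND entries in OMPKinds.def. | |||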
741 | switch (CanceledDirective) { | |||
742 | #define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \ | |||
743 | case DirectiveEnum: \ | |||
744 | CancelKind = Builder.getInt32(Value); \ | |||
745 | break; | |||
746 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | |||
747 | default: | |||
748 | llvm_unreachable("Unknown cancel kind!"); | |||
749 | } | |||
750 | ||||
751 | uint32_t SrcLocStrSize; | |||
752 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
753 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
754 | Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind}; | |||
755 | Value *Result = Builder.CreateCall( | |||
756 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args); | |||
757 | auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) { | |||
758 | if (CanceledDirective == OMPD_parallel) { | |||
759 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
760 | Builder.restoreIP(IP); | |||
761 | createBarrier(LocationDescription(Builder.saveIP(), Loc.DL), | |||
762 | omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false, | |||
763 | /* CheckCancelFlag */ false); | |||
764 | } | |||
765 | }; | |||
766 | ||||
767 | // The actual cancel logic is shared with others, e.g., cancel_barriers. | |||
768 | emitCancelationCheckImpl(Result, CanceledDirective, ExitCB); | |||
769 | ||||
770 | // Update the insertion point and remove the terminator we introduced. | |||
771 | Builder.SetInsertPoint(UI->getParent()); | |||
772 | UI->eraseFromParent(); | |||
773 | ||||
774 | return Builder.saveIP(); | |||
775 | } | |||
776 | ||||
777 | void OpenMPIRBuilder::emitOffloadingEntry(Constant *Addr, StringRef Name, | |||
778 | uint64_t Size, int32_t Flags, | |||
779 | StringRef SectionName) { | |||
780 | Type *Int8PtrTy = Type::getInt8PtrTy(M.getContext()); | |||
781 | Type *Int32Ty = Type::getInt32Ty(M.getContext()); | |||
782 | Type *SizeTy = M.getDataLayout().getIntPtrType(M.getContext()); | |||
783 | ||||
784 | Constant *AddrName = ConstantDataArray::getString(M.getContext(), Name); | |||
785 | ||||
786 | // Create the constant string used to look up the symbol in the device. | |||
787 | auto *Str = | |||
788 | new llvm::GlobalVariable(M, AddrName->getType(), /*isConstant=*/true, | |||
789 | llvm::GlobalValue::InternalLinkage, AddrName, | |||
790 | ".omp_offloading.entry_name"); | |||
791 | Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); | |||
792 | ||||
793 | // Construct the offloading entry. | |||
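| // Fields: address, name, size in bytes, flags, and a trailing zero (reserved). | |||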
794 | Constant *EntryData[] = { | |||
795 | ConstantExpr::getPointerBitCastOrAddrSpaceCast(Addr, Int8PtrTy), | |||
796 | ConstantExpr::getPointerBitCastOrAddrSpaceCast(Str, Int8PtrTy), | |||
797 | ConstantInt::get(SizeTy, Size), | |||
798 | ConstantInt::get(Int32Ty, Flags), | |||
799 | ConstantInt::get(Int32Ty, 0), | |||
800 | }; | |||
801 | Constant *EntryInitializer = | |||
802 | ConstantStruct::get(OpenMPIRBuilder::OffloadEntry, EntryData); | |||
803 | ||||
804 | auto *Entry = new GlobalVariable( | |||
805 | M, OpenMPIRBuilder::OffloadEntry, | |||
806 | /* isConstant = */ true, GlobalValue::WeakAnyLinkage, EntryInitializer, | |||
807 | ".omp_offloading.entry." + Name, nullptr, GlobalValue::NotThreadLocal, | |||
808 | M.getDataLayout().getDefaultGlobalsAddressSpace()); | |||
809 | ||||
810 | // The entry has to be created in the section the linker expects it to be in. | |||
811 | Entry->setSection(SectionName); | |||
812 | Entry->setAlignment(Align(1)); | |||
813 | } | |||
814 | ||||
815 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitTargetKernel( | |||
816 | const LocationDescription &Loc, InsertPointTy AllocaIP, Value *&Return, | |||
817 | Value *Ident, Value *DeviceID, Value *NumTeams, Value *NumThreads, | |||
818 | Value *HostPtr, ArrayRef<Value *> KernelArgs) { | |||
819 | if (!updateToLocation(Loc)) | |||
820 | return Loc.IP; | |||
821 | ||||
822 | Builder.restoreIP(AllocaIP); | |||
823 | auto *KernelArgsPtr = | |||
824 | Builder.CreateAlloca(OpenMPIRBuilder::KernelArgs, nullptr, "kernel_args"); | |||
825 | Builder.restoreIP(Loc.IP); | |||
826 | ||||
827 | for (unsigned I = 0, Size = KernelArgs.size(); I != Size; ++I) { | |||
828 | llvm::Value *Arg = | |||
829 | Builder.CreateStructGEP(OpenMPIRBuilder::KernelArgs, KernelArgsPtr, I); | |||
830 | Builder.CreateAlignedStore( | |||
831 | KernelArgs[I], Arg, | |||
832 | M.getDataLayout().getPrefTypeAlign(KernelArgs[I]->getType())); | |||
833 | } | |||
834 | ||||
835 | SmallVector<Value *> OffloadingArgs{Ident, DeviceID, NumTeams, | |||
836 | NumThreads, HostPtr, KernelArgsPtr}; | |||
837 | ||||
838 | Return = Builder.CreateCall( | |||
839 | getOrCreateRuntimeFunction(M, OMPRTL___tgt_target_kernel), | |||
840 | OffloadingArgs); | |||
841 | ||||
842 | return Builder.saveIP(); | |||
843 | } | |||
844 | ||||
845 | void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag, | |||
846 | omp::Directive CanceledDirective, | |||
847 | FinalizeCallbackTy ExitCB) { | |||
848 | assert(isLastFinalizationInfoCancellable(CanceledDirective) && | |||
849 | "Unexpected cancellation!"); | |||
850 | ||||
851 | // For a cancel barrier we create two new blocks. | |||
852 | BasicBlock *BB = Builder.GetInsertBlock(); | |||
853 | BasicBlock *NonCancellationBlock; | |||
854 | if (Builder.GetInsertPoint() == BB->end()) { | |||
855 | // TODO: This branch will not be needed once we have moved to the | |||
856 | // OpenMPIRBuilder codegen completely. | |||
857 | NonCancellationBlock = BasicBlock::Create( | |||
858 | BB->getContext(), BB->getName() + ".cont", BB->getParent()); | |||
859 | } else { | |||
860 | NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint()); | |||
861 | BB->getTerminator()->eraseFromParent(); | |||
862 | Builder.SetInsertPoint(BB); | |||
863 | } | |||
864 | BasicBlock *CancellationBlock = BasicBlock::Create( | |||
865 | BB->getContext(), BB->getName() + ".cncl", BB->getParent()); | |||
866 | ||||
867 | // Jump to them based on the return value. | |||
868 | Value *Cmp = Builder.CreateIsNull(CancelFlag); | |||
869 | Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock, | |||
870 | /* TODO weight */ nullptr, nullptr); | |||
871 | ||||
872 | // From the cancellation block we finalize all variables and go to the | |||
873 | // post finalization block that is known to the FiniCB callback. | |||
874 | Builder.SetInsertPoint(CancellationBlock); | |||
875 | if (ExitCB) | |||
876 | ExitCB(Builder.saveIP()); | |||
877 | auto &FI = FinalizationStack.back(); | |||
878 | FI.FiniCB(Builder.saveIP()); | |||
879 | ||||
880 | // The continuation block is where code generation continues. | |||
881 | Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin()); | |||
882 | } | |||
883 | ||||
884 | IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel( | |||
885 | const LocationDescription &Loc, InsertPointTy OuterAllocaIP, | |||
886 | BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, | |||
887 | FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, | |||
888 | omp::ProcBindKind ProcBind, bool IsCancellable) { | |||
889 | assert(!isConflictIP(Loc.IP, OuterAllocaIP) && "IPs must not be ambiguous"); | |||
890 | ||||
891 | if (!updateToLocation(Loc)) | |||
892 | return Loc.IP; | |||
893 | ||||
894 | uint32_t SrcLocStrSize; | |||
895 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
896 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
897 | Value *ThreadID = getOrCreateThreadID(Ident); | |||
898 | ||||
899 | if (NumThreads) { | |||
900 | // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads) | |||
901 | Value *Args[] = { | |||
902 | Ident, ThreadID, | |||
903 | Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)}; | |||
904 | Builder.CreateCall( | |||
905 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args); | |||
906 | } | |||
907 | ||||
908 | if (ProcBind != OMP_PROC_BIND_default) { | |||
909 | // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind) | |||
910 | Value *Args[] = { | |||
911 | Ident, ThreadID, | |||
912 | ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)}; | |||
913 | Builder.CreateCall( | |||
914 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args); | |||
915 | } | |||
916 | ||||
917 | BasicBlock *InsertBB = Builder.GetInsertBlock(); | |||
918 | Function *OuterFn = InsertBB->getParent(); | |||
919 | ||||
920 | // Save the outer alloca block because the insertion iterator may get | |||
921 | // invalidated and we still need this later. | |||
922 | BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock(); | |||
923 | ||||
924 | // Vector to remember instructions we used only during the modeling but which | |||
925 | // we want to delete at the end. | |||
926 | SmallVector<Instruction *, 4> ToBeDeleted; | |||
927 | ||||
928 | // Change the location to the outer alloca insertion point to create and | |||
929 | // initialize the allocas we pass into the parallel region. | |||
930 | Builder.restoreIP(OuterAllocaIP); | |||
931 | AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr"); | |||
932 | AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr"); | |||
933 | ||||
934 | // We only need TIDAddr and ZeroAddr for modeling purposes to get the | |||
935 | // associated arguments in the outlined function, so we delete them later. | |||
936 | ToBeDeleted.push_back(TIDAddr); | |||
937 | ToBeDeleted.push_back(ZeroAddr); | |||
938 | ||||
939 | // Create an artificial insertion point that will also ensure the blocks we | |||
940 | // are about to split do not become degenerate. | |||
941 | auto *UI = new UnreachableInst(Builder.getContext(), InsertBB); | |||
942 | ||||
943 | BasicBlock *EntryBB = UI->getParent(); | |||
944 | BasicBlock *PRegEntryBB = EntryBB->splitBasicBlock(UI, "omp.par.entry"); | |||
945 | BasicBlock *PRegBodyBB = PRegEntryBB->splitBasicBlock(UI, "omp.par.region"); | |||
946 | BasicBlock *PRegPreFiniBB = | |||
947 | PRegBodyBB->splitBasicBlock(UI, "omp.par.pre_finalize"); | |||
948 | BasicBlock *PRegExitBB = PRegPreFiniBB->splitBasicBlock(UI, "omp.par.exit"); | |||
949 | ||||
950 | auto FiniCBWrapper = [&](InsertPointTy IP) { | |||
951 | // Hide "open-ended" blocks from the given FiniCB by setting the right jump | |||
952 | // target to the region exit block. | |||
953 | if (IP.getBlock()->end() == IP.getPoint()) { | |||
954 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
955 | Builder.restoreIP(IP); | |||
956 | Instruction *I = Builder.CreateBr(PRegExitBB); | |||
957 | IP = InsertPointTy(I->getParent(), I->getIterator()); | |||
958 | } | |||
959 | assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 && | |||
960 | IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB && | |||
961 | "Unexpected insertion point for finalization call!"); | |||
962 | return FiniCB(IP); | |||
963 | }; | |||
964 | ||||
965 | FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable}); | |||
966 | ||||
967 | // Generate the privatization allocas in the block that will become the entry | |||
968 | // of the outlined function. | |||
969 | Builder.SetInsertPoint(PRegEntryBB->getTerminator()); | |||
970 | InsertPointTy InnerAllocaIP = Builder.saveIP(); | |||
971 | ||||
972 | AllocaInst *PrivTIDAddr = | |||
973 | Builder.CreateAlloca(Int32, nullptr, "tid.addr.local"); | |||
974 | Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid"); | |||
975 | ||||
976 | // Add some fake uses for OpenMP provided arguments. | |||
977 | ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use")); | |||
978 | Instruction *ZeroAddrUse = | |||
979 | Builder.CreateLoad(Int32, ZeroAddr, "zero.addr.use"); | |||
980 | ToBeDeleted.push_back(ZeroAddrUse); | |||
981 | ||||
982 | // EntryBB | |||
983 | // | | |||
984 | // V | |||
985 | // PRegionEntryBB <- Privatization allocas are placed here. | |||
986 | // | | |||
987 | // V | |||
988 | // PRegionBodyBB <- BodyGen is invoked here. | |||
989 | // | | |||
990 | // V | |||
991 | // PRegPreFiniBB <- The block we will start finalization from. | |||
992 | // | | |||
993 | // V | |||
994 | // PRegionExitBB <- A common exit to simplify block collection. | |||
995 | // | |||
996 | ||||
997 | LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Before body codegen: " << *OuterFn << "\n"; } } while (false); | |||
998 | ||||
999 | // Let the caller create the body. | |||
1000 | assert(BodyGenCB && "Expected body generation callback!"); | |||
1001 | InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin()); | |||
1002 | BodyGenCB(InnerAllocaIP, CodeGenIP); | |||
1003 | ||||
1004 | LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "After body codegen: " << *OuterFn << "\n"; } } while (false); | |||
1005 | FunctionCallee RTLFn; | |||
1006 | if (IfCondition) | |||
1007 | RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call_if); | |||
1008 | else | |||
1009 | RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call); | |||
1010 | ||||
1011 | if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) { | |||
1012 | if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) { | |||
1013 | llvm::LLVMContext &Ctx = F->getContext(); | |||
1014 | MDBuilder MDB(Ctx); | |||
1015 | // Annotate the callback behavior of the __kmpc_fork_call: | |||
1016 | // - The callback callee is argument number 2 (microtask). | |||
1017 | // - The first two arguments of the callback callee are unknown (-1). | |||
1018 | // - All variadic arguments to the __kmpc_fork_call are passed to the | |||
1019 | // callback callee. | |||
1020 | F->addMetadata( | |||
1021 | llvm::LLVMContext::MD_callback, | |||
1022 | *llvm::MDNode::get( | |||
1023 | Ctx, {MDB.createCallbackEncoding(2, {-1, -1}, | |||
1024 | /* VarArgsArePassed */ true)})); | |||
1025 | } | |||
1026 | } | |||
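// Illustrative sketch (not text the builder emits verbatim): with the
// annotation above, the fork-call declaration carries callback metadata
// roughly of the form
//   declare !callback !0 void @__kmpc_fork_call(ptr, i32, ptr, ...)
//   !0 = !{!1}
//   !1 = !{i64 2, i64 -1, i64 -1, i1 true}
// meaning argument 2 is the callback callee, its first two parameters are
// unknown, and all variadic arguments are forwarded to it.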
1027 | ||||
1028 | OutlineInfo OI; | |||
1029 | OI.PostOutlineCB = [=](Function &OutlinedFn) { | |||
1030 | // Add some known attributes. | |||
1031 | OutlinedFn.addParamAttr(0, Attribute::NoAlias); | |||
1032 | OutlinedFn.addParamAttr(1, Attribute::NoAlias); | |||
1033 | OutlinedFn.addFnAttr(Attribute::NoUnwind); | |||
1034 | OutlinedFn.addFnAttr(Attribute::NoRecurse); | |||
1035 | ||||
1036 | assert(OutlinedFn.arg_size() >= 2 && | |||
1037 | "Expected at least tid and bounded tid as arguments"); | |||
1038 | unsigned NumCapturedVars = | |||
1039 | OutlinedFn.arg_size() - /* tid & bounded tid */ 2; | |||
1040 | ||||
1041 | CallInst *CI = cast<CallInst>(OutlinedFn.user_back()); | |||
1042 | CI->getParent()->setName("omp_parallel"); | |||
1043 | Builder.SetInsertPoint(CI); | |||
1044 | ||||
1045 | // Build call __kmpc_fork_call[_if](Ident, n, microtask, var1, .., varn); | |||
1046 | Value *ForkCallArgs[] = { | |||
1047 | Ident, Builder.getInt32(NumCapturedVars), | |||
1048 | Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)}; | |||
1049 | ||||
1050 | SmallVector<Value *, 16> RealArgs; | |||
1051 | RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs)); | |||
1052 | if (IfCondition) { | |||
1053 | Value *Cond = Builder.CreateSExtOrTrunc(IfCondition, | |||
1054 | Type::getInt32Ty(M.getContext())); | |||
1055 | RealArgs.push_back(Cond); | |||
1056 | } | |||
1057 | RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end()); | |||
1058 | ||||
1059 | // __kmpc_fork_call_if always expects a void ptr as the last argument | |||
1060 | // If there are no arguments, pass a null pointer. | |||
1061 | auto PtrTy = Type::getInt8PtrTy(M.getContext()); | |||
1062 | if (IfCondition && NumCapturedVars == 0) { | |||
1063 | llvm::Value *Void = ConstantPointerNull::get(PtrTy); | |||
1064 | RealArgs.push_back(Void); | |||
1065 | } | |||
1066 | if (IfCondition && RealArgs.back()->getType() != PtrTy) | |||
1067 | RealArgs.back() = Builder.CreateBitCast(RealArgs.back(), PtrTy); | |||
1068 | ||||
1069 | Builder.CreateCall(RTLFn, RealArgs); | |||
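// Hedged sketch of the call built here (names and pointer types are
// illustrative only):
//   call void (ptr, i32, ptr, ...)
//       @__kmpc_fork_call(ptr @<ident>, i32 <NumCapturedVars>,
//                         ptr @<outlined fn>, ptr %<captured var 0>, ...)
// With an `if` clause, @__kmpc_fork_call_if is called instead and the i32
// condition plus, if needed, a trailing pointer argument are included as
// prepared above.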
1070 | ||||
1071 | LLVM_DEBUG(dbgs() << "With fork_call placed: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "With fork_call placed: " << *Builder.GetInsertBlock()->getParent() << "\n" ; } } while (false) | |||
1072 | << *Builder.GetInsertBlock()->getParent() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "With fork_call placed: " << *Builder.GetInsertBlock()->getParent() << "\n" ; } } while (false); | |||
1073 | ||||
1074 | InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end()); | |||
1075 | ||||
1076 | // Initialize the local TID stack location with the argument value. | |||
1077 | Builder.SetInsertPoint(PrivTID); | |||
1078 | Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin(); | |||
1079 | Builder.CreateStore(Builder.CreateLoad(Int32, OutlinedAI), PrivTIDAddr); | |||
1080 | ||||
1081 | CI->eraseFromParent(); | |||
1082 | ||||
1083 | for (Instruction *I : ToBeDeleted) | |||
1084 | I->eraseFromParent(); | |||
1085 | }; | |||
1086 | ||||
1087 | // Adjust the finalization stack, verify the adjustment, and call the | |||
1088 | // finalize function one last time to finalize values between the pre-fini | |||
1089 | // block and the exit block if we left the parallel region "the normal way". | |||
1090 | auto FiniInfo = FinalizationStack.pop_back_val(); | |||
1091 | (void)FiniInfo; | |||
1092 | assert(FiniInfo.DK == OMPD_parallel && | |||
1093 | "Unexpected finalization stack state!"); | |||
1094 | ||||
1095 | Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator(); | |||
1096 | ||||
1097 | InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator()); | |||
1098 | FiniCB(PreFiniIP); | |||
1099 | ||||
1100 | OI.OuterAllocaBB = OuterAllocaBlock; | |||
1101 | OI.EntryBB = PRegEntryBB; | |||
1102 | OI.ExitBB = PRegExitBB; | |||
1103 | ||||
1104 | SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet; | |||
1105 | SmallVector<BasicBlock *, 32> Blocks; | |||
1106 | OI.collectBlocks(ParallelRegionBlockSet, Blocks); | |||
1107 | ||||
1108 | // Ensure a single exit node for the outlined region by creating one. | |||
1109 | // We might have multiple incoming edges to the exit now due to finalizations, | |||
1110 | // e.g., cancel calls that cause the control flow to leave the region. | |||
1111 | BasicBlock *PRegOutlinedExitBB = PRegExitBB; | |||
1112 | PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt()); | |||
1113 | PRegOutlinedExitBB->setName("omp.par.outlined.exit"); | |||
1114 | Blocks.push_back(PRegOutlinedExitBB); | |||
1115 | ||||
1116 | CodeExtractorAnalysisCache CEAC(*OuterFn); | |||
1117 | CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr, | |||
1118 | /* AggregateArgs */ false, | |||
1119 | /* BlockFrequencyInfo */ nullptr, | |||
1120 | /* BranchProbabilityInfo */ nullptr, | |||
1121 | /* AssumptionCache */ nullptr, | |||
1122 | /* AllowVarArgs */ true, | |||
1123 | /* AllowAlloca */ true, | |||
1124 | /* AllocationBlock */ OuterAllocaBlock, | |||
1125 | /* Suffix */ ".omp_par"); | |||
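// Note: this CodeExtractor instance is only used below to discover the
// region's allocas and inputs/outputs. The actual outlining happens later,
// when the OutlineInfo registered at the end of this function is finalized;
// at that point the PostOutlineCB above replaces the stale call to the
// outlined function with the __kmpc_fork_call[_if] invocation.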
1126 | ||||
1127 | // Find inputs to, outputs from the code region. | |||
1128 | BasicBlock *CommonExit = nullptr; | |||
1129 | SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands; | |||
1130 | Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit); | |||
1131 | Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands); | |||
1132 | ||||
1133 | LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Before privatization: " << *OuterFn << "\n"; } } while (false); | |||
1134 | ||||
1135 | FunctionCallee TIDRTLFn = | |||
1136 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num); | |||
1137 | ||||
1138 | auto PrivHelper = [&](Value &V) { | |||
1139 | if (&V == TIDAddr || &V == ZeroAddr) { | |||
1140 | OI.ExcludeArgsFromAggregate.push_back(&V); | |||
1141 | return; | |||
1142 | } | |||
1143 | ||||
1144 | SetVector<Use *> Uses; | |||
1145 | for (Use &U : V.uses()) | |||
1146 | if (auto *UserI = dyn_cast<Instruction>(U.getUser())) | |||
1147 | if (ParallelRegionBlockSet.count(UserI->getParent())) | |||
1148 | Uses.insert(&U); | |||
1149 | ||||
1150 | // __kmpc_fork_call expects extra arguments as pointers. If the input | |||
1151 | // already has a pointer type, everything is fine. Otherwise, store the | |||
1152 | // value onto stack and load it back inside the to-be-outlined region. This | |||
1153 | // will ensure only the pointer will be passed to the function. | |||
1154 | // FIXME: if there are more than 15 trailing arguments, they must be | |||
1155 | // additionally packed in a struct. | |||
1156 | Value *Inner = &V; | |||
1157 | if (!V.getType()->isPointerTy()) { | |||
1158 | IRBuilder<>::InsertPointGuard Guard(Builder); | |||
1159 | LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { llvm::dbgs() << "Forwarding input as pointer: " << V << "\n"; } } while (false); | |||
1160 | ||||
1161 | Builder.restoreIP(OuterAllocaIP); | |||
1162 | Value *Ptr = | |||
1163 | Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded"); | |||
1164 | ||||
1165 | // Store to stack at end of the block that currently branches to the entry | |||
1166 | // block of the to-be-outlined region. | |||
1167 | Builder.SetInsertPoint(InsertBB, | |||
1168 | InsertBB->getTerminator()->getIterator()); | |||
1169 | Builder.CreateStore(&V, Ptr); | |||
1170 | ||||
1171 | // Load back next to allocations in the to-be-outlined region. | |||
1172 | Builder.restoreIP(InnerAllocaIP); | |||
1173 | Inner = Builder.CreateLoad(V.getType(), Ptr); | |||
1174 | } | |||
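// For a non-pointer input %v this effectively materializes (sketch only,
// names are illustrative):
//   outer alloca block:  %v.reloaded = alloca <ty>
//   before the region:   store <ty> %v, ptr %v.reloaded
//   inner alloca point:  %v.inner = load <ty>, ptr %v.reloaded
// so only the pointer %v.reloaded is captured by the outlined function.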
1175 | ||||
1176 | Value *ReplacementValue = nullptr; | |||
1177 | CallInst *CI = dyn_cast<CallInst>(&V); | |||
1178 | if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) { | |||
1179 | ReplacementValue = PrivTID; | |||
1180 | } else { | |||
1181 | Builder.restoreIP( | |||
1182 | PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue)); | |||
1183 | assert(ReplacementValue && | |||
1184 | "Expected copy/create callback to set replacement value!"); | |||
1185 | if (ReplacementValue == &V) | |||
1186 | return; | |||
1187 | } | |||
1188 | ||||
1189 | for (Use *UPtr : Uses) | |||
1190 | UPtr->set(ReplacementValue); | |||
1191 | }; | |||
1192 | ||||
1193 | // Reset the inner alloca insertion as it will be used for loading the values | |||
1194 | // wrapped into pointers before passing them into the to-be-outlined region. | |||
1195 | // Configure it to insert immediately after the fake use of zero address so | |||
1196 | // that they are available in the generated body and so that the | |||
1197 | // OpenMP-related values (thread ID and zero address pointers) remain leading | |||
1198 | // in the argument list. | |||
1199 | InnerAllocaIP = IRBuilder<>::InsertPoint( | |||
1200 | ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator()); | |||
1201 | ||||
1202 | // Reset the outer alloca insertion point to the entry of the relevant block | |||
1203 | // in case it was invalidated. | |||
1204 | OuterAllocaIP = IRBuilder<>::InsertPoint( | |||
1205 | OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt()); | |||
1206 | ||||
1207 | for (Value *Input : Inputs) { | |||
1208 | LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Captured input: " << *Input << "\n"; } } while (false); | |||
1209 | PrivHelper(*Input); | |||
1210 | } | |||
1211 | LLVM_DEBUG({ | |||
1212 | for (Value *Output : Outputs) | |||
1213 | LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n"); | |||
1214 | }); | |||
1215 | assert(Outputs.empty() && | |||
1216 | "OpenMP outlining should not produce live-out values!"); | |||
1217 | ||||
1218 | LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "After privatization: " << *OuterFn << "\n"; } } while (false); | |||
1219 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() << " PBR: " << BB->getName() << "\n"; }; } } while (false) | |||
1220 | for (auto *BB : Blocks)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() << " PBR: " << BB->getName() << "\n"; }; } } while (false) | |||
1221 | dbgs() << " PBR: " << BB->getName() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() << " PBR: " << BB->getName() << "\n"; }; } } while (false) | |||
1222 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() << " PBR: " << BB->getName() << "\n"; }; } } while (false); | |||
1223 | ||||
1224 | // Register the outlined info. | |||
1225 | addOutlineInfo(std::move(OI)); | |||
1226 | ||||
1227 | InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end()); | |||
1228 | UI->eraseFromParent(); | |||
1229 | ||||
1230 | return AfterIP; | |||
1231 | } | |||
1232 | ||||
1233 | void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) { | |||
1234 | // Build call void __kmpc_flush(ident_t *loc) | |||
1235 | uint32_t SrcLocStrSize; | |||
1236 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
1237 | Value *Args[] = {getOrCreateIdent(SrcLocStr, SrcLocStrSize)}; | |||
1238 | ||||
1239 | Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args); | |||
1240 | } | |||
1241 | ||||
1242 | void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) { | |||
1243 | if (!updateToLocation(Loc)) | |||
1244 | return; | |||
1245 | emitFlush(Loc); | |||
1246 | } | |||
1247 | ||||
1248 | void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) { | |||
1249 | // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 | |||
1250 | // global_tid); | |||
1251 | uint32_t SrcLocStrSize; | |||
1252 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
1253 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
1254 | Value *Args[] = {Ident, getOrCreateThreadID(Ident)}; | |||
1255 | ||||
1256 | // Ignore return result until untied tasks are supported. | |||
1257 | Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait), | |||
1258 | Args); | |||
1259 | } | |||
1260 | ||||
1261 | void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) { | |||
1262 | if (!updateToLocation(Loc)) | |||
1263 | return; | |||
1264 | emitTaskwaitImpl(Loc); | |||
1265 | } | |||
1266 | ||||
1267 | void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) { | |||
1268 | // Build call __kmpc_omp_taskyield(loc, thread_id, 0); | |||
1269 | uint32_t SrcLocStrSize; | |||
1270 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
1271 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
1272 | Constant *I32Null = ConstantInt::getNullValue(Int32); | |||
1273 | Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null}; | |||
1274 | ||||
1275 | Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield), | |||
1276 | Args); | |||
1277 | } | |||
1278 | ||||
1279 | void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) { | |||
1280 | if (!updateToLocation(Loc)) | |||
1281 | return; | |||
1282 | emitTaskyieldImpl(Loc); | |||
1283 | } | |||
1284 | ||||
1285 | OpenMPIRBuilder::InsertPointTy | |||
1286 | OpenMPIRBuilder::createTask(const LocationDescription &Loc, | |||
1287 | InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, | |||
1288 | bool Tied, Value *Final, Value *IfCondition, | |||
1289 | SmallVector<DependData> Dependencies) { | |||
1290 | if (!updateToLocation(Loc)) | |||
1291 | return InsertPointTy(); | |||
1292 | ||||
1293 | uint32_t SrcLocStrSize; | |||
1294 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
1295 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
1296 | // The current basic block is split into four basic blocks. After outlining, | |||
1297 | // they will be mapped as follows: | |||
1298 | // ``` | |||
1299 | // def current_fn() { | |||
1300 | // current_basic_block: | |||
1301 | // br label %task.exit | |||
1302 | // task.exit: | |||
1303 | // ; instructions after task | |||
1304 | // } | |||
1305 | // def outlined_fn() { | |||
1306 | // task.alloca: | |||
1307 | // br label %task.body | |||
1308 | // task.body: | |||
1309 | // ret void | |||
1310 | // } | |||
1311 | // ``` | |||
1312 | BasicBlock *TaskExitBB = splitBB(Builder, /*CreateBranch=*/true, "task.exit"); | |||
1313 | BasicBlock *TaskBodyBB = splitBB(Builder, /*CreateBranch=*/true, "task.body"); | |||
1314 | BasicBlock *TaskAllocaBB = | |||
1315 | splitBB(Builder, /*CreateBranch=*/true, "task.alloca"); | |||
1316 | ||||
1317 | OutlineInfo OI; | |||
1318 | OI.EntryBB = TaskAllocaBB; | |||
1319 | OI.OuterAllocaBB = AllocaIP.getBlock(); | |||
1320 | OI.ExitBB = TaskExitBB; | |||
1321 | OI.PostOutlineCB = [this, Ident, Tied, Final, IfCondition, | |||
1322 | Dependencies](Function &OutlinedFn) { | |||
1323 | // The input IR here looks like the following- | |||
1324 | // ``` | |||
1325 | // func @current_fn() { | |||
1326 | // outlined_fn(%args) | |||
1327 | // } | |||
1328 | // func @outlined_fn(%args) { ... } | |||
1329 | // ``` | |||
1330 | // | |||
1331 | // This is changed to the following- | |||
1332 | // | |||
1333 | // ``` | |||
1334 | // func @current_fn() { | |||
1335 | // runtime_call(..., wrapper_fn, ...) | |||
1336 | // } | |||
1337 | // func @wrapper_fn(..., %args) { | |||
1338 | // outlined_fn(%args) | |||
1339 | // } | |||
1340 | // func @outlined_fn(%args) { ... } | |||
1341 | // ``` | |||
1342 | ||||
1343 | // The stale call instruction will be replaced with a new call instruction | |||
1344 | // for runtime call with a wrapper function. | |||
1345 | assert(OutlinedFn.getNumUses() == 1 && | |||
1346 | "there must be a single user for the outlined function"); | |||
1347 | CallInst *StaleCI = cast<CallInst>(OutlinedFn.user_back()); | |||
1348 | ||||
1349 | // HasTaskData is true if any variables are captured in the outlined region, | |||
1350 | // false otherwise. | |||
1351 | bool HasTaskData = StaleCI->arg_size() > 0; | |||
1352 | Builder.SetInsertPoint(StaleCI); | |||
1353 | ||||
1354 | // Gather the arguments for emitting the runtime call for | |||
1355 | // @__kmpc_omp_task_alloc | |||
1356 | Function *TaskAllocFn = | |||
1357 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_alloc); | |||
1358 | ||||
1359 | // Arguments - `loc_ref` (Ident) and `gtid` (ThreadID) for the task allocation | |||
1360 | // call. | |||
1361 | Value *ThreadID = getOrCreateThreadID(Ident); | |||
1362 | ||||
1363 | // Argument - `flags` | |||
1364 | // Task is tied iff (Flags & 1) == 1. | |||
1365 | // Task is untied iff (Flags & 1) == 0. | |||
1366 | // Task is final iff (Flags & 2) == 2. | |||
1367 | // Task is not final iff (Flags & 2) == 0. | |||
1368 | // TODO: Handle the other flags. | |||
1369 | Value *Flags = Builder.getInt32(Tied); | |||
1370 | if (Final) { | |||
1371 | Value *FinalFlag = | |||
1372 | Builder.CreateSelect(Final, Builder.getInt32(2), Builder.getInt32(0)); | |||
1373 | Flags = Builder.CreateOr(FinalFlag, Flags); | |||
1374 | } | |||
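// Worked example: for a tied task whose `final` clause evaluates to true at
// runtime, Flags becomes 1 | 2 == 3; for an untied task without a `final`
// clause it stays 0.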
1375 | ||||
1376 | // Argument - `sizeof_kmp_task_t` (TaskSize) | |||
1377 | // Tasksize refers to the size in bytes of kmp_task_t data structure | |||
1378 | // including private vars accessed in task. | |||
1379 | Value *TaskSize = Builder.getInt64(0); | |||
1380 | if (HasTaskData) { | |||
1381 | AllocaInst *ArgStructAlloca = | |||
1382 | dyn_cast<AllocaInst>(StaleCI->getArgOperand(0)); | |||
1383 | assert(ArgStructAlloca && | |||
1384 | "Unable to find the alloca instruction corresponding to arguments " | |||
1385 | "for extracted function"); | |||
1386 | StructType *ArgStructType = | |||
1387 | dyn_cast<StructType>(ArgStructAlloca->getAllocatedType()); | |||
1388 | assert(ArgStructType && "Unable to find struct type corresponding to " | |||
1389 | "arguments for extracted function"); | |||
1390 | TaskSize = | |||
1391 | Builder.getInt64(M.getDataLayout().getTypeStoreSize(ArgStructType)); | |||
1392 | } | |||
1393 | ||||
1394 | // TODO: Argument - sizeof_shareds | |||
1395 | ||||
1396 | // Argument - task_entry (the wrapper function) | |||
1397 | // If the outlined function has some captured variables (i.e. HasTaskData is | |||
1398 | // true), then the wrapper function will have an additional argument (the | |||
1399 | // struct containing captured variables). Otherwise, no such argument will | |||
1400 | // be present. | |||
1401 | SmallVector<Type *> WrapperArgTys{Builder.getInt32Ty()}; | |||
1402 | if (HasTaskData) | |||
1403 | WrapperArgTys.push_back(OutlinedFn.getArg(0)->getType()); | |||
1404 | FunctionCallee WrapperFuncVal = M.getOrInsertFunction( | |||
1405 | (Twine(OutlinedFn.getName()) + ".wrapper").str(), | |||
1406 | FunctionType::get(Builder.getInt32Ty(), WrapperArgTys, false)); | |||
1407 | Function *WrapperFunc = dyn_cast<Function>(WrapperFuncVal.getCallee()); | |||
1408 | PointerType *WrapperFuncBitcastType = | |||
1409 | FunctionType::get(Builder.getInt32Ty(), | |||
1410 | {Builder.getInt32Ty(), Builder.getInt8PtrTy()}, false) | |||
1411 | ->getPointerTo(); | |||
1412 | Value *WrapperFuncBitcast = | |||
1413 | ConstantExpr::getBitCast(WrapperFunc, WrapperFuncBitcastType); | |||
1414 | ||||
1415 | // Emit the @__kmpc_omp_task_alloc runtime call | |||
1416 | // The runtime call returns a pointer to an area where the task captured | |||
1417 | // variables must be copied before the task is run (NewTaskData) | |||
1418 | CallInst *NewTaskData = Builder.CreateCall( | |||
1419 | TaskAllocFn, | |||
1420 | {/*loc_ref=*/Ident, /*gtid=*/ThreadID, /*flags=*/Flags, | |||
1421 | /*sizeof_task=*/TaskSize, /*sizeof_shared=*/Builder.getInt64(0), | |||
1422 | /*task_func=*/WrapperFuncBitcast}); | |||
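// For reference, the runtime entry assumed here follows the usual libomp
// convention, roughly:
//   kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc, kmp_int32 gtid,
//                                     kmp_int32 flags, size_t sizeof_kmp_task_t,
//                                     size_t sizeof_shareds, task_entry_fn);
// sizeof_shareds is passed as 0 for now (see the sizeof_shareds TODO above).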
1423 | ||||
1424 | // Copy the arguments for outlined function | |||
1425 | if (HasTaskData) { | |||
1426 | Value *TaskData = StaleCI->getArgOperand(0); | |||
1427 | Align Alignment = TaskData->getPointerAlignment(M.getDataLayout()); | |||
1428 | Builder.CreateMemCpy(NewTaskData, Alignment, TaskData, Alignment, | |||
1429 | TaskSize); | |||
1430 | } | |||
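// After this memcpy the task-allocated area (NewTaskData) holds its own copy
// of the captured variables, so the spawned task no longer depends on the
// stack frame of the spawning function.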
1431 | ||||
1432 | Value *DepArrayPtr = nullptr; | |||
1433 | if (Dependencies.size()) { | |||
1434 | InsertPointTy OldIP = Builder.saveIP(); | |||
1435 | Builder.SetInsertPoint( | |||
1436 | &OldIP.getBlock()->getParent()->getEntryBlock().back()); | |||
1437 | ||||
1438 | Type *DepArrayTy = ArrayType::get(DependInfo, Dependencies.size()); | |||
1439 | Value *DepArray = | |||
1440 | Builder.CreateAlloca(DepArrayTy, nullptr, ".dep.arr.addr"); | |||
1441 | ||||
1442 | unsigned P = 0; | |||
1443 | for (const DependData &Dep : Dependencies) { | |||
1444 | Value *Base = | |||
1445 | Builder.CreateConstInBoundsGEP2_64(DepArrayTy, DepArray, 0, P); | |||
1446 | // Store the pointer to the variable | |||
1447 | Value *Addr = Builder.CreateStructGEP( | |||
1448 | DependInfo, Base, | |||
1449 | static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)); | |||
1450 | Value *DepValPtr = | |||
1451 | Builder.CreatePtrToInt(Dep.DepVal, Builder.getInt64Ty()); | |||
1452 | Builder.CreateStore(DepValPtr, Addr); | |||
1453 | // Store the size of the variable | |||
1454 | Value *Size = Builder.CreateStructGEP( | |||
1455 | DependInfo, Base, | |||
1456 | static_cast<unsigned int>(RTLDependInfoFields::Len)); | |||
1457 | Builder.CreateStore(Builder.getInt64(M.getDataLayout().getTypeStoreSize( | |||
1458 | Dep.DepValueType)), | |||
1459 | Size); | |||
1460 | // Store the dependency kind | |||
1461 | Value *Flags = Builder.CreateStructGEP( | |||
1462 | DependInfo, Base, | |||
1463 | static_cast<unsigned int>(RTLDependInfoFields::Flags)); | |||
1464 | Builder.CreateStore( | |||
1465 | ConstantInt::get(Builder.getInt8Ty(), | |||
1466 | static_cast<unsigned int>(Dep.DepKind)), | |||
1467 | Flags); | |||
1468 | ++P; | |||
1469 | } | |||
1470 | ||||
1471 | DepArrayPtr = Builder.CreateBitCast(DepArray, Builder.getInt8PtrTy()); | |||
1472 | Builder.restoreIP(OldIP); | |||
1473 | } | |||
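// Each element written above is one kmp_depend_info record holding, per the
// fields used here, the dependence base address, its length in bytes, and
// the dependence kind flags; DepArrayPtr is the i8* view of this on-stack
// array that is passed to __kmpc_omp_task_with_deps below.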
1474 | ||||
1475 | // In the presence of the `if` clause, the following IR is generated: | |||
1476 | // ... | |||
1477 | // %data = call @__kmpc_omp_task_alloc(...) | |||
1478 | // br i1 %if_condition, label %then, label %else | |||
1479 | // then: | |||
1480 | // call @__kmpc_omp_task(...) | |||
1481 | // br label %exit | |||
1482 | // else: | |||
1483 | // call @__kmpc_omp_task_begin_if0(...) | |||
1484 | // call @wrapper_fn(...) | |||
1485 | // call @__kmpc_omp_task_complete_if0(...) | |||
1486 | // br label %exit | |||
1487 | // exit: | |||
1488 | // ... | |||
1489 | if (IfCondition) { | |||
1490 | // `SplitBlockAndInsertIfThenElse` requires the block to have a | |||
1491 | // terminator. | |||
1492 | BasicBlock *NewBasicBlock = | |||
1493 | splitBB(Builder, /*CreateBranch=*/true, "if.end"); | |||
1494 | Instruction *IfTerminator = | |||
1495 | NewBasicBlock->getSinglePredecessor()->getTerminator(); | |||
1496 | Instruction *ThenTI = IfTerminator, *ElseTI = nullptr; | |||
1497 | Builder.SetInsertPoint(IfTerminator); | |||
1498 | SplitBlockAndInsertIfThenElse(IfCondition, IfTerminator, &ThenTI, | |||
1499 | &ElseTI); | |||
1500 | Builder.SetInsertPoint(ElseTI); | |||
1501 | Function *TaskBeginFn = | |||
1502 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_begin_if0); | |||
1503 | Function *TaskCompleteFn = | |||
1504 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_complete_if0); | |||
1505 | Builder.CreateCall(TaskBeginFn, {Ident, ThreadID, NewTaskData}); | |||
1506 | if (HasTaskData) | |||
1507 | Builder.CreateCall(WrapperFunc, {ThreadID, NewTaskData}); | |||
1508 | else | |||
1509 | Builder.CreateCall(WrapperFunc, {ThreadID}); | |||
1510 | Builder.CreateCall(TaskCompleteFn, {Ident, ThreadID, NewTaskData}); | |||
1511 | Builder.SetInsertPoint(ThenTI); | |||
1512 | } | |||
1513 | ||||
1514 | if (Dependencies.size()) { | |||
1515 | Function *TaskFn = | |||
1516 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task_with_deps); | |||
1517 | Builder.CreateCall( | |||
1518 | TaskFn, | |||
1519 | {Ident, ThreadID, NewTaskData, Builder.getInt32(Dependencies.size()), | |||
1520 | DepArrayPtr, ConstantInt::get(Builder.getInt32Ty(), 0), | |||
1521 | ConstantPointerNull::get(Type::getInt8PtrTy(M.getContext()))}); | |||
1522 | ||||
1523 | } else { | |||
1524 | // Emit the @__kmpc_omp_task runtime call to spawn the task | |||
1525 | Function *TaskFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_task); | |||
1526 | Builder.CreateCall(TaskFn, {Ident, ThreadID, NewTaskData}); | |||
1527 | } | |||
1528 | ||||
1529 | StaleCI->eraseFromParent(); | |||
1530 | ||||
1531 | // Emit the body for wrapper function | |||
1532 | BasicBlock *WrapperEntryBB = | |||
1533 | BasicBlock::Create(M.getContext(), "", WrapperFunc); | |||
1534 | Builder.SetInsertPoint(WrapperEntryBB); | |||
1535 | if (HasTaskData) | |||
1536 | Builder.CreateCall(&OutlinedFn, {WrapperFunc->getArg(1)}); | |||
1537 | else | |||
1538 | Builder.CreateCall(&OutlinedFn); | |||
1539 | Builder.CreateRet(Builder.getInt32(0)); | |||
1540 | }; | |||
1541 | ||||
1542 | addOutlineInfo(std::move(OI)); | |||
1543 | ||||
1544 | InsertPointTy TaskAllocaIP = | |||
1545 | InsertPointTy(TaskAllocaBB, TaskAllocaBB->begin()); | |||
1546 | InsertPointTy TaskBodyIP = InsertPointTy(TaskBodyBB, TaskBodyBB->begin()); | |||
1547 | BodyGenCB(TaskAllocaIP, TaskBodyIP); | |||
1548 | Builder.SetInsertPoint(TaskExitBB, TaskExitBB->begin()); | |||
1549 | ||||
1550 | return Builder.saveIP(); | |||
1551 | } | |||
1552 | ||||
1553 | OpenMPIRBuilder::InsertPointTy | |||
1554 | OpenMPIRBuilder::createTaskgroup(const LocationDescription &Loc, | |||
1555 | InsertPointTy AllocaIP, | |||
1556 | BodyGenCallbackTy BodyGenCB) { | |||
1557 | if (!updateToLocation(Loc)) | |||
1558 | return InsertPointTy(); | |||
1559 | ||||
1560 | uint32_t SrcLocStrSize; | |||
1561 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
1562 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
1563 | Value *ThreadID = getOrCreateThreadID(Ident); | |||
1564 | ||||
1565 | // Emit the @__kmpc_taskgroup runtime call to start the taskgroup | |||
1566 | Function *TaskgroupFn = | |||
1567 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_taskgroup); | |||
1568 | Builder.CreateCall(TaskgroupFn, {Ident, ThreadID}); | |||
1569 | ||||
1570 | BasicBlock *TaskgroupExitBB = splitBB(Builder, true, "taskgroup.exit"); | |||
1571 | BodyGenCB(AllocaIP, Builder.saveIP()); | |||
1572 | ||||
1573 | Builder.SetInsertPoint(TaskgroupExitBB); | |||
1574 | // Emit the @__kmpc_end_taskgroup runtime call to end the taskgroup | |||
1575 | Function *EndTaskgroupFn = | |||
1576 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_taskgroup); | |||
1577 | Builder.CreateCall(EndTaskgroupFn, {Ident, ThreadID}); | |||
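// Note: the body emitted via BodyGenCB runs between the two runtime calls;
// per OpenMP taskgroup semantics, __kmpc_end_taskgroup is expected to block
// until the tasks generated inside the region have completed.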
1578 | ||||
1579 | return Builder.saveIP(); | |||
1580 | } | |||
1581 | ||||
1582 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections( | |||
1583 | const LocationDescription &Loc, InsertPointTy AllocaIP, | |||
1584 | ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB, | |||
1585 | FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) { | |||
1586 | assert(!isConflictIP(AllocaIP, Loc.IP) && "Dedicated IP allocas required"); | |||
1587 | ||||
1588 | if (!updateToLocation(Loc)) | |||
1589 | return Loc.IP; | |||
1590 | ||||
1591 | auto FiniCBWrapper = [&](InsertPointTy IP) { | |||
1592 | if (IP.getBlock()->end() != IP.getPoint()) | |||
1593 | return FiniCB(IP); | |||
1594 | // This must be done, otherwise any nested constructs using FinalizeOMPRegion | |||
1595 | // will fail because that function requires the Finalization Basic Block to | |||
1596 | // have a terminator, which is already removed by EmitOMPRegionBody. | |||
1597 | // IP is currently at the cancellation block. | |||
1598 | // We need to backtrack to the condition block to fetch | |||
1599 | // the exit block and create a branch from the cancellation | |||
1600 | // block to the exit block. | |||
1601 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
1602 | Builder.restoreIP(IP); | |||
1603 | auto *CaseBB = IP.getBlock()->getSinglePredecessor(); | |||
1604 | auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor(); | |||
1605 | auto *ExitBB = CondBB->getTerminator()->getSuccessor(1); | |||
1606 | Instruction *I = Builder.CreateBr(ExitBB); | |||
1607 | IP = InsertPointTy(I->getParent(), I->getIterator()); | |||
1608 | return FiniCB(IP); | |||
1609 | }; | |||
1610 | ||||
1611 | FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable}); | |||
1612 | ||||
1613 | // Each section is emitted as a switch case | |||
1614 | // Each finalization callback is handled from clang.EmitOMPSectionDirective() | |||
1615 | // -> OMP.createSection() which generates the IR for each section | |||
1616 | // Iterate through all sections and emit a switch construct: | |||
1617 | // switch (IV) { | |||
1618 | // case 0: | |||
1619 | // <SectionStmt[0]>; | |||
1620 | // break; | |||
1621 | // ... | |||
1622 | // case <NumSection> - 1: | |||
1623 | // <SectionStmt[<NumSection> - 1]>; | |||
1624 | // break; | |||
1625 | // } | |||
1626 | // ... | |||
1627 | // section_loop.after: | |||
1628 | // <FiniCB>; | |||
1629 | auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) { | |||
1630 | Builder.restoreIP(CodeGenIP); | |||
1631 | BasicBlock *Continue = | |||
1632 | splitBBWithSuffix(Builder, /*CreateBranch=*/false, ".sections.after"); | |||
1633 | Function *CurFn = Continue->getParent(); | |||
1634 | SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, Continue); | |||
1635 | ||||
1636 | unsigned CaseNumber = 0; | |||
1637 | for (auto SectionCB : SectionCBs) { | |||
1638 | BasicBlock *CaseBB = BasicBlock::Create( | |||
1639 | M.getContext(), "omp_section_loop.body.case", CurFn, Continue); | |||
1640 | SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB); | |||
1641 | Builder.SetInsertPoint(CaseBB); | |||
1642 | BranchInst *CaseEndBr = Builder.CreateBr(Continue); | |||
1643 | SectionCB(InsertPointTy(), | |||
1644 | {CaseEndBr->getParent(), CaseEndBr->getIterator()}); | |||
1645 | CaseNumber++; | |||
1646 | } | |||
1647 | // remove the existing terminator from body BB since there can be no | |||
1648 | // terminators after switch/case | |||
1649 | }; | |||
1650 | // Loop body ends here | |||
1651 | // LowerBound, UpperBound, and Stride for createCanonicalLoop | |||
1652 | Type *I32Ty = Type::getInt32Ty(M.getContext()); | |||
1653 | Value *LB = ConstantInt::get(I32Ty, 0); | |||
1654 | Value *UB = ConstantInt::get(I32Ty, SectionCBs.size()); | |||
1655 | Value *ST = ConstantInt::get(I32Ty, 1); | |||
1656 | llvm::CanonicalLoopInfo *LoopInfo = createCanonicalLoop( | |||
1657 | Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop"); | |||
1658 | InsertPointTy AfterIP = | |||
1659 | applyStaticWorkshareLoop(Loc.DL, LoopInfo, AllocaIP, !IsNowait); | |||
1660 | ||||
1661 | // Apply the finalization callback in LoopAfterBB | |||
1662 | auto FiniInfo = FinalizationStack.pop_back_val(); | |||
1663 | assert(FiniInfo.DK == OMPD_sections && | |||
1664 | "Unexpected finalization stack state!"); | |||
1665 | if (FinalizeCallbackTy &CB = FiniInfo.FiniCB) { | |||
1666 | Builder.restoreIP(AfterIP); | |||
1667 | BasicBlock *FiniBB = | |||
1668 | splitBBWithSuffix(Builder, /*CreateBranch=*/true, "sections.fini"); | |||
1669 | CB(Builder.saveIP()); | |||
1670 | AfterIP = {FiniBB, FiniBB->begin()}; | |||
1671 | } | |||
1672 | ||||
1673 | return AfterIP; | |||
1674 | } | |||
1675 | ||||
1676 | OpenMPIRBuilder::InsertPointTy | |||
1677 | OpenMPIRBuilder::createSection(const LocationDescription &Loc, | |||
1678 | BodyGenCallbackTy BodyGenCB, | |||
1679 | FinalizeCallbackTy FiniCB) { | |||
1680 | if (!updateToLocation(Loc)) | |||
1681 | return Loc.IP; | |||
1682 | ||||
1683 | auto FiniCBWrapper = [&](InsertPointTy IP) { | |||
1684 | if (IP.getBlock()->end() != IP.getPoint()) | |||
1685 | return FiniCB(IP); | |||
1686 | // This must be done, otherwise any nested constructs using FinalizeOMPRegion | |||
1687 | // will fail because that function requires the Finalization Basic Block to | |||
1688 | // have a terminator, which is already removed by EmitOMPRegionBody. | |||
1689 | // IP is currently at the cancellation block. | |||
1690 | // We need to backtrack to the condition block to fetch | |||
1691 | // the exit block and create a branch from the cancellation | |||
1692 | // block to the exit block. | |||
1693 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
1694 | Builder.restoreIP(IP); | |||
1695 | auto *CaseBB = Loc.IP.getBlock(); | |||
1696 | auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor(); | |||
1697 | auto *ExitBB = CondBB->getTerminator()->getSuccessor(1); | |||
1698 | Instruction *I = Builder.CreateBr(ExitBB); | |||
1699 | IP = InsertPointTy(I->getParent(), I->getIterator()); | |||
1700 | return FiniCB(IP); | |||
1701 | }; | |||
1702 | ||||
1703 | Directive OMPD = Directive::OMPD_sections; | |||
1704 | // Since we are using Finalization Callback here, HasFinalize | |||
1705 | // and IsCancellable have to be true | |||
1706 | return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper, | |||
1707 | /*Conditional*/ false, /*hasFinalize*/ true, | |||
1708 | /*IsCancellable*/ true); | |||
1709 | } | |||
1710 | ||||
1711 | /// Create a function with a unique name and a "void (i8*, i8*)" signature in | |||
1712 | /// the given module and return it. | |||
1713 | Function *getFreshReductionFunc(Module &M) { | |||
1714 | Type *VoidTy = Type::getVoidTy(M.getContext()); | |||
1715 | Type *Int8PtrTy = Type::getInt8PtrTy(M.getContext()); | |||
1716 | auto *FuncTy = | |||
1717 | FunctionType::get(VoidTy, {Int8PtrTy, Int8PtrTy}, /* IsVarArg */ false); | |||
1718 | return Function::Create(FuncTy, GlobalVariable::InternalLinkage, | |||
1719 | M.getDataLayout().getDefaultGlobalsAddressSpace(), | |||
1720 | ".omp.reduction.func", &M); | |||
1721 | } | |||
1722 | ||||
1723 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createReductions( | |||
1724 | const LocationDescription &Loc, InsertPointTy AllocaIP, | |||
1725 | ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait) { | |||
1726 | for (const ReductionInfo &RI : ReductionInfos) { | |||
1727 | (void)RI; | |||
1728 | assert(RI.Variable && "expected non-null variable"); | |||
1729 | assert(RI.PrivateVariable && "expected non-null private variable"); | |||
1730 | assert(RI.ReductionGen && "expected non-null reduction generator callback"); | |||
1731 | assert(RI.Variable->getType() == RI.PrivateVariable->getType() && | |||
1732 | "expected variables and their private equivalents to have the same " | |||
1733 | "type"); | |||
1734 | assert(RI.Variable->getType()->isPointerTy() && | |||
1735 | "expected variables to be pointers"); | |||
1736 | } | |||
1737 | ||||
1738 | if (!updateToLocation(Loc)) | |||
1739 | return InsertPointTy(); | |||
1740 | ||||
1741 | BasicBlock *InsertBlock = Loc.IP.getBlock(); | |||
1742 | BasicBlock *ContinuationBlock = | |||
1743 | InsertBlock->splitBasicBlock(Loc.IP.getPoint(), "reduce.finalize"); | |||
1744 | InsertBlock->getTerminator()->eraseFromParent(); | |||
1745 | ||||
1746 | // Create and populate array of type-erased pointers to private reduction | |||
1747 | // values. | |||
1748 | unsigned NumReductions = ReductionInfos.size(); | |||
1749 | Type *RedArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumReductions); | |||
1750 | Builder.restoreIP(AllocaIP); | |||
1751 | Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array"); | |||
1752 | ||||
1753 | Builder.SetInsertPoint(InsertBlock, InsertBlock->end()); | |||
1754 | ||||
1755 | for (auto En : enumerate(ReductionInfos)) { | |||
1756 | unsigned Index = En.index(); | |||
1757 | const ReductionInfo &RI = En.value(); | |||
1758 | Value *RedArrayElemPtr = Builder.CreateConstInBoundsGEP2_64( | |||
1759 | RedArrayTy, RedArray, 0, Index, "red.array.elem." + Twine(Index)); | |||
1760 | Value *Casted = | |||
1761 | Builder.CreateBitCast(RI.PrivateVariable, Builder.getInt8PtrTy(), | |||
1762 | "private.red.var." + Twine(Index) + ".casted"); | |||
1763 | Builder.CreateStore(Casted, RedArrayElemPtr); | |||
1764 | } | |||
1765 | ||||
1766 | // Emit a call to the runtime function that orchestrates the reduction. | |||
1767 | // Declare the reduction function in the process. | |||
1768 | Function *Func = Builder.GetInsertBlock()->getParent(); | |||
1769 | Module *Module = Func->getParent(); | |||
1770 | Value *RedArrayPtr = | |||
1771 | Builder.CreateBitCast(RedArray, Builder.getInt8PtrTy(), "red.array.ptr"); | |||
1772 | uint32_t SrcLocStrSize; | |||
1773 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
1774 | bool CanGenerateAtomic = | |||
1775 | llvm::all_of(ReductionInfos, [](const ReductionInfo &RI) { | |||
1776 | return RI.AtomicReductionGen; | |||
1777 | }); | |||
1778 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize, | |||
1779 | CanGenerateAtomic | |||
1780 | ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE | |||
1781 | : IdentFlag(0)); | |||
1782 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
1783 | Constant *NumVariables = Builder.getInt32(NumReductions); | |||
1784 | const DataLayout &DL = Module->getDataLayout(); | |||
1785 | unsigned RedArrayByteSize = DL.getTypeStoreSize(RedArrayTy); | |||
1786 | Constant *RedArraySize = Builder.getInt64(RedArrayByteSize); | |||
1787 | Function *ReductionFunc = getFreshReductionFunc(*Module); | |||
1788 | Value *Lock = getOMPCriticalRegionLock(".reduction"); | |||
1789 | Function *ReduceFunc = getOrCreateRuntimeFunctionPtr( | |||
1790 | IsNoWait ? RuntimeFunction::OMPRTL___kmpc_reduce_nowait | |||
1791 | : RuntimeFunction::OMPRTL___kmpc_reduce); | |||
1792 | CallInst *ReduceCall = | |||
1793 | Builder.CreateCall(ReduceFunc, | |||
1794 | {Ident, ThreadId, NumVariables, RedArraySize, | |||
1795 | RedArrayPtr, ReductionFunc, Lock}, | |||
1796 | "reduce"); | |||
1797 | ||||
1798 | // Create final reduction entry blocks for the atomic and non-atomic case. | |||
1799 | // Emit IR that dispatches control flow to one of the blocks based on the | |||
1800 | // reduction supporting the atomic mode. | |||
1801 | BasicBlock *NonAtomicRedBlock = | |||
1802 | BasicBlock::Create(Module->getContext(), "reduce.switch.nonatomic", Func); | |||
1803 | BasicBlock *AtomicRedBlock = | |||
1804 | BasicBlock::Create(Module->getContext(), "reduce.switch.atomic", Func); | |||
1805 | SwitchInst *Switch = | |||
1806 | Builder.CreateSwitch(ReduceCall, ContinuationBlock, /* NumCases */ 2); | |||
1807 | Switch->addCase(Builder.getInt32(1), NonAtomicRedBlock); | |||
1808 | Switch->addCase(Builder.getInt32(2), AtomicRedBlock); | |||
1809 | ||||
1810 | // Populate the non-atomic reduction using the elementwise reduction function. | |||
1811 | // This loads the elements from the global and private variables and reduces | |||
1812 | // them before storing back the result to the global variable. | |||
1813 | Builder.SetInsertPoint(NonAtomicRedBlock); | |||
1814 | for (auto En : enumerate(ReductionInfos)) { | |||
1815 | const ReductionInfo &RI = En.value(); | |||
1816 | Type *ValueType = RI.ElementType; | |||
1817 | Value *RedValue = Builder.CreateLoad(ValueType, RI.Variable, | |||
1818 | "red.value." + Twine(En.index())); | |||
1819 | Value *PrivateRedValue = | |||
1820 | Builder.CreateLoad(ValueType, RI.PrivateVariable, | |||
1821 | "red.private.value." + Twine(En.index())); | |||
1822 | Value *Reduced; | |||
1823 | Builder.restoreIP( | |||
1824 | RI.ReductionGen(Builder.saveIP(), RedValue, PrivateRedValue, Reduced)); | |||
1825 | if (!Builder.GetInsertBlock()) | |||
1826 | return InsertPointTy(); | |||
1827 | Builder.CreateStore(Reduced, RI.Variable); | |||
1828 | } | |||
1829 | Function *EndReduceFunc = getOrCreateRuntimeFunctionPtr( | |||
1830 | IsNoWait ? RuntimeFunction::OMPRTL___kmpc_end_reduce_nowait | |||
1831 | : RuntimeFunction::OMPRTL___kmpc_end_reduce); | |||
1832 | Builder.CreateCall(EndReduceFunc, {Ident, ThreadId, Lock}); | |||
1833 | Builder.CreateBr(ContinuationBlock); | |||
1834 | ||||
1835 | // Populate the atomic reduction using the atomic elementwise reduction | |||
1836 | // function. There are no loads/stores here because they will be happening | |||
1837 | // inside the atomic elementwise reduction. | |||
1838 | Builder.SetInsertPoint(AtomicRedBlock); | |||
1839 | if (CanGenerateAtomic) { | |||
1840 | for (const ReductionInfo &RI : ReductionInfos) { | |||
1841 | Builder.restoreIP(RI.AtomicReductionGen(Builder.saveIP(), RI.ElementType, | |||
1842 | RI.Variable, RI.PrivateVariable)); | |||
1843 | if (!Builder.GetInsertBlock()) | |||
1844 | return InsertPointTy(); | |||
1845 | } | |||
1846 | Builder.CreateBr(ContinuationBlock); | |||
1847 | } else { | |||
1848 | Builder.CreateUnreachable(); | |||
1849 | } | |||
1850 | ||||
1851 | // Populate the outlined reduction function using the elementwise reduction | |||
1852 | // function. Partial values are extracted from the type-erased array of | |||
1853 | // pointers to private variables. | |||
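// Sketch of the generated helper (illustrative, assuming the i8* array
// arguments declared by getFreshReductionFunc):
//   define internal void @.omp.reduction.func(i8* %lhs_arr, i8* %rhs_arr) {
//     ; for each reduction i: load the i-th pointer from both arrays, load
//     ; both element values, combine them via RI.ReductionGen, and store the
//     ; result back through the LHS pointer.
//     ret void
//   }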
1854 | BasicBlock *ReductionFuncBlock = | |||
1855 | BasicBlock::Create(Module->getContext(), "", ReductionFunc); | |||
1856 | Builder.SetInsertPoint(ReductionFuncBlock); | |||
1857 | Value *LHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(0), | |||
1858 | RedArrayTy->getPointerTo()); | |||
1859 | Value *RHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(1), | |||
1860 | RedArrayTy->getPointerTo()); | |||
1861 | for (auto En : enumerate(ReductionInfos)) { | |||
1862 | const ReductionInfo &RI = En.value(); | |||
1863 | Value *LHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64( | |||
1864 | RedArrayTy, LHSArrayPtr, 0, En.index()); | |||
1865 | Value *LHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), LHSI8PtrPtr); | |||
1866 | Value *LHSPtr = Builder.CreateBitCast(LHSI8Ptr, RI.Variable->getType()); | |||
1867 | Value *LHS = Builder.CreateLoad(RI.ElementType, LHSPtr); | |||
1868 | Value *RHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64( | |||
1869 | RedArrayTy, RHSArrayPtr, 0, En.index()); | |||
1870 | Value *RHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), RHSI8PtrPtr); | |||
1871 | Value *RHSPtr = | |||
1872 | Builder.CreateBitCast(RHSI8Ptr, RI.PrivateVariable->getType()); | |||
1873 | Value *RHS = Builder.CreateLoad(RI.ElementType, RHSPtr); | |||
1874 | Value *Reduced; | |||
1875 | Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced)); | |||
1876 | if (!Builder.GetInsertBlock()) | |||
1877 | return InsertPointTy(); | |||
1878 | Builder.CreateStore(Reduced, LHSPtr); | |||
1879 | } | |||
1880 | Builder.CreateRetVoid(); | |||
1881 | ||||
1882 | Builder.SetInsertPoint(ContinuationBlock); | |||
1883 | return Builder.saveIP(); | |||
1884 | } | |||
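The reduction lowering above has three paths: the non-atomic block combines the global and private copies between the __kmpc_reduce and __kmpc_end_reduce calls, the atomic block defers to the per-element AtomicReductionGen callbacks, and the outlined ReductionFunc combines partial values pulled out of a type-erased array of pointers. A minimal standalone sketch of that shape for one 64-bit "+" reduction follows; the *_stub functions and every name in it are illustrative stand-ins, not the real runtime entry points or their signatures.

#include <cstdint>

// Stand-ins for the __kmpc_reduce / __kmpc_end_reduce family; the real
// entry points live in the OpenMP runtime library.
static int32_t kmpc_reduce_stub() { return 1; } // 1: non-atomic, 2: atomic
static void kmpc_end_reduce_stub() {}

// Conceptual shape of the emitted switch for a single int64 "+" reduction.
void reduce_shape(int64_t *orig, int64_t *priv) {
  switch (kmpc_reduce_stub()) {
  case 1: // NonAtomicRedBlock: load both values, combine, store back.
    *orig = *orig + *priv;
    kmpc_end_reduce_stub();
    break;
  case 2: // AtomicRedBlock: combine with an atomic read-modify-write
          // (GCC/Clang builtin standing in for the emitted atomicrmw).
    __atomic_fetch_add(orig, *priv, __ATOMIC_RELAXED);
    break;
  default: // ContinuationBlock: another thread performed the reduction.
    break;
  }
}

// Shape of the outlined ReductionFunc: partial values are pulled out of two
// type-erased pointer arrays and combined element-wise into the LHS array.
void reduction_func_shape(void **lhs_ptrs, void **rhs_ptrs) {
  auto *lhs = static_cast<int64_t *>(lhs_ptrs[0]);
  auto *rhs = static_cast<int64_t *>(rhs_ptrs[0]);
  *lhs = *lhs + *rhs;
}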
1885 | ||||
1886 | OpenMPIRBuilder::InsertPointTy | |||
1887 | OpenMPIRBuilder::createMaster(const LocationDescription &Loc, | |||
1888 | BodyGenCallbackTy BodyGenCB, | |||
1889 | FinalizeCallbackTy FiniCB) { | |||
1890 | ||||
1891 | if (!updateToLocation(Loc)) | |||
1892 | return Loc.IP; | |||
1893 | ||||
1894 | Directive OMPD = Directive::OMPD_master; | |||
1895 | uint32_t SrcLocStrSize; | |||
1896 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
1897 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
1898 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
1899 | Value *Args[] = {Ident, ThreadId}; | |||
1900 | ||||
1901 | Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master); | |||
1902 | Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); | |||
1903 | ||||
1904 | Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master); | |||
1905 | Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); | |||
1906 | ||||
1907 | return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | |||
1908 | /*Conditional*/ true, /*hasFinalize*/ true); | |||
1909 | } | |||
1910 | ||||
1911 | OpenMPIRBuilder::InsertPointTy | |||
1912 | OpenMPIRBuilder::createMasked(const LocationDescription &Loc, | |||
1913 | BodyGenCallbackTy BodyGenCB, | |||
1914 | FinalizeCallbackTy FiniCB, Value *Filter) { | |||
1915 | if (!updateToLocation(Loc)) | |||
1916 | return Loc.IP; | |||
1917 | ||||
1918 | Directive OMPD = Directive::OMPD_masked; | |||
1919 | uint32_t SrcLocStrSize; | |||
1920 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
1921 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
1922 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
1923 | Value *Args[] = {Ident, ThreadId, Filter}; | |||
1924 | Value *ArgsEnd[] = {Ident, ThreadId}; | |||
1925 | ||||
1926 | Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked); | |||
1927 | Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); | |||
1928 | ||||
1929 | Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked); | |||
1930 | Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd); | |||
1931 | ||||
1932 | return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | |||
1933 | /*Conditional*/ true, /*hasFinalize*/ true); | |||
1934 | } | |||
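Both createMaster and createMasked hand their entry/exit calls to EmitOMPInlinedRegion with Conditional set, so the generated region runs its body only when the entry call returns non-zero, and the exit call is emitted on that same path. A stubbed sketch of the resulting shape; the stub names and signatures are illustrative, not the libomp prototypes.

#include <cstdint>

// Stand-ins for __kmpc_masked / __kmpc_end_masked.
static int32_t kmpc_masked_stub(int32_t /*filter*/) { return 1; }
static void kmpc_end_masked_stub() {}

// Conditional inlined region: only threads selected by the entry call run
// the body and the finalization/exit call.
void masked_region_shape(int32_t filter, void (*body)()) {
  if (kmpc_masked_stub(filter)) {
    body();
    kmpc_end_masked_stub();
  }
}

createMaster follows the same pattern, with __kmpc_master selecting only the primary thread instead of taking a caller-provided filter.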
1935 | ||||
1936 | CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton( | |||
1937 | DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore, | |||
1938 | BasicBlock *PostInsertBefore, const Twine &Name) { | |||
1939 | Module *M = F->getParent(); | |||
1940 | LLVMContext &Ctx = M->getContext(); | |||
1941 | Type *IndVarTy = TripCount->getType(); | |||
| ||||
1942 | ||||
1943 | // Create the basic block structure. | |||
1944 | BasicBlock *Preheader = | |||
1945 | BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore); | |||
1946 | BasicBlock *Header = | |||
1947 | BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore); | |||
1948 | BasicBlock *Cond = | |||
1949 | BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore); | |||
1950 | BasicBlock *Body = | |||
1951 | BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore); | |||
1952 | BasicBlock *Latch = | |||
1953 | BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore); | |||
1954 | BasicBlock *Exit = | |||
1955 | BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore); | |||
1956 | BasicBlock *After = | |||
1957 | BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore); | |||
1958 | ||||
1959 | // Use specified DebugLoc for new instructions. | |||
1960 | Builder.SetCurrentDebugLocation(DL); | |||
1961 | ||||
1962 | Builder.SetInsertPoint(Preheader); | |||
1963 | Builder.CreateBr(Header); | |||
1964 | ||||
1965 | Builder.SetInsertPoint(Header); | |||
1966 | PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv"); | |||
1967 | IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader); | |||
1968 | Builder.CreateBr(Cond); | |||
1969 | ||||
1970 | Builder.SetInsertPoint(Cond); | |||
1971 | Value *Cmp = | |||
1972 | Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp"); | |||
1973 | Builder.CreateCondBr(Cmp, Body, Exit); | |||
1974 | ||||
1975 | Builder.SetInsertPoint(Body); | |||
1976 | Builder.CreateBr(Latch); | |||
1977 | ||||
1978 | Builder.SetInsertPoint(Latch); | |||
1979 | Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1), | |||
1980 | "omp_" + Name + ".next", /*HasNUW=*/true); | |||
1981 | Builder.CreateBr(Header); | |||
1982 | IndVarPHI->addIncoming(Next, Latch); | |||
1983 | ||||
1984 | Builder.SetInsertPoint(Exit); | |||
1985 | Builder.CreateBr(After); | |||
1986 | ||||
1987 | // Remember and return the canonical control flow. | |||
1988 | LoopInfos.emplace_front(); | |||
1989 | CanonicalLoopInfo *CL = &LoopInfos.front(); | |||
1990 | ||||
1991 | CL->Header = Header; | |||
1992 | CL->Cond = Cond; | |||
1993 | CL->Latch = Latch; | |||
1994 | CL->Exit = Exit; | |||
1995 | ||||
1996 | #ifndef NDEBUG | |||
1997 | CL->assertOK(); | |||
1998 | #endif | |||
1999 | return CL; | |||
2000 | } | |||
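The blocks wired up above form a plain counted loop from 0 to TripCount with unit step; nothing is emitted into the body yet. A C++ rendering of that control flow, with comments naming the corresponding omp_<Name>.* blocks (a sketch of the shape, not generated code):

#include <cstdint>

void canonical_loop_shape(uint64_t trip_count, void (*body)(uint64_t)) {
  // preheader -> header: the induction variable PHI starts at 0.
  for (uint64_t iv = 0;  // omp_<name>.iv
       iv < trip_count;  // omp_<name>.cond: icmp ult iv, trip_count
       ++iv) {           // omp_<name>.inc: add nuw iv, 1
    body(iv);            // omp_<name>.body (filled in by the caller later)
  }
  // omp_<name>.exit branches to omp_<name>.after
}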
2001 | ||||
2002 | CanonicalLoopInfo * | |||
2003 | OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc, | |||
2004 | LoopBodyGenCallbackTy BodyGenCB, | |||
2005 | Value *TripCount, const Twine &Name) { | |||
2006 | BasicBlock *BB = Loc.IP.getBlock(); | |||
2007 | BasicBlock *NextBB = BB->getNextNode(); | |||
2008 | ||||
2009 | CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(), | |||
2010 | NextBB, NextBB, Name); | |||
2011 | BasicBlock *After = CL->getAfter(); | |||
2012 | ||||
2013 | // If location is not set, don't connect the loop. | |||
2014 | if (updateToLocation(Loc)) { | |||
2015 | // Split the loop at the insertion point: Branch to the preheader and move | |||
2016 | // every following instruction to after the loop (the After BB). Also, the | |||
2017 | // new successor is the loop's after block. | |||
2018 | spliceBB(Builder, After, /*CreateBranch=*/false); | |||
2019 | Builder.CreateBr(CL->getPreheader()); | |||
2020 | } | |||
2021 | ||||
2022 | // Emit the body content. We do it after connecting the loop to the CFG so | |||
2023 | // that the callback does not encounter degenerate BBs. | |||
2024 | BodyGenCB(CL->getBodyIP(), CL->getIndVar()); | |||
2025 | ||||
2026 | #ifndef NDEBUG | |||
2027 | CL->assertOK(); | |||
2028 | #endif | |||
2029 | return CL; | |||
2030 | } | |||
2031 | ||||
2032 | CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop( | |||
2033 | const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, | |||
2034 | Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, | |||
2035 | InsertPointTy ComputeIP, const Twine &Name) { | |||
2036 | ||||
2037 | // Consider the following difficulties (assuming 8-bit signed integers): | |||
2038 | // * Adding \p Step to the loop counter which passes \p Stop may overflow: | |||
2039 | // DO I = 1, 100, 50 | |||
2040 | // * A \p Step of INT_MIN cannot be normalized to a positive direction: | |||
2041 | // DO I = 100, 0, -128 | |||
2042 | ||||
2043 | // Start, Stop and Step must be of the same integer type. | |||
2044 | auto *IndVarTy = cast<IntegerType>(Start->getType()); | |||
2045 | assert(IndVarTy == Stop->getType() && "Stop type mismatch"); | |||
2046 | assert(IndVarTy == Step->getType() && "Step type mismatch"); | |||
2047 | ||||
2048 | LocationDescription ComputeLoc = | |||
2049 | ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc; | |||
2050 | updateToLocation(ComputeLoc); | |||
2051 | ||||
2052 | ConstantInt *Zero = ConstantInt::get(IndVarTy, 0); | |||
2053 | ConstantInt *One = ConstantInt::get(IndVarTy, 1); | |||
2054 | ||||
2055 | // Like Step, but always positive. | |||
2056 | Value *Incr = Step; | |||
2057 | ||||
2058 | // Distance between Start and Stop; always positive. | |||
2059 | Value *Span; | |||
2060 | ||||
2061 | // Condition indicating whether no iterations are executed at all, e.g. because | |||
2062 | // UB < LB. | |||
2063 | Value *ZeroCmp; | |||
2064 | ||||
2065 | if (IsSigned) { | |||
2066 | // Ensure that increment is positive. If not, negate and invert LB and UB. | |||
2067 | Value *IsNeg = Builder.CreateICmpSLT(Step, Zero); | |||
2068 | Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step); | |||
2069 | Value *LB = Builder.CreateSelect(IsNeg, Stop, Start); | |||
2070 | Value *UB = Builder.CreateSelect(IsNeg, Start, Stop); | |||
2071 | Span = Builder.CreateSub(UB, LB, "", false, true); | |||
2072 | ZeroCmp = Builder.CreateICmp( | |||
2073 | InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB); | |||
2074 | } else { | |||
2075 | Span = Builder.CreateSub(Stop, Start, "", true); | |||
2076 | ZeroCmp = Builder.CreateICmp( | |||
2077 | InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start); | |||
2078 | } | |||
2079 | ||||
2080 | Value *CountIfLooping; | |||
2081 | if (InclusiveStop) { | |||
2082 | CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One); | |||
2083 | } else { | |||
2084 | // Avoid incrementing past stop since it could overflow. | |||
2085 | Value *CountIfTwo = Builder.CreateAdd( | |||
2086 | Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One); | |||
2087 | Value *OneCmp = Builder.CreateICmp( | |||
2088 | InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr); | |||
2089 | CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo); | |||
2090 | } | |||
2091 | Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping, | |||
2092 | "omp_" + Name + ".tripcount"); | |||
2093 | ||||
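The selects above turn an arbitrary Start/Stop/Step range into the unsigned trip count of a canonical loop without ever stepping past Stop. A standalone recomputation of the same arithmetic for the signed case, with a few worked values; this is a sketch under the same assumptions as the code (Step != INT64_MIN, unsigned span and division math), and the helper name is mine.

#include <cassert>
#include <cstdint>

static uint64_t trip_count_signed(int64_t start, int64_t stop, int64_t step,
                                  bool inclusive_stop) {
  // Normalize to a positive increment by swapping the bounds if needed.
  uint64_t incr = step < 0 ? static_cast<uint64_t>(-step)
                           : static_cast<uint64_t>(step);
  int64_t lb = step < 0 ? stop : start;
  int64_t ub = step < 0 ? start : stop;
  // ZeroCmp: no iterations at all.
  if (inclusive_stop ? (ub < lb) : (ub <= lb))
    return 0;
  uint64_t span = static_cast<uint64_t>(ub) - static_cast<uint64_t>(lb);
  if (inclusive_stop)
    return span / incr + 1;
  // Exclusive stop: avoid incrementing past stop (CountIfTwo above).
  return span <= incr ? 1 : (span - 1) / incr + 1;
}

int main() {
  assert(trip_count_signed(1, 100, 50, /*inclusive_stop=*/true) == 2); // 1, 51
  assert(trip_count_signed(10, 0, -3, /*inclusive_stop=*/true) == 4);  // 10, 7, 4, 1
  assert(trip_count_signed(0, 10, 3, /*inclusive_stop=*/false) == 4);  // 0, 3, 6, 9
  assert(trip_count_signed(5, 5, 1, /*inclusive_stop=*/false) == 0);   // empty
  return 0;
}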
2094 | auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) { | |||
2095 | Builder.restoreIP(CodeGenIP); | |||
2096 | Value *Span = Builder.CreateMul(IV, Step); | |||
2097 | Value *IndVar = Builder.CreateAdd(Span, Start); | |||
2098 | BodyGenCB(Builder.saveIP(), IndVar); | |||
2099 | }; | |||
2100 | LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP(); | |||
2101 | return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name); | |||
2102 | } | |||
2103 | ||||
2104 | // Returns an LLVM function to call for initializing loop bounds using OpenMP | |||
2105 | // static scheduling depending on `type`. Only i32 and i64 are supported by the | |||
2106 | // runtime. Always interpret integers as unsigned similarly to | |||
2107 | // CanonicalLoopInfo. | |||
2108 | static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M, | |||
2109 | OpenMPIRBuilder &OMPBuilder) { | |||
2110 | unsigned Bitwidth = Ty->getIntegerBitWidth(); | |||
2111 | if (Bitwidth == 32) | |||
2112 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
2113 | M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u); | |||
2114 | if (Bitwidth == 64) | |||
2115 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
2116 | M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u); | |||
2117 | llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2117); | |||
2118 | } | |||
2119 | ||||
2120 | OpenMPIRBuilder::InsertPointTy | |||
2121 | OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, | |||
2122 | InsertPointTy AllocaIP, | |||
2123 | bool NeedsBarrier) { | |||
2124 | assert(CLI->isValid() && "Requires a valid canonical loop"); | |||
2125 | assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) && | |||
2126 | "Require dedicated allocate IP"); | |||
2127 | ||||
2128 | // Set up the source location value for OpenMP runtime. | |||
2129 | Builder.restoreIP(CLI->getPreheaderIP()); | |||
2130 | Builder.SetCurrentDebugLocation(DL); | |||
2131 | ||||
2132 | uint32_t SrcLocStrSize; | |||
2133 | Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize); | |||
2134 | Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
2135 | ||||
2136 | // Declare useful OpenMP runtime functions. | |||
2137 | Value *IV = CLI->getIndVar(); | |||
2138 | Type *IVTy = IV->getType(); | |||
2139 | FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this); | |||
2140 | FunctionCallee StaticFini = | |||
2141 | getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini); | |||
2142 | ||||
2143 | // Allocate space for computed loop bounds as expected by the "init" function. | |||
2144 | Builder.restoreIP(AllocaIP); | |||
2145 | Type *I32Type = Type::getInt32Ty(M.getContext()); | |||
2146 | Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); | |||
2147 | Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound"); | |||
2148 | Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound"); | |||
2149 | Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride"); | |||
2150 | ||||
2151 | // At the end of the preheader, prepare for calling the "init" function by | |||
2152 | // storing the current loop bounds into the allocated space. A canonical loop | |||
2153 | // always iterates from 0 to trip-count with step 1. Note that "init" expects | |||
2154 | // and produces an inclusive upper bound. | |||
2155 | Builder.SetInsertPoint(CLI->getPreheader()->getTerminator()); | |||
2156 | Constant *Zero = ConstantInt::get(IVTy, 0); | |||
2157 | Constant *One = ConstantInt::get(IVTy, 1); | |||
2158 | Builder.CreateStore(Zero, PLowerBound); | |||
2159 | Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One); | |||
2160 | Builder.CreateStore(UpperBound, PUpperBound); | |||
2161 | Builder.CreateStore(One, PStride); | |||
2162 | ||||
2163 | Value *ThreadNum = getOrCreateThreadID(SrcLoc); | |||
2164 | ||||
2165 | Constant *SchedulingType = ConstantInt::get( | |||
2166 | I32Type, static_cast<int>(OMPScheduleType::UnorderedStatic)); | |||
2167 | ||||
2168 | // Call the "init" function and update the trip count of the loop with the | |||
2169 | // value it produced. | |||
2170 | Builder.CreateCall(StaticInit, | |||
2171 | {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound, | |||
2172 | PUpperBound, PStride, One, Zero}); | |||
2173 | Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound); | |||
2174 | Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound); | |||
2175 | Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound); | |||
2176 | Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One); | |||
2177 | CLI->setTripCount(TripCount); | |||
2178 | ||||
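The init call above hands each thread back its own inclusive [lower, upper] slice; the code then shrinks the canonical trip count to that slice and adds the lower bound onto the induction variable in the body. A simplified model of the division for an unchunked static schedule; how the real __kmpc_for_static_init balances the remainder is a runtime detail, so the split below is only one plausible choice, for illustration.

#include <cstdint>
#include <cstdio>

// Assumes trip_count >= nthreads so every thread gets at least one iteration.
static void static_init_model(uint64_t trip_count, uint32_t nthreads,
                              uint32_t tid, uint64_t &lb, uint64_t &ub) {
  uint64_t small = trip_count / nthreads, rem = trip_count % nthreads;
  uint64_t size = small + (tid < rem ? 1 : 0);
  lb = tid * small + (tid < rem ? tid : rem);
  ub = lb + size - 1; // inclusive, as the runtime reports it
}

int main() {
  // Trip count 10 over 4 threads: slices [0,2] [3,5] [6,7] [8,9].
  for (uint32_t tid = 0; tid < 4; ++tid) {
    uint64_t lb, ub;
    static_init_model(10, 4, tid, lb, ub);
    // The rewritten loop then runs ub - lb + 1 iterations and adds lb to the
    // canonical induction variable, as done above.
    std::printf("tid %u: [%llu, %llu], trip %llu\n", tid,
                (unsigned long long)lb, (unsigned long long)ub,
                (unsigned long long)(ub - lb + 1));
  }
  return 0;
}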
2179 | // Update all uses of the induction variable except the one in the condition | |||
2180 | // block that compares it with the actual upper bound, and the increment in | |||
2181 | // the latch block. | |||
2182 | ||||
2183 | CLI->mapIndVar([&](Instruction *OldIV) -> Value * { | |||
2184 | Builder.SetInsertPoint(CLI->getBody(), | |||
2185 | CLI->getBody()->getFirstInsertionPt()); | |||
2186 | Builder.SetCurrentDebugLocation(DL); | |||
2187 | return Builder.CreateAdd(OldIV, LowerBound); | |||
2188 | }); | |||
2189 | ||||
2190 | // In the "exit" block, call the "fini" function. | |||
2191 | Builder.SetInsertPoint(CLI->getExit(), | |||
2192 | CLI->getExit()->getTerminator()->getIterator()); | |||
2193 | Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum}); | |||
2194 | ||||
2195 | // Add the barrier if requested. | |||
2196 | if (NeedsBarrier) | |||
2197 | createBarrier(LocationDescription(Builder.saveIP(), DL), | |||
2198 | omp::Directive::OMPD_for, /* ForceSimpleCall */ false, | |||
2199 | /* CheckCancelFlag */ false); | |||
2200 | ||||
2201 | InsertPointTy AfterIP = CLI->getAfterIP(); | |||
2202 | CLI->invalidate(); | |||
2203 | ||||
2204 | return AfterIP; | |||
2205 | } | |||
2206 | ||||
2207 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyStaticChunkedWorkshareLoop( | |||
2208 | DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, | |||
2209 | bool NeedsBarrier, Value *ChunkSize) { | |||
2210 | assert(CLI->isValid() && "Requires a valid canonical loop"); | |||
2211 | assert(ChunkSize && "Chunk size is required"); | |||
2212 | ||||
2213 | LLVMContext &Ctx = CLI->getFunction()->getContext(); | |||
2214 | Value *IV = CLI->getIndVar(); | |||
2215 | Value *OrigTripCount = CLI->getTripCount(); | |||
2216 | Type *IVTy = IV->getType(); | |||
2217 | assert(IVTy->getIntegerBitWidth() <= 64 && | |||
2218 | "Max supported tripcount bitwidth is 64 bits"); | |||
2219 | Type *InternalIVTy = IVTy->getIntegerBitWidth() <= 32 ? Type::getInt32Ty(Ctx) | |||
2220 | : Type::getInt64Ty(Ctx); | |||
2221 | Type *I32Type = Type::getInt32Ty(M.getContext()); | |||
2222 | Constant *Zero = ConstantInt::get(InternalIVTy, 0); | |||
2223 | Constant *One = ConstantInt::get(InternalIVTy, 1); | |||
2224 | ||||
2225 | // Declare useful OpenMP runtime functions. | |||
2226 | FunctionCallee StaticInit = | |||
2227 | getKmpcForStaticInitForType(InternalIVTy, M, *this); | |||
2228 | FunctionCallee StaticFini = | |||
2229 | getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini); | |||
2230 | ||||
2231 | // Allocate space for computed loop bounds as expected by the "init" function. | |||
2232 | Builder.restoreIP(AllocaIP); | |||
2233 | Builder.SetCurrentDebugLocation(DL); | |||
2234 | Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); | |||
2235 | Value *PLowerBound = | |||
2236 | Builder.CreateAlloca(InternalIVTy, nullptr, "p.lowerbound"); | |||
2237 | Value *PUpperBound = | |||
2238 | Builder.CreateAlloca(InternalIVTy, nullptr, "p.upperbound"); | |||
2239 | Value *PStride = Builder.CreateAlloca(InternalIVTy, nullptr, "p.stride"); | |||
2240 | ||||
2241 | // Set up the source location value for the OpenMP runtime. | |||
2242 | Builder.restoreIP(CLI->getPreheaderIP()); | |||
2243 | Builder.SetCurrentDebugLocation(DL); | |||
2244 | ||||
2245 | // TODO: Detect overflow in ubsan or max-out with current tripcount. | |||
2246 | Value *CastedChunkSize = | |||
2247 | Builder.CreateZExtOrTrunc(ChunkSize, InternalIVTy, "chunksize"); | |||
2248 | Value *CastedTripCount = | |||
2249 | Builder.CreateZExt(OrigTripCount, InternalIVTy, "tripcount"); | |||
2250 | ||||
2251 | Constant *SchedulingType = ConstantInt::get( | |||
2252 | I32Type, static_cast<int>(OMPScheduleType::UnorderedStaticChunked)); | |||
2253 | Builder.CreateStore(Zero, PLowerBound); | |||
2254 | Value *OrigUpperBound = Builder.CreateSub(CastedTripCount, One); | |||
2255 | Builder.CreateStore(OrigUpperBound, PUpperBound); | |||
2256 | Builder.CreateStore(One, PStride); | |||
2257 | ||||
2258 | // Call the "init" function and update the trip count of the loop with the | |||
2259 | // value it produced. | |||
2260 | uint32_t SrcLocStrSize; | |||
2261 | Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize); | |||
2262 | Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
2263 | Value *ThreadNum = getOrCreateThreadID(SrcLoc); | |||
2264 | Builder.CreateCall(StaticInit, | |||
2265 | {/*loc=*/SrcLoc, /*global_tid=*/ThreadNum, | |||
2266 | /*schedtype=*/SchedulingType, /*plastiter=*/PLastIter, | |||
2267 | /*plower=*/PLowerBound, /*pupper=*/PUpperBound, | |||
2268 | /*pstride=*/PStride, /*incr=*/One, | |||
2269 | /*chunk=*/CastedChunkSize}); | |||
2270 | ||||
2271 | // Load values written by the "init" function. | |||
2272 | Value *FirstChunkStart = | |||
2273 | Builder.CreateLoad(InternalIVTy, PLowerBound, "omp_firstchunk.lb"); | |||
2274 | Value *FirstChunkStop = | |||
2275 | Builder.CreateLoad(InternalIVTy, PUpperBound, "omp_firstchunk.ub"); | |||
2276 | Value *FirstChunkEnd = Builder.CreateAdd(FirstChunkStop, One); | |||
2277 | Value *ChunkRange = | |||
2278 | Builder.CreateSub(FirstChunkEnd, FirstChunkStart, "omp_chunk.range"); | |||
2279 | Value *NextChunkStride = | |||
2280 | Builder.CreateLoad(InternalIVTy, PStride, "omp_dispatch.stride"); | |||
2281 | ||||
2282 | // Create outer "dispatch" loop for enumerating the chunks. | |||
2283 | BasicBlock *DispatchEnter = splitBB(Builder, true); | |||
2284 | Value *DispatchCounter; | |||
2285 | CanonicalLoopInfo *DispatchCLI = createCanonicalLoop( | |||
2286 | {Builder.saveIP(), DL}, | |||
2287 | [&](InsertPointTy BodyIP, Value *Counter) { DispatchCounter = Counter; }, | |||
2288 | FirstChunkStart, CastedTripCount, NextChunkStride, | |||
2289 | /*IsSigned=*/false, /*InclusiveStop=*/false, /*ComputeIP=*/{}, | |||
2290 | "dispatch"); | |||
2291 | ||||
2292 | // Remember the BasicBlocks of the dispatch loop we need, then invalidate it so | |||
2293 | // we do not have to preserve the canonical invariant. | |||
2294 | BasicBlock *DispatchBody = DispatchCLI->getBody(); | |||
2295 | BasicBlock *DispatchLatch = DispatchCLI->getLatch(); | |||
2296 | BasicBlock *DispatchExit = DispatchCLI->getExit(); | |||
2297 | BasicBlock *DispatchAfter = DispatchCLI->getAfter(); | |||
2298 | DispatchCLI->invalidate(); | |||
2299 | ||||
2300 | // Rewire the original loop to become the chunk loop inside the dispatch loop. | |||
2301 | redirectTo(DispatchAfter, CLI->getAfter(), DL); | |||
2302 | redirectTo(CLI->getExit(), DispatchLatch, DL); | |||
2303 | redirectTo(DispatchBody, DispatchEnter, DL); | |||
2304 | ||||
2305 | // Prepare the prolog of the chunk loop. | |||
2306 | Builder.restoreIP(CLI->getPreheaderIP()); | |||
2307 | Builder.SetCurrentDebugLocation(DL); | |||
2308 | ||||
2309 | // Compute the number of iterations of the chunk loop. | |||
2310 | Builder.SetInsertPoint(CLI->getPreheader()->getTerminator()); | |||
2311 | Value *ChunkEnd = Builder.CreateAdd(DispatchCounter, ChunkRange); | |||
2312 | Value *IsLastChunk = | |||
2313 | Builder.CreateICmpUGE(ChunkEnd, CastedTripCount, "omp_chunk.is_last"); | |||
2314 | Value *CountUntilOrigTripCount = | |||
2315 | Builder.CreateSub(CastedTripCount, DispatchCounter); | |||
2316 | Value *ChunkTripCount = Builder.CreateSelect( | |||
2317 | IsLastChunk, CountUntilOrigTripCount, ChunkRange, "omp_chunk.tripcount"); | |||
2318 | Value *BackcastedChunkTC = | |||
2319 | Builder.CreateTrunc(ChunkTripCount, IVTy, "omp_chunk.tripcount.trunc"); | |||
2320 | CLI->setTripCount(BackcastedChunkTC); | |||
2321 | ||||
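Each dispatch iteration above handles one chunk: the chunk's end is clamped at the original trip count, so only the last chunk runs a partial range. A standalone rendering of that per-chunk arithmetic; the first-chunk bounds and the stride are whatever the runtime returned, so the concrete numbers below (two threads, schedule(static, 4)) are assumed purely for illustration.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t trip_count = 10;
  const uint64_t chunk_range = 4;       // omp_chunk.range = (ub + 1) - lb
  const uint64_t first_chunk_start = 0; // this thread's first chunk
  const uint64_t stride = 8;            // distance to this thread's next chunk

  // Dispatch loop: enumerate this thread's chunks.
  for (uint64_t counter = first_chunk_start; counter < trip_count;
       counter += stride) {
    uint64_t chunk_end = counter + chunk_range;
    bool is_last = chunk_end >= trip_count; // omp_chunk.is_last
    uint64_t chunk_trip = is_last ? trip_count - counter : chunk_range;
    // Chunk loop: iteration `counter + iv` of the original loop.
    for (uint64_t iv = 0; iv < chunk_trip; ++iv)
      std::printf("i = %llu\n", (unsigned long long)(counter + iv));
  }
  return 0;
}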
2322 | // Update all uses of the induction variable except the one in the condition | |||
2323 | // block that compares it with the actual upper bound, and the increment in | |||
2324 | // the latch block. | |||
2325 | Value *BackcastedDispatchCounter = | |||
2326 | Builder.CreateTrunc(DispatchCounter, IVTy, "omp_dispatch.iv.trunc"); | |||
2327 | CLI->mapIndVar([&](Instruction *) -> Value * { | |||
2328 | Builder.restoreIP(CLI->getBodyIP()); | |||
2329 | return Builder.CreateAdd(IV, BackcastedDispatchCounter); | |||
2330 | }); | |||
2331 | ||||
2332 | // In the "exit" block, call the "fini" function. | |||
2333 | Builder.SetInsertPoint(DispatchExit, DispatchExit->getFirstInsertionPt()); | |||
2334 | Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum}); | |||
2335 | ||||
2336 | // Add the barrier if requested. | |||
2337 | if (NeedsBarrier) | |||
2338 | createBarrier(LocationDescription(Builder.saveIP(), DL), OMPD_for, | |||
2339 | /*ForceSimpleCall=*/false, /*CheckCancelFlag=*/false); | |||
2340 | ||||
2341 | #ifndef NDEBUG | |||
2342 | // Even though we currently do not support applying additional methods to it, | |||
2343 | // the chunk loop should remain a canonical loop. | |||
2344 | CLI->assertOK(); | |||
2345 | #endif | |||
2346 | ||||
2347 | return {DispatchAfter, DispatchAfter->getFirstInsertionPt()}; | |||
2348 | } | |||
2349 | ||||
2350 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyWorkshareLoop( | |||
2351 | DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, | |||
2352 | bool NeedsBarrier, llvm::omp::ScheduleKind SchedKind, | |||
2353 | llvm::Value *ChunkSize, bool HasSimdModifier, bool HasMonotonicModifier, | |||
2354 | bool HasNonmonotonicModifier, bool HasOrderedClause) { | |||
2355 | OMPScheduleType EffectiveScheduleType = computeOpenMPScheduleType( | |||
2356 | SchedKind, ChunkSize, HasSimdModifier, HasMonotonicModifier, | |||
2357 | HasNonmonotonicModifier, HasOrderedClause); | |||
2358 | ||||
2359 | bool IsOrdered = (EffectiveScheduleType & OMPScheduleType::ModifierOrdered) == | |||
2360 | OMPScheduleType::ModifierOrdered; | |||
2361 | switch (EffectiveScheduleType & ~OMPScheduleType::ModifierMask) { | |||
2362 | case OMPScheduleType::BaseStatic: | |||
2363 | assert(!ChunkSize && "No chunk size with static-chunked schedule"); | |||
2364 | if (IsOrdered) | |||
2365 | return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType, | |||
2366 | NeedsBarrier, ChunkSize); | |||
2367 | // FIXME: Monotonicity ignored? | |||
2368 | return applyStaticWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier); | |||
2369 | ||||
2370 | case OMPScheduleType::BaseStaticChunked: | |||
2371 | if (IsOrdered) | |||
2372 | return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType, | |||
2373 | NeedsBarrier, ChunkSize); | |||
2374 | // FIXME: Monotonicity ignored? | |||
2375 | return applyStaticChunkedWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier, | |||
2376 | ChunkSize); | |||
2377 | ||||
2378 | case OMPScheduleType::BaseRuntime: | |||
2379 | case OMPScheduleType::BaseAuto: | |||
2380 | case OMPScheduleType::BaseGreedy: | |||
2381 | case OMPScheduleType::BaseBalanced: | |||
2382 | case OMPScheduleType::BaseSteal: | |||
2383 | case OMPScheduleType::BaseGuidedSimd: | |||
2384 | case OMPScheduleType::BaseRuntimeSimd: | |||
2385 | assert(!ChunkSize && | |||
2386 | "schedule type does not support user-defined chunk sizes"); | |||
2387 | LLVM_FALLTHROUGH; | |||
2388 | case OMPScheduleType::BaseDynamicChunked: | |||
2389 | case OMPScheduleType::BaseGuidedChunked: | |||
2390 | case OMPScheduleType::BaseGuidedIterativeChunked: | |||
2391 | case OMPScheduleType::BaseGuidedAnalyticalChunked: | |||
2392 | case OMPScheduleType::BaseStaticBalancedChunked: | |||
2393 | return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType, | |||
2394 | NeedsBarrier, ChunkSize); | |||
2395 | ||||
2396 | default: | |||
2397 | llvm_unreachable("Unknown/unimplemented schedule kind")::llvm::llvm_unreachable_internal("Unknown/unimplemented schedule kind" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2397); | |||
2398 | } | |||
2399 | } | |||
2400 | ||||
2401 | /// Returns an LLVM function to call for initializing loop bounds using OpenMP | |||
2402 | /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by | |||
2403 | /// the runtime. Always interpret integers as unsigned similarly to | |||
2404 | /// CanonicalLoopInfo. | |||
2405 | static FunctionCallee | |||
2406 | getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { | |||
2407 | unsigned Bitwidth = Ty->getIntegerBitWidth(); | |||
2408 | if (Bitwidth == 32) | |||
2409 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
2410 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u); | |||
2411 | if (Bitwidth == 64) | |||
2412 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
2413 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u); | |||
2414 | llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2414); | |||
2415 | } | |||
2416 | ||||
2417 | /// Returns an LLVM function to call for updating the next loop using OpenMP | |||
2418 | /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by | |||
2419 | /// the runtime. Always interpret integers as unsigned similarly to | |||
2420 | /// CanonicalLoopInfo. | |||
2421 | static FunctionCallee | |||
2422 | getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { | |||
2423 | unsigned Bitwidth = Ty->getIntegerBitWidth(); | |||
2424 | if (Bitwidth == 32) | |||
2425 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
2426 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u); | |||
2427 | if (Bitwidth == 64) | |||
2428 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
2429 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u); | |||
2430 | llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2430); | |||
2431 | } | |||
2432 | ||||
2433 | /// Returns an LLVM function to call for finalizing the dynamic loop, | |||
2434 | /// depending on `type`. Only i32 and i64 are supported by the runtime. Always | |||
2435 | /// interpret integers as unsigned similarly to CanonicalLoopInfo. | |||
2436 | static FunctionCallee | |||
2437 | getKmpcForDynamicFiniForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { | |||
2438 | unsigned Bitwidth = Ty->getIntegerBitWidth(); | |||
2439 | if (Bitwidth == 32) | |||
2440 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
2441 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_fini_4u); | |||
2442 | if (Bitwidth == 64) | |||
2443 | return OMPBuilder.getOrCreateRuntimeFunction( | |||
2444 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_fini_8u); | |||
2445 | llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2445); | |||
2446 | } | |||
2447 | ||||
2448 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop( | |||
2449 | DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, | |||
2450 | OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk) { | |||
2451 | assert(CLI->isValid() && "Requires a valid canonical loop"); | |||
2452 | assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) && | |||
2453 | "Require dedicated allocate IP"); | |||
2454 | assert(isValidWorkshareLoopScheduleType(SchedType) && | |||
2455 | "Require valid schedule type"); | |||
2456 | ||||
2457 | bool Ordered = (SchedType & OMPScheduleType::ModifierOrdered) == | |||
2458 | OMPScheduleType::ModifierOrdered; | |||
2459 | ||||
2460 | // Set up the source location value for OpenMP runtime. | |||
2461 | Builder.SetCurrentDebugLocation(DL); | |||
2462 | ||||
2463 | uint32_t SrcLocStrSize; | |||
2464 | Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize); | |||
2465 | Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
2466 | ||||
2467 | // Declare useful OpenMP runtime functions. | |||
2468 | Value *IV = CLI->getIndVar(); | |||
2469 | Type *IVTy = IV->getType(); | |||
2470 | FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this); | |||
2471 | FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this); | |||
2472 | ||||
2473 | // Allocate space for computed loop bounds as expected by the "init" function. | |||
2474 | Builder.restoreIP(AllocaIP); | |||
2475 | Type *I32Type = Type::getInt32Ty(M.getContext()); | |||
2476 | Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter"); | |||
2477 | Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound"); | |||
2478 | Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound"); | |||
2479 | Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride"); | |||
2480 | ||||
2481 | // At the end of the preheader, prepare for calling the "init" function by | |||
2482 | // storing the current loop bounds into the allocated space. A canonical loop | |||
2483 | // always iterates from 0 to trip-count with step 1. Note that "init" expects | |||
2484 | // and produces an inclusive upper bound. | |||
2485 | BasicBlock *PreHeader = CLI->getPreheader(); | |||
2486 | Builder.SetInsertPoint(PreHeader->getTerminator()); | |||
2487 | Constant *One = ConstantInt::get(IVTy, 1); | |||
2488 | Builder.CreateStore(One, PLowerBound); | |||
2489 | Value *UpperBound = CLI->getTripCount(); | |||
2490 | Builder.CreateStore(UpperBound, PUpperBound); | |||
2491 | Builder.CreateStore(One, PStride); | |||
2492 | ||||
2493 | BasicBlock *Header = CLI->getHeader(); | |||
2494 | BasicBlock *Exit = CLI->getExit(); | |||
2495 | BasicBlock *Cond = CLI->getCond(); | |||
2496 | BasicBlock *Latch = CLI->getLatch(); | |||
2497 | InsertPointTy AfterIP = CLI->getAfterIP(); | |||
2498 | ||||
2499 | // The CLI will be "broken" in the code below, as the loop is no longer | |||
2500 | // a valid canonical loop. | |||
2501 | ||||
2502 | if (!Chunk) | |||
2503 | Chunk = One; | |||
2504 | ||||
2505 | Value *ThreadNum = getOrCreateThreadID(SrcLoc); | |||
2506 | ||||
2507 | Constant *SchedulingType = | |||
2508 | ConstantInt::get(I32Type, static_cast<int>(SchedType)); | |||
2509 | ||||
2510 | // Call the "init" function. | |||
2511 | Builder.CreateCall(DynamicInit, | |||
2512 | {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One, | |||
2513 | UpperBound, /* step */ One, Chunk}); | |||
2514 | ||||
2515 | // An outer loop around the existing one. | |||
2516 | BasicBlock *OuterCond = BasicBlock::Create( | |||
2517 | PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond", | |||
2518 | PreHeader->getParent()); | |||
2519 | // The result of the "next" call is always 32-bit, so the zero it is compared against below cannot reuse the IVTy constants above. | |||
2520 | Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt()); | |||
2521 | Value *Res = | |||
2522 | Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter, | |||
2523 | PLowerBound, PUpperBound, PStride}); | |||
2524 | Constant *Zero32 = ConstantInt::get(I32Type, 0); | |||
2525 | Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32); | |||
2526 | Value *LowerBound = | |||
2527 | Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb"); | |||
2528 | Builder.CreateCondBr(MoreWork, Header, Exit); | |||
2529 | ||||
2530 | // Change PHI-node in loop header to use outer cond rather than preheader, | |||
2531 | // and set IV to the LowerBound. | |||
2532 | Instruction *Phi = &Header->front(); | |||
2533 | auto *PI = cast<PHINode>(Phi); | |||
2534 | PI->setIncomingBlock(0, OuterCond); | |||
2535 | PI->setIncomingValue(0, LowerBound); | |||
2536 | ||||
2537 | // Then set the pre-header to jump to the OuterCond | |||
2538 | Instruction *Term = PreHeader->getTerminator(); | |||
2539 | auto *Br = cast<BranchInst>(Term); | |||
2540 | Br->setSuccessor(0, OuterCond); | |||
2541 | ||||
2542 | // Modify the inner condition: | |||
2543 | // * Use the UpperBound returned from the DynamicNext call. | |||
2544 | // * Jump to the outer loop when done with the inner loop. | |||
2545 | Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt()); | |||
2546 | UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub"); | |||
2547 | Instruction *Comp = &*Builder.GetInsertPoint(); | |||
2548 | auto *CI = cast<CmpInst>(Comp); | |||
2549 | CI->setOperand(1, UpperBound); | |||
2550 | // Redirect the inner exit to branch to outer condition. | |||
2551 | Instruction *Branch = &Cond->back(); | |||
2552 | auto *BI = cast<BranchInst>(Branch); | |||
2553 | assert(BI->getSuccessor(1) == Exit); | |||
2554 | BI->setSuccessor(1, OuterCond); | |||
2555 | ||||
2556 | // Call the "fini" function if "ordered" is present in wsloop directive. | |||
2557 | if (Ordered) { | |||
2558 | Builder.SetInsertPoint(&Latch->back()); | |||
2559 | FunctionCallee DynamicFini = getKmpcForDynamicFiniForType(IVTy, M, *this); | |||
2560 | Builder.CreateCall(DynamicFini, {SrcLoc, ThreadNum}); | |||
2561 | } | |||
2562 | ||||
2563 | // Add the barrier if requested. | |||
2564 | if (NeedsBarrier) { | |||
2565 | Builder.SetInsertPoint(&Exit->back()); | |||
2566 | createBarrier(LocationDescription(Builder.saveIP(), DL), | |||
2567 | omp::Directive::OMPD_for, /* ForceSimpleCall */ false, | |||
2568 | /* CheckCancelFlag */ false); | |||
2569 | } | |||
2570 | ||||
2571 | CLI->invalidate(); | |||
2572 | return AfterIP; | |||
2573 | } | |||
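After the rewiring above, every thread repeatedly asks the runtime for another chunk and replays the original loop body over the returned bounds. A stubbed sketch of that shape; the stub only mimics the calling pattern (the real __kmpc_dispatch_next_* also takes loc/gtid/p_last/p_st arguments and reports one-based inclusive bounds, which the generated code adjusts by subtracting one).

#include <algorithm>
#include <cstdint>

// Illustrative stand-in for the "next" call: false when no chunks are left,
// otherwise fills in the next inclusive [lb, ub] range.
static bool dispatch_next_stub(uint64_t trip_count, uint64_t chunk,
                               uint64_t &cursor, uint64_t &lb, uint64_t &ub) {
  if (cursor >= trip_count)
    return false;
  lb = cursor;
  ub = std::min<uint64_t>(cursor + chunk, trip_count) - 1;
  cursor = ub + 1;
  return true;
}

// Shape of the rewritten loop: an outer "dispatch" loop around the original
// body, with the inner bound refreshed from the runtime for every chunk.
void dynamic_loop_shape(uint64_t trip_count, uint64_t chunk,
                        void (*body)(uint64_t)) {
  uint64_t cursor = 0, lb = 0, ub = 0;
  while (dispatch_next_stub(trip_count, chunk, cursor, lb, ub)) // outer.cond
    for (uint64_t iv = lb; iv <= ub; ++iv)                      // inner loop
      body(iv);
}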
2574 | ||||
2575 | /// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is, | |||
2576 | /// after this \p OldTarget will be orphaned. | |||
2577 | static void redirectAllPredecessorsTo(BasicBlock *OldTarget, | |||
2578 | BasicBlock *NewTarget, DebugLoc DL) { | |||
2579 | for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget))) | |||
2580 | redirectTo(Pred, NewTarget, DL); | |||
2581 | } | |||
2582 | ||||
2583 | /// Determine which blocks in \p BBs are reachable from outside and remove the | |||
2584 | /// ones that are not reachable from the function. | |||
2585 | static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) { | |||
2586 | SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()}; | |||
2587 | auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) { | |||
2588 | for (Use &U : BB->uses()) { | |||
2589 | auto *UseInst = dyn_cast<Instruction>(U.getUser()); | |||
2590 | if (!UseInst) | |||
2591 | continue; | |||
2592 | if (BBsToErase.count(UseInst->getParent())) | |||
2593 | continue; | |||
2594 | return true; | |||
2595 | } | |||
2596 | return false; | |||
2597 | }; | |||
2598 | ||||
2599 | while (true) { | |||
2600 | bool Changed = false; | |||
2601 | for (BasicBlock *BB : make_early_inc_range(BBsToErase)) { | |||
2602 | if (HasRemainingUses(BB)) { | |||
2603 | BBsToErase.erase(BB); | |||
2604 | Changed = true; | |||
2605 | } | |||
2606 | } | |||
2607 | if (!Changed) | |||
2608 | break; | |||
2609 | } | |||
2610 | ||||
2611 | SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end()); | |||
2612 | DeleteDeadBlocks(BBVec); | |||
2613 | } | |||
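The fixed point above keeps any candidate block that is still referenced from outside the candidate set and repeats until nothing changes; only what remains is deleted. A toy standalone model of that rule, with BasicBlock uses reduced to a block-to-user multimap (names and types are mine):

#include <map>
#include <set>

std::set<int> blocks_to_erase(std::set<int> candidates,
                              const std::multimap<int, int> &users) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (auto it = candidates.begin(); it != candidates.end();) {
      // A use from a block that is not itself a deletion candidate keeps
      // this block alive, mirroring HasRemainingUses above.
      bool kept = false;
      auto range = users.equal_range(*it);
      for (auto u = range.first; u != range.second; ++u)
        if (!candidates.count(u->second)) {
          kept = true;
          break;
        }
      if (kept) {
        it = candidates.erase(it);
        changed = true;
      } else {
        ++it;
      }
    }
  }
  return candidates; // everything left really is dead
}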
2614 | ||||
2615 | CanonicalLoopInfo * | |||
2616 | OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, | |||
2617 | InsertPointTy ComputeIP) { | |||
2618 | assert(Loops.size() >= 1 && "At least one loop required"); | |||
| ||||
2619 | size_t NumLoops = Loops.size(); | |||
2620 | ||||
2621 | // Nothing to do if there is already just one loop. | |||
2622 | if (NumLoops == 1) | |||
2623 | return Loops.front(); | |||
2624 | ||||
2625 | CanonicalLoopInfo *Outermost = Loops.front(); | |||
2626 | CanonicalLoopInfo *Innermost = Loops.back(); | |||
2627 | BasicBlock *OrigPreheader = Outermost->getPreheader(); | |||
2628 | BasicBlock *OrigAfter = Outermost->getAfter(); | |||
2629 | Function *F = OrigPreheader->getParent(); | |||
2630 | ||||
2631 | // Loop control blocks that may become orphaned later. | |||
2632 | SmallVector<BasicBlock *, 12> OldControlBBs; | |||
2633 | OldControlBBs.reserve(6 * Loops.size()); | |||
2634 | for (CanonicalLoopInfo *Loop : Loops) | |||
2635 | Loop->collectControlBlocks(OldControlBBs); | |||
2636 | ||||
2637 | // Setup the IRBuilder for inserting the trip count computation. | |||
2638 | Builder.SetCurrentDebugLocation(DL); | |||
2639 | if (ComputeIP.isSet()) | |||
2640 | Builder.restoreIP(ComputeIP); | |||
2641 | else | |||
2642 | Builder.restoreIP(Outermost->getPreheaderIP()); | |||
2643 | ||||
2644 | // Derive the collapsed loop's trip count. | |||
2645 | // TODO: Find common/largest indvar type. | |||
2646 | Value *CollapsedTripCount = nullptr; | |||
2647 | for (CanonicalLoopInfo *L : Loops) { | |||
2648 | assert(L->isValid() && | |||
2649 | "All loops to collapse must be valid canonical loops"); | |||
2650 | Value *OrigTripCount = L->getTripCount(); | |||
2651 | if (!CollapsedTripCount) { | |||
2652 | CollapsedTripCount = OrigTripCount; | |||
2653 | continue; | |||
2654 | } | |||
2655 | ||||
2656 | // TODO: Enable UndefinedBehaviorSanitizer to diagnose an overflow here. | |||
2657 | CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount, | |||
2658 | {}, /*HasNUW=*/true); | |||
2659 | } | |||
2660 | ||||
2661 | // Create the collapsed loop control flow. | |||
2662 | CanonicalLoopInfo *Result = | |||
2663 | createLoopSkeleton(DL, CollapsedTripCount, F, | |||
2664 | OrigPreheader->getNextNode(), OrigAfter, "collapsed"); | |||
2665 | ||||
2666 | // Build the collapsed loop body code. | |||
2667 | // Start with deriving the input loop induction variables from the collapsed | |||
2668 | // one, using a divmod scheme. To preserve the original loops' order, the | |||
2669 | // innermost loop uses the least significant bits. | |||
2670 | Builder.restoreIP(Result->getBodyIP()); | |||
2671 | ||||
2672 | Value *Leftover = Result->getIndVar(); | |||
2673 | SmallVector<Value *> NewIndVars; | |||
2674 | NewIndVars.resize(NumLoops); | |||
2675 | for (int i = NumLoops - 1; i >= 1; --i) { | |||
2676 | Value *OrigTripCount = Loops[i]->getTripCount(); | |||
2677 | ||||
2678 | Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount); | |||
2679 | NewIndVars[i] = NewIndVar; | |||
2680 | ||||
2681 | Leftover = Builder.CreateUDiv(Leftover, OrigTripCount); | |||
2682 | } | |||
2683 | // Outermost loop gets all the remaining bits. | |||
2684 | NewIndVars[0] = Leftover; | |||
2685 | ||||
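The decomposition above is the inverse of the trip-count multiplication: peeling with urem/udiv from the innermost loop outwards makes the innermost original induction variable vary fastest. A standalone check of that property for three loops (trip counts chosen arbitrarily for the sketch):

#include <cassert>
#include <cstdint>

int main() {
  // Three collapsed loops with trip counts 3, 4 and 5 -> 60 iterations.
  const uint64_t tc[3] = {3, 4, 5};
  uint64_t expected[3] = {0, 0, 0}; // reference odometer, innermost fastest
  for (uint64_t iv = 0; iv < tc[0] * tc[1] * tc[2]; ++iv) {
    // Divmod scheme above: peel indvars from the least significant "digit".
    uint64_t leftover = iv, indvar[3];
    for (int i = 2; i >= 1; --i) {
      indvar[i] = leftover % tc[i];
      leftover /= tc[i];
    }
    indvar[0] = leftover; // the outermost loop gets the remaining bits
    for (int i = 0; i < 3; ++i)
      assert(indvar[i] == expected[i]);
    // Advance the reference odometer.
    for (int i = 2; i >= 0; --i) {
      if (++expected[i] < tc[i])
        break;
      expected[i] = 0;
    }
  }
  return 0;
}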
2686 | // Construct the loop body control flow. | |||
2687 | // We progressively construct the branch structure following the direction of | |||
2688 | // the control flow, from the leading in-between code, the loop nest body, the | |||
2689 | // trailing in-between code, and rejoining the collapsed loop's latch. | |||
2690 | // ContinueBlock and ContinuePred keep track of the source(s) of the next edge. If | |||
2691 | // the ContinueBlock is set, continue with that block. If ContinuePred, use | |||
2692 | // its predecessors as sources. | |||
2693 | BasicBlock *ContinueBlock = Result->getBody(); | |||
2694 | BasicBlock *ContinuePred = nullptr; | |||
2695 | auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest, | |||
2696 | BasicBlock *NextSrc) { | |||
2697 | if (ContinueBlock) | |||
2698 | redirectTo(ContinueBlock, Dest, DL); | |||
2699 | else | |||
2700 | redirectAllPredecessorsTo(ContinuePred, Dest, DL); | |||
2701 | ||||
2702 | ContinueBlock = nullptr; | |||
2703 | ContinuePred = NextSrc; | |||
2704 | }; | |||
2705 | ||||
2706 | // The code before the nested loop of each level. | |||
2707 | // Because we are sinking it into the nest, it will be executed more often | |||
2708 | // than the original loop. More sophisticated schemes could keep track of what | |||
2709 | // the in-between code is and instantiate it only once per thread. | |||
2710 | for (size_t i = 0; i < NumLoops - 1; ++i) | |||
2711 | ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader()); | |||
2712 | ||||
2713 | // Connect the loop nest body. | |||
2714 | ContinueWith(Innermost->getBody(), Innermost->getLatch()); | |||
2715 | ||||
2716 | // The code after the nested loop at each level. | |||
2717 | for (size_t i = NumLoops - 1; i > 0; --i) | |||
2718 | ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch()); | |||
2719 | ||||
2720 | // Connect the finished loop to the collapsed loop latch. | |||
2721 | ContinueWith(Result->getLatch(), nullptr); | |||
2722 | ||||
2723 | // Replace the input loops with the new collapsed loop. | |||
2724 | redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL); | |||
2725 | redirectTo(Result->getAfter(), Outermost->getAfter(), DL); | |||
2726 | ||||
2727 | // Replace the input loop indvars with the derived ones. | |||
2728 | for (size_t i = 0; i < NumLoops; ++i) | |||
2729 | Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]); | |||
2730 | ||||
2731 | // Remove unused parts of the input loops. | |||
2732 | removeUnusedBlocksFromParent(OldControlBBs); | |||
2733 | ||||
2734 | for (CanonicalLoopInfo *L : Loops) | |||
2735 | L->invalidate(); | |||
2736 | ||||
2737 | #ifndef NDEBUG | |||
2738 | Result->assertOK(); | |||
2739 | #endif | |||
2740 | return Result; | |||
2741 | } | |||
2742 | ||||
2743 | std::vector<CanonicalLoopInfo *> | |||
2744 | OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, | |||
2745 | ArrayRef<Value *> TileSizes) { | |||
2746 | assert(TileSizes.size() == Loops.size() && | |||
2747 | "Must pass as many tile sizes as there are loops"); | |||
2748 | int NumLoops = Loops.size(); | |||
2749 | assert(NumLoops >= 1 && "At least one loop to tile required"); | |||
2750 | ||||
2751 | CanonicalLoopInfo *OutermostLoop = Loops.front(); | |||
2752 | CanonicalLoopInfo *InnermostLoop = Loops.back(); | |||
2753 | Function *F = OutermostLoop->getBody()->getParent(); | |||
2754 | BasicBlock *InnerEnter = InnermostLoop->getBody(); | |||
2755 | BasicBlock *InnerLatch = InnermostLoop->getLatch(); | |||
2756 | ||||
2757 | // Loop control blocks that may become orphaned later. | |||
2758 | SmallVector<BasicBlock *, 12> OldControlBBs; | |||
2759 | OldControlBBs.reserve(6 * Loops.size()); | |||
2760 | for (CanonicalLoopInfo *Loop : Loops) | |||
2761 | Loop->collectControlBlocks(OldControlBBs); | |||
2762 | ||||
2763 | // Collect original trip counts and induction variables to be accessible by | |||
2764 | // index. Also, the structure of the original loops is not preserved during | |||
2765 | // the construction of the tiled loops, so do it before we scavenge the BBs of | |||
2766 | // any original CanonicalLoopInfo. | |||
2767 | SmallVector<Value *, 4> OrigTripCounts, OrigIndVars; | |||
2768 | for (CanonicalLoopInfo *L : Loops) { | |||
2769 | assert(L->isValid() && "All input loops must be valid canonical loops")(static_cast <bool> (L->isValid() && "All input loops must be valid canonical loops" ) ? void (0) : __assert_fail ("L->isValid() && \"All input loops must be valid canonical loops\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2769, __extension__ __PRETTY_FUNCTION__)); | |||
2770 | OrigTripCounts.push_back(L->getTripCount()); | |||
2771 | OrigIndVars.push_back(L->getIndVar()); | |||
2772 | } | |||
2773 | ||||
2774 | // Collect the code between loop headers. These may contain SSA definitions | |||
2775 | // that are used in the loop nest body. To be usable within the innermost | |||
2776 | // body, these BasicBlocks will be sunk into the loop nest body. That is, | |||
2777 | // these instructions may be executed more often than before the tiling. | |||
2778 | // TODO: It would be sufficient to only sink them into body of the | |||
2779 | // corresponding tile loop. | |||
2780 | SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode; | |||
2781 | for (int i = 0; i < NumLoops - 1; ++i) { | |||
2782 | CanonicalLoopInfo *Surrounding = Loops[i]; | |||
2783 | CanonicalLoopInfo *Nested = Loops[i + 1]; | |||
2784 | ||||
2785 | BasicBlock *EnterBB = Surrounding->getBody(); | |||
2786 | BasicBlock *ExitBB = Nested->getHeader(); | |||
2787 | InbetweenCode.emplace_back(EnterBB, ExitBB); | |||
2788 | } | |||
2789 | ||||
2790 | // Compute the trip counts of the floor loops. | |||
2791 | Builder.SetCurrentDebugLocation(DL); | |||
2792 | Builder.restoreIP(OutermostLoop->getPreheaderIP()); | |||
2793 | SmallVector<Value *, 4> FloorCount, FloorRems; | |||
2794 | for (int i = 0; i < NumLoops; ++i) { | |||
2795 | Value *TileSize = TileSizes[i]; | |||
2796 | Value *OrigTripCount = OrigTripCounts[i]; | |||
2797 | Type *IVType = OrigTripCount->getType(); | |||
2798 | ||||
2799 | Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize); | |||
2800 | Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize); | |||
2801 | ||||
2802 | // 0 if the tilesize evenly divides the tripcount, 1 otherwise. | |||
2803 | // 1 means we need an additional iteration for a partial tile. | |||
2804 | // | |||
2805 | // Unfortunately we cannot just use the roundup-formula | |||
2806 | // (tripcount + tilesize - 1)/tilesize | |||
2807 | // because the summation might overflow. We do not want to introduce undefined | |||
2808 | // behavior when the untiled loop nest did not. | |||
2809 | Value *FloorTripOverflow = | |||
2810 | Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0)); | |||
2811 | ||||
2812 | FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType); | |||
2813 | FloorTripCount = | |||
2814 | Builder.CreateAdd(FloorTripCount, FloorTripOverflow, | |||
2815 | "omp_floor" + Twine(i) + ".tripcount", true); | |||
2816 | ||||
2817 | // Remember some values for later use. | |||
2818 | FloorCount.push_back(FloorTripCount); | |||
2819 | FloorRems.push_back(FloorTripRem); | |||
2820 | } | |||
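With the overflow-safe ceiling div + (rem != 0) above, plus the epilogue selection and the Size * FloorIV + TileIV reconstruction further down, the floor/tile nest enumerates exactly the original iteration space. A standalone check with a trip count that does not divide evenly by the tile size; this sketches the arithmetic only, not the generated IR.

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t trip_count = 10, tile_size = 4;

  // Overflow-safe ceil(trip_count / tile_size), as computed above.
  uint64_t floor_trips = trip_count / tile_size; // 2
  uint64_t floor_rem = trip_count % tile_size;   // 2
  floor_trips += (floor_rem != 0);               // 3 floor iterations

  uint64_t next = 0;
  for (uint64_t f = 0; f < floor_trips; ++f) {
    // Only the last floor iteration runs the partial tile, and only when the
    // division was not exact.
    bool is_epilogue = (floor_rem != 0) && (f == floor_trips - 1);
    uint64_t tile_trips = is_epilogue ? floor_rem : tile_size;
    for (uint64_t t = 0; t < tile_trips; ++t) {
      uint64_t original_iv = tile_size * f + t; // Scale + Shift, as below
      assert(original_iv == next++);
    }
  }
  assert(next == trip_count);
  return 0;
}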
2821 | ||||
2822 | // Generate the new loop nest, from the outermost to the innermost. | |||
2823 | std::vector<CanonicalLoopInfo *> Result; | |||
2824 | Result.reserve(NumLoops * 2); | |||
2825 | ||||
2826 | // The basic block of the surrounding loop that enters the generated loop | |||
2827 | // nest. | |||
2828 | BasicBlock *Enter = OutermostLoop->getPreheader(); | |||
2829 | ||||
2830 | // The basic block of the surrounding loop where the inner code should | |||
2831 | // continue. | |||
2832 | BasicBlock *Continue = OutermostLoop->getAfter(); | |||
2833 | ||||
2834 | // Where the next loop basic block should be inserted. | |||
2835 | BasicBlock *OutroInsertBefore = InnermostLoop->getExit(); | |||
2836 | ||||
2837 | auto EmbeddNewLoop = | |||
2838 | [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore]( | |||
2839 | Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * { | |||
2840 | CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton( | |||
2841 | DL, TripCount, F, InnerEnter, OutroInsertBefore, Name); | |||
2842 | redirectTo(Enter, EmbeddedLoop->getPreheader(), DL); | |||
2843 | redirectTo(EmbeddedLoop->getAfter(), Continue, DL); | |||
2844 | ||||
2845 | // Setup the position where the next embedded loop connects to this loop. | |||
2846 | Enter = EmbeddedLoop->getBody(); | |||
2847 | Continue = EmbeddedLoop->getLatch(); | |||
2848 | OutroInsertBefore = EmbeddedLoop->getLatch(); | |||
2849 | return EmbeddedLoop; | |||
2850 | }; | |||
2851 | ||||
2852 | auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts, | |||
2853 | const Twine &NameBase) { | |||
2854 | for (auto P : enumerate(TripCounts)) { | |||
2855 | CanonicalLoopInfo *EmbeddedLoop = | |||
2856 | EmbeddNewLoop(P.value(), NameBase + Twine(P.index())); | |||
2857 | Result.push_back(EmbeddedLoop); | |||
2858 | } | |||
2859 | }; | |||
2860 | ||||
2861 | EmbeddNewLoops(FloorCount, "floor"); | |||
2862 | ||||
2863 | // Within the innermost floor loop, emit the code that computes the tile | |||
2864 | // sizes. | |||
2865 | Builder.SetInsertPoint(Enter->getTerminator()); | |||
2866 | SmallVector<Value *, 4> TileCounts; | |||
2867 | for (int i = 0; i < NumLoops; ++i) { | |||
2868 | CanonicalLoopInfo *FloorLoop = Result[i]; | |||
2869 | Value *TileSize = TileSizes[i]; | |||
2870 | ||||
2871 | Value *FloorIsEpilogue = | |||
2872 | Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]); | |||
2873 | Value *TileTripCount = | |||
2874 | Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize); | |||
2875 | ||||
2876 | TileCounts.push_back(TileTripCount); | |||
2877 | } | |||
2878 | ||||
2879 | // Create the tile loops. | |||
2880 | EmbeddNewLoops(TileCounts, "tile"); | |||
2881 | ||||
2882 | // Insert the inbetween code into the body. | |||
2883 | BasicBlock *BodyEnter = Enter; | |||
2884 | BasicBlock *BodyEntered = nullptr; | |||
2885 | for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) { | |||
2886 | BasicBlock *EnterBB = P.first; | |||
2887 | BasicBlock *ExitBB = P.second; | |||
2888 | ||||
2889 | if (BodyEnter) | |||
2890 | redirectTo(BodyEnter, EnterBB, DL); | |||
2891 | else | |||
2892 | redirectAllPredecessorsTo(BodyEntered, EnterBB, DL); | |||
2893 | ||||
2894 | BodyEnter = nullptr; | |||
2895 | BodyEntered = ExitBB; | |||
2896 | } | |||
2897 | ||||
2898 | // Append the original loop nest body into the generated loop nest body. | |||
2899 | if (BodyEnter) | |||
2900 | redirectTo(BodyEnter, InnerEnter, DL); | |||
2901 | else | |||
2902 | redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL); | |||
2903 | redirectAllPredecessorsTo(InnerLatch, Continue, DL); | |||
2904 | ||||
2905 | // Replace the original induction variable with an induction variable computed | |||
2906 | // from the tile and floor induction variables. | |||
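| // The original logical iteration is recovered as | |||
| //   OrigIndVar = FloorIndVar * TileSize + TileIndVar, | |||
| // e.g. tile size 4, floor iv 2 and tile iv 1 yield original iteration 9. | |||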
2907 | Builder.restoreIP(Result.back()->getBodyIP()); | |||
2908 | for (int i = 0; i < NumLoops; ++i) { | |||
2909 | CanonicalLoopInfo *FloorLoop = Result[i]; | |||
2910 | CanonicalLoopInfo *TileLoop = Result[NumLoops + i]; | |||
2911 | Value *OrigIndVar = OrigIndVars[i]; | |||
2912 | Value *Size = TileSizes[i]; | |||
2913 | ||||
2914 | Value *Scale = | |||
2915 | Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true); | |||
2916 | Value *Shift = | |||
2917 | Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true); | |||
2918 | OrigIndVar->replaceAllUsesWith(Shift); | |||
2919 | } | |||
2920 | ||||
2921 | // Remove unused parts of the original loops. | |||
2922 | removeUnusedBlocksFromParent(OldControlBBs); | |||
2923 | ||||
2924 | for (CanonicalLoopInfo *L : Loops) | |||
2925 | L->invalidate(); | |||
2926 | ||||
2927 | #ifndef NDEBUG | |||
2928 | for (CanonicalLoopInfo *GenL : Result) | |||
2929 | GenL->assertOK(); | |||
2930 | #endif | |||
2931 | return Result; | |||
2932 | } | |||
2933 | ||||
2934 | /// Attach metadata \p Properties to the basic block described by \p BB. If the | |||
2935 | /// basic block already has metadata, the basic block properties are appended. | |||
2936 | static void addBasicBlockMetadata(BasicBlock *BB, | |||
2937 | ArrayRef<Metadata *> Properties) { | |||
2938 | // Nothing to do if no property to attach. | |||
2939 | if (Properties.empty()) | |||
2940 | return; | |||
2941 | ||||
2942 | LLVMContext &Ctx = BB->getContext(); | |||
2943 | SmallVector<Metadata *> NewProperties; | |||
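| // Reserve the first operand as a placeholder for the self-referencing loop | |||
| // id; it is filled in by replaceOperandWith(0, ...) below. | |||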
2944 | NewProperties.push_back(nullptr); | |||
2945 | ||||
2946 | // If the basic block already has metadata, prepend it to the new metadata. | |||
2947 | MDNode *Existing = BB->getTerminator()->getMetadata(LLVMContext::MD_loop); | |||
2948 | if (Existing) | |||
2949 | append_range(NewProperties, drop_begin(Existing->operands(), 1)); | |||
2950 | ||||
2951 | append_range(NewProperties, Properties); | |||
2952 | MDNode *BasicBlockID = MDNode::getDistinct(Ctx, NewProperties); | |||
2953 | BasicBlockID->replaceOperandWith(0, BasicBlockID); | |||
2954 | ||||
2955 | BB->getTerminator()->setMetadata(LLVMContext::MD_loop, BasicBlockID); | |||
2956 | } | |||
2957 | ||||
2958 | /// Attach loop metadata \p Properties to the loop described by \p Loop. If the | |||
2959 | /// loop already has metadata, the loop properties are appended. | |||
2960 | static void addLoopMetadata(CanonicalLoopInfo *Loop, | |||
2961 | ArrayRef<Metadata *> Properties) { | |||
2962 | assert(Loop->isValid() && "Expecting a valid CanonicalLoopInfo"); | |||
2963 | ||||
2964 | // Attach metadata to the loop's latch | |||
2965 | BasicBlock *Latch = Loop->getLatch(); | |||
2966 | assert(Latch && "A valid CanonicalLoopInfo must have a unique latch"); | |||
2967 | addBasicBlockMetadata(Latch, Properties); | |||
2968 | } | |||
2969 | ||||
2970 | /// Attach llvm.access.group metadata to the memref instructions of \p Block | |||
2971 | static void addSimdMetadata(BasicBlock *Block, MDNode *AccessGroup, | |||
2972 | LoopInfo &LI) { | |||
2973 | for (Instruction &I : *Block) { | |||
2974 | if (I.mayReadOrWriteMemory()) { | |||
2975 | // TODO: This instruction may already have access group from | |||
2976 | // other pragmas e.g. #pragma clang loop vectorize. Append | |||
2977 | // so that the existing metadata is not overwritten. | |||
2978 | I.setMetadata(LLVMContext::MD_access_group, AccessGroup); | |||
2979 | } | |||
2980 | } | |||
2981 | } | |||
2982 | ||||
2983 | void OpenMPIRBuilder::unrollLoopFull(DebugLoc, CanonicalLoopInfo *Loop) { | |||
2984 | LLVMContext &Ctx = Builder.getContext(); | |||
2985 | addLoopMetadata( | |||
2986 | Loop, {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), | |||
2987 | MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.full"))}); | |||
2988 | } | |||
2989 | ||||
2990 | void OpenMPIRBuilder::unrollLoopHeuristic(DebugLoc, CanonicalLoopInfo *Loop) { | |||
2991 | LLVMContext &Ctx = Builder.getContext(); | |||
2992 | addLoopMetadata( | |||
2993 | Loop, { | |||
2994 | MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), | |||
2995 | }); | |||
2996 | } | |||
2997 | ||||
2998 | void OpenMPIRBuilder::createIfVersion(CanonicalLoopInfo *CanonicalLoop, | |||
2999 | Value *IfCond, ValueToValueMapTy &VMap, | |||
3000 | const Twine &NamePrefix) { | |||
3001 | Function *F = CanonicalLoop->getFunction(); | |||
3002 | ||||
3003 | // Define where the if branch should be inserted. | |||
3004 | Instruction *SplitBefore; | |||
3005 | if (Instruction::classof(IfCond)) { | |||
3006 | SplitBefore = dyn_cast<Instruction>(IfCond); | |||
3007 | } else { | |||
3008 | SplitBefore = CanonicalLoop->getPreheader()->getTerminator(); | |||
3009 | } | |||
3010 | ||||
3011 | // TODO: We should not rely on pass manager. Currently we use pass manager | |||
3012 | // only for getting llvm::Loop which corresponds to given CanonicalLoopInfo | |||
3013 | // object. We should have a method which returns all blocks between | |||
3014 | // CanonicalLoopInfo::getHeader() and CanonicalLoopInfo::getAfter() | |||
3015 | FunctionAnalysisManager FAM; | |||
3016 | FAM.registerPass([]() { return DominatorTreeAnalysis(); }); | |||
3017 | FAM.registerPass([]() { return LoopAnalysis(); }); | |||
3018 | FAM.registerPass([]() { return PassInstrumentationAnalysis(); }); | |||
3019 | ||||
3020 | // Get the loop which needs to be cloned | |||
3021 | LoopAnalysis LIA; | |||
3022 | LoopInfo &&LI = LIA.run(*F, FAM); | |||
3023 | Loop *L = LI.getLoopFor(CanonicalLoop->getHeader()); | |||
3024 | ||||
3025 | // Create additional blocks for the if statement | |||
3026 | BasicBlock *Head = SplitBefore->getParent(); | |||
3027 | Instruction *HeadOldTerm = Head->getTerminator(); | |||
3028 | llvm::LLVMContext &C = Head->getContext(); | |||
3029 | llvm::BasicBlock *ThenBlock = llvm::BasicBlock::Create( | |||
3030 | C, NamePrefix + ".if.then", Head->getParent(), Head->getNextNode()); | |||
3031 | llvm::BasicBlock *ElseBlock = llvm::BasicBlock::Create( | |||
3032 | C, NamePrefix + ".if.else", Head->getParent(), CanonicalLoop->getExit()); | |||
3033 | ||||
3034 | // Create if condition branch. | |||
3035 | Builder.SetInsertPoint(HeadOldTerm); | |||
3036 | Instruction *BrInstr = | |||
3037 | Builder.CreateCondBr(IfCond, ThenBlock, /*ifFalse*/ ElseBlock); | |||
3038 | InsertPointTy IP{BrInstr->getParent(), ++BrInstr->getIterator()}; | |||
3039 | // The then block contains the branch to the OpenMP loop that is to be vectorized. | |||
3040 | spliceBB(IP, ThenBlock, false); | |||
3041 | ThenBlock->replaceSuccessorsPhiUsesWith(Head, ThenBlock); | |||
3042 | ||||
3043 | Builder.SetInsertPoint(ElseBlock); | |||
3044 | ||||
3045 | // Clone loop for the else branch | |||
3046 | SmallVector<BasicBlock *, 8> NewBlocks; | |||
3047 | ||||
3048 | VMap[CanonicalLoop->getPreheader()] = ElseBlock; | |||
3049 | for (BasicBlock *Block : L->getBlocks()) { | |||
3050 | BasicBlock *NewBB = CloneBasicBlock(Block, VMap, "", F); | |||
3051 | NewBB->moveBefore(CanonicalLoop->getExit()); | |||
3052 | VMap[Block] = NewBB; | |||
3053 | NewBlocks.push_back(NewBB); | |||
3054 | } | |||
3055 | remapInstructionsInBlocks(NewBlocks, VMap); | |||
3056 | Builder.CreateBr(NewBlocks.front()); | |||
3057 | } | |||
3058 | ||||
3059 | unsigned | |||
3060 | OpenMPIRBuilder::getOpenMPDefaultSimdAlign(const Triple &TargetTriple, | |||
3061 | const StringMap<bool> &Features) { | |||
3062 | if (TargetTriple.isX86()) { | |||
3063 | if (Features.lookup("avx512f")) | |||
3064 | return 512; | |||
3065 | else if (Features.lookup("avx")) | |||
3066 | return 256; | |||
3067 | return 128; | |||
3068 | } | |||
3069 | if (TargetTriple.isPPC()) | |||
3070 | return 128; | |||
3071 | if (TargetTriple.isWasm()) | |||
3072 | return 128; | |||
3073 | return 0; | |||
3074 | } | |||
3075 | ||||
3076 | void OpenMPIRBuilder::applySimd(CanonicalLoopInfo *CanonicalLoop, | |||
3077 | MapVector<Value *, Value *> AlignedVars, | |||
3078 | Value *IfCond, OrderKind Order, | |||
3079 | ConstantInt *Simdlen, ConstantInt *Safelen) { | |||
3080 | LLVMContext &Ctx = Builder.getContext(); | |||
3081 | ||||
3082 | Function *F = CanonicalLoop->getFunction(); | |||
3083 | ||||
3084 | // TODO: We should not rely on pass manager. Currently we use pass manager | |||
3085 | // only for getting llvm::Loop which corresponds to given CanonicalLoopInfo | |||
3086 | // object. We should have a method which returns all blocks between | |||
3087 | // CanonicalLoopInfo::getHeader() and CanonicalLoopInfo::getAfter() | |||
3088 | FunctionAnalysisManager FAM; | |||
3089 | FAM.registerPass([]() { return DominatorTreeAnalysis(); }); | |||
3090 | FAM.registerPass([]() { return LoopAnalysis(); }); | |||
3091 | FAM.registerPass([]() { return PassInstrumentationAnalysis(); }); | |||
3092 | ||||
3093 | LoopAnalysis LIA; | |||
3094 | LoopInfo &&LI = LIA.run(*F, FAM); | |||
3095 | ||||
3096 | Loop *L = LI.getLoopFor(CanonicalLoop->getHeader()); | |||
3097 | if (AlignedVars.size()) { | |||
3098 | InsertPointTy IP = Builder.saveIP(); | |||
3099 | Builder.SetInsertPoint(CanonicalLoop->getPreheader()->getTerminator()); | |||
3100 | for (auto &AlignedItem : AlignedVars) { | |||
3101 | Value *AlignedPtr = AlignedItem.first; | |||
3102 | Value *Alignment = AlignedItem.second; | |||
3103 | Builder.CreateAlignmentAssumption(F->getParent()->getDataLayout(), | |||
3104 | AlignedPtr, Alignment); | |||
3105 | } | |||
3106 | Builder.restoreIP(IP); | |||
3107 | } | |||
3108 | ||||
3109 | if (IfCond) { | |||
3110 | ValueToValueMapTy VMap; | |||
3111 | createIfVersion(CanonicalLoop, IfCond, VMap, "simd"); | |||
3112 | // Add metadata to the cloned loop which disables vectorization | |||
3113 | Value *MappedLatch = VMap.lookup(CanonicalLoop->getLatch()); | |||
3114 | assert(MappedLatch && | |||
3115 | "Cannot find value which corresponds to original loop latch"); | |||
3116 | assert(isa<BasicBlock>(MappedLatch) && | |||
3117 | "Cannot cast mapped latch block value to BasicBlock"); | |||
3118 | BasicBlock *NewLatchBlock = dyn_cast<BasicBlock>(MappedLatch); | |||
3119 | ConstantAsMetadata *BoolConst = | |||
3120 | ConstantAsMetadata::get(ConstantInt::getFalse(Type::getInt1Ty(Ctx))); | |||
3121 | addBasicBlockMetadata( | |||
3122 | NewLatchBlock, | |||
3123 | {MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"), | |||
3124 | BoolConst})}); | |||
3125 | } | |||
3126 | ||||
3127 | SmallSet<BasicBlock *, 8> Reachable; | |||
3128 | ||||
3129 | // Get the basic blocks from the loop in which memref instructions | |||
3130 | // can be found. | |||
3131 | // TODO: Generalize getting all blocks inside a CanonicalLoopInfo, | |||
3132 | // preferably without running any passes. | |||
3133 | for (BasicBlock *Block : L->getBlocks()) { | |||
3134 | if (Block == CanonicalLoop->getCond() || | |||
3135 | Block == CanonicalLoop->getHeader()) | |||
3136 | continue; | |||
3137 | Reachable.insert(Block); | |||
3138 | } | |||
3139 | ||||
3140 | SmallVector<Metadata *> LoopMDList; | |||
3141 | ||||
3142 | // In the presence of a finite 'safelen', it may be unsafe to mark all | |||
3143 | // memory instructions as parallel, because loop-carried | |||
3144 | // dependences at a distance of 'safelen' iterations are possible. | |||
3145 | // If the order(concurrent) clause is specified, the memory instructions | |||
3146 | // are marked parallel even if 'safelen' is finite. | |||
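| // For example, 'simd safelen(8)' alone does not emit | |||
| // llvm.loop.parallel_accesses metadata, whereas 'simd safelen(8) | |||
| // order(concurrent)' does. | |||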
3147 | if ((Safelen == nullptr) || (Order == OrderKind::OMP_ORDER_concurrent)) { | |||
3148 | // Add access group metadata to memory-access instructions. | |||
3149 | MDNode *AccessGroup = MDNode::getDistinct(Ctx, {}); | |||
3150 | for (BasicBlock *BB : Reachable) | |||
3151 | addSimdMetadata(BB, AccessGroup, LI); | |||
3152 | // TODO: If the loop has existing parallel access metadata, have | |||
3153 | // to combine two lists. | |||
3154 | LoopMDList.push_back(MDNode::get( | |||
3155 | Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), AccessGroup})); | |||
3156 | } | |||
3157 | ||||
3158 | // Use the above access group metadata to create loop level | |||
3159 | // metadata, which should be distinct for each loop. | |||
3160 | ConstantAsMetadata *BoolConst = | |||
3161 | ConstantAsMetadata::get(ConstantInt::getTrue(Type::getInt1Ty(Ctx))); | |||
3162 | LoopMDList.push_back(MDNode::get( | |||
3163 | Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"), BoolConst})); | |||
3164 | ||||
3165 | if (Simdlen || Safelen) { | |||
3166 | // If both simdlen and safelen clauses are specified, the value of the | |||
3167 | // simdlen parameter must be less than or equal to the value of the safelen | |||
3168 | // parameter. Therefore, use safelen only in the absence of simdlen. | |||
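| // For example, 'simdlen(4) safelen(8)' emits llvm.loop.vectorize.width = 4, | |||
| // while 'safelen(8)' alone emits llvm.loop.vectorize.width = 8. | |||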
3169 | ConstantInt *VectorizeWidth = Simdlen == nullptr ? Safelen : Simdlen; | |||
3170 | LoopMDList.push_back( | |||
3171 | MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.width"), | |||
3172 | ConstantAsMetadata::get(VectorizeWidth)})); | |||
3173 | } | |||
3174 | ||||
3175 | addLoopMetadata(CanonicalLoop, LoopMDList); | |||
3176 | } | |||
3177 | ||||
3178 | /// Create the TargetMachine object to query the backend for optimization | |||
3179 | /// preferences. | |||
3180 | /// | |||
3181 | /// Ideally, this would be passed from the front-end to the OpenMPBuilder, but | |||
3182 | /// e.g. Clang does not pass it to its CodeGen layer and creates it only when | |||
3183 | /// needed for the LLVM pass pipeline. We use some default options to avoid | |||
3184 | /// having to pass too many settings from the frontend that probably do not | |||
3185 | /// matter. | |||
3186 | /// | |||
3187 | /// Currently, TargetMachine is only used sometimes by the unrollLoopPartial | |||
3188 | /// method. If we are going to use TargetMachine for more purposes, especially | |||
3189 | /// those that are sensitive to TargetOptions, RelocModel and CodeModel, it | |||
3190 | /// might become worth requiring front-ends to pass on their TargetMachine, | |||
3191 | /// or at least cache it between methods. Note that while front-ends such as Clang | |||
3192 | /// have just a single main TargetMachine per translation unit, "target-cpu" and | |||
3193 | /// "target-features" that determine the TargetMachine are per-function and can | |||
3194 | /// be overridden using __attribute__((target("OPTIONS"))). | |||
3195 | static std::unique_ptr<TargetMachine> | |||
3196 | createTargetMachine(Function *F, CodeGenOpt::Level OptLevel) { | |||
3197 | Module *M = F->getParent(); | |||
3198 | ||||
3199 | StringRef CPU = F->getFnAttribute("target-cpu").getValueAsString(); | |||
3200 | StringRef Features = F->getFnAttribute("target-features").getValueAsString(); | |||
3201 | const std::string &Triple = M->getTargetTriple(); | |||
3202 | ||||
3203 | std::string Error; | |||
3204 | const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error); | |||
3205 | if (!TheTarget) | |||
3206 | return {}; | |||
3207 | ||||
3208 | llvm::TargetOptions Options; | |||
3209 | return std::unique_ptr<TargetMachine>(TheTarget->createTargetMachine( | |||
3210 | Triple, CPU, Features, Options, /*RelocModel=*/std::nullopt, | |||
3211 | /*CodeModel=*/std::nullopt, OptLevel)); | |||
3212 | } | |||
3213 | ||||
3214 | /// Heuristically determine the best-performant unroll factor for \p CLI. This | |||
3215 | /// depends on the target processor. We are re-using the same heuristics as the | |||
3216 | /// LoopUnrollPass. | |||
3217 | static int32_t computeHeuristicUnrollFactor(CanonicalLoopInfo *CLI) { | |||
3218 | Function *F = CLI->getFunction(); | |||
3219 | ||||
3220 | // Assume the user requests the most aggressive unrolling, even if the rest of | |||
3221 | // the code is optimized using a lower setting. | |||
3222 | CodeGenOpt::Level OptLevel = CodeGenOpt::Aggressive; | |||
3223 | std::unique_ptr<TargetMachine> TM = createTargetMachine(F, OptLevel); | |||
3224 | ||||
3225 | FunctionAnalysisManager FAM; | |||
3226 | FAM.registerPass([]() { return TargetLibraryAnalysis(); }); | |||
3227 | FAM.registerPass([]() { return AssumptionAnalysis(); }); | |||
3228 | FAM.registerPass([]() { return DominatorTreeAnalysis(); }); | |||
3229 | FAM.registerPass([]() { return LoopAnalysis(); }); | |||
3230 | FAM.registerPass([]() { return ScalarEvolutionAnalysis(); }); | |||
3231 | FAM.registerPass([]() { return PassInstrumentationAnalysis(); }); | |||
3232 | TargetIRAnalysis TIRA; | |||
3233 | if (TM) | |||
3234 | TIRA = TargetIRAnalysis( | |||
3235 | [&](const Function &F) { return TM->getTargetTransformInfo(F); }); | |||
3236 | FAM.registerPass([&]() { return TIRA; }); | |||
3237 | ||||
3238 | TargetIRAnalysis::Result &&TTI = TIRA.run(*F, FAM); | |||
3239 | ScalarEvolutionAnalysis SEA; | |||
3240 | ScalarEvolution &&SE = SEA.run(*F, FAM); | |||
3241 | DominatorTreeAnalysis DTA; | |||
3242 | DominatorTree &&DT = DTA.run(*F, FAM); | |||
3243 | LoopAnalysis LIA; | |||
3244 | LoopInfo &&LI = LIA.run(*F, FAM); | |||
3245 | AssumptionAnalysis ACT; | |||
3246 | AssumptionCache &&AC = ACT.run(*F, FAM); | |||
3247 | OptimizationRemarkEmitter ORE{F}; | |||
3248 | ||||
3249 | Loop *L = LI.getLoopFor(CLI->getHeader()); | |||
3250 | assert(L && "Expecting CanonicalLoopInfo to be recognized as a loop"); | |||
3251 | ||||
3252 | TargetTransformInfo::UnrollingPreferences UP = | |||
3253 | gatherUnrollingPreferences(L, SE, TTI, | |||
3254 | /*BlockFrequencyInfo=*/nullptr, | |||
3255 | /*ProfileSummaryInfo=*/nullptr, ORE, OptLevel, | |||
3256 | /*UserThreshold=*/std::nullopt, | |||
3257 | /*UserCount=*/std::nullopt, | |||
3258 | /*UserAllowPartial=*/true, | |||
3259 | /*UserAllowRuntime=*/true, | |||
3260 | /*UserUpperBound=*/std::nullopt, | |||
3261 | /*UserFullUnrollMaxCount=*/std::nullopt); | |||
3262 | ||||
3263 | UP.Force = true; | |||
3264 | ||||
3265 | // Account for additional optimizations taking place before the LoopUnrollPass | |||
3266 | // would unroll the loop. | |||
3267 | UP.Threshold *= UnrollThresholdFactor; | |||
3268 | UP.PartialThreshold *= UnrollThresholdFactor; | |||
3269 | ||||
3270 | // Use normal unroll factors even if the rest of the code is optimized for | |||
3271 | // size. | |||
3272 | UP.OptSizeThreshold = UP.Threshold; | |||
3273 | UP.PartialOptSizeThreshold = UP.PartialThreshold; | |||
3274 | ||||
3275 | LLVM_DEBUG(dbgs() << "Unroll heuristic thresholds:\n" | |||
3276 | << "  Threshold=" << UP.Threshold << "\n" | |||
3277 | << "  PartialThreshold=" << UP.PartialThreshold << "\n" | |||
3278 | << "  OptSizeThreshold=" << UP.OptSizeThreshold << "\n" | |||
3279 | << "  PartialOptSizeThreshold=" | |||
3280 | << UP.PartialOptSizeThreshold << "\n"); | |||
3281 | ||||
3282 | // Disable peeling. | |||
3283 | TargetTransformInfo::PeelingPreferences PP = | |||
3284 | gatherPeelingPreferences(L, SE, TTI, | |||
3285 | /*UserAllowPeeling=*/false, | |||
3286 | /*UserAllowProfileBasedPeeling=*/false, | |||
3287 | /*UnrollingSpecficValues=*/false); | |||
3288 | ||||
3289 | SmallPtrSet<const Value *, 32> EphValues; | |||
3290 | CodeMetrics::collectEphemeralValues(L, &AC, EphValues); | |||
3291 | ||||
3292 | // Assume that reads and writes to stack variables can be eliminated by | |||
3293 | // Mem2Reg, SROA or LICM. That is, don't count them towards the loop body's | |||
3294 | // size. | |||
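| // Concretely, a load or store whose (cast-stripped) pointer operand is an | |||
| // alloca in the entry block is treated as ephemeral and excluded from the | |||
| // loop-size estimate below. | |||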
3295 | for (BasicBlock *BB : L->blocks()) { | |||
3296 | for (Instruction &I : *BB) { | |||
3297 | Value *Ptr; | |||
3298 | if (auto *Load = dyn_cast<LoadInst>(&I)) { | |||
3299 | Ptr = Load->getPointerOperand(); | |||
3300 | } else if (auto *Store = dyn_cast<StoreInst>(&I)) { | |||
3301 | Ptr = Store->getPointerOperand(); | |||
3302 | } else | |||
3303 | continue; | |||
3304 | ||||
3305 | Ptr = Ptr->stripPointerCasts(); | |||
3306 | ||||
3307 | if (auto *Alloca = dyn_cast<AllocaInst>(Ptr)) { | |||
3308 | if (Alloca->getParent() == &F->getEntryBlock()) | |||
3309 | EphValues.insert(&I); | |||
3310 | } | |||
3311 | } | |||
3312 | } | |||
3313 | ||||
3314 | unsigned NumInlineCandidates; | |||
3315 | bool NotDuplicatable; | |||
3316 | bool Convergent; | |||
3317 | InstructionCost LoopSizeIC = | |||
3318 | ApproximateLoopSize(L, NumInlineCandidates, NotDuplicatable, Convergent, | |||
3319 | TTI, EphValues, UP.BEInsns); | |||
3320 | LLVM_DEBUG(dbgs() << "Estimated loop size is " << LoopSizeIC << "\n"); | |||
3321 | ||||
3322 | // Loop is not unrollable if the loop contains certain instructions. | |||
3323 | if (NotDuplicatable || Convergent || !LoopSizeIC.isValid()) { | |||
3324 | LLVM_DEBUG(dbgs() << "Loop not considered unrollable\n"); | |||
3325 | return 1; | |||
3326 | } | |||
3327 | unsigned LoopSize = *LoopSizeIC.getValue(); | |||
3328 | ||||
3329 | // TODO: Determine trip count of \p CLI if constant, computeUnrollCount might | |||
3330 | // be able to use it. | |||
3331 | int TripCount = 0; | |||
3332 | int MaxTripCount = 0; | |||
3333 | bool MaxOrZero = false; | |||
3334 | unsigned TripMultiple = 0; | |||
3335 | ||||
3336 | bool UseUpperBound = false; | |||
3337 | computeUnrollCount(L, TTI, DT, &LI, &AC, SE, EphValues, &ORE, TripCount, | |||
3338 | MaxTripCount, MaxOrZero, TripMultiple, LoopSize, UP, PP, | |||
3339 | UseUpperBound); | |||
3340 | unsigned Factor = UP.Count; | |||
3341 | LLVM_DEBUG(dbgs() << "Suggesting unroll factor of " << Factor << "\n"); | |||
3342 | ||||
3343 | // This function returns 1 to signal that the loop should not be unrolled. | |||
3344 | if (Factor == 0) | |||
3345 | return 1; | |||
3346 | return Factor; | |||
3347 | } | |||
3348 | ||||
3349 | void OpenMPIRBuilder::unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, | |||
3350 | int32_t Factor, | |||
3351 | CanonicalLoopInfo **UnrolledCLI) { | |||
3352 | assert(Factor >= 0 && "Unroll factor must not be negative"); | |||
3353 | ||||
3354 | Function *F = Loop->getFunction(); | |||
3355 | LLVMContext &Ctx = F->getContext(); | |||
3356 | ||||
3357 | // If the unrolled loop is not used for another loop-associated directive, it | |||
3358 | // is sufficient to add metadata for the LoopUnrollPass. | |||
3359 | if (!UnrolledCLI) { | |||
3360 | SmallVector<Metadata *, 2> LoopMetadata; | |||
3361 | LoopMetadata.push_back( | |||
3362 | MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable"))); | |||
3363 | ||||
3364 | if (Factor >= 1) { | |||
3365 | ConstantAsMetadata *FactorConst = ConstantAsMetadata::get( | |||
3366 | ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor))); | |||
3367 | LoopMetadata.push_back(MDNode::get( | |||
3368 | Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})); | |||
3369 | } | |||
3370 | ||||
3371 | addLoopMetadata(Loop, LoopMetadata); | |||
3372 | return; | |||
3373 | } | |||
3374 | ||||
3375 | // Heuristically determine the unroll factor. | |||
3376 | if (Factor == 0) | |||
3377 | Factor = computeHeuristicUnrollFactor(Loop); | |||
3378 | ||||
3379 | // No change required with unroll factor 1. | |||
3380 | if (Factor == 1) { | |||
3381 | *UnrolledCLI = Loop; | |||
3382 | return; | |||
3383 | } | |||
3384 | ||||
3385 | assert(Factor >= 2 && | |||
3386 | "unrolling only makes sense with a factor of 2 or larger"); | |||
3387 | ||||
3388 | Type *IndVarTy = Loop->getIndVarType(); | |||
3389 | ||||
3390 | // Apply partial unrolling by tiling the loop by the unroll-factor, then fully | |||
3391 | // unroll the inner loop. | |||
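| // For example, with Factor = 4 a loop of N iterations becomes a floor loop | |||
| // of ceil(N / 4) iterations around a tile loop of up to 4 iterations; the | |||
| // tile loop is then unrolled by 4 via the metadata added below. | |||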
3392 | Value *FactorVal = | |||
3393 | ConstantInt::get(IndVarTy, APInt(IndVarTy->getIntegerBitWidth(), Factor, | |||
3394 | /*isSigned=*/false)); | |||
3395 | std::vector<CanonicalLoopInfo *> LoopNest = | |||
3396 | tileLoops(DL, {Loop}, {FactorVal}); | |||
3397 | assert(LoopNest.size() == 2 && "Expect 2 loops after tiling"); | |||
3398 | *UnrolledCLI = LoopNest[0]; | |||
3399 | CanonicalLoopInfo *InnerLoop = LoopNest[1]; | |||
3400 | ||||
3401 | // LoopUnrollPass can only fully unroll loops with constant trip count. | |||
3402 | // Unroll by the unroll factor with a fallback epilog for the remainder | |||
3403 | // iterations if necessary. | |||
3404 | ConstantAsMetadata *FactorConst = ConstantAsMetadata::get( | |||
3405 | ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor))); | |||
3406 | addLoopMetadata( | |||
3407 | InnerLoop, | |||
3408 | {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), | |||
3409 | MDNode::get( | |||
3410 | Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})}); | |||
3411 | ||||
3412 | #ifndef NDEBUG | |||
3413 | (*UnrolledCLI)->assertOK(); | |||
3414 | #endif | |||
3415 | } | |||
3416 | ||||
3417 | OpenMPIRBuilder::InsertPointTy | |||
3418 | OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc, | |||
3419 | llvm::Value *BufSize, llvm::Value *CpyBuf, | |||
3420 | llvm::Value *CpyFn, llvm::Value *DidIt) { | |||
3421 | if (!updateToLocation(Loc)) | |||
3422 | return Loc.IP; | |||
3423 | ||||
3424 | uint32_t SrcLocStrSize; | |||
3425 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3426 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3427 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
3428 | ||||
3429 | llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt); | |||
3430 | ||||
3431 | Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD}; | |||
3432 | ||||
3433 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate); | |||
3434 | Builder.CreateCall(Fn, Args); | |||
3435 | ||||
3436 | return Builder.saveIP(); | |||
3437 | } | |||
3438 | ||||
3439 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSingle( | |||
3440 | const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, | |||
3441 | FinalizeCallbackTy FiniCB, bool IsNowait, llvm::Value *DidIt) { | |||
3442 | ||||
3443 | if (!updateToLocation(Loc)) | |||
3444 | return Loc.IP; | |||
3445 | ||||
3446 | // If needed (i.e. not null), initialize `DidIt` with 0 | |||
3447 | if (DidIt) { | |||
3448 | Builder.CreateStore(Builder.getInt32(0), DidIt); | |||
3449 | } | |||
3450 | ||||
3451 | Directive OMPD = Directive::OMPD_single; | |||
3452 | uint32_t SrcLocStrSize; | |||
3453 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3454 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3455 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
3456 | Value *Args[] = {Ident, ThreadId}; | |||
3457 | ||||
3458 | Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single); | |||
3459 | Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); | |||
3460 | ||||
3461 | Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single); | |||
3462 | Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); | |||
3463 | ||||
3464 | // generates the following: | |||
3465 | // if (__kmpc_single()) { | |||
3466 | // .... single region ... | |||
3467 | // __kmpc_end_single | |||
3468 | // } | |||
3469 | // __kmpc_barrier | |||
3470 | ||||
3471 | EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | |||
3472 | /*Conditional*/ true, | |||
3473 | /*hasFinalize*/ true); | |||
3474 | if (!IsNowait) | |||
3475 | createBarrier(LocationDescription(Builder.saveIP(), Loc.DL), | |||
3476 | omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false, | |||
3477 | /* CheckCancelFlag */ false); | |||
3478 | return Builder.saveIP(); | |||
3479 | } | |||
3480 | ||||
3481 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical( | |||
3482 | const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, | |||
3483 | FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) { | |||
3484 | ||||
3485 | if (!updateToLocation(Loc)) | |||
3486 | return Loc.IP; | |||
3487 | ||||
3488 | Directive OMPD = Directive::OMPD_critical; | |||
3489 | uint32_t SrcLocStrSize; | |||
3490 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3491 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3492 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
3493 | Value *LockVar = getOMPCriticalRegionLock(CriticalName); | |||
3494 | Value *Args[] = {Ident, ThreadId, LockVar}; | |||
3495 | ||||
3496 | SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args)); | |||
3497 | Function *RTFn = nullptr; | |||
3498 | if (HintInst) { | |||
3499 | // Add Hint to entry Args and create call | |||
3500 | EnterArgs.push_back(HintInst); | |||
3501 | RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint); | |||
3502 | } else { | |||
3503 | RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical); | |||
3504 | } | |||
3505 | Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs); | |||
3506 | ||||
3507 | Function *ExitRTLFn = | |||
3508 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical); | |||
3509 | Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); | |||
3510 | ||||
3511 | return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | |||
3512 | /*Conditional*/ false, /*hasFinalize*/ true); | |||
3513 | } | |||
3514 | ||||
3515 | OpenMPIRBuilder::InsertPointTy | |||
3516 | OpenMPIRBuilder::createOrderedDepend(const LocationDescription &Loc, | |||
3517 | InsertPointTy AllocaIP, unsigned NumLoops, | |||
3518 | ArrayRef<llvm::Value *> StoreValues, | |||
3519 | const Twine &Name, bool IsDependSource) { | |||
3520 | assert( | |||
3521 | llvm::all_of(StoreValues, | |||
3522 | [](Value *SV) { return SV->getType()->isIntegerTy(64); }) && | |||
3523 | "OpenMP runtime requires depend vec with i64 type"); | |||
3524 | ||||
3525 | if (!updateToLocation(Loc)) | |||
3526 | return Loc.IP; | |||
3527 | ||||
3528 | // Allocate space for the depend vector and generate the alloca instruction. | |||
3529 | auto *ArrI64Ty = ArrayType::get(Int64, NumLoops); | |||
3530 | Builder.restoreIP(AllocaIP); | |||
3531 | AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI64Ty, nullptr, Name); | |||
3532 | ArgsBase->setAlignment(Align(8)); | |||
3533 | Builder.restoreIP(Loc.IP); | |||
3534 | ||||
3535 | // Store the index value with offset in depend vector. | |||
3536 | for (unsigned I = 0; I < NumLoops; ++I) { | |||
3537 | Value *DependAddrGEPIter = Builder.CreateInBoundsGEP( | |||
3538 | ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(I)}); | |||
3539 | StoreInst *STInst = Builder.CreateStore(StoreValues[I], DependAddrGEPIter); | |||
3540 | STInst->setAlignment(Align(8)); | |||
3541 | } | |||
3542 | ||||
3543 | Value *DependBaseAddrGEP = Builder.CreateInBoundsGEP( | |||
3544 | ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(0)}); | |||
3545 | ||||
3546 | uint32_t SrcLocStrSize; | |||
3547 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3548 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3549 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
3550 | Value *Args[] = {Ident, ThreadId, DependBaseAddrGEP}; | |||
3551 | ||||
3552 | Function *RTLFn = nullptr; | |||
3553 | if (IsDependSource) | |||
3554 | RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_post); | |||
3555 | else | |||
3556 | RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_wait); | |||
3557 | Builder.CreateCall(RTLFn, Args); | |||
3558 | ||||
3559 | return Builder.saveIP(); | |||
3560 | } | |||
3561 | ||||
3562 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createOrderedThreadsSimd( | |||
3563 | const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, | |||
3564 | FinalizeCallbackTy FiniCB, bool IsThreads) { | |||
3565 | if (!updateToLocation(Loc)) | |||
3566 | return Loc.IP; | |||
3567 | ||||
3568 | Directive OMPD = Directive::OMPD_ordered; | |||
3569 | Instruction *EntryCall = nullptr; | |||
3570 | Instruction *ExitCall = nullptr; | |||
3571 | ||||
3572 | if (IsThreads) { | |||
3573 | uint32_t SrcLocStrSize; | |||
3574 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3575 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3576 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
3577 | Value *Args[] = {Ident, ThreadId}; | |||
3578 | ||||
3579 | Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_ordered); | |||
3580 | EntryCall = Builder.CreateCall(EntryRTLFn, Args); | |||
3581 | ||||
3582 | Function *ExitRTLFn = | |||
3583 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_ordered); | |||
3584 | ExitCall = Builder.CreateCall(ExitRTLFn, Args); | |||
3585 | } | |||
3586 | ||||
3587 | return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | |||
3588 | /*Conditional*/ false, /*hasFinalize*/ true); | |||
3589 | } | |||
3590 | ||||
3591 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion( | |||
3592 | Directive OMPD, Instruction *EntryCall, Instruction *ExitCall, | |||
3593 | BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional, | |||
3594 | bool HasFinalize, bool IsCancellable) { | |||
3595 | ||||
3596 | if (HasFinalize) | |||
3597 | FinalizationStack.push_back({FiniCB, OMPD, IsCancellable}); | |||
3598 | ||||
3599 | // Create inlined region's entry and body blocks, in preparation | |||
3600 | // for conditional creation | |||
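| // After the two splits below, the block layout is | |||
| //   EntryBB -> FiniBB ("omp_region.finalize") -> ExitBB ("omp_region.end"); | |||
| // the body is emitted before FiniBB (in EntryBB or, for a conditional | |||
| // region, in an "omp_region.body" block) and finalization code into FiniBB. | |||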
3601 | BasicBlock *EntryBB = Builder.GetInsertBlock(); | |||
3602 | Instruction *SplitPos = EntryBB->getTerminator(); | |||
3603 | if (!isa_and_nonnull<BranchInst>(SplitPos)) | |||
3604 | SplitPos = new UnreachableInst(Builder.getContext(), EntryBB); | |||
3605 | BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end"); | |||
3606 | BasicBlock *FiniBB = | |||
3607 | EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize"); | |||
3608 | ||||
3609 | Builder.SetInsertPoint(EntryBB->getTerminator()); | |||
3610 | emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional); | |||
3611 | ||||
3612 | // generate body | |||
3613 | BodyGenCB(/* AllocaIP */ InsertPointTy(), | |||
3614 | /* CodeGenIP */ Builder.saveIP()); | |||
3615 | ||||
3616 | // emit exit call and do any needed finalization. | |||
3617 | auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt()); | |||
3618 | assert(FiniBB->getTerminator()->getNumSuccessors() == 1 && | |||
3619 | FiniBB->getTerminator()->getSuccessor(0) == ExitBB && | |||
3620 | "Unexpected control flow graph state!!"); | |||
3621 | emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize); | |||
3622 | assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB && | |||
3623 | "Unexpected Control Flow State!"); | |||
3624 | MergeBlockIntoPredecessor(FiniBB); | |||
3625 | ||||
3626 | // If we are skipping the region of a non-conditional, remove the exit | |||
3627 | // block, and clear the builder's insertion point. | |||
3628 | assert(SplitPos->getParent() == ExitBB && | |||
3629 | "Unexpected Insertion point location!"); | |||
3630 | auto merged = MergeBlockIntoPredecessor(ExitBB); | |||
3631 | BasicBlock *ExitPredBB = SplitPos->getParent(); | |||
3632 | auto InsertBB = merged ? ExitPredBB : ExitBB; | |||
3633 | if (!isa_and_nonnull<BranchInst>(SplitPos)) | |||
3634 | SplitPos->eraseFromParent(); | |||
3635 | Builder.SetInsertPoint(InsertBB); | |||
3636 | ||||
3637 | return Builder.saveIP(); | |||
3638 | } | |||
3639 | ||||
3640 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry( | |||
3641 | Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) { | |||
3642 | // If there is nothing to do, return the current insertion point. | |||
3643 | if (!Conditional || !EntryCall) | |||
3644 | return Builder.saveIP(); | |||
3645 | ||||
3646 | BasicBlock *EntryBB = Builder.GetInsertBlock(); | |||
3647 | Value *CallBool = Builder.CreateIsNotNull(EntryCall); | |||
3648 | auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body"); | |||
3649 | auto *UI = new UnreachableInst(Builder.getContext(), ThenBB); | |||
3650 | ||||
3651 | // Emit thenBB and set the Builder's insertion point there for | |||
3652 | // body generation next. Place the block after the current block. | |||
3653 | Function *CurFn = EntryBB->getParent(); | |||
3654 | CurFn->insert(std::next(EntryBB->getIterator()), ThenBB); | |||
3655 | ||||
3656 | // Move Entry branch to end of ThenBB, and replace with conditional | |||
3657 | // branch (If-stmt) | |||
3658 | Instruction *EntryBBTI = EntryBB->getTerminator(); | |||
3659 | Builder.CreateCondBr(CallBool, ThenBB, ExitBB); | |||
3660 | EntryBBTI->removeFromParent(); | |||
3661 | Builder.SetInsertPoint(UI); | |||
3662 | Builder.Insert(EntryBBTI); | |||
3663 | UI->eraseFromParent(); | |||
3664 | Builder.SetInsertPoint(ThenBB->getTerminator()); | |||
3665 | ||||
3666 | // return an insertion point to ExitBB. | |||
3667 | return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt()); | |||
3668 | } | |||
3669 | ||||
3670 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit( | |||
3671 | omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall, | |||
3672 | bool HasFinalize) { | |||
3673 | ||||
3674 | Builder.restoreIP(FinIP); | |||
3675 | ||||
3676 | // If there is finalization to do, emit it before the exit call | |||
3677 | if (HasFinalize) { | |||
3678 | assert(!FinalizationStack.empty() && | |||
3679 | "Unexpected finalization stack state!"); | |||
3680 | ||||
3681 | FinalizationInfo Fi = FinalizationStack.pop_back_val(); | |||
3682 | assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!"); | |||
3683 | ||||
3684 | Fi.FiniCB(FinIP); | |||
3685 | ||||
3686 | BasicBlock *FiniBB = FinIP.getBlock(); | |||
3687 | Instruction *FiniBBTI = FiniBB->getTerminator(); | |||
3688 | ||||
3689 | // set Builder IP for call creation | |||
3690 | Builder.SetInsertPoint(FiniBBTI); | |||
3691 | } | |||
3692 | ||||
3693 | if (!ExitCall) | |||
3694 | return Builder.saveIP(); | |||
3695 | ||||
3696 | // Place the exit call as the last instruction before the finalization block terminator. | |||
3697 | ExitCall->removeFromParent(); | |||
3698 | Builder.Insert(ExitCall); | |||
3699 | ||||
3700 | return IRBuilder<>::InsertPoint(ExitCall->getParent(), | |||
3701 | ExitCall->getIterator()); | |||
3702 | } | |||
3703 | ||||
3704 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks( | |||
3705 | InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, | |||
3706 | llvm::IntegerType *IntPtrTy, bool BranchtoEnd) { | |||
3707 | if (!IP.isSet()) | |||
3708 | return IP; | |||
3709 | ||||
3710 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
3711 | ||||
3712 | // creates the following CFG structure | |||
3713 | // OMP_Entry : (MasterAddr != PrivateAddr)? | |||
3714 | // F T | |||
3715 | // | \ | |||
3716 | // | copyin.not.master | |||
3717 | // | / | |||
3718 | // v / | |||
3719 | // copyin.not.master.end | |||
3720 | // | | |||
3721 | // v | |||
3722 | // OMP.Entry.Next | |||
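| // The returned insert point is inside copyin.not.master, where the caller | |||
| // emits the actual copy code; if BranchtoEnd is set, the branch to | |||
| // copyin.not.master.end is already created and the insert point precedes it. | |||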
3723 | ||||
3724 | BasicBlock *OMP_Entry = IP.getBlock(); | |||
3725 | Function *CurFn = OMP_Entry->getParent(); | |||
3726 | BasicBlock *CopyBegin = | |||
3727 | BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn); | |||
3728 | BasicBlock *CopyEnd = nullptr; | |||
3729 | ||||
3730 | // If the entry block is terminated, split it to preserve the branch to the | |||
3731 | // following basic block (i.e. OMP.Entry.Next); otherwise, leave everything as is. | |||
3732 | if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) { | |||
3733 | CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(), | |||
3734 | "copyin.not.master.end"); | |||
3735 | OMP_Entry->getTerminator()->eraseFromParent(); | |||
3736 | } else { | |||
3737 | CopyEnd = | |||
3738 | BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn); | |||
3739 | } | |||
3740 | ||||
3741 | Builder.SetInsertPoint(OMP_Entry); | |||
3742 | Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy); | |||
3743 | Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy); | |||
3744 | Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr); | |||
3745 | Builder.CreateCondBr(cmp, CopyBegin, CopyEnd); | |||
3746 | ||||
3747 | Builder.SetInsertPoint(CopyBegin); | |||
3748 | if (BranchtoEnd) | |||
3749 | Builder.SetInsertPoint(Builder.CreateBr(CopyEnd)); | |||
3750 | ||||
3751 | return Builder.saveIP(); | |||
3752 | } | |||
3753 | ||||
3754 | CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc, | |||
3755 | Value *Size, Value *Allocator, | |||
3756 | std::string Name) { | |||
3757 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
3758 | Builder.restoreIP(Loc.IP); | |||
3759 | ||||
3760 | uint32_t SrcLocStrSize; | |||
3761 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3762 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3763 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
3764 | Value *Args[] = {ThreadId, Size, Allocator}; | |||
3765 | ||||
3766 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc); | |||
3767 | ||||
3768 | return Builder.CreateCall(Fn, Args, Name); | |||
3769 | } | |||
3770 | ||||
3771 | CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc, | |||
3772 | Value *Addr, Value *Allocator, | |||
3773 | std::string Name) { | |||
3774 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
3775 | Builder.restoreIP(Loc.IP); | |||
3776 | ||||
3777 | uint32_t SrcLocStrSize; | |||
3778 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3779 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3780 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
3781 | Value *Args[] = {ThreadId, Addr, Allocator}; | |||
3782 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free); | |||
3783 | return Builder.CreateCall(Fn, Args, Name); | |||
3784 | } | |||
3785 | ||||
3786 | CallInst *OpenMPIRBuilder::createOMPInteropInit( | |||
3787 | const LocationDescription &Loc, Value *InteropVar, | |||
3788 | omp::OMPInteropType InteropType, Value *Device, Value *NumDependences, | |||
3789 | Value *DependenceAddress, bool HaveNowaitClause) { | |||
3790 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
3791 | Builder.restoreIP(Loc.IP); | |||
3792 | ||||
3793 | uint32_t SrcLocStrSize; | |||
3794 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3795 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3796 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
3797 | if (Device == nullptr) | |||
3798 | Device = ConstantInt::get(Int32, -1); | |||
3799 | Constant *InteropTypeVal = ConstantInt::get(Int32, (int)InteropType); | |||
3800 | if (NumDependences == nullptr) { | |||
3801 | NumDependences = ConstantInt::get(Int32, 0); | |||
3802 | PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext()); | |||
3803 | DependenceAddress = ConstantPointerNull::get(PointerTypeVar); | |||
3804 | } | |||
3805 | Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause); | |||
3806 | Value *Args[] = { | |||
3807 | Ident, ThreadId, InteropVar, InteropTypeVal, | |||
3808 | Device, NumDependences, DependenceAddress, HaveNowaitClauseVal}; | |||
3809 | ||||
3810 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_init); | |||
3811 | ||||
3812 | return Builder.CreateCall(Fn, Args); | |||
3813 | } | |||
3814 | ||||
3815 | CallInst *OpenMPIRBuilder::createOMPInteropDestroy( | |||
3816 | const LocationDescription &Loc, Value *InteropVar, Value *Device, | |||
3817 | Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause) { | |||
3818 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
3819 | Builder.restoreIP(Loc.IP); | |||
3820 | ||||
3821 | uint32_t SrcLocStrSize; | |||
3822 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3823 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3824 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
3825 | if (Device == nullptr) | |||
3826 | Device = ConstantInt::get(Int32, -1); | |||
3827 | if (NumDependences == nullptr) { | |||
3828 | NumDependences = ConstantInt::get(Int32, 0); | |||
3829 | PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext()); | |||
3830 | DependenceAddress = ConstantPointerNull::get(PointerTypeVar); | |||
3831 | } | |||
3832 | Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause); | |||
3833 | Value *Args[] = { | |||
3834 | Ident, ThreadId, InteropVar, Device, | |||
3835 | NumDependences, DependenceAddress, HaveNowaitClauseVal}; | |||
3836 | ||||
3837 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_destroy); | |||
3838 | ||||
3839 | return Builder.CreateCall(Fn, Args); | |||
3840 | } | |||
3841 | ||||
3842 | CallInst *OpenMPIRBuilder::createOMPInteropUse(const LocationDescription &Loc, | |||
3843 | Value *InteropVar, Value *Device, | |||
3844 | Value *NumDependences, | |||
3845 | Value *DependenceAddress, | |||
3846 | bool HaveNowaitClause) { | |||
3847 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
3848 | Builder.restoreIP(Loc.IP); | |||
3849 | uint32_t SrcLocStrSize; | |||
3850 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3851 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3852 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
3853 | if (Device == nullptr) | |||
3854 | Device = ConstantInt::get(Int32, -1); | |||
3855 | if (NumDependences == nullptr) { | |||
3856 | NumDependences = ConstantInt::get(Int32, 0); | |||
3857 | PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext()); | |||
3858 | DependenceAddress = ConstantPointerNull::get(PointerTypeVar); | |||
3859 | } | |||
3860 | Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause); | |||
3861 | Value *Args[] = { | |||
3862 | Ident, ThreadId, InteropVar, Device, | |||
3863 | NumDependences, DependenceAddress, HaveNowaitClauseVal}; | |||
3864 | ||||
3865 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_use); | |||
3866 | ||||
3867 | return Builder.CreateCall(Fn, Args); | |||
3868 | } | |||
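// Editorial note: the three interop helpers above share one defaulting scheme:
// a null Device becomes -1 and a null NumDependences becomes 0 with a null
// dependence address, before forwarding to __tgt_interop_init,
// __tgt_interop_destroy, or __tgt_interop_use. An illustrative (assumed) shape
// for the last of these:
//   call void @__tgt_interop_use(ptr %ident, i32 %tid, ptr %interop_var,
//                                i32 -1, i32 0, ptr null, i32 %have_nowait)
// The IR types are inferred from the argument lists above, not from the
// runtime's declarations.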
3869 | ||||
3870 | CallInst *OpenMPIRBuilder::createCachedThreadPrivate( | |||
3871 | const LocationDescription &Loc, llvm::Value *Pointer, | |||
3872 | llvm::ConstantInt *Size, const llvm::Twine &Name) { | |||
3873 | IRBuilder<>::InsertPointGuard IPG(Builder); | |||
3874 | Builder.restoreIP(Loc.IP); | |||
3875 | ||||
3876 | uint32_t SrcLocStrSize; | |||
3877 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3878 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3879 | Value *ThreadId = getOrCreateThreadID(Ident); | |||
3880 | Constant *ThreadPrivateCache = | |||
3881 | getOrCreateInternalVariable(Int8PtrPtr, Name.str()); | |||
3882 | llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache}; | |||
3883 | ||||
3884 | Function *Fn = | |||
3885 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached); | |||
3886 | ||||
3887 | return Builder.CreateCall(Fn, Args); | |||
3888 | } | |||
3889 | ||||
3890 | OpenMPIRBuilder::InsertPointTy | |||
3891 | OpenMPIRBuilder::createTargetInit(const LocationDescription &Loc, bool IsSPMD) { | |||
3892 | if (!updateToLocation(Loc)) | |||
3893 | return Loc.IP; | |||
3894 | ||||
3895 | uint32_t SrcLocStrSize; | |||
3896 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3897 | Constant *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3898 | ConstantInt *IsSPMDVal = ConstantInt::getSigned( | |||
3899 | IntegerType::getInt8Ty(Int8->getContext()), | |||
3900 | IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC); | |||
3901 | ConstantInt *UseGenericStateMachine = | |||
3902 | ConstantInt::getBool(Int32->getContext(), !IsSPMD); | |||
3903 | ||||
3904 | Function *Fn = getOrCreateRuntimeFunctionPtr( | |||
3905 | omp::RuntimeFunction::OMPRTL___kmpc_target_init); | |||
3906 | ||||
3907 | CallInst *ThreadKind = Builder.CreateCall( | |||
3908 | Fn, {Ident, IsSPMDVal, UseGenericStateMachine}); | |||
3909 | ||||
3910 | Value *ExecUserCode = Builder.CreateICmpEQ( | |||
3911 | ThreadKind, ConstantInt::get(ThreadKind->getType(), -1), | |||
3912 | "exec_user_code"); | |||
3913 | ||||
3914 | // ThreadKind = __kmpc_target_init(...) | |||
3915 | // if (ThreadKind == -1) | |||
3916 | // user_code | |||
3917 | // else | |||
3918 | // return; | |||
3919 | ||||
3920 | auto *UI = Builder.CreateUnreachable(); | |||
3921 | BasicBlock *CheckBB = UI->getParent(); | |||
3922 | BasicBlock *UserCodeEntryBB = CheckBB->splitBasicBlock(UI, "user_code.entry"); | |||
3923 | ||||
3924 | BasicBlock *WorkerExitBB = BasicBlock::Create( | |||
3925 | CheckBB->getContext(), "worker.exit", CheckBB->getParent()); | |||
3926 | Builder.SetInsertPoint(WorkerExitBB); | |||
3927 | Builder.CreateRetVoid(); | |||
3928 | ||||
3929 | auto *CheckBBTI = CheckBB->getTerminator(); | |||
3930 | Builder.SetInsertPoint(CheckBBTI); | |||
3931 | Builder.CreateCondBr(ExecUserCode, UI->getParent(), WorkerExitBB); | |||
3932 | ||||
3933 | CheckBBTI->eraseFromParent(); | |||
3934 | UI->eraseFromParent(); | |||
3935 | ||||
3936 | // Continue in the "user_code" block, see diagram above and in | |||
3937 | // openmp/libomptarget/deviceRTLs/common/include/target.h . | |||
3938 | return InsertPointTy(UserCodeEntryBB, UserCodeEntryBB->getFirstInsertionPt()); | |||
3939 | } | |||
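// Editorial note (assumed IR sketch): for a generic-mode kernel, the init
// sequence produced above has roughly this shape:
//   %tk = call i32 @__kmpc_target_init(ptr @ident, i8 %mode, i1 true)
//   %exec_user_code = icmp eq i32 %tk, -1
//   br i1 %exec_user_code, label %user_code.entry, label %worker.exit
// worker.exit:
//   ret void
// The integer widths and the mode constant are illustrative; only the
// control-flow shape is taken from the code above.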
3940 | ||||
3941 | void OpenMPIRBuilder::createTargetDeinit(const LocationDescription &Loc, | |||
3942 | bool IsSPMD) { | |||
3943 | if (!updateToLocation(Loc)) | |||
3944 | return; | |||
3945 | ||||
3946 | uint32_t SrcLocStrSize; | |||
3947 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
3948 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
3949 | ConstantInt *IsSPMDVal = ConstantInt::getSigned( | |||
3950 | IntegerType::getInt8Ty(Int8->getContext()), | |||
3951 | IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC); | |||
3952 | ||||
3953 | Function *Fn = getOrCreateRuntimeFunctionPtr( | |||
3954 | omp::RuntimeFunction::OMPRTL___kmpc_target_deinit); | |||
3955 | ||||
3956 | Builder.CreateCall(Fn, {Ident, IsSPMDVal}); | |||
3957 | } | |||
3958 | ||||
3959 | void OpenMPIRBuilder::setOutlinedTargetRegionFunctionAttributes( | |||
3960 | Function *OutlinedFn, int32_t NumTeams, int32_t NumThreads) { | |||
3961 | if (Config.isEmbedded()) { | |||
3962 | OutlinedFn->setLinkage(GlobalValue::WeakODRLinkage); | |||
3963 | // TODO: Determine if DSO local can be set to true. | |||
3964 | OutlinedFn->setDSOLocal(false); | |||
3965 | OutlinedFn->setVisibility(GlobalValue::ProtectedVisibility); | |||
3966 | if (Triple(M.getTargetTriple()).isAMDGCN()) | |||
3967 | OutlinedFn->setCallingConv(CallingConv::AMDGPU_KERNEL); | |||
3968 | } | |||
3969 | ||||
3970 | if (NumTeams > 0) | |||
3971 | OutlinedFn->addFnAttr("omp_target_num_teams", std::to_string(NumTeams)); | |||
3972 | if (NumThreads > 0) | |||
3973 | OutlinedFn->addFnAttr("omp_target_thread_limit", | |||
3974 | std::to_string(NumThreads)); | |||
3975 | } | |||
3976 | ||||
3977 | Constant *OpenMPIRBuilder::createOutlinedFunctionID(Function *OutlinedFn, | |||
3978 | StringRef EntryFnIDName) { | |||
3979 | if (Config.isEmbedded()) { | |||
3980 | assert(OutlinedFn && "The outlined function must exist if embedded"); | |||
3981 | return ConstantExpr::getBitCast(OutlinedFn, Builder.getInt8PtrTy()); | |||
3982 | } | |||
3983 | ||||
3984 | return new GlobalVariable( | |||
3985 | M, Builder.getInt8Ty(), /*isConstant=*/true, GlobalValue::WeakAnyLinkage, | |||
3986 | Constant::getNullValue(Builder.getInt8Ty()), EntryFnIDName); | |||
3987 | } | |||
3988 | ||||
3989 | Constant *OpenMPIRBuilder::createTargetRegionEntryAddr(Function *OutlinedFn, | |||
3990 | StringRef EntryFnName) { | |||
3991 | if (OutlinedFn) | |||
3992 | return OutlinedFn; | |||
3993 | ||||
3994 | assert(!M.getGlobalVariable(EntryFnName, true) && | |||
3995 | "Named kernel already exists?"); | |||
3996 | return new GlobalVariable( | |||
3997 | M, Builder.getInt8Ty(), /*isConstant=*/true, GlobalValue::InternalLinkage, | |||
3998 | Constant::getNullValue(Builder.getInt8Ty()), EntryFnName); | |||
3999 | } | |||
4000 | ||||
4001 | void OpenMPIRBuilder::emitTargetRegionFunction( | |||
4002 | TargetRegionEntryInfo &EntryInfo, | |||
4003 | FunctionGenCallback &GenerateFunctionCallback, int32_t NumTeams, | |||
4004 | int32_t NumThreads, bool IsOffloadEntry, Function *&OutlinedFn, | |||
4005 | Constant *&OutlinedFnID) { | |||
4006 | ||||
4007 | SmallString<64> EntryFnName; | |||
4008 | OffloadInfoManager.getTargetRegionEntryFnName(EntryFnName, EntryInfo); | |||
4009 | ||||
4010 | OutlinedFn = Config.isEmbedded() || !Config.openMPOffloadMandatory() | |||
4011 | ? GenerateFunctionCallback(EntryFnName) | |||
4012 | : nullptr; | |||
4013 | ||||
4014 | // If this target outline function is not an offload entry, we don't need to | |||
4015 | // register it. This can happen, for example, with a false 'if' clause or when there are | |||
4016 | // no OpenMP targets. | |||
4017 | if (!IsOffloadEntry) | |||
4018 | return; | |||
4019 | ||||
4020 | std::string EntryFnIDName = | |||
4021 | Config.isEmbedded() | |||
4022 | ? std::string(EntryFnName) | |||
4023 | : createPlatformSpecificName({EntryFnName, "region_id"}); | |||
4024 | ||||
4025 | OutlinedFnID = registerTargetRegionFunction( | |||
4026 | EntryInfo, OutlinedFn, EntryFnName, EntryFnIDName, NumTeams, NumThreads); | |||
4027 | } | |||
4028 | ||||
4029 | Constant *OpenMPIRBuilder::registerTargetRegionFunction( | |||
4030 | TargetRegionEntryInfo &EntryInfo, Function *OutlinedFn, | |||
4031 | StringRef EntryFnName, StringRef EntryFnIDName, int32_t NumTeams, | |||
4032 | int32_t NumThreads) { | |||
4033 | if (OutlinedFn) | |||
4034 | setOutlinedTargetRegionFunctionAttributes(OutlinedFn, NumTeams, NumThreads); | |||
4035 | auto OutlinedFnID = createOutlinedFunctionID(OutlinedFn, EntryFnIDName); | |||
4036 | auto EntryAddr = createTargetRegionEntryAddr(OutlinedFn, EntryFnName); | |||
4037 | OffloadInfoManager.registerTargetRegionEntryInfo( | |||
4038 | EntryInfo, EntryAddr, OutlinedFnID, | |||
4039 | OffloadEntriesInfoManager::OMPTargetRegionEntryTargetRegion); | |||
4040 | return OutlinedFnID; | |||
4041 | } | |||
4042 | ||||
4043 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createTargetData( | |||
4044 | const LocationDescription &Loc, OpenMPIRBuilder::InsertPointTy CodeGenIP, | |||
4045 | SmallVectorImpl<uint64_t> &MapTypeFlags, | |||
4046 | SmallVectorImpl<Constant *> &MapNames, struct MapperAllocas &MapperAllocas, | |||
4047 | bool IsBegin, int64_t DeviceID, Value *IfCond, | |||
4048 | BodyGenCallbackTy ProcessMapOpCB, BodyGenCallbackTy BodyGenCB) { | |||
4049 | if (!updateToLocation(Loc)) | |||
4050 | return InsertPointTy(); | |||
4051 | ||||
4052 | Builder.restoreIP(CodeGenIP); | |||
4053 | ||||
4054 | // LLVM utilities like blocks with terminators. | |||
4055 | // The unreachable instruction (UI) acts as a resume point for code insertion after BodyGen runs. | |||
4056 | auto *UI = Builder.CreateUnreachable(); | |||
4057 | if (IfCond) { | |||
4058 | auto *ThenTI = | |||
4059 | SplitBlockAndInsertIfThen(IfCond, UI, /* Unreachable */ false); | |||
4060 | ThenTI->getParent()->setName("omp_if.then"); | |||
4061 | Builder.SetInsertPoint(ThenTI); | |||
4062 | } else { | |||
4063 | Builder.SetInsertPoint(UI); | |||
4064 | } | |||
4065 | ||||
4066 | ProcessMapOpCB(Builder.saveIP(), Builder.saveIP()); | |||
4067 | ||||
4068 | uint32_t SrcLocStrSize; | |||
4069 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | |||
4070 | Value *srcLocInfo = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | |||
4071 | ||||
4072 | GlobalVariable *MapTypesGV = | |||
4073 | createOffloadMaptypes(MapTypeFlags, ".offload_maptypes"); | |||
4074 | Value *MapTypesArg = Builder.CreateConstInBoundsGEP2_32( | |||
4075 | ArrayType::get(Builder.getInt64Ty(), MapTypeFlags.size()), MapTypesGV, | |||
4076 | /*Idx0=*/0, /*Idx1=*/0); | |||
4077 | ||||
4078 | GlobalVariable *MapNamesGV = | |||
4079 | createOffloadMapnames(MapNames, ".offload_mapnames"); | |||
4080 | Value *MapNamesArg = Builder.CreateConstInBoundsGEP2_32( | |||
4081 | ArrayType::get(Builder.getInt8PtrTy(), MapNames.size()), MapNamesGV, | |||
4082 | /*Idx0=*/0, /*Idx1=*/0); | |||
4083 | ||||
4084 | Function *beginMapperFunc = | |||
4085 | getOrCreateRuntimeFunctionPtr(omp::OMPRTL___tgt_target_data_begin_mapper); | |||
4086 | Function *endMapperFunc = | |||
4087 | getOrCreateRuntimeFunctionPtr(omp::OMPRTL___tgt_target_data_end_mapper); | |||
4088 | ||||
4089 | if (BodyGenCB) { | |||
4090 | // Create call to start the data region. | |||
4091 | emitMapperCall(Builder.saveIP(), beginMapperFunc, srcLocInfo, MapTypesArg, | |||
4092 | MapNamesArg, MapperAllocas, DeviceID, MapTypeFlags.size()); | |||
4093 | ||||
4094 | BodyGenCB(Builder.saveIP(), Builder.saveIP()); | |||
4095 | ||||
4096 | Builder.SetInsertPoint(UI->getParent()); | |||
4097 | // Create call to end the data region. | |||
4098 | emitMapperCall(Builder.saveIP(), endMapperFunc, srcLocInfo, MapTypesArg, | |||
4099 | MapNamesArg, MapperAllocas, DeviceID, MapTypeFlags.size()); | |||
4100 | } else { | |||
4101 | emitMapperCall(Builder.saveIP(), IsBegin ? beginMapperFunc : endMapperFunc, | |||
4102 | srcLocInfo, MapTypesArg, MapNamesArg, MapperAllocas, | |||
4103 | DeviceID, MapTypeFlags.size()); | |||
4104 | } | |||
4105 | ||||
4106 | // Update the insertion point and remove the terminator we introduced. | |||
4107 | Builder.SetInsertPoint(UI->getParent()); | |||
4108 | if (IfCond) | |||
4109 | UI->getParent()->setName("omp_if.end"); | |||
4110 | UI->eraseFromParent(); | |||
4111 | return Builder.saveIP(); | |||
4112 | } | |||
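// Editorial note: createTargetData above emits a paired
// __tgt_target_data_begin_mapper / __tgt_target_data_end_mapper bracket around
// the body when BodyGenCB is provided, or a single begin-or-end call otherwise,
// and wraps the whole sequence in an "omp_if.then" / "omp_if.end" diamond when
// an if-clause condition is given. See emitMapperCall below for the call shape.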
4113 | ||||
4114 | static Function * | |||
4115 | createOutlinedFunction(IRBuilderBase &Builder, StringRef FuncName, | |||
4116 | SmallVectorImpl<Value *> &Inputs, | |||
4117 | OpenMPIRBuilder::TargetBodyGenCallbackTy &CBFunc) { | |||
4118 | SmallVector<Type *> ParameterTypes; | |||
4119 | for (auto &Arg : Inputs) | |||
4120 | ParameterTypes.push_back(Arg->getType()); | |||
4121 | ||||
4122 | auto FuncType = FunctionType::get(Builder.getVoidTy(), ParameterTypes, | |||
4123 | /*isVarArg*/ false); | |||
4124 | auto Func = Function::Create(FuncType, GlobalValue::InternalLinkage, FuncName, | |||
4125 | Builder.GetInsertBlock()->getModule()); | |||
4126 | ||||
4127 | // Save insert point. | |||
4128 | auto OldInsertPoint = Builder.saveIP(); | |||
4129 | ||||
4130 | // Generate the region into the function. | |||
4131 | BasicBlock *EntryBB = BasicBlock::Create(Builder.getContext(), "entry", Func); | |||
4132 | Builder.SetInsertPoint(EntryBB); | |||
4133 | Builder.restoreIP(CBFunc(Builder.saveIP(), Builder.saveIP())); | |||
4134 | ||||
4135 | // Insert return instruction. | |||
4136 | Builder.CreateRetVoid(); | |||
4137 | ||||
4138 | // Rewrite uses of input values to parameters. | |||
4139 | for (auto InArg : zip(Inputs, Func->args())) { | |||
4140 | Value *Input = std::get<0>(InArg); | |||
4141 | Argument &Arg = std::get<1>(InArg); | |||
4142 | ||||
4143 | // Replace all uses of this input inside the outlined function with the argument. | |||
4144 | for (User *User : make_early_inc_range(Input->users())) | |||
4145 | if (auto Instr = dyn_cast<Instruction>(User)) | |||
4146 | if (Instr->getFunction() == Func) | |||
4147 | Instr->replaceUsesOfWith(Input, &Arg); | |||
4148 | } | |||
4149 | ||||
4150 | // Restore insert point. | |||
4151 | Builder.restoreIP(OldInsertPoint); | |||
4152 | ||||
4153 | return Func; | |||
4154 | } | |||
4155 | ||||
4156 | static void | |||
4157 | emitTargetOutlinedFunction(OpenMPIRBuilder &OMPBuilder, IRBuilderBase &Builder, | |||
4158 | TargetRegionEntryInfo &EntryInfo, | |||
4159 | Function *&OutlinedFn, int32_t NumTeams, | |||
4160 | int32_t NumThreads, SmallVectorImpl<Value *> &Inputs, | |||
4161 | OpenMPIRBuilder::TargetBodyGenCallbackTy &CBFunc) { | |||
4162 | ||||
4163 | OpenMPIRBuilder::FunctionGenCallback &&GenerateOutlinedFunction = | |||
4164 | [&Builder, &Inputs, &CBFunc](StringRef EntryFnName) { | |||
4165 | return createOutlinedFunction(Builder, EntryFnName, Inputs, CBFunc); | |||
4166 | }; | |||
4167 | ||||
4168 | Constant *OutlinedFnID; | |||
4169 | OMPBuilder.emitTargetRegionFunction(EntryInfo, GenerateOutlinedFunction, | |||
4170 | NumTeams, NumThreads, true, OutlinedFn, | |||
4171 | OutlinedFnID); | |||
4172 | } | |||
4173 | ||||
4174 | static void emitTargetCall(IRBuilderBase &Builder, Function *OutlinedFn, | |||
4175 | SmallVectorImpl<Value *> &Args) { | |||
4176 | // TODO: Add kernel launch call when device codegen is supported. | |||
4177 | Builder.CreateCall(OutlinedFn, Args); | |||
4178 | } | |||
4179 | ||||
4180 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createTarget( | |||
4181 | const LocationDescription &Loc, OpenMPIRBuilder::InsertPointTy CodeGenIP, | |||
4182 | TargetRegionEntryInfo &EntryInfo, int32_t NumTeams, int32_t NumThreads, | |||
4183 | SmallVectorImpl<Value *> &Args, TargetBodyGenCallbackTy CBFunc) { | |||
4184 | if (!updateToLocation(Loc)) | |||
4185 | return InsertPointTy(); | |||
4186 | ||||
4187 | Builder.restoreIP(CodeGenIP); | |||
4188 | ||||
4189 | Function *OutlinedFn; | |||
4190 | emitTargetOutlinedFunction(*this, Builder, EntryInfo, OutlinedFn, NumTeams, | |||
4191 | NumThreads, Args, CBFunc); | |||
4192 | emitTargetCall(Builder, OutlinedFn, Args); | |||
4193 | return Builder.saveIP(); | |||
4194 | } | |||
4195 | ||||
4196 | std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts, | |||
4197 | StringRef FirstSeparator, | |||
4198 | StringRef Separator) { | |||
4199 | SmallString<128> Buffer; | |||
4200 | llvm::raw_svector_ostream OS(Buffer); | |||
4201 | StringRef Sep = FirstSeparator; | |||
4202 | for (StringRef Part : Parts) { | |||
4203 | OS << Sep << Part; | |||
4204 | Sep = Separator; | |||
4205 | } | |||
4206 | return OS.str().str(); | |||
4207 | } | |||
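// Editorial example: getNameWithSeparators({"foo", "bar"}, "_", "$") yields
// "_foo$bar": the first separator precedes the first part and the regular
// separator joins the remaining parts.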
4208 | ||||
4209 | std::string | |||
4210 | OpenMPIRBuilder::createPlatformSpecificName(ArrayRef<StringRef> Parts) const { | |||
4211 | return OpenMPIRBuilder::getNameWithSeparators(Parts, Config.firstSeparator(), | |||
4212 | Config.separator()); | |||
4213 | } | |||
4214 | ||||
4215 | GlobalVariable * | |||
4216 | OpenMPIRBuilder::getOrCreateInternalVariable(Type *Ty, const StringRef &Name, | |||
4217 | unsigned AddressSpace) { | |||
4218 | auto &Elem = *InternalVars.try_emplace(Name, nullptr).first; | |||
4219 | if (Elem.second) { | |||
4220 | assert(cast<PointerType>(Elem.second->getType()) | |||
4221 | ->isOpaqueOrPointeeTypeMatches(Ty) && | |||
4222 | "OMP internal variable has different type than requested"); | |||
4223 | } else { | |||
4224 | // TODO: investigate the appropriate linkage type used for the global | |||
4225 | // variable for possibly changing that to internal or private, or maybe | |||
4226 | // create different versions of the function for different OMP internal | |||
4227 | // variables. | |||
4228 | Elem.second = new GlobalVariable( | |||
4229 | M, Ty, /*IsConstant=*/false, GlobalValue::CommonLinkage, | |||
4230 | Constant::getNullValue(Ty), Elem.first(), | |||
4231 | /*InsertBefore=*/nullptr, GlobalValue::NotThreadLocal, AddressSpace); | |||
4232 | } | |||
4233 | ||||
4234 | return cast<GlobalVariable>(&*Elem.second); | |||
4235 | } | |||
4236 | ||||
4237 | Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) { | |||
4238 | std::string Prefix = Twine("gomp_critical_user_", CriticalName).str(); | |||
4239 | std::string Name = getNameWithSeparators({Prefix, "var"}, ".", "."); | |||
4240 | return getOrCreateInternalVariable(KmpCriticalNameTy, Name); | |||
4241 | } | |||
4242 | ||||
4243 | Value *OpenMPIRBuilder::getSizeInBytes(Value *BasePtr) { | |||
4244 | LLVMContext &Ctx = Builder.getContext(); | |||
4245 | Value *Null = Constant::getNullValue(BasePtr->getType()->getPointerTo()); | |||
4246 | Value *SizeGep = | |||
4247 | Builder.CreateGEP(BasePtr->getType(), Null, Builder.getInt32(1)); | |||
4248 | Value *SizePtrToInt = Builder.CreatePtrToInt(SizeGep, Type::getInt64Ty(Ctx)); | |||
4249 | return SizePtrToInt; | |||
4250 | } | |||
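// Editorial note (illustrative IR, assuming BasePtr has type T*): the helper
// above uses the "GEP from a null pointer, then ptrtoint" idiom to turn a type
// size into an i64:
//   %gep  = getelementptr T*, T** null, i32 1
//   %size = ptrtoint T** %gep to i64
// As written, the element type passed to CreateGEP is BasePtr's own type, so
// the value computed is the store size of that type.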
4251 | ||||
4252 | GlobalVariable * | |||
4253 | OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings, | |||
4254 | std::string VarName) { | |||
4255 | llvm::Constant *MaptypesArrayInit = | |||
4256 | llvm::ConstantDataArray::get(M.getContext(), Mappings); | |||
4257 | auto *MaptypesArrayGlobal = new llvm::GlobalVariable( | |||
4258 | M, MaptypesArrayInit->getType(), | |||
4259 | /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MaptypesArrayInit, | |||
4260 | VarName); | |||
4261 | MaptypesArrayGlobal->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); | |||
4262 | return MaptypesArrayGlobal; | |||
4263 | } | |||
4264 | ||||
4265 | void OpenMPIRBuilder::createMapperAllocas(const LocationDescription &Loc, | |||
4266 | InsertPointTy AllocaIP, | |||
4267 | unsigned NumOperands, | |||
4268 | struct MapperAllocas &MapperAllocas) { | |||
4269 | if (!updateToLocation(Loc)) | |||
4270 | return; | |||
4271 | ||||
4272 | auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands); | |||
4273 | auto *ArrI64Ty = ArrayType::get(Int64, NumOperands); | |||
4274 | Builder.restoreIP(AllocaIP); | |||
4275 | AllocaInst *ArgsBase = Builder.CreateAlloca( | |||
4276 | ArrI8PtrTy, /* ArraySize = */ nullptr, ".offload_baseptrs"); | |||
4277 | AllocaInst *Args = Builder.CreateAlloca(ArrI8PtrTy, /* ArraySize = */ nullptr, | |||
4278 | ".offload_ptrs"); | |||
4279 | AllocaInst *ArgSizes = Builder.CreateAlloca( | |||
4280 | ArrI64Ty, /* ArraySize = */ nullptr, ".offload_sizes"); | |||
4281 | Builder.restoreIP(Loc.IP); | |||
4282 | MapperAllocas.ArgsBase = ArgsBase; | |||
4283 | MapperAllocas.Args = Args; | |||
4284 | MapperAllocas.ArgSizes = ArgSizes; | |||
4285 | } | |||
4286 | ||||
4287 | void OpenMPIRBuilder::emitMapperCall(const LocationDescription &Loc, | |||
4288 | Function *MapperFunc, Value *SrcLocInfo, | |||
4289 | Value *MaptypesArg, Value *MapnamesArg, | |||
4290 | struct MapperAllocas &MapperAllocas, | |||
4291 | int64_t DeviceID, unsigned NumOperands) { | |||
4292 | if (!updateToLocation(Loc)) | |||
4293 | return; | |||
4294 | ||||
4295 | auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands); | |||
4296 | auto *ArrI64Ty = ArrayType::get(Int64, NumOperands); | |||
4297 | Value *ArgsBaseGEP = | |||
4298 | Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.ArgsBase, | |||
4299 | {Builder.getInt32(0), Builder.getInt32(0)}); | |||
4300 | Value *ArgsGEP = | |||
4301 | Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.Args, | |||
4302 | {Builder.getInt32(0), Builder.getInt32(0)}); | |||
4303 | Value *ArgSizesGEP = | |||
4304 | Builder.CreateInBoundsGEP(ArrI64Ty, MapperAllocas.ArgSizes, | |||
4305 | {Builder.getInt32(0), Builder.getInt32(0)}); | |||
4306 | Value *NullPtr = Constant::getNullValue(Int8Ptr->getPointerTo()); | |||
4307 | Builder.CreateCall(MapperFunc, | |||
4308 | {SrcLocInfo, Builder.getInt64(DeviceID), | |||
4309 | Builder.getInt32(NumOperands), ArgsBaseGEP, ArgsGEP, | |||
4310 | ArgSizesGEP, MaptypesArg, MapnamesArg, NullPtr}); | |||
4311 | } | |||
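// Editorial note (assumed types, inferred from the argument list above): the
// mapper call emitted here has roughly the shape
//   call void @__tgt_target_data_begin_mapper(ptr %ident, i64 %device_id,
//       i32 %num_operands, ptr %baseptrs, ptr %ptrs, ptr %sizes,
//       ptr %maptypes, ptr %mapnames, ptr null)
// with the three GEPs decaying the stack arrays created by createMapperAllocas
// into pointers to their first elements.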
4312 | ||||
4313 | void OpenMPIRBuilder::emitOffloadingArraysArgument(IRBuilderBase &Builder, | |||
4314 | TargetDataRTArgs &RTArgs, | |||
4315 | TargetDataInfo &Info, | |||
4316 | bool EmitDebug, | |||
4317 | bool ForEndCall) { | |||
4318 | assert((!ForEndCall || Info.separateBeginEndCalls()) && | |||
4319 | "expected region end call to runtime only when end call is separate"); | |||
4320 | auto VoidPtrTy = Type::getInt8PtrTy(M.getContext()); | |||
4321 | auto VoidPtrPtrTy = VoidPtrTy->getPointerTo(0); | |||
4322 | auto Int64Ty = Type::getInt64Ty(M.getContext()); | |||
4323 | auto Int64PtrTy = Type::getInt64PtrTy(M.getContext()); | |||
4324 | ||||
4325 | if (!Info.NumberOfPtrs) { | |||
4326 | RTArgs.BasePointersArray = ConstantPointerNull::get(VoidPtrPtrTy); | |||
4327 | RTArgs.PointersArray = ConstantPointerNull::get(VoidPtrPtrTy); | |||
4328 | RTArgs.SizesArray = ConstantPointerNull::get(Int64PtrTy); | |||
4329 | RTArgs.MapTypesArray = ConstantPointerNull::get(Int64PtrTy); | |||
4330 | RTArgs.MapNamesArray = ConstantPointerNull::get(VoidPtrPtrTy); | |||
4331 | RTArgs.MappersArray = ConstantPointerNull::get(VoidPtrPtrTy); | |||
4332 | return; | |||
4333 | } | |||
4334 | ||||
4335 | RTArgs.BasePointersArray = Builder.CreateConstInBoundsGEP2_32( | |||
4336 | ArrayType::get(VoidPtrTy, Info.NumberOfPtrs), | |||
4337 | Info.RTArgs.BasePointersArray, | |||
4338 | /*Idx0=*/0, /*Idx1=*/0); | |||
4339 | RTArgs.PointersArray = Builder.CreateConstInBoundsGEP2_32( | |||
4340 | ArrayType::get(VoidPtrTy, Info.NumberOfPtrs), Info.RTArgs.PointersArray, | |||
4341 | /*Idx0=*/0, | |||
4342 | /*Idx1=*/0); | |||
4343 | RTArgs.SizesArray = Builder.CreateConstInBoundsGEP2_32( | |||
4344 | ArrayType::get(Int64Ty, Info.NumberOfPtrs), Info.RTArgs.SizesArray, | |||
4345 | /*Idx0=*/0, /*Idx1=*/0); | |||
4346 | RTArgs.MapTypesArray = Builder.CreateConstInBoundsGEP2_32( | |||
4347 | ArrayType::get(Int64Ty, Info.NumberOfPtrs), | |||
4348 | ForEndCall && Info.RTArgs.MapTypesArrayEnd ? Info.RTArgs.MapTypesArrayEnd | |||
4349 | : Info.RTArgs.MapTypesArray, | |||
4350 | /*Idx0=*/0, | |||
4351 | /*Idx1=*/0); | |||
4352 | ||||
4353 | // Only emit the mapper information arrays if debug information is | |||
4354 | // requested. | |||
4355 | if (!EmitDebug) | |||
4356 | RTArgs.MapNamesArray = ConstantPointerNull::get(VoidPtrPtrTy); | |||
4357 | else | |||
4358 | RTArgs.MapNamesArray = Builder.CreateConstInBoundsGEP2_32( | |||
4359 | ArrayType::get(VoidPtrTy, Info.NumberOfPtrs), Info.RTArgs.MapNamesArray, | |||
4360 | /*Idx0=*/0, | |||
4361 | /*Idx1=*/0); | |||
4362 | // If there is no user-defined mapper, set the mapper array to nullptr to | |||
4363 | // avoid an unnecessary data privatization. | |||
4364 | if (!Info.HasMapper) | |||
4365 | RTArgs.MappersArray = ConstantPointerNull::get(VoidPtrPtrTy); | |||
4366 | else | |||
4367 | RTArgs.MappersArray = | |||
4368 | Builder.CreatePointerCast(Info.RTArgs.MappersArray, VoidPtrPtrTy); | |||
4369 | } | |||
4370 | ||||
4371 | bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic( | |||
4372 | const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) { | |||
4373 | assert(!(AO == AtomicOrdering::NotAtomic || | |||
4374 | AO == llvm::AtomicOrdering::Unordered) && | |||
4375 | "Unexpected Atomic Ordering."); | |||
4376 | ||||
4377 | bool Flush = false; | |||
4378 | llvm::AtomicOrdering FlushAO = AtomicOrdering::Monotonic; | |||
4379 | ||||
4380 | switch (AK) { | |||
4381 | case Read: | |||
4382 | if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease || | |||
4383 | AO == AtomicOrdering::SequentiallyConsistent) { | |||
4384 | FlushAO = AtomicOrdering::Acquire; | |||
4385 | Flush = true; | |||
4386 | } | |||
4387 | break; | |||
4388 | case Write: | |||
4389 | case Compare: | |||
4390 | case Update: | |||
4391 | if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease || | |||
4392 | AO == AtomicOrdering::SequentiallyConsistent) { | |||
4393 | FlushAO = AtomicOrdering::Release; | |||
4394 | Flush = true; | |||
4395 | } | |||
4396 | break; | |||
4397 | case Capture: | |||
4398 | switch (AO) { | |||
4399 | case AtomicOrdering::Acquire: | |||
4400 | FlushAO = AtomicOrdering::Acquire; | |||
4401 | Flush = true; | |||
4402 | break; | |||
4403 | case AtomicOrdering::Release: | |||
4404 | FlushAO = AtomicOrdering::Release; | |||
4405 | Flush = true; | |||
4406 | break; | |||
4407 | case AtomicOrdering::AcquireRelease: | |||
4408 | case AtomicOrdering::SequentiallyConsistent: | |||
4409 | FlushAO = AtomicOrdering::AcquireRelease; | |||
4410 | Flush = true; | |||
4411 | break; | |||
4412 | default: | |||
4413 | // do nothing - leave silently. | |||
4414 | break; | |||
4415 | } | |||
4416 | } | |||
4417 | ||||
4418 | if (Flush) { | |||
4419 | // The flush runtime call does not yet take a memory ordering, so for now we | |||
4420 | // only resolve which atomic ordering would be required and still issue the | |||
4421 | // plain flush call. | |||
4422 | // TODO: pass `FlushAO` after memory ordering support is added | |||
4423 | (void)FlushAO; | |||
4424 | emitFlush(Loc); | |||
4425 | } | |||
4426 | ||||
4427 | // for AO == AtomicOrdering::Monotonic and all other case combinations | |||
4428 | // do nothing | |||
4429 | return Flush; | |||
4430 | } | |||
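// Editorial summary of the switch above: a flush is emitted after the atomic
// when the ordering is stronger than monotonic for the given kind:
//   Read                  : acquire, acq_rel, seq_cst
//   Write/Update/Compare  : release, acq_rel, seq_cst
//   Capture               : acquire, release, acq_rel, seq_cst
// FlushAO records the ordering that would be forwarded once the runtime's
// flush entry point accepts one.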
4431 | ||||
4432 | OpenMPIRBuilder::InsertPointTy | |||
4433 | OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc, | |||
4434 | AtomicOpValue &X, AtomicOpValue &V, | |||
4435 | AtomicOrdering AO) { | |||
4436 | if (!updateToLocation(Loc)) | |||
4437 | return Loc.IP; | |||
4438 | ||||
4439 | Type *XTy = X.Var->getType(); | |||
4440 | assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"); | |||
4441 | Type *XElemTy = X.ElemTy; | |||
4442 | assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || | |||
4443 | XElemTy->isPointerTy()) && | |||
4444 | "OMP atomic read expected a scalar type"); | |||
4445 | ||||
4446 | Value *XRead = nullptr; | |||
4447 | ||||
4448 | if (XElemTy->isIntegerTy()) { | |||
4449 | LoadInst *XLD = | |||
4450 | Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read"); | |||
4451 | XLD->setAtomic(AO); | |||
4452 | XRead = cast<Value>(XLD); | |||
4453 | } else { | |||
4454 | // We need to bitcast and perform atomic op as integer | |||
4455 | unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace(); | |||
4456 | IntegerType *IntCastTy = | |||
4457 | IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); | |||
4458 | Value *XBCast = Builder.CreateBitCast( | |||
4459 | X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast"); | |||
4460 | LoadInst *XLoad = | |||
4461 | Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load"); | |||
4462 | XLoad->setAtomic(AO); | |||
4463 | if (XElemTy->isFloatingPointTy()) { | |||
4464 | XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast"); | |||
4465 | } else { | |||
4466 | XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast"); | |||
4467 | } | |||
4468 | } | |||
4469 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read); | |||
4470 | Builder.CreateStore(XRead, V.Var, V.IsVolatile); | |||
4471 | return Builder.saveIP(); | |||
4472 | } | |||
4473 | ||||
4474 | OpenMPIRBuilder::InsertPointTy | |||
4475 | OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc, | |||
4476 | AtomicOpValue &X, Value *Expr, | |||
4477 | AtomicOrdering AO) { | |||
4478 | if (!updateToLocation(Loc)) | |||
4479 | return Loc.IP; | |||
4480 | ||||
4481 | Type *XTy = X.Var->getType(); | |||
4482 | assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory"); | |||
4483 | Type *XElemTy = X.ElemTy; | |||
4484 | assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || | |||
4485 | XElemTy->isPointerTy()) && | |||
4486 | "OMP atomic write expected a scalar type"); | |||
4487 | ||||
4488 | if (XElemTy->isIntegerTy()) { | |||
4489 | StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile); | |||
4490 | XSt->setAtomic(AO); | |||
4491 | } else { | |||
4492 | // We need to bitcast and perform atomic op as integers | |||
4493 | unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace(); | |||
4494 | IntegerType *IntCastTy = | |||
4495 | IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); | |||
4496 | Value *XBCast = Builder.CreateBitCast( | |||
4497 | X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast"); | |||
4498 | Value *ExprCast = | |||
4499 | Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast"); | |||
4500 | StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile); | |||
4501 | XSt->setAtomic(AO); | |||
4502 | } | |||
4503 | ||||
4504 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write); | |||
4505 | return Builder.saveIP(); | |||
4506 | } | |||
4507 | ||||
4508 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate( | |||
4509 | const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, | |||
4510 | Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, | |||
4511 | AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr) { | |||
4512 | assert(!isConflictIP(Loc.IP, AllocaIP) && "IPs must not be ambiguous"); | |||
4513 | if (!updateToLocation(Loc)) | |||
4514 | return Loc.IP; | |||
4515 | ||||
4516 | LLVM_DEBUG({ | |||
4517 | Type *XTy = X.Var->getType(); | |||
4518 | assert(XTy->isPointerTy() && | |||
4519 | "OMP Atomic expects a pointer to target memory"); | |||
4520 | Type *XElemTy = X.ElemTy; | |||
4521 | assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || | |||
4522 | XElemTy->isPointerTy()) && | |||
4523 | "OMP atomic update expected a scalar type"); | |||
4524 | assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && | |||
4525 | (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && | |||
4526 | "OpenMP atomic does not support LT or GT operations"); | |||
4527 | }); | |||
4528 | ||||
4529 | emitAtomicUpdate(AllocaIP, X.Var, X.ElemTy, Expr, AO, RMWOp, UpdateOp, | |||
4530 | X.IsVolatile, IsXBinopExpr); | |||
4531 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update); | |||
4532 | return Builder.saveIP(); | |||
4533 | } | |||
4534 | ||||
4535 | // FIXME: Duplicating AtomicExpand | |||
4536 | Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2, | |||
4537 | AtomicRMWInst::BinOp RMWOp) { | |||
4538 | switch (RMWOp) { | |||
4539 | case AtomicRMWInst::Add: | |||
4540 | return Builder.CreateAdd(Src1, Src2); | |||
4541 | case AtomicRMWInst::Sub: | |||
4542 | return Builder.CreateSub(Src1, Src2); | |||
4543 | case AtomicRMWInst::And: | |||
4544 | return Builder.CreateAnd(Src1, Src2); | |||
4545 | case AtomicRMWInst::Nand: | |||
4546 | return Builder.CreateNeg(Builder.CreateAnd(Src1, Src2)); | |||
4547 | case AtomicRMWInst::Or: | |||
4548 | return Builder.CreateOr(Src1, Src2); | |||
4549 | case AtomicRMWInst::Xor: | |||
4550 | return Builder.CreateXor(Src1, Src2); | |||
4551 | case AtomicRMWInst::Xchg: | |||
4552 | case AtomicRMWInst::FAdd: | |||
4553 | case AtomicRMWInst::FSub: | |||
4554 | case AtomicRMWInst::BAD_BINOP: | |||
4555 | case AtomicRMWInst::Max: | |||
4556 | case AtomicRMWInst::Min: | |||
4557 | case AtomicRMWInst::UMax: | |||
4558 | case AtomicRMWInst::UMin: | |||
4559 | case AtomicRMWInst::FMax: | |||
4560 | case AtomicRMWInst::FMin: | |||
4561 | case AtomicRMWInst::UIncWrap: | |||
4562 | case AtomicRMWInst::UDecWrap: | |||
4563 | llvm_unreachable("Unsupported atomic update operation"); | |||
4564 | } | |||
4565 | llvm_unreachable("Unsupported atomic update operation"); | |||
4566 | } | |||
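// For example (illustrative, integer add): the RMW fast path yields the pair
//
//   %x.old = atomicrmw add ptr %x, i32 %expr monotonic
//   %x.new = add i32 %x.old, %expr   ; emitRMWOpAsInstruction
//
// so callers can hand back both the value of `x` before and after the update,
// which is what prefix/postfix captures need.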
4567 | ||||
4568 | std::pair<Value *, Value *> OpenMPIRBuilder::emitAtomicUpdate( | |||
4569 | InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr, | |||
4570 | AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, | |||
4571 | AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXBinopExpr) { | |||
4572 | // TODO: handle the case where XElemTy is not byte-sized or not a power of 2 | |||
4573 | // or a complex datatype. | |||
4574 | bool emitRMWOp = false; | |||
4575 | switch (RMWOp) { | |||
4576 | case AtomicRMWInst::Add: | |||
4577 | case AtomicRMWInst::And: | |||
4578 | case AtomicRMWInst::Nand: | |||
4579 | case AtomicRMWInst::Or: | |||
4580 | case AtomicRMWInst::Xor: | |||
4581 | case AtomicRMWInst::Xchg: | |||
4582 | emitRMWOp = XElemTy; | |||
4583 | break; | |||
4584 | case AtomicRMWInst::Sub: | |||
4585 | emitRMWOp = (IsXBinopExpr && XElemTy); | |||
4586 | break; | |||
4587 | default: | |||
4588 | emitRMWOp = false; | |||
4589 | } | |||
4590 | emitRMWOp &= XElemTy->isIntegerTy(); | |||
4591 | ||||
4592 | std::pair<Value *, Value *> Res; | |||
4593 | if (emitRMWOp) { | |||
4594 | Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO); | |||
4595 | // Not needed except in the case of postfix captures; generated anyway for | |||
4596 | // consistency with the else branch, and removed by any DCE pass. | |||
4597 | // AtomicRMWInst::Xchg does not have a corresponding non-atomic instruction. | |||
4598 | if (RMWOp == AtomicRMWInst::Xchg) | |||
4599 | Res.second = Res.first; | |||
4600 | else | |||
4601 | Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp); | |||
4602 | } else { | |||
4603 | unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace(); | |||
4604 | IntegerType *IntCastTy = | |||
4605 | IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); | |||
4606 | Value *XBCast = | |||
4607 | Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace)); | |||
4608 | LoadInst *OldVal = | |||
4609 | Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load"); | |||
4610 | OldVal->setAtomic(AO); | |||
4611 | // CurBB | |||
4612 | // | /---\ | |||
4613 | // ContBB | | |||
4614 | // | \---/ | |||
4615 | // ExitBB | |||
4616 | BasicBlock *CurBB = Builder.GetInsertBlock(); | |||
4617 | Instruction *CurBBTI = CurBB->getTerminator(); | |||
4618 | CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable(); | |||
4619 | BasicBlock *ExitBB = | |||
4620 | CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit"); | |||
4621 | BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(), | |||
4622 | X->getName() + ".atomic.cont"); | |||
4623 | ContBB->getTerminator()->eraseFromParent(); | |||
4624 | Builder.restoreIP(AllocaIP); | |||
4625 | AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy); | |||
4626 | NewAtomicAddr->setName(X->getName() + "x.new.val"); | |||
4627 | Builder.SetInsertPoint(ContBB); | |||
4628 | llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2); | |||
4629 | PHI->addIncoming(OldVal, CurBB); | |||
4630 | IntegerType *NewAtomicCastTy = | |||
4631 | IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); | |||
4632 | bool IsIntTy = XElemTy->isIntegerTy(); | |||
4633 | Value *NewAtomicIntAddr = | |||
4634 | (IsIntTy) | |||
4635 | ? NewAtomicAddr | |||
4636 | : Builder.CreateBitCast(NewAtomicAddr, | |||
4637 | NewAtomicCastTy->getPointerTo(Addrspace)); | |||
4638 | Value *OldExprVal = PHI; | |||
4639 | if (!IsIntTy) { | |||
4640 | if (XElemTy->isFloatingPointTy()) { | |||
4641 | OldExprVal = Builder.CreateBitCast(PHI, XElemTy, | |||
4642 | X->getName() + ".atomic.fltCast"); | |||
4643 | } else { | |||
4644 | OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy, | |||
4645 | X->getName() + ".atomic.ptrCast"); | |||
4646 | } | |||
4647 | } | |||
4648 | ||||
4649 | Value *Upd = UpdateOp(OldExprVal, Builder); | |||
4650 | Builder.CreateStore(Upd, NewAtomicAddr); | |||
4651 | LoadInst *DesiredVal = Builder.CreateLoad(IntCastTy, NewAtomicIntAddr); | |||
4652 | Value *XAddr = | |||
4653 | (IsIntTy) | |||
4654 | ? X | |||
4655 | : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace)); | |||
4656 | AtomicOrdering Failure = | |||
4657 | llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); | |||
4658 | AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg( | |||
4659 | XAddr, PHI, DesiredVal, llvm::MaybeAlign(), AO, Failure); | |||
4660 | Result->setVolatile(VolatileX); | |||
4661 | Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0); | |||
4662 | Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1); | |||
4663 | PHI->addIncoming(PreviousVal, Builder.GetInsertBlock()); | |||
4664 | Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB); | |||
4665 | ||||
4666 | Res.first = OldExprVal; | |||
4667 | Res.second = Upd; | |||
4668 | ||||
4669 | // Set the insertion point in the exit block. | |||
4670 | if (UnreachableInst *ExitTI = | |||
4671 | dyn_cast<UnreachableInst>(ExitBB->getTerminator())) { | |||
4672 | CurBBTI->eraseFromParent(); | |||
4673 | Builder.SetInsertPoint(ExitBB); | |||
4674 | } else { | |||
4675 | Builder.SetInsertPoint(ExitTI); | |||
4676 | } | |||
4677 | } | |||
4678 | ||||
4679 | return Res; | |||
4680 | } | |||
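// The slow path above expands to a compare-exchange loop; schematically, for a
// floating-point `x` (a sketch with abbreviated names, not verbatim output):
//
//   entry:
//     %x.old = load atomic i32, ptr %x monotonic, align 4
//     br label %x.atomic.cont
//   x.atomic.cont:
//     %phi = phi i32 [ %x.old, %entry ], [ %prev, %x.atomic.cont ]
//     ; bitcast %phi to float, run UpdateOp, store the result to the alloca,
//     ; then reload it as i32 into %desired
//     %pair = cmpxchg ptr %x, i32 %phi, i32 %desired monotonic monotonic
//     %prev = extractvalue { i32, i1 } %pair, 0
//     %ok = extractvalue { i32, i1 } %pair, 1
//     br i1 %ok, label %x.atomic.exit, label %x.atomic.cont
//   x.atomic.exit: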
4681 | ||||
4682 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture( | |||
4683 | const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, | |||
4684 | AtomicOpValue &V, Value *Expr, AtomicOrdering AO, | |||
4685 | AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, | |||
4686 | bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr) { | |||
4687 | if (!updateToLocation(Loc)) | |||
4688 | return Loc.IP; | |||
4689 | ||||
4690 | LLVM_DEBUG({ | |||
4691 | Type *XTy = X.Var->getType(); | |||
4692 | assert(XTy->isPointerTy() && | |||
4693 | "OMP Atomic expects a pointer to target memory"); | |||
4694 | Type *XElemTy = X.ElemTy; | |||
4695 | assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || | |||
4696 | XElemTy->isPointerTy()) && | |||
4697 | "OMP atomic capture expected a scalar type"); | |||
4698 | assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && | |||
4699 | "OpenMP atomic does not support LT or GT operations"); | |||
4700 | }); | |||
4701 | ||||
4702 | // If UpdateExpr is 'x' updated with some `expr` not based on 'x', | |||
4703 | // 'x' is simply atomically rewritten with 'expr'. | |||
4704 | AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg); | |||
4705 | std::pair<Value *, Value *> Result = | |||
4706 | emitAtomicUpdate(AllocaIP, X.Var, X.ElemTy, Expr, AO, AtomicOp, UpdateOp, | |||
4707 | X.IsVolatile, IsXBinopExpr); | |||
4708 | ||||
4709 | Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second); | |||
4710 | Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile); | |||
4711 | ||||
4712 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture); | |||
4713 | return Builder.saveIP(); | |||
4714 | } | |||
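// Capture semantics in short: with IsPostfixUpdate (e.g. `v = x++;`) the value
// stored to `v` is Result.first, i.e. `x` before the update; otherwise (e.g.
// `v = ++x;`) it is Result.second, the updated value. When UpdateExpr is
// false, the update degenerates into an atomic exchange of `x` with `expr`.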
4715 | ||||
4716 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCompare( | |||
4717 | const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, | |||
4718 | AtomicOpValue &R, Value *E, Value *D, AtomicOrdering AO, | |||
4719 | omp::OMPAtomicCompareOp Op, bool IsXBinopExpr, bool IsPostfixUpdate, | |||
4720 | bool IsFailOnly) { | |||
4721 | ||||
4722 | if (!updateToLocation(Loc)) | |||
4723 | return Loc.IP; | |||
4724 | ||||
4725 | assert(X.Var->getType()->isPointerTy() && | |||
4726 | "OMP atomic expects a pointer to target memory"); | |||
4727 | // compare capture | |||
4728 | if (V.Var) { | |||
4729 | assert(V.Var->getType()->isPointerTy() && "v.var must be of pointer type"); | |||
4730 | assert(V.ElemTy == X.ElemTy && "x and v must be of same type"); | |||
4731 | } | |||
4732 | ||||
4733 | bool IsInteger = E->getType()->isIntegerTy(); | |||
4734 | ||||
4735 | if (Op == OMPAtomicCompareOp::EQ) { | |||
4736 | AtomicOrdering Failure = AtomicCmpXchgInst::getStrongestFailureOrdering(AO); | |||
4737 | AtomicCmpXchgInst *Result = nullptr; | |||
4738 | if (!IsInteger) { | |||
4739 | unsigned Addrspace = | |||
4740 | cast<PointerType>(X.Var->getType())->getAddressSpace(); | |||
4741 | IntegerType *IntCastTy = | |||
4742 | IntegerType::get(M.getContext(), X.ElemTy->getScalarSizeInBits()); | |||
4743 | Value *XBCast = | |||
4744 | Builder.CreateBitCast(X.Var, IntCastTy->getPointerTo(Addrspace)); | |||
4745 | Value *EBCast = Builder.CreateBitCast(E, IntCastTy); | |||
4746 | Value *DBCast = Builder.CreateBitCast(D, IntCastTy); | |||
4747 | Result = Builder.CreateAtomicCmpXchg(XBCast, EBCast, DBCast, MaybeAlign(), | |||
4748 | AO, Failure); | |||
4749 | } else { | |||
4750 | Result = | |||
4751 | Builder.CreateAtomicCmpXchg(X.Var, E, D, MaybeAlign(), AO, Failure); | |||
4752 | } | |||
4753 | ||||
4754 | if (V.Var) { | |||
4755 | Value *OldValue = Builder.CreateExtractValue(Result, /*Idxs=*/0); | |||
4756 | if (!IsInteger) | |||
4757 | OldValue = Builder.CreateBitCast(OldValue, X.ElemTy); | |||
4758 | assert(OldValue->getType() == V.ElemTy && | |||
4759 | "OldValue and V must be of same type"); | |||
4760 | if (IsPostfixUpdate) { | |||
4761 | Builder.CreateStore(OldValue, V.Var, V.IsVolatile); | |||
4762 | } else { | |||
4763 | Value *SuccessOrFail = Builder.CreateExtractValue(Result, /*Idxs=*/1); | |||
4764 | if (IsFailOnly) { | |||
4765 | // CurBB---- | |||
4766 | // | | | |||
4767 | // v | | |||
4768 | // ContBB | | |||
4769 | // | | | |||
4770 | // v | | |||
4771 | // ExitBB <- | |||
4772 | // | |||
4773 | // where ContBB only contains the store of old value to 'v'. | |||
4774 | BasicBlock *CurBB = Builder.GetInsertBlock(); | |||
4775 | Instruction *CurBBTI = CurBB->getTerminator(); | |||
4776 | CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable(); | |||
4777 | BasicBlock *ExitBB = CurBB->splitBasicBlock( | |||
4778 | CurBBTI, X.Var->getName() + ".atomic.exit"); | |||
4779 | BasicBlock *ContBB = CurBB->splitBasicBlock( | |||
4780 | CurBB->getTerminator(), X.Var->getName() + ".atomic.cont"); | |||
4781 | ContBB->getTerminator()->eraseFromParent(); | |||
4782 | CurBB->getTerminator()->eraseFromParent(); | |||
4783 | ||||
4784 | Builder.CreateCondBr(SuccessOrFail, ExitBB, ContBB); | |||
4785 | ||||
4786 | Builder.SetInsertPoint(ContBB); | |||
4787 | Builder.CreateStore(OldValue, V.Var); | |||
4788 | Builder.CreateBr(ExitBB); | |||
4789 | ||||
4790 | if (UnreachableInst *ExitTI = | |||
4791 | dyn_cast<UnreachableInst>(ExitBB->getTerminator())) { | |||
4792 | CurBBTI->eraseFromParent(); | |||
4793 | Builder.SetInsertPoint(ExitBB); | |||
4794 | } else { | |||
4795 | Builder.SetInsertPoint(ExitTI); | |||
4796 | } | |||
4797 | } else { | |||
4798 | Value *CapturedValue = | |||
4799 | Builder.CreateSelect(SuccessOrFail, E, OldValue); | |||
4800 | Builder.CreateStore(CapturedValue, V.Var, V.IsVolatile); | |||
4801 | } | |||
4802 | } | |||
4803 | } | |||
4804 | // The comparison result has to be stored. | |||
4805 | if (R.Var) { | |||
4806 | assert(R.Var->getType()->isPointerTy() && | |||
4807 | "r.var must be of pointer type"); | |||
4808 | assert(R.ElemTy->isIntegerTy() && "r must be of integral type"); | |||
4809 | ||||
4810 | Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1); | |||
4811 | Value *ResultCast = R.IsSigned | |||
4812 | ? Builder.CreateSExt(SuccessFailureVal, R.ElemTy) | |||
4813 | : Builder.CreateZExt(SuccessFailureVal, R.ElemTy); | |||
4814 | Builder.CreateStore(ResultCast, R.Var, R.IsVolatile); | |||
4815 | } | |||
4816 | } else { | |||
4817 | assert((Op == OMPAtomicCompareOp::MAX || Op == OMPAtomicCompareOp::MIN) && | |||
4818 | "Op should be either max or min at this point"); | |||
4819 | assert(!IsFailOnly && "IsFailOnly is only valid when the comparison is =="); | |||
4820 | ||||
4821 | // Reverse the comparison op where the OpenMP form differs from the LLVM form. | |||
4822 | // Take max as an example. | |||
4823 | // OpenMP form: | |||
4824 | // x = x > expr ? expr : x; | |||
4825 | // LLVM form: | |||
4826 | // *ptr = *ptr > val ? *ptr : val; | |||
4827 | // We need to transform to LLVM form. | |||
4828 | // x = x <= expr ? x : expr; | |||
4829 | AtomicRMWInst::BinOp NewOp; | |||
4830 | if (IsXBinopExpr) { | |||
4831 | if (IsInteger) { | |||
4832 | if (X.IsSigned) | |||
4833 | NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::Min | |||
4834 | : AtomicRMWInst::Max; | |||
4835 | else | |||
4836 | NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::UMin | |||
4837 | : AtomicRMWInst::UMax; | |||
4838 | } else { | |||
4839 | NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::FMin | |||
4840 | : AtomicRMWInst::FMax; | |||
4841 | } | |||
4842 | } else { | |||
4843 | if (IsInteger) { | |||
4844 | if (X.IsSigned) | |||
4845 | NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::Max | |||
4846 | : AtomicRMWInst::Min; | |||
4847 | else | |||
4848 | NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::UMax | |||
4849 | : AtomicRMWInst::UMin; | |||
4850 | } else { | |||
4851 | NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::FMax | |||
4852 | : AtomicRMWInst::FMin; | |||
4853 | } | |||
4854 | } | |||
4855 | ||||
4856 | AtomicRMWInst *OldValue = | |||
4857 | Builder.CreateAtomicRMW(NewOp, X.Var, E, MaybeAlign(), AO); | |||
4858 | if (V.Var) { | |||
4859 | Value *CapturedValue = nullptr; | |||
4860 | if (IsPostfixUpdate) { | |||
4861 | CapturedValue = OldValue; | |||
4862 | } else { | |||
4863 | CmpInst::Predicate Pred; | |||
4864 | switch (NewOp) { | |||
4865 | case AtomicRMWInst::Max: | |||
4866 | Pred = CmpInst::ICMP_SGT; | |||
4867 | break; | |||
4868 | case AtomicRMWInst::UMax: | |||
4869 | Pred = CmpInst::ICMP_UGT; | |||
4870 | break; | |||
4871 | case AtomicRMWInst::FMax: | |||
4872 | Pred = CmpInst::FCMP_OGT; | |||
4873 | break; | |||
4874 | case AtomicRMWInst::Min: | |||
4875 | Pred = CmpInst::ICMP_SLT; | |||
4876 | break; | |||
4877 | case AtomicRMWInst::UMin: | |||
4878 | Pred = CmpInst::ICMP_ULT; | |||
4879 | break; | |||
4880 | case AtomicRMWInst::FMin: | |||
4881 | Pred = CmpInst::FCMP_OLT; | |||
4882 | break; | |||
4883 | default: | |||
4884 | llvm_unreachable("unexpected comparison op")::llvm::llvm_unreachable_internal("unexpected comparison op", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4884); | |||
4885 | } | |||
4886 | Value *NonAtomicCmp = Builder.CreateCmp(Pred, OldValue, E); | |||
4887 | CapturedValue = Builder.CreateSelect(NonAtomicCmp, E, OldValue); | |||
4888 | } | |||
4889 | Builder.CreateStore(CapturedValue, V.Var, V.IsVolatile); | |||
4890 | } | |||
4891 | } | |||
4892 | ||||
4893 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Compare); | |||
4894 | ||||
4895 | return Builder.saveIP(); | |||
4896 | } | |||
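// Rough lowering summary (illustrative): the EQ form, e.g.
// `if (x == e) { x = d; }`, becomes a single
//
//   %pair = cmpxchg ptr %x, <ty> %e, <ty> %d <AO> <failure-AO>
//
// while the conditional min/max forms become a single
//
//   %x.old = atomicrmw <min|max|umin|umax|fmin|fmax> ptr %x, <ty> %e <AO>
//
// with the opcode chosen from signedness, element type, and operand order
// (IsXBinopExpr), plus optional stores of the captured value to `v` and of the
// success flag to `r`.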
4897 | ||||
4898 | GlobalVariable * | |||
4899 | OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names, | |||
4900 | std::string VarName) { | |||
4901 | llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get( | |||
4902 | llvm::ArrayType::get( | |||
4903 | llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()), | |||
4904 | Names); | |||
4905 | auto *MapNamesArrayGlobal = new llvm::GlobalVariable( | |||
4906 | M, MapNamesArrayInit->getType(), | |||
4907 | /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MapNamesArrayInit, | |||
4908 | VarName); | |||
4909 | return MapNamesArrayGlobal; | |||
4910 | } | |||
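// The produced global is an array of pointers to the name strings; assuming
// VarName is ".offload_mapnames" and two entries, it would look roughly like
//
//   @.offload_mapnames = private constant [2 x ptr] [ptr @str0, ptr @str1]
//
// where @str0 and @str1 stand for the string constants passed in via Names.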
4911 | ||||
4912 | // Create all simple and struct types exposed by the runtime and remember | |||
4913 | // their llvm::PointerTypes for easy access later. | |||
4914 | void OpenMPIRBuilder::initializeTypes(Module &M) { | |||
4915 | LLVMContext &Ctx = M.getContext(); | |||
4916 | StructType *T; | |||
4917 | #define OMP_TYPE(VarName, InitValue) VarName = InitValue; | |||
4918 | #define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \ | |||
4919 | VarName##Ty = ArrayType::get(ElemTy, ArraySize); \ | |||
4920 | VarName##PtrTy = PointerType::getUnqual(VarName##Ty); | |||
4921 | #define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \ | |||
4922 | VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg); \ | |||
4923 | VarName##Ptr = PointerType::getUnqual(VarName); | |||
4924 | #define OMP_STRUCT_TYPE(VarName, StructName, Packed, ...) \ | |||
4925 | T = StructType::getTypeByName(Ctx, StructName); \ | |||
4926 | if (!T) \ | |||
4927 | T = StructType::create(Ctx, {__VA_ARGS__}, StructName, Packed); \ | |||
4928 | VarName = T; \ | |||
4929 | VarName##Ptr = PointerType::getUnqual(T); | |||
4930 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | |||
4931 | } | |||
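// These X-macros are driven by OMPKinds.def. As a sketch, an entry along the
// lines of
//
//   OMP_FUNCTION_TYPE(KmpcDtor, false, Void, VoidPtr)
//
// expands (per the definitions above) to
//
//   KmpcDtor = FunctionType::get(Void, {VoidPtr}, false);
//   KmpcDtorPtr = PointerType::getUnqual(KmpcDtor);
//
// so every runtime-exposed type gets both the type itself and a companion
// pointer type initialized here.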
4932 | ||||
4933 | void OpenMPIRBuilder::OutlineInfo::collectBlocks( | |||
4934 | SmallPtrSetImpl<BasicBlock *> &BlockSet, | |||
4935 | SmallVectorImpl<BasicBlock *> &BlockVector) { | |||
4936 | SmallVector<BasicBlock *, 32> Worklist; | |||
4937 | BlockSet.insert(EntryBB); | |||
4938 | BlockSet.insert(ExitBB); | |||
4939 | ||||
4940 | Worklist.push_back(EntryBB); | |||
4941 | while (!Worklist.empty()) { | |||
4942 | BasicBlock *BB = Worklist.pop_back_val(); | |||
4943 | BlockVector.push_back(BB); | |||
4944 | for (BasicBlock *SuccBB : successors(BB)) | |||
4945 | if (BlockSet.insert(SuccBB).second) | |||
4946 | Worklist.push_back(SuccBB); | |||
4947 | } | |||
4948 | } | |||
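// Note: ExitBB is seeded into BlockSet before the walk, so the worklist never
// crosses the region exit; BlockVector ends up holding every block reachable
// from EntryBB up to, but not including, ExitBB.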
4949 | ||||
4950 | void OpenMPIRBuilder::createOffloadEntry(Constant *ID, Constant *Addr, | |||
4951 | uint64_t Size, int32_t Flags, | |||
4952 | GlobalValue::LinkageTypes) { | |||
4953 | if (!Config.isTargetCodegen()) { | |||
4954 | emitOffloadingEntry(ID, Addr->getName(), Size, Flags); | |||
4955 | return; | |||
4956 | } | |||
4957 | // TODO: Add support for global variables on the device after declare target | |||
4958 | // support. | |||
4959 | Function *Fn = dyn_cast<Function>(Addr); | |||
4960 | if (!Fn) | |||
4961 | return; | |||
4962 | ||||
4963 | Module &M = *(Fn->getParent()); | |||
4964 | LLVMContext &Ctx = M.getContext(); | |||
4965 | ||||
4966 | // Get "nvvm.annotations" metadata node. | |||
4967 | NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations"); | |||
4968 | ||||
4969 | Metadata *MDVals[] = { | |||
4970 | ConstantAsMetadata::get(Fn), MDString::get(Ctx, "kernel"), | |||
4971 | ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(Ctx), 1))}; | |||
4972 | // Append metadata to nvvm.annotations. | |||
4973 | MD->addOperand(MDNode::get(Ctx, MDVals)); | |||
4974 | ||||
4975 | // Add a function attribute for the kernel. | |||
4976 | Fn->addFnAttr(Attribute::get(Ctx, "kernel")); | |||
4977 | if (Triple(M.getTargetTriple()).isAMDGCN()) | |||
4978 | Fn->addFnAttr("uniform-work-group-size", "true"); | |||
4979 | } | |||
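// On the target-codegen path this amounts to, roughly, for a kernel @foo:
//
//   !nvvm.annotations = !{!0}
//   !0 = !{ptr @foo, !"kernel", i32 1}
//
// plus the "kernel" function attribute and, on AMDGCN,
// "uniform-work-group-size"="true". When not doing target codegen, a plain
// offloading entry is emitted via emitOffloadingEntry() instead.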
4980 | ||||
4981 | // We only generate metadata for functions that contain target regions. | |||
4982 | void OpenMPIRBuilder::createOffloadEntriesAndInfoMetadata( | |||
4983 | EmitMetadataErrorReportFunctionTy &ErrorFn) { | |||
4984 | ||||
4985 | // If there are no entries, we don't need to do anything. | |||
4986 | if (OffloadInfoManager.empty()) | |||
4987 | return; | |||
4988 | ||||
4989 | LLVMContext &C = M.getContext(); | |||
4990 | SmallVector<std::pair<const OffloadEntriesInfoManager::OffloadEntryInfo *, | |||
4991 | TargetRegionEntryInfo>, | |||
4992 | 16> | |||
4993 | OrderedEntries(OffloadInfoManager.size()); | |||
4994 | ||||
4995 | // Auxiliary methods to create metadata values and strings. | |||
4996 | auto &&GetMDInt = [this](unsigned V) { | |||
4997 | return ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), V)); | |||
4998 | }; | |||
4999 | ||||
5000 | auto &&GetMDString = [&C](StringRef V) { return MDString::get(C, V); }; | |||
5001 | ||||
5002 | // Create the offloading info metadata node. | |||
5003 | NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info"); | |||
5004 | auto &&TargetRegionMetadataEmitter = | |||
5005 | [&C, MD, &OrderedEntries, &GetMDInt, &GetMDString]( | |||
5006 | const TargetRegionEntryInfo &EntryInfo, | |||
5007 | const OffloadEntriesInfoManager::OffloadEntryInfoTargetRegion &E) { | |||
5008 | // Generate metadata for target regions. Each entry of this metadata | |||
5009 | // contains: | |||
5010 | // - Entry 0 -> Kind of this type of metadata (0). | |||
5011 | // - Entry 1 -> Device ID of the file where the entry was identified. | |||
5012 | // - Entry 2 -> File ID of the file where the entry was identified. | |||
5013 | // - Entry 3 -> Mangled name of the function where the entry was | |||
5014 | // identified. | |||
5015 | // - Entry 4 -> Line in the file where the entry was identified. | |||
5016 | // - Entry 5 -> Count of regions at this DeviceID/FilesID/Line. | |||
5017 | // - Entry 6 -> Order the entry was created. | |||
5018 | // The first element of the metadata node is the kind. | |||
5019 | Metadata *Ops[] = { | |||
5020 | GetMDInt(E.getKind()), GetMDInt(EntryInfo.DeviceID), | |||
5021 | GetMDInt(EntryInfo.FileID), GetMDString(EntryInfo.ParentName), | |||
5022 | GetMDInt(EntryInfo.Line), GetMDInt(EntryInfo.Count), | |||
5023 | GetMDInt(E.getOrder())}; | |||
5024 | ||||
5025 | // Save this entry in the right position of the ordered entries array. | |||
5026 | OrderedEntries[E.getOrder()] = std::make_pair(&E, EntryInfo); | |||
5027 | ||||
5028 | // Add metadata to the named metadata node. | |||
5029 | MD->addOperand(MDNode::get(C, Ops)); | |||
5030 | }; | |||
5031 | ||||
5032 | OffloadInfoManager.actOnTargetRegionEntriesInfo(TargetRegionMetadataEmitter); | |||
5033 | ||||
5034 | // Create a function that emits metadata for each device global variable entry. | |||
5035 | auto &&DeviceGlobalVarMetadataEmitter = | |||
5036 | [&C, &OrderedEntries, &GetMDInt, &GetMDString, MD]( | |||
5037 | StringRef MangledName, | |||
5038 | const OffloadEntriesInfoManager::OffloadEntryInfoDeviceGlobalVar &E) { | |||
5039 | // Generate metadata for global variables. Each entry of this metadata | |||
5040 | // contains: | |||
5041 | // - Entry 0 -> Kind of this type of metadata (1). | |||
5042 | // - Entry 1 -> Mangled name of the variable. | |||
5043 | // - Entry 2 -> Declare target kind. | |||
5044 | // - Entry 3 -> Order the entry was created. | |||
5045 | // The first element of the metadata node is the kind. | |||
5046 | Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDString(MangledName), | |||
5047 | GetMDInt(E.getFlags()), GetMDInt(E.getOrder())}; | |||
5048 | ||||
5049 | // Save this entry in the right position of the ordered entries array. | |||
5050 | TargetRegionEntryInfo varInfo(MangledName, 0, 0, 0); | |||
5051 | OrderedEntries[E.getOrder()] = std::make_pair(&E, varInfo); | |||
5052 | ||||
5053 | // Add metadata to the named metadata node. | |||
5054 | MD->addOperand(MDNode::get(C, Ops)); | |||
5055 | }; | |||
5056 | ||||
5057 | OffloadInfoManager.actOnDeviceGlobalVarEntriesInfo( | |||
5058 | DeviceGlobalVarMetadataEmitter); | |||
5059 | ||||
5060 | for (const auto &E : OrderedEntries) { | |||
5061 | assert(E.first && "All ordered entries must exist!"); | |||
5062 | if (const auto *CE = | |||
5063 | dyn_cast<OffloadEntriesInfoManager::OffloadEntryInfoTargetRegion>( | |||
5064 | E.first)) { | |||
5065 | if (!CE->getID() || !CE->getAddress()) { | |||
5066 | // Do not blame the entry if the parent function is not emitted. | |||
5067 | TargetRegionEntryInfo EntryInfo = E.second; | |||
5068 | StringRef FnName = EntryInfo.ParentName; | |||
5069 | if (!M.getNamedValue(FnName)) | |||
5070 | continue; | |||
5071 | ErrorFn(EMIT_MD_TARGET_REGION_ERROR, EntryInfo); | |||
5072 | continue; | |||
5073 | } | |||
5074 | createOffloadEntry(CE->getID(), CE->getAddress(), | |||
5075 | /*Size=*/0, CE->getFlags(), | |||
5076 | GlobalValue::WeakAnyLinkage); | |||
5077 | } else if (const auto *CE = dyn_cast< | |||
5078 | OffloadEntriesInfoManager::OffloadEntryInfoDeviceGlobalVar>( | |||
5079 | E.first)) { | |||
5080 | OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind Flags = | |||
5081 | static_cast<OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind>( | |||
5082 | CE->getFlags()); | |||
5083 | switch (Flags) { | |||
5084 | case OffloadEntriesInfoManager::OMPTargetGlobalVarEntryTo: { | |||
5085 | if (Config.isEmbedded() && Config.hasRequiresUnifiedSharedMemory()) | |||
5086 | continue; | |||
5087 | if (!CE->getAddress()) { | |||
5088 | ErrorFn(EMIT_MD_DECLARE_TARGET_ERROR, E.second); | |||
5089 | continue; | |||
5090 | } | |||
5091 | // The variable has no definition, so there is no need to add the entry. | |||
5092 | if (CE->getVarSize() == 0) | |||
5093 | continue; | |||
5094 | break; | |||
5095 | } | |||
5096 | case OffloadEntriesInfoManager::OMPTargetGlobalVarEntryLink: | |||
5097 | assert(((Config.isEmbedded() && !CE->getAddress()) || | |||
5098 | (!Config.isEmbedded() && CE->getAddress())) && | |||
5099 | "Declare target link address is set."); | |||
5100 | if (Config.isEmbedded()) | |||
5101 | continue; | |||
5102 | if (!CE->getAddress()) { | |||
5103 | ErrorFn(EMIT_MD_GLOBAL_VAR_LINK_ERROR, TargetRegionEntryInfo()); | |||
5104 | continue; | |||
5105 | } | |||
5106 | break; | |||
5107 | } | |||
5108 | ||||
5109 | // Hidden or internal symbols on the device are not externally visible. | |||
5110 | // We should not attempt to register them by creating an offloading | |||
5111 | // entry. | |||
5112 | if (auto *GV = dyn_cast<GlobalValue>(CE->getAddress())) | |||
5113 | if (GV->hasLocalLinkage() || GV->hasHiddenVisibility()) | |||
5114 | continue; | |||
5115 | ||||
5116 | createOffloadEntry(CE->getAddress(), CE->getAddress(), CE->getVarSize(), | |||
5117 | Flags, CE->getLinkage()); | |||
5118 | ||||
5119 | } else { | |||
5120 | llvm_unreachable("Unsupported entry kind.")::llvm::llvm_unreachable_internal("Unsupported entry kind.", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp" , 5120); | |||
5121 | } | |||
5122 | } | |||
5123 | } | |||
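// The emitted module-level metadata looks like this (placeholder values):
//
//   !omp_offload.info = !{!0, !1}
//   !0 = !{i32 0, i32 <DeviceID>, i32 <FileID>, !"<ParentName>",
//          i32 <Line>, i32 <Count>, i32 <Order>}   ; target region entry
//   !1 = !{i32 1, !"<MangledName>", i32 <Flags>, i32 <Order>} ; global variable
//
// loadOffloadInfoMetadata() below reads these fields back in the same order.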
5124 | ||||
5125 | void TargetRegionEntryInfo::getTargetRegionEntryFnName( | |||
5126 | SmallVectorImpl<char> &Name, StringRef ParentName, unsigned DeviceID, | |||
5127 | unsigned FileID, unsigned Line, unsigned Count) { | |||
5128 | raw_svector_ostream OS(Name); | |||
5129 | OS << "__omp_offloading" << llvm::format("_%x", DeviceID) | |||
5130 | << llvm::format("_%x_", FileID) << ParentName << "_l" << Line; | |||
5131 | if (Count) | |||
5132 | OS << "_" << Count; | |||
5133 | } | |||
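// Example (made-up values): ParentName "foo", DeviceID 0x1f, FileID 0x2a, and
// Line 42 with Count 0 produce
//
//   __omp_offloading_1f_2a_foo_l42
//
// and a non-zero Count appends a trailing "_<Count>".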
5134 | ||||
5135 | void OffloadEntriesInfoManager::getTargetRegionEntryFnName( | |||
5136 | SmallVectorImpl<char> &Name, const TargetRegionEntryInfo &EntryInfo) { | |||
5137 | unsigned NewCount = getTargetRegionEntryInfoCount(EntryInfo); | |||
5138 | TargetRegionEntryInfo::getTargetRegionEntryFnName( | |||
5139 | Name, EntryInfo.ParentName, EntryInfo.DeviceID, EntryInfo.FileID, | |||
5140 | EntryInfo.Line, NewCount); | |||
5141 | } | |||
5142 | ||||
5143 | /// Loads all the offload entry information from the host IR | |||
5144 | /// metadata. | |||
5145 | void OpenMPIRBuilder::loadOffloadInfoMetadata(Module &M) { | |||
5146 | // If we are in target mode, load the metadata from the host IR. This code has | |||
5147 | // to match the metadata creation in createOffloadEntriesAndInfoMetadata(). | |||
5148 | ||||
5149 | NamedMDNode *MD = M.getNamedMetadata(ompOffloadInfoName); | |||
5150 | if (!MD) | |||
5151 | return; | |||
5152 | ||||
5153 | for (MDNode *MN : MD->operands()) { | |||
5154 | auto &&GetMDInt = [MN](unsigned Idx) { | |||
5155 | auto *V = cast<ConstantAsMetadata>(MN->getOperand(Idx)); | |||
5156 | return cast<ConstantInt>(V->getValue())->getZExtValue(); | |||
5157 | }; | |||
5158 | ||||
5159 | auto &&GetMDString = [MN](unsigned Idx) { | |||
5160 | auto *V = cast<MDString>(MN->getOperand(Idx)); | |||
5161 | return V->getString(); | |||
5162 | }; | |||
5163 | ||||
5164 | switch (GetMDInt(0)) { | |||
5165 | default: | |||
5166 | llvm_unreachable("Unexpected metadata!")::llvm::llvm_unreachable_internal("Unexpected metadata!", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp" , 5166); | |||
5167 | break; | |||
5168 | case OffloadEntriesInfoManager::OffloadEntryInfo:: | |||
5169 | OffloadingEntryInfoTargetRegion: { | |||
5170 | TargetRegionEntryInfo EntryInfo(/*ParentName=*/GetMDString(3), | |||
5171 | /*DeviceID=*/GetMDInt(1), | |||
5172 | /*FileID=*/GetMDInt(2), | |||
5173 | /*Line=*/GetMDInt(4), | |||
5174 | /*Count=*/GetMDInt(5)); | |||
5175 | OffloadInfoManager.initializeTargetRegionEntryInfo(EntryInfo, | |||
5176 | /*Order=*/GetMDInt(6)); | |||
5177 | break; | |||
5178 | } | |||
5179 | case OffloadEntriesInfoManager::OffloadEntryInfo:: | |||
5180 | OffloadingEntryInfoDeviceGlobalVar: | |||
5181 | OffloadInfoManager.initializeDeviceGlobalVarEntryInfo( | |||
5182 | /*MangledName=*/GetMDString(1), | |||
5183 | static_cast<OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind>( | |||
5184 | /*Flags=*/GetMDInt(2)), | |||
5185 | /*Order=*/GetMDInt(3)); | |||
5186 | break; | |||
5187 | } | |||
5188 | } | |||
5189 | } | |||
5190 | ||||
5191 | bool OffloadEntriesInfoManager::empty() const { | |||
5192 | return OffloadEntriesTargetRegion.empty() && | |||
5193 | OffloadEntriesDeviceGlobalVar.empty(); | |||
5194 | } | |||
5195 | ||||
5196 | unsigned OffloadEntriesInfoManager::getTargetRegionEntryInfoCount( | |||
5197 | const TargetRegionEntryInfo &EntryInfo) const { | |||
5198 | auto It = OffloadEntriesTargetRegionCount.find( | |||
5199 | getTargetRegionEntryCountKey(EntryInfo)); | |||
5200 | if (It == OffloadEntriesTargetRegionCount.end()) | |||
5201 | return 0; | |||
5202 | return It->second; | |||
5203 | } | |||
5204 | ||||
5205 | void OffloadEntriesInfoManager::incrementTargetRegionEntryInfoCount( | |||
5206 | const TargetRegionEntryInfo &EntryInfo) { | |||
5207 | OffloadEntriesTargetRegionCount[getTargetRegionEntryCountKey(EntryInfo)] = | |||
5208 | EntryInfo.Count + 1; | |||
5209 | } | |||
5210 | ||||
5211 | /// Initialize target region entry. | |||
5212 | void OffloadEntriesInfoManager::initializeTargetRegionEntryInfo( | |||
5213 | const TargetRegionEntryInfo &EntryInfo, unsigned Order) { | |||
5214 | OffloadEntriesTargetRegion[EntryInfo] = | |||
5215 | OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr, | |||
5216 | OMPTargetRegionEntryTargetRegion); | |||
5217 | ++OffloadingEntriesNum; | |||
5218 | } | |||
5219 | ||||
5220 | void OffloadEntriesInfoManager::registerTargetRegionEntryInfo( | |||
5221 | TargetRegionEntryInfo EntryInfo, Constant *Addr, Constant *ID, | |||
5222 | OMPTargetRegionEntryKind Flags) { | |||
5223 | assert(EntryInfo.Count == 0 && "expected default EntryInfo"); | |||
5224 | ||||
5225 | // Update the EntryInfo with the next available count for this location. | |||
5226 | EntryInfo.Count = getTargetRegionEntryInfoCount(EntryInfo); | |||
5227 | ||||
5228 | // If we are emitting code for a target, the entry is already initialized; | |||
5229 | // it only has to be registered. | |||
5230 | if (OMPBuilder->Config.isEmbedded()) { | |||
5231 | // This could happen if the device compilation is invoked standalone. | |||
5232 | if (!hasTargetRegionEntryInfo(EntryInfo)) { | |||
5233 | return; | |||
5234 | } | |||
5235 | auto &Entry = OffloadEntriesTargetRegion[EntryInfo]; | |||
5236 | Entry.setAddress(Addr); | |||
5237 | Entry.setID(ID); | |||
5238 | Entry.setFlags(Flags); | |||
5239 | } else { | |||
5240 | if (Flags == OffloadEntriesInfoManager::OMPTargetRegionEntryTargetRegion && | |||
5241 | hasTargetRegionEntryInfo(EntryInfo, /*IgnoreAddressId*/ true)) | |||
5242 | return; | |||
5243 | assert(!hasTargetRegionEntryInfo(EntryInfo) && | |||
5244 | "Target region entry already registered!"); | |||
5245 | OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags); | |||
5246 | OffloadEntriesTargetRegion[EntryInfo] = Entry; | |||
5247 | ++OffloadingEntriesNum; | |||
5248 | } | |||
5249 | incrementTargetRegionEntryInfoCount(EntryInfo); | |||
5250 | } | |||
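// Host vs. device in short: on the host (isEmbedded() == false) this creates a
// fresh entry and bumps OffloadingEntriesNum; on the device the entry was
// already materialized from host metadata by loadOffloadInfoMetadata(), so only
// its address, ID, and flags are filled in here.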
5251 | ||||
5252 | bool OffloadEntriesInfoManager::hasTargetRegionEntryInfo( | |||
5253 | TargetRegionEntryInfo EntryInfo, bool IgnoreAddressId) const { | |||
5254 | ||||
5255 | // Update the EntryInfo with the next available count for this location. | |||
5256 | EntryInfo.Count = getTargetRegionEntryInfoCount(EntryInfo); | |||
5257 | ||||
5258 | auto It = OffloadEntriesTargetRegion.find(EntryInfo); | |||
5259 | if (It == OffloadEntriesTargetRegion.end()) { | |||
5260 | return false; | |||
5261 | } | |||
5262 | // Fail if this entry is already registered. | |||
5263 | if (!IgnoreAddressId && (It->second.getAddress() || It->second.getID())) | |||
5264 | return false; | |||
5265 | return true; | |||
5266 | } | |||
5267 | ||||
5268 | void OffloadEntriesInfoManager::actOnTargetRegionEntriesInfo( | |||
5269 | const OffloadTargetRegionEntryInfoActTy &Action) { | |||
5270 | // Scan all target region entries and perform the provided action. | |||
5271 | for (const auto &It : OffloadEntriesTargetRegion) { | |||
5272 | Action(It.first, It.second); | |||
5273 | } | |||
5274 | } | |||
5275 | ||||
5276 | void OffloadEntriesInfoManager::initializeDeviceGlobalVarEntryInfo( | |||
5277 | StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order) { | |||
5278 | OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags); | |||
5279 | ++OffloadingEntriesNum; | |||
5280 | } | |||
5281 | ||||
5282 | void OffloadEntriesInfoManager::registerDeviceGlobalVarEntryInfo( | |||
5283 | StringRef VarName, Constant *Addr, int64_t VarSize, | |||
5284 | OMPTargetGlobalVarEntryKind Flags, GlobalValue::LinkageTypes Linkage) { | |||
5285 | if (OMPBuilder->Config.isEmbedded()) { | |||
5286 | // This could happen if the device compilation is invoked standalone. | |||
5287 | if (!hasDeviceGlobalVarEntryInfo(VarName)) | |||
5288 | return; | |||
5289 | auto &Entry = OffloadEntriesDeviceGlobalVar[VarName]; | |||
5290 | if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) { | |||
5291 | if (Entry.getVarSize() == 0) { | |||
5292 | Entry.setVarSize(VarSize); | |||
5293 | Entry.setLinkage(Linkage); | |||
5294 | } | |||
5295 | return; | |||
5296 | } | |||
5297 | Entry.setVarSize(VarSize); | |||
5298 | Entry.setLinkage(Linkage); | |||
5299 | Entry.setAddress(Addr); | |||
5300 | } else { | |||
5301 | if (hasDeviceGlobalVarEntryInfo(VarName)) { | |||
5302 | auto &Entry = OffloadEntriesDeviceGlobalVar[VarName]; | |||
5303 | assert(Entry.isValid() && Entry.getFlags() == Flags && | |||
5304 | "Entry not initialized!"); | |||
5305 | if (Entry.getVarSize() == 0) { | |||
5306 | Entry.setVarSize(VarSize); | |||
5307 | Entry.setLinkage(Linkage); | |||
5308 | } | |||
5309 | return; | |||
5310 | } | |||
5311 | OffloadEntriesDeviceGlobalVar.try_emplace(VarName, OffloadingEntriesNum, | |||
5312 | Addr, VarSize, Flags, Linkage); | |||
5313 | ++OffloadingEntriesNum; | |||
5314 | } | |||
5315 | } | |||
5316 | ||||
5317 | void OffloadEntriesInfoManager::actOnDeviceGlobalVarEntriesInfo( | |||
5318 | const OffloadDeviceGlobalVarEntryInfoActTy &Action) { | |||
5319 | // Scan all device global variable entries and perform the provided action. | |||
5320 | for (const auto &E : OffloadEntriesDeviceGlobalVar) | |||
5321 | Action(E.getKey(), E.getValue()); | |||
5322 | } | |||
5323 | ||||
5324 | void CanonicalLoopInfo::collectControlBlocks( | |||
5325 | SmallVectorImpl<BasicBlock *> &BBs) { | |||
5326 | // We only count those BBs as control blocks for which we do not need to | |||
5327 | // reverse the CFG, i.e. not the loop body, which can contain arbitrary control | |||
5328 | // flow. For consistency, this also means we do not add the Body block, which | |||
5329 | // is just the entry to the body code. | |||
5330 | BBs.reserve(BBs.size() + 6); | |||
5331 | BBs.append({getPreheader(), Header, Cond, Latch, Exit, getAfter()}); | |||
5332 | } | |||
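
// Editorial note: the six blocks collected here form the fixed skeleton of a
// canonical loop. Summarizing the structural checks that assertOK() performs
// further down, the intended shape is roughly:
//
//            Preheader
//                |
//    +------> Header
//    |           |
//    |         Cond ------+
//    |           |        |
//    |         Body       |
//    |          ...       |
//    +------- Latch     Exit
//                          |
//                        After
//
// Only the Body (and whatever it branches through before reaching the Latch)
// may contain arbitrary control flow; the remaining blocks are kept trivial
// so that loop transformations can rewrite them safely.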

BasicBlock *CanonicalLoopInfo::getPreheader() const {
  assert(isValid() && "Requires a valid canonical loop");
  for (BasicBlock *Pred : predecessors(Header)) {
    if (Pred != Latch)
      return Pred;
  }
  llvm_unreachable("Missing preheader");
}

void CanonicalLoopInfo::setTripCount(Value *TripCount) {
  assert(isValid() && "Requires a valid canonical loop");

  Instruction *CmpI = &getCond()->front();
  assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
  CmpI->setOperand(1, TripCount);

#ifndef NDEBUG
  assertOK();
#endif
}
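
// Editorial sketch (not part of the original file): setTripCount() is meant
// to be called from OpenMPIRBuilder's own loop transformations, which have
// access to CanonicalLoopInfo's non-public members. Code with that access
// that wants to run only half of the iterations could do something like the
// following, with Builder and CLI assumed to be in scope:
//
//   Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
//   Value *OldTC = CLI->getTripCount();
//   Value *NewTC = Builder.CreateUDiv(
//       OldTC, ConstantInt::get(OldTC->getType(), 2), "trip.halved");
//   CLI->setTripCount(NewTC);
//
// Only the right-hand operand of the compare in the Cond block changes; the
// rest of the loop skeleton stays intact, which is why assertOK() still
// passes afterwards.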

void CanonicalLoopInfo::mapIndVar(
    llvm::function_ref<Value *(Instruction *)> Updater) {
  assert(isValid() && "Requires a valid canonical loop");

  Instruction *OldIV = getIndVar();

  // Record all uses excluding those introduced by the updater. Uses by the
  // CanonicalLoopInfo itself to keep track of the number of iterations are
  // excluded.
  SmallVector<Use *> ReplacableUses;
  for (Use &U : OldIV->uses()) {
    auto *User = dyn_cast<Instruction>(U.getUser());
    if (!User)
      continue;
    if (User->getParent() == getCond())
      continue;
    if (User->getParent() == getLatch())
      continue;
    ReplacableUses.push_back(&U);
  }

  // Run the updater that may introduce new uses
  Value *NewIV = Updater(OldIV);

  // Replace the old uses with the value returned by the updater.
  for (Use *U : ReplacableUses)
    U->set(NewIV);

#ifndef NDEBUG
  assertOK();
#endif
}
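
// Editorial sketch (not part of the original file): mapIndVar() is likewise
// intended for OpenMPIRBuilder's own transformations. A caller with that
// access, e.g. code rebasing the loop onto a new start value and step, could
// pass an updater like the one below; StartV, StepV, Builder and CLI are
// hypothetical values assumed to be in scope:
//
//   CLI->mapIndVar([&](Instruction *OldIV) -> Value * {
//     Builder.SetInsertPoint(CLI->getBody(),
//                            CLI->getBody()->getFirstInsertionPt());
//     Value *Scaled = Builder.CreateMul(OldIV, StepV, "iv.scaled");
//     return Builder.CreateAdd(Scaled, StartV, "iv.rebased");
//   });
//
// The loop over OldIV->uses() above then rewrites every body-side use to the
// returned value, while the uses in the Cond and Latch blocks that drive the
// iteration count are deliberately skipped.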

void CanonicalLoopInfo::assertOK() const {
#ifndef NDEBUG
  // No constraints if this object currently does not describe a loop.
  if (!isValid())
    return;

  BasicBlock *Preheader = getPreheader();
  BasicBlock *Body = getBody();
  BasicBlock *After = getAfter();

  // Verify standard control-flow we use for OpenMP loops.
  assert(Preheader);
  assert(isa<BranchInst>(Preheader->getTerminator()) &&
         "Preheader must terminate with unconditional branch");
  assert(Preheader->getSingleSuccessor() == Header &&
         "Preheader must jump to header");

  assert(Header);
  assert(isa<BranchInst>(Header->getTerminator()) &&
         "Header must terminate with unconditional branch");
  assert(Header->getSingleSuccessor() == Cond &&
         "Header must jump to exiting block");

  assert(Cond);
  assert(Cond->getSinglePredecessor() == Header &&
         "Exiting block only reachable from header");

  assert(isa<BranchInst>(Cond->getTerminator()) &&
         "Exiting block must terminate with conditional branch");
  assert(size(successors(Cond)) == 2 &&
         "Exiting block must have two successors");
  assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body &&
         "Exiting block's first successor jump to the body");
  assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit &&
         "Exiting block's second successor must exit the loop");

  assert(Body);
  assert(Body->getSinglePredecessor() == Cond &&
         "Body only reachable from exiting block");
  assert(!isa<PHINode>(Body->front()));

  assert(Latch);
  assert(isa<BranchInst>(Latch->getTerminator()) &&
         "Latch must terminate with unconditional branch");
  assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header");
  // TODO: To support simple redirecting of body code that ends in multiple
  // basic blocks, introduce another auxiliary basic block like preheader and
  // after.
  assert(Latch->getSinglePredecessor() != nullptr);
  assert(!isa<PHINode>(Latch->front()));

  assert(Exit);
  assert(isa<BranchInst>(Exit->getTerminator()) &&
         "Exit block must terminate with unconditional branch");