File: | build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include/llvm/IR/IRBuilder.h |
Warning: | line 187, column 10 Called C++ object pointer is null |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===// | ||||||
2 | // | ||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||
6 | // | ||||||
7 | //===----------------------------------------------------------------------===// | ||||||
8 | /// \file | ||||||
9 | /// | ||||||
10 | /// This file implements the OpenMPIRBuilder class, which is used as a | ||||||
11 | /// convenient way to create LLVM instructions for OpenMP directives. | ||||||
12 | /// | ||||||
13 | //===----------------------------------------------------------------------===// | ||||||
14 | |||||||
15 | #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" | ||||||
16 | #include "llvm/ADT/SmallSet.h" | ||||||
17 | #include "llvm/ADT/StringRef.h" | ||||||
18 | #include "llvm/Analysis/AssumptionCache.h" | ||||||
19 | #include "llvm/Analysis/CodeMetrics.h" | ||||||
20 | #include "llvm/Analysis/LoopInfo.h" | ||||||
21 | #include "llvm/Analysis/OptimizationRemarkEmitter.h" | ||||||
22 | #include "llvm/Analysis/ScalarEvolution.h" | ||||||
23 | #include "llvm/Analysis/TargetLibraryInfo.h" | ||||||
24 | #include "llvm/IR/CFG.h" | ||||||
25 | #include "llvm/IR/Constants.h" | ||||||
26 | #include "llvm/IR/DebugInfoMetadata.h" | ||||||
27 | #include "llvm/IR/GlobalVariable.h" | ||||||
28 | #include "llvm/IR/IRBuilder.h" | ||||||
29 | #include "llvm/IR/MDBuilder.h" | ||||||
30 | #include "llvm/IR/PassManager.h" | ||||||
31 | #include "llvm/IR/Value.h" | ||||||
32 | #include "llvm/MC/TargetRegistry.h" | ||||||
33 | #include "llvm/Support/CommandLine.h" | ||||||
34 | #include "llvm/Target/TargetMachine.h" | ||||||
35 | #include "llvm/Target/TargetOptions.h" | ||||||
36 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | ||||||
37 | #include "llvm/Transforms/Utils/CodeExtractor.h" | ||||||
38 | #include "llvm/Transforms/Utils/LoopPeel.h" | ||||||
39 | #include "llvm/Transforms/Utils/UnrollLoop.h" | ||||||
40 | |||||||
41 | #include <cstdint> | ||||||
42 | |||||||
#define DEBUG_TYPE "openmp-ir-builder"
44 | |||||||
45 | using namespace llvm; | ||||||
46 | using namespace omp; | ||||||
47 | |||||||
// Command-line knob: when enabled, runtime-call declarations are annotated
// with optimistic "as-if" attributes that describe how the calls behave in
// practice, enabling more aggressive optimization around them. Off by default.
static cl::opt<bool>
    OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden,
                         cl::desc("Use optimistic attributes describing "
                                  "'as-if' properties of runtime calls."),
                         cl::init(false));

// Scaling factor applied to the loop-unroll threshold. Unrolling happens
// before later simplifications shrink the code, so the threshold is inflated
// (default 1.5x) to compensate for the expected size reduction.
static cl::opt<double> UnrollThresholdFactor(
    "openmp-ir-builder-unroll-threshold-factor", cl::Hidden,
    cl::desc("Factor for the unroll threshold to account for code "
             "simplifications still taking place"),
    cl::init(1.5));
59 | |||||||
60 | #ifndef NDEBUG | ||||||
61 | /// Return whether IP1 and IP2 are ambiguous, i.e. that inserting instructions | ||||||
62 | /// at position IP1 may change the meaning of IP2 or vice-versa. This is because | ||||||
63 | /// an InsertPoint stores the instruction before something is inserted. For | ||||||
64 | /// instance, if both point to the same instruction, two IRBuilders alternating | ||||||
65 | /// creating instruction will cause the instructions to be interleaved. | ||||||
66 | static bool isConflictIP(IRBuilder<>::InsertPoint IP1, | ||||||
67 | IRBuilder<>::InsertPoint IP2) { | ||||||
68 | if (!IP1.isSet() || !IP2.isSet()) | ||||||
69 | return false; | ||||||
70 | return IP1.getBlock() == IP2.getBlock() && IP1.getPoint() == IP2.getPoint(); | ||||||
71 | } | ||||||
72 | |||||||
// Debug-only validator (only compiled !NDEBUG): returns true iff \p SchedType
// is a schedule-type bit pattern the OpenMP runtime actually supports for
// worksharing loops.
static bool isValidWorkshareLoopScheduleType(OMPScheduleType SchedType) {
  // Valid ordered/unordered and base algorithm combinations. The monotonicity
  // bits are masked out first and checked separately below.
  switch (SchedType & ~OMPScheduleType::MonotonicityMask) {
  case OMPScheduleType::UnorderedStaticChunked:
  case OMPScheduleType::UnorderedStatic:
  case OMPScheduleType::UnorderedDynamicChunked:
  case OMPScheduleType::UnorderedGuidedChunked:
  case OMPScheduleType::UnorderedRuntime:
  case OMPScheduleType::UnorderedAuto:
  case OMPScheduleType::UnorderedTrapezoidal:
  case OMPScheduleType::UnorderedGreedy:
  case OMPScheduleType::UnorderedBalanced:
  case OMPScheduleType::UnorderedGuidedIterativeChunked:
  case OMPScheduleType::UnorderedGuidedAnalyticalChunked:
  case OMPScheduleType::UnorderedSteal:
  case OMPScheduleType::UnorderedStaticBalancedChunked:
  case OMPScheduleType::UnorderedGuidedSimd:
  case OMPScheduleType::UnorderedRuntimeSimd:
  case OMPScheduleType::OrderedStaticChunked:
  case OMPScheduleType::OrderedStatic:
  case OMPScheduleType::OrderedDynamicChunked:
  case OMPScheduleType::OrderedGuidedChunked:
  case OMPScheduleType::OrderedRuntime:
  case OMPScheduleType::OrderedAuto:
  // NOTE: "Orderd" (sic) matches the enumerator spelling in OMPConstants.
  case OMPScheduleType::OrderdTrapezoidal:
  case OMPScheduleType::NomergeUnorderedStaticChunked:
  case OMPScheduleType::NomergeUnorderedStatic:
  case OMPScheduleType::NomergeUnorderedDynamicChunked:
  case OMPScheduleType::NomergeUnorderedGuidedChunked:
  case OMPScheduleType::NomergeUnorderedRuntime:
  case OMPScheduleType::NomergeUnorderedAuto:
  case OMPScheduleType::NomergeUnorderedTrapezoidal:
  case OMPScheduleType::NomergeUnorderedGreedy:
  case OMPScheduleType::NomergeUnorderedBalanced:
  case OMPScheduleType::NomergeUnorderedGuidedIterativeChunked:
  case OMPScheduleType::NomergeUnorderedGuidedAnalyticalChunked:
  case OMPScheduleType::NomergeUnorderedSteal:
  case OMPScheduleType::NomergeOrderedStaticChunked:
  case OMPScheduleType::NomergeOrderedStatic:
  case OMPScheduleType::NomergeOrderedDynamicChunked:
  case OMPScheduleType::NomergeOrderedGuidedChunked:
  case OMPScheduleType::NomergeOrderedRuntime:
  case OMPScheduleType::NomergeOrderedAuto:
  case OMPScheduleType::NomergeOrderedTrapezoidal:
    break;
  default:
    return false;
  }

  // Must not set both monotonicity modifiers at the same time.
  OMPScheduleType MonotonicityFlags =
      SchedType & OMPScheduleType::MonotonicityMask;
  if (MonotonicityFlags == OMPScheduleType::MonotonicityMask)
    return false;

  return true;
}
130 | #endif | ||||||
131 | |||||||
132 | /// Determine which scheduling algorithm to use, determined from schedule clause | ||||||
133 | /// arguments. | ||||||
134 | static OMPScheduleType | ||||||
135 | getOpenMPBaseScheduleType(llvm::omp::ScheduleKind ClauseKind, bool HasChunks, | ||||||
136 | bool HasSimdModifier) { | ||||||
137 | // Currently, the default schedule it static. | ||||||
138 | switch (ClauseKind) { | ||||||
139 | case OMP_SCHEDULE_Default: | ||||||
140 | case OMP_SCHEDULE_Static: | ||||||
141 | return HasChunks ? OMPScheduleType::BaseStaticChunked | ||||||
142 | : OMPScheduleType::BaseStatic; | ||||||
143 | case OMP_SCHEDULE_Dynamic: | ||||||
144 | return OMPScheduleType::BaseDynamicChunked; | ||||||
145 | case OMP_SCHEDULE_Guided: | ||||||
146 | return HasSimdModifier ? OMPScheduleType::BaseGuidedSimd | ||||||
147 | : OMPScheduleType::BaseGuidedChunked; | ||||||
148 | case OMP_SCHEDULE_Auto: | ||||||
149 | return llvm::omp::OMPScheduleType::BaseAuto; | ||||||
150 | case OMP_SCHEDULE_Runtime: | ||||||
151 | return HasSimdModifier ? OMPScheduleType::BaseRuntimeSimd | ||||||
152 | : OMPScheduleType::BaseRuntime; | ||||||
153 | } | ||||||
154 | llvm_unreachable("unhandled schedule clause argument")::llvm::llvm_unreachable_internal("unhandled schedule clause argument" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 154); | ||||||
155 | } | ||||||
156 | |||||||
157 | /// Adds ordering modifier flags to schedule type. | ||||||
158 | static OMPScheduleType | ||||||
159 | getOpenMPOrderingScheduleType(OMPScheduleType BaseScheduleType, | ||||||
160 | bool HasOrderedClause) { | ||||||
161 | assert((BaseScheduleType & OMPScheduleType::ModifierMask) ==(static_cast <bool> ((BaseScheduleType & OMPScheduleType ::ModifierMask) == OMPScheduleType::None && "Must not have ordering nor monotonicity flags already set" ) ? void (0) : __assert_fail ("(BaseScheduleType & OMPScheduleType::ModifierMask) == OMPScheduleType::None && \"Must not have ordering nor monotonicity flags already set\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 163, __extension__ __PRETTY_FUNCTION__)) | ||||||
162 | OMPScheduleType::None &&(static_cast <bool> ((BaseScheduleType & OMPScheduleType ::ModifierMask) == OMPScheduleType::None && "Must not have ordering nor monotonicity flags already set" ) ? void (0) : __assert_fail ("(BaseScheduleType & OMPScheduleType::ModifierMask) == OMPScheduleType::None && \"Must not have ordering nor monotonicity flags already set\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 163, __extension__ __PRETTY_FUNCTION__)) | ||||||
163 | "Must not have ordering nor monotonicity flags already set")(static_cast <bool> ((BaseScheduleType & OMPScheduleType ::ModifierMask) == OMPScheduleType::None && "Must not have ordering nor monotonicity flags already set" ) ? void (0) : __assert_fail ("(BaseScheduleType & OMPScheduleType::ModifierMask) == OMPScheduleType::None && \"Must not have ordering nor monotonicity flags already set\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 163, __extension__ __PRETTY_FUNCTION__)); | ||||||
164 | |||||||
165 | OMPScheduleType OrderingModifier = HasOrderedClause | ||||||
166 | ? OMPScheduleType::ModifierOrdered | ||||||
167 | : OMPScheduleType::ModifierUnordered; | ||||||
168 | OMPScheduleType OrderingScheduleType = BaseScheduleType | OrderingModifier; | ||||||
169 | |||||||
170 | // Unsupported combinations | ||||||
171 | if (OrderingScheduleType == | ||||||
172 | (OMPScheduleType::BaseGuidedSimd | OMPScheduleType::ModifierOrdered)) | ||||||
173 | return OMPScheduleType::OrderedGuidedChunked; | ||||||
174 | else if (OrderingScheduleType == (OMPScheduleType::BaseRuntimeSimd | | ||||||
175 | OMPScheduleType::ModifierOrdered)) | ||||||
176 | return OMPScheduleType::OrderedRuntime; | ||||||
177 | |||||||
178 | return OrderingScheduleType; | ||||||
179 | } | ||||||
180 | |||||||
181 | /// Adds monotonicity modifier flags to schedule type. | ||||||
182 | static OMPScheduleType | ||||||
183 | getOpenMPMonotonicityScheduleType(OMPScheduleType ScheduleType, | ||||||
184 | bool HasSimdModifier, bool HasMonotonic, | ||||||
185 | bool HasNonmonotonic, bool HasOrderedClause) { | ||||||
186 | assert((ScheduleType & OMPScheduleType::MonotonicityMask) ==(static_cast <bool> ((ScheduleType & OMPScheduleType ::MonotonicityMask) == OMPScheduleType::None && "Must not have monotonicity flags already set" ) ? void (0) : __assert_fail ("(ScheduleType & OMPScheduleType::MonotonicityMask) == OMPScheduleType::None && \"Must not have monotonicity flags already set\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 188, __extension__ __PRETTY_FUNCTION__)) | ||||||
187 | OMPScheduleType::None &&(static_cast <bool> ((ScheduleType & OMPScheduleType ::MonotonicityMask) == OMPScheduleType::None && "Must not have monotonicity flags already set" ) ? void (0) : __assert_fail ("(ScheduleType & OMPScheduleType::MonotonicityMask) == OMPScheduleType::None && \"Must not have monotonicity flags already set\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 188, __extension__ __PRETTY_FUNCTION__)) | ||||||
188 | "Must not have monotonicity flags already set")(static_cast <bool> ((ScheduleType & OMPScheduleType ::MonotonicityMask) == OMPScheduleType::None && "Must not have monotonicity flags already set" ) ? void (0) : __assert_fail ("(ScheduleType & OMPScheduleType::MonotonicityMask) == OMPScheduleType::None && \"Must not have monotonicity flags already set\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 188, __extension__ __PRETTY_FUNCTION__)); | ||||||
189 | assert((!HasMonotonic || !HasNonmonotonic) &&(static_cast <bool> ((!HasMonotonic || !HasNonmonotonic ) && "Monotonic and Nonmonotonic are contradicting each other" ) ? void (0) : __assert_fail ("(!HasMonotonic || !HasNonmonotonic) && \"Monotonic and Nonmonotonic are contradicting each other\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 190, __extension__ __PRETTY_FUNCTION__)) | ||||||
190 | "Monotonic and Nonmonotonic are contradicting each other")(static_cast <bool> ((!HasMonotonic || !HasNonmonotonic ) && "Monotonic and Nonmonotonic are contradicting each other" ) ? void (0) : __assert_fail ("(!HasMonotonic || !HasNonmonotonic) && \"Monotonic and Nonmonotonic are contradicting each other\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 190, __extension__ __PRETTY_FUNCTION__)); | ||||||
191 | |||||||
192 | if (HasMonotonic) { | ||||||
193 | return ScheduleType | OMPScheduleType::ModifierMonotonic; | ||||||
194 | } else if (HasNonmonotonic) { | ||||||
195 | return ScheduleType | OMPScheduleType::ModifierNonmonotonic; | ||||||
196 | } else { | ||||||
197 | // OpenMP 5.1, 2.11.4 Worksharing-Loop Construct, Description. | ||||||
198 | // If the static schedule kind is specified or if the ordered clause is | ||||||
199 | // specified, and if the nonmonotonic modifier is not specified, the | ||||||
200 | // effect is as if the monotonic modifier is specified. Otherwise, unless | ||||||
201 | // the monotonic modifier is specified, the effect is as if the | ||||||
202 | // nonmonotonic modifier is specified. | ||||||
203 | OMPScheduleType BaseScheduleType = | ||||||
204 | ScheduleType & ~OMPScheduleType::ModifierMask; | ||||||
205 | if ((BaseScheduleType == OMPScheduleType::BaseStatic) || | ||||||
206 | (BaseScheduleType == OMPScheduleType::BaseStaticChunked) || | ||||||
207 | HasOrderedClause) { | ||||||
208 | // The monotonic is used by default in openmp runtime library, so no need | ||||||
209 | // to set it. | ||||||
210 | return ScheduleType; | ||||||
211 | } else { | ||||||
212 | return ScheduleType | OMPScheduleType::ModifierNonmonotonic; | ||||||
213 | } | ||||||
214 | } | ||||||
215 | } | ||||||
216 | |||||||
217 | /// Determine the schedule type using schedule and ordering clause arguments. | ||||||
218 | static OMPScheduleType | ||||||
219 | computeOpenMPScheduleType(ScheduleKind ClauseKind, bool HasChunks, | ||||||
220 | bool HasSimdModifier, bool HasMonotonicModifier, | ||||||
221 | bool HasNonmonotonicModifier, bool HasOrderedClause) { | ||||||
222 | OMPScheduleType BaseSchedule = | ||||||
223 | getOpenMPBaseScheduleType(ClauseKind, HasChunks, HasSimdModifier); | ||||||
224 | OMPScheduleType OrderedSchedule = | ||||||
225 | getOpenMPOrderingScheduleType(BaseSchedule, HasOrderedClause); | ||||||
226 | OMPScheduleType Result = getOpenMPMonotonicityScheduleType( | ||||||
227 | OrderedSchedule, HasSimdModifier, HasMonotonicModifier, | ||||||
228 | HasNonmonotonicModifier, HasOrderedClause); | ||||||
229 | |||||||
230 | assert(isValidWorkshareLoopScheduleType(Result))(static_cast <bool> (isValidWorkshareLoopScheduleType(Result )) ? void (0) : __assert_fail ("isValidWorkshareLoopScheduleType(Result)" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 230, __extension__ __PRETTY_FUNCTION__)); | ||||||
231 | return Result; | ||||||
232 | } | ||||||
233 | |||||||
234 | /// Make \p Source branch to \p Target. | ||||||
235 | /// | ||||||
236 | /// Handles two situations: | ||||||
237 | /// * \p Source already has an unconditional branch. | ||||||
238 | /// * \p Source is a degenerate block (no terminator because the BB is | ||||||
239 | /// the current head of the IR construction). | ||||||
240 | static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) { | ||||||
241 | if (Instruction *Term = Source->getTerminator()) { | ||||||
242 | auto *Br = cast<BranchInst>(Term); | ||||||
243 | assert(!Br->isConditional() &&(static_cast <bool> (!Br->isConditional() && "BB's terminator must be an unconditional branch (or degenerate)" ) ? void (0) : __assert_fail ("!Br->isConditional() && \"BB's terminator must be an unconditional branch (or degenerate)\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 244, __extension__ __PRETTY_FUNCTION__)) | ||||||
244 | "BB's terminator must be an unconditional branch (or degenerate)")(static_cast <bool> (!Br->isConditional() && "BB's terminator must be an unconditional branch (or degenerate)" ) ? void (0) : __assert_fail ("!Br->isConditional() && \"BB's terminator must be an unconditional branch (or degenerate)\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 244, __extension__ __PRETTY_FUNCTION__)); | ||||||
245 | BasicBlock *Succ = Br->getSuccessor(0); | ||||||
246 | Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true); | ||||||
247 | Br->setSuccessor(0, Target); | ||||||
248 | return; | ||||||
249 | } | ||||||
250 | |||||||
251 | auto *NewBr = BranchInst::Create(Target, Source); | ||||||
252 | NewBr->setDebugLoc(DL); | ||||||
253 | } | ||||||
254 | |||||||
255 | /// Move the instruction after an InsertPoint to the beginning of another | ||||||
256 | /// BasicBlock. | ||||||
257 | /// | ||||||
258 | /// The instructions after \p IP are moved to the beginning of \p New which must | ||||||
259 | /// not have any PHINodes. If \p CreateBranch is true, a branch instruction to | ||||||
260 | /// \p New will be added such that there is no semantic change. Otherwise, the | ||||||
261 | /// \p IP insert block remains degenerate and it is up to the caller to insert a | ||||||
262 | /// terminator. | ||||||
263 | static void spliceBB(OpenMPIRBuilder::InsertPointTy IP, BasicBlock *New, | ||||||
264 | bool CreateBranch) { | ||||||
265 | assert(New->getFirstInsertionPt() == New->begin() &&(static_cast <bool> (New->getFirstInsertionPt() == New ->begin() && "Target BB must not have PHI nodes") ? void (0) : __assert_fail ("New->getFirstInsertionPt() == New->begin() && \"Target BB must not have PHI nodes\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 266, __extension__ __PRETTY_FUNCTION__)) | ||||||
266 | "Target BB must not have PHI nodes")(static_cast <bool> (New->getFirstInsertionPt() == New ->begin() && "Target BB must not have PHI nodes") ? void (0) : __assert_fail ("New->getFirstInsertionPt() == New->begin() && \"Target BB must not have PHI nodes\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 266, __extension__ __PRETTY_FUNCTION__)); | ||||||
267 | |||||||
268 | // Move instructions to new block. | ||||||
269 | BasicBlock *Old = IP.getBlock(); | ||||||
270 | New->getInstList().splice(New->begin(), Old->getInstList(), IP.getPoint(), | ||||||
271 | Old->end()); | ||||||
272 | |||||||
273 | if (CreateBranch) | ||||||
274 | BranchInst::Create(New, Old); | ||||||
275 | } | ||||||
276 | |||||||
277 | /// Splice a BasicBlock at an IRBuilder's current insertion point. Its new | ||||||
278 | /// insert location will stick to after the instruction before the insertion | ||||||
279 | /// point (instead of moving with the instruction the InsertPoint stores | ||||||
280 | /// internally). | ||||||
281 | static void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch) { | ||||||
282 | DebugLoc DebugLoc = Builder.getCurrentDebugLocation(); | ||||||
283 | BasicBlock *Old = Builder.GetInsertBlock(); | ||||||
284 | |||||||
285 | spliceBB(Builder.saveIP(), New, CreateBranch); | ||||||
286 | if (CreateBranch) | ||||||
287 | Builder.SetInsertPoint(Old->getTerminator()); | ||||||
288 | else | ||||||
289 | Builder.SetInsertPoint(Old); | ||||||
290 | |||||||
291 | // SetInsertPoint also updates the Builder's debug location, but we want to | ||||||
292 | // keep the one the Builder was configured to use. | ||||||
293 | Builder.SetCurrentDebugLocation(DebugLoc); | ||||||
294 | } | ||||||
295 | |||||||
296 | /// Split a BasicBlock at an InsertPoint, even if the block is degenerate | ||||||
297 | /// (missing the terminator). | ||||||
298 | /// | ||||||
299 | /// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed | ||||||
300 | /// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch | ||||||
301 | /// is true, a branch to the new successor will new created such that | ||||||
302 | /// semantically there is no change; otherwise the block of the insertion point | ||||||
303 | /// remains degenerate and it is the caller's responsibility to insert a | ||||||
304 | /// terminator. Returns the new successor block. | ||||||
305 | static BasicBlock *splitBB(OpenMPIRBuilder::InsertPointTy IP, bool CreateBranch, | ||||||
306 | llvm::Twine Name = {}) { | ||||||
307 | BasicBlock *Old = IP.getBlock(); | ||||||
308 | BasicBlock *New = BasicBlock::Create( | ||||||
309 | Old->getContext(), Name.isTriviallyEmpty() ? Old->getName() : Name, | ||||||
310 | Old->getParent(), Old->getNextNode()); | ||||||
311 | spliceBB(IP, New, CreateBranch); | ||||||
312 | New->replaceSuccessorsPhiUsesWith(Old, New); | ||||||
313 | return New; | ||||||
314 | } | ||||||
315 | |||||||
316 | /// Split a BasicBlock at \p Builder's insertion point, even if the block is | ||||||
317 | /// degenerate (missing the terminator). Its new insert location will stick to | ||||||
318 | /// after the instruction before the insertion point (instead of moving with the | ||||||
319 | /// instruction the InsertPoint stores internally). | ||||||
320 | static BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch, | ||||||
321 | llvm::Twine Name = {}) { | ||||||
322 | DebugLoc DebugLoc = Builder.getCurrentDebugLocation(); | ||||||
323 | BasicBlock *New = splitBB(Builder.saveIP(), CreateBranch, Name); | ||||||
324 | if (CreateBranch) | ||||||
325 | Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator()); | ||||||
326 | else | ||||||
327 | Builder.SetInsertPoint(Builder.GetInsertBlock()); | ||||||
328 | // SetInsertPoint also updates the Builder's debug location, but we want to | ||||||
329 | // keep the one the Builder was configured to use. | ||||||
330 | Builder.SetCurrentDebugLocation(DebugLoc); | ||||||
331 | return New; | ||||||
332 | } | ||||||
333 | |||||||
// Attach the attribute sets declared in OMPKinds.def to the declaration of
// runtime function \p FnID, merging them with whatever attributes \p Fn
// already carries. Unknown FnIDs are silently ignored (attributes optional).
void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
  LLVMContext &Ctx = Fn.getContext();

  // Get the function's current attributes.
  auto Attrs = Fn.getAttributes();
  auto FnAttrs = Attrs.getFnAttrs();
  auto RetAttrs = Attrs.getRetAttrs();
  SmallVector<AttributeSet, 4> ArgAttrs;
  for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo)
    ArgAttrs.emplace_back(Attrs.getParamAttrs(ArgNo));

  // Materialize every attribute set defined in OMPKinds.def as a local
  // AttributeSet variable, so the OMP_RTL_ATTRS expansions below can refer
  // to them by name.
#define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

  // Add attributes to the function declaration.
  switch (FnID) {
#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets)                \
  case Enum:                                                                   \
    FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet);                           \
    RetAttrs = RetAttrs.addAttributes(Ctx, RetAttrSet);                        \
    for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo)                \
      ArgAttrs[ArgNo] =                                                        \
          ArgAttrs[ArgNo].addAttributes(Ctx, ArgAttrSets[ArgNo]);              \
    Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs));    \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  default:
    // Attributes are optional.
    break;
  }
}
365 | |||||||
366 | FunctionCallee | ||||||
367 | OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) { | ||||||
368 | FunctionType *FnTy = nullptr; | ||||||
369 | Function *Fn = nullptr; | ||||||
370 | |||||||
371 | // Try to find the declation in the module first. | ||||||
372 | switch (FnID) { | ||||||
373 | #define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) \ | ||||||
374 | case Enum: \ | ||||||
375 | FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__}, \ | ||||||
376 | IsVarArg); \ | ||||||
377 | Fn = M.getFunction(Str); \ | ||||||
378 | break; | ||||||
379 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | ||||||
380 | } | ||||||
381 | |||||||
382 | if (!Fn) { | ||||||
383 | // Create a new declaration if we need one. | ||||||
384 | switch (FnID) { | ||||||
385 | #define OMP_RTL(Enum, Str, ...) \ | ||||||
386 | case Enum: \ | ||||||
387 | Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M); \ | ||||||
388 | break; | ||||||
389 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | ||||||
390 | } | ||||||
391 | |||||||
392 | // Add information if the runtime function takes a callback function | ||||||
393 | if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) { | ||||||
394 | if (!Fn->hasMetadata(LLVMContext::MD_callback)) { | ||||||
395 | LLVMContext &Ctx = Fn->getContext(); | ||||||
396 | MDBuilder MDB(Ctx); | ||||||
397 | // Annotate the callback behavior of the runtime function: | ||||||
398 | // - The callback callee is argument number 2 (microtask). | ||||||
399 | // - The first two arguments of the callback callee are unknown (-1). | ||||||
400 | // - All variadic arguments to the runtime function are passed to the | ||||||
401 | // callback callee. | ||||||
402 | Fn->addMetadata( | ||||||
403 | LLVMContext::MD_callback, | ||||||
404 | *MDNode::get(Ctx, {MDB.createCallbackEncoding( | ||||||
405 | 2, {-1, -1}, /* VarArgsArePassed */ true)})); | ||||||
406 | } | ||||||
407 | } | ||||||
408 | |||||||
409 | LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Created OpenMP runtime function " << Fn->getName() << " with type " << *Fn ->getFunctionType() << "\n"; } } while (false) | ||||||
410 | << " with type " << *Fn->getFunctionType() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Created OpenMP runtime function " << Fn->getName() << " with type " << *Fn ->getFunctionType() << "\n"; } } while (false); | ||||||
411 | addAttributes(FnID, *Fn); | ||||||
412 | |||||||
413 | } else { | ||||||
414 | LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Found OpenMP runtime function " << Fn->getName() << " with type " << *Fn ->getFunctionType() << "\n"; } } while (false) | ||||||
415 | << " with type " << *Fn->getFunctionType() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Found OpenMP runtime function " << Fn->getName() << " with type " << *Fn ->getFunctionType() << "\n"; } } while (false); | ||||||
416 | } | ||||||
417 | |||||||
418 | assert(Fn && "Failed to create OpenMP runtime function")(static_cast <bool> (Fn && "Failed to create OpenMP runtime function" ) ? void (0) : __assert_fail ("Fn && \"Failed to create OpenMP runtime function\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 418, __extension__ __PRETTY_FUNCTION__)); | ||||||
419 | |||||||
420 | // Cast the function to the expected type if necessary | ||||||
421 | Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo()); | ||||||
422 | return {FnTy, C}; | ||||||
423 | } | ||||||
424 | |||||||
425 | Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) { | ||||||
426 | FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID); | ||||||
427 | auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee()); | ||||||
428 | assert(Fn && "Failed to create OpenMP runtime function pointer")(static_cast <bool> (Fn && "Failed to create OpenMP runtime function pointer" ) ? void (0) : __assert_fail ("Fn && \"Failed to create OpenMP runtime function pointer\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 428, __extension__ __PRETTY_FUNCTION__)); | ||||||
429 | return Fn; | ||||||
430 | } | ||||||
431 | |||||||
// One-time setup: populate the cached LLVM types for module M.
void OpenMPIRBuilder::initialize() { initializeTypes(M); }
433 | |||||||
434 | void OpenMPIRBuilder::finalize(Function *Fn) { | ||||||
435 | SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet; | ||||||
436 | SmallVector<BasicBlock *, 32> Blocks; | ||||||
437 | SmallVector<OutlineInfo, 16> DeferredOutlines; | ||||||
438 | for (OutlineInfo &OI : OutlineInfos) { | ||||||
439 | // Skip functions that have not finalized yet; may happen with nested | ||||||
440 | // function generation. | ||||||
441 | if (Fn && OI.getFunction() != Fn) { | ||||||
442 | DeferredOutlines.push_back(OI); | ||||||
443 | continue; | ||||||
444 | } | ||||||
445 | |||||||
446 | ParallelRegionBlockSet.clear(); | ||||||
447 | Blocks.clear(); | ||||||
448 | OI.collectBlocks(ParallelRegionBlockSet, Blocks); | ||||||
449 | |||||||
450 | Function *OuterFn = OI.getFunction(); | ||||||
451 | CodeExtractorAnalysisCache CEAC(*OuterFn); | ||||||
452 | CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr, | ||||||
453 | /* AggregateArgs */ true, | ||||||
454 | /* BlockFrequencyInfo */ nullptr, | ||||||
455 | /* BranchProbabilityInfo */ nullptr, | ||||||
456 | /* AssumptionCache */ nullptr, | ||||||
457 | /* AllowVarArgs */ true, | ||||||
458 | /* AllowAlloca */ true, | ||||||
459 | /* AllocaBlock*/ OI.OuterAllocaBB, | ||||||
460 | /* Suffix */ ".omp_par"); | ||||||
461 | |||||||
462 | LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Before outlining: " << *OuterFn << "\n"; } } while (false); | ||||||
463 | LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Entry " << OI .EntryBB->getName() << " Exit: " << OI.ExitBB-> getName() << "\n"; } } while (false) | ||||||
464 | << " Exit: " << OI.ExitBB->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Entry " << OI .EntryBB->getName() << " Exit: " << OI.ExitBB-> getName() << "\n"; } } while (false); | ||||||
465 | assert(Extractor.isEligible() &&(static_cast <bool> (Extractor.isEligible() && "Expected OpenMP outlining to be possible!" ) ? void (0) : __assert_fail ("Extractor.isEligible() && \"Expected OpenMP outlining to be possible!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 466, __extension__ __PRETTY_FUNCTION__)) | ||||||
466 | "Expected OpenMP outlining to be possible!")(static_cast <bool> (Extractor.isEligible() && "Expected OpenMP outlining to be possible!" ) ? void (0) : __assert_fail ("Extractor.isEligible() && \"Expected OpenMP outlining to be possible!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 466, __extension__ __PRETTY_FUNCTION__)); | ||||||
467 | |||||||
468 | for (auto *V : OI.ExcludeArgsFromAggregate) | ||||||
469 | Extractor.excludeArgFromAggregate(V); | ||||||
470 | |||||||
471 | Function *OutlinedFn = Extractor.extractCodeRegion(CEAC); | ||||||
472 | |||||||
473 | LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "After outlining: " << *OuterFn << "\n"; } } while (false); | ||||||
474 | LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << " Outlined function: " << *OutlinedFn << "\n"; } } while (false); | ||||||
475 | assert(OutlinedFn->getReturnType()->isVoidTy() &&(static_cast <bool> (OutlinedFn->getReturnType()-> isVoidTy() && "OpenMP outlined functions should not return a value!" ) ? void (0) : __assert_fail ("OutlinedFn->getReturnType()->isVoidTy() && \"OpenMP outlined functions should not return a value!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 476, __extension__ __PRETTY_FUNCTION__)) | ||||||
476 | "OpenMP outlined functions should not return a value!")(static_cast <bool> (OutlinedFn->getReturnType()-> isVoidTy() && "OpenMP outlined functions should not return a value!" ) ? void (0) : __assert_fail ("OutlinedFn->getReturnType()->isVoidTy() && \"OpenMP outlined functions should not return a value!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 476, __extension__ __PRETTY_FUNCTION__)); | ||||||
477 | |||||||
478 | // For compability with the clang CG we move the outlined function after the | ||||||
479 | // one with the parallel region. | ||||||
480 | OutlinedFn->removeFromParent(); | ||||||
481 | M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn); | ||||||
482 | |||||||
483 | // Remove the artificial entry introduced by the extractor right away, we | ||||||
484 | // made our own entry block after all. | ||||||
485 | { | ||||||
486 | BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock(); | ||||||
487 | assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB)(static_cast <bool> (ArtificialEntry.getUniqueSuccessor () == OI.EntryBB) ? void (0) : __assert_fail ("ArtificialEntry.getUniqueSuccessor() == OI.EntryBB" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 487, __extension__ __PRETTY_FUNCTION__)); | ||||||
488 | assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry)(static_cast <bool> (OI.EntryBB->getUniquePredecessor () == &ArtificialEntry) ? void (0) : __assert_fail ("OI.EntryBB->getUniquePredecessor() == &ArtificialEntry" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 488, __extension__ __PRETTY_FUNCTION__)); | ||||||
489 | // Move instructions from the to-be-deleted ArtificialEntry to the entry | ||||||
490 | // basic block of the parallel region. CodeExtractor generates | ||||||
491 | // instructions to unwrap the aggregate argument and may sink | ||||||
492 | // allocas/bitcasts for values that are solely used in the outlined region | ||||||
493 | // and do not escape. | ||||||
494 | assert(!ArtificialEntry.empty() &&(static_cast <bool> (!ArtificialEntry.empty() && "Expected instructions to add in the outlined region entry") ? void (0) : __assert_fail ("!ArtificialEntry.empty() && \"Expected instructions to add in the outlined region entry\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 495, __extension__ __PRETTY_FUNCTION__)) | ||||||
495 | "Expected instructions to add in the outlined region entry")(static_cast <bool> (!ArtificialEntry.empty() && "Expected instructions to add in the outlined region entry") ? void (0) : __assert_fail ("!ArtificialEntry.empty() && \"Expected instructions to add in the outlined region entry\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 495, __extension__ __PRETTY_FUNCTION__)); | ||||||
496 | for (BasicBlock::reverse_iterator It = ArtificialEntry.rbegin(), | ||||||
497 | End = ArtificialEntry.rend(); | ||||||
498 | It != End;) { | ||||||
499 | Instruction &I = *It; | ||||||
500 | It++; | ||||||
501 | |||||||
502 | if (I.isTerminator()) | ||||||
503 | continue; | ||||||
504 | |||||||
505 | I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt()); | ||||||
506 | } | ||||||
507 | |||||||
508 | OI.EntryBB->moveBefore(&ArtificialEntry); | ||||||
509 | ArtificialEntry.eraseFromParent(); | ||||||
510 | } | ||||||
511 | assert(&OutlinedFn->getEntryBlock() == OI.EntryBB)(static_cast <bool> (&OutlinedFn->getEntryBlock( ) == OI.EntryBB) ? void (0) : __assert_fail ("&OutlinedFn->getEntryBlock() == OI.EntryBB" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 511, __extension__ __PRETTY_FUNCTION__)); | ||||||
512 | assert(OutlinedFn && OutlinedFn->getNumUses() == 1)(static_cast <bool> (OutlinedFn && OutlinedFn-> getNumUses() == 1) ? void (0) : __assert_fail ("OutlinedFn && OutlinedFn->getNumUses() == 1" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 512, __extension__ __PRETTY_FUNCTION__)); | ||||||
513 | |||||||
514 | // Run a user callback, e.g. to add attributes. | ||||||
515 | if (OI.PostOutlineCB) | ||||||
516 | OI.PostOutlineCB(*OutlinedFn); | ||||||
517 | } | ||||||
518 | |||||||
519 | // Remove work items that have been completed. | ||||||
520 | OutlineInfos = std::move(DeferredOutlines); | ||||||
521 | } | ||||||
522 | |||||||
523 | OpenMPIRBuilder::~OpenMPIRBuilder() { | ||||||
524 | assert(OutlineInfos.empty() && "There must be no outstanding outlinings")(static_cast <bool> (OutlineInfos.empty() && "There must be no outstanding outlinings" ) ? void (0) : __assert_fail ("OutlineInfos.empty() && \"There must be no outstanding outlinings\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 524, __extension__ __PRETTY_FUNCTION__)); | ||||||
525 | } | ||||||
526 | |||||||
527 | GlobalValue *OpenMPIRBuilder::createGlobalFlag(unsigned Value, StringRef Name) { | ||||||
528 | IntegerType *I32Ty = Type::getInt32Ty(M.getContext()); | ||||||
529 | auto *GV = | ||||||
530 | new GlobalVariable(M, I32Ty, | ||||||
531 | /* isConstant = */ true, GlobalValue::WeakODRLinkage, | ||||||
532 | ConstantInt::get(I32Ty, Value), Name); | ||||||
533 | GV->setVisibility(GlobalValue::HiddenVisibility); | ||||||
534 | |||||||
535 | return GV; | ||||||
536 | } | ||||||
537 | |||||||
538 | Constant *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr, | ||||||
539 | uint32_t SrcLocStrSize, | ||||||
540 | IdentFlag LocFlags, | ||||||
541 | unsigned Reserve2Flags) { | ||||||
542 | // Enable "C-mode". | ||||||
543 | LocFlags |= OMP_IDENT_FLAG_KMPC; | ||||||
544 | |||||||
545 | Constant *&Ident = | ||||||
546 | IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}]; | ||||||
547 | if (!Ident) { | ||||||
548 | Constant *I32Null = ConstantInt::getNullValue(Int32); | ||||||
549 | Constant *IdentData[] = {I32Null, | ||||||
550 | ConstantInt::get(Int32, uint32_t(LocFlags)), | ||||||
551 | ConstantInt::get(Int32, Reserve2Flags), | ||||||
552 | ConstantInt::get(Int32, SrcLocStrSize), SrcLocStr}; | ||||||
553 | Constant *Initializer = | ||||||
554 | ConstantStruct::get(OpenMPIRBuilder::Ident, IdentData); | ||||||
555 | |||||||
556 | // Look for existing encoding of the location + flags, not needed but | ||||||
557 | // minimizes the difference to the existing solution while we transition. | ||||||
558 | for (GlobalVariable &GV : M.getGlobalList()) | ||||||
559 | if (GV.getValueType() == OpenMPIRBuilder::Ident && GV.hasInitializer()) | ||||||
560 | if (GV.getInitializer() == Initializer) | ||||||
561 | Ident = &GV; | ||||||
562 | |||||||
563 | if (!Ident) { | ||||||
564 | auto *GV = new GlobalVariable( | ||||||
565 | M, OpenMPIRBuilder::Ident, | ||||||
566 | /* isConstant = */ true, GlobalValue::PrivateLinkage, Initializer, "", | ||||||
567 | nullptr, GlobalValue::NotThreadLocal, | ||||||
568 | M.getDataLayout().getDefaultGlobalsAddressSpace()); | ||||||
569 | GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); | ||||||
570 | GV->setAlignment(Align(8)); | ||||||
571 | Ident = GV; | ||||||
572 | } | ||||||
573 | } | ||||||
574 | |||||||
575 | return ConstantExpr::getPointerBitCastOrAddrSpaceCast(Ident, IdentPtr); | ||||||
576 | } | ||||||
577 | |||||||
578 | Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr, | ||||||
579 | uint32_t &SrcLocStrSize) { | ||||||
580 | SrcLocStrSize = LocStr.size(); | ||||||
581 | Constant *&SrcLocStr = SrcLocStrMap[LocStr]; | ||||||
582 | if (!SrcLocStr) { | ||||||
583 | Constant *Initializer = | ||||||
584 | ConstantDataArray::getString(M.getContext(), LocStr); | ||||||
585 | |||||||
586 | // Look for existing encoding of the location, not needed but minimizes the | ||||||
587 | // difference to the existing solution while we transition. | ||||||
588 | for (GlobalVariable &GV : M.getGlobalList()) | ||||||
589 | if (GV.isConstant() && GV.hasInitializer() && | ||||||
590 | GV.getInitializer() == Initializer) | ||||||
591 | return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr); | ||||||
592 | |||||||
593 | SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "", | ||||||
594 | /* AddressSpace */ 0, &M); | ||||||
595 | } | ||||||
596 | return SrcLocStr; | ||||||
597 | } | ||||||
598 | |||||||
599 | Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName, | ||||||
600 | StringRef FileName, | ||||||
601 | unsigned Line, unsigned Column, | ||||||
602 | uint32_t &SrcLocStrSize) { | ||||||
603 | SmallString<128> Buffer; | ||||||
604 | Buffer.push_back(';'); | ||||||
605 | Buffer.append(FileName); | ||||||
606 | Buffer.push_back(';'); | ||||||
607 | Buffer.append(FunctionName); | ||||||
608 | Buffer.push_back(';'); | ||||||
609 | Buffer.append(std::to_string(Line)); | ||||||
610 | Buffer.push_back(';'); | ||||||
611 | Buffer.append(std::to_string(Column)); | ||||||
612 | Buffer.push_back(';'); | ||||||
613 | Buffer.push_back(';'); | ||||||
614 | return getOrCreateSrcLocStr(Buffer.str(), SrcLocStrSize); | ||||||
615 | } | ||||||
616 | |||||||
617 | Constant * | ||||||
618 | OpenMPIRBuilder::getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize) { | ||||||
619 | StringRef UnknownLoc = ";unknown;unknown;0;0;;"; | ||||||
620 | return getOrCreateSrcLocStr(UnknownLoc, SrcLocStrSize); | ||||||
621 | } | ||||||
622 | |||||||
623 | Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL, | ||||||
624 | uint32_t &SrcLocStrSize, | ||||||
625 | Function *F) { | ||||||
626 | DILocation *DIL = DL.get(); | ||||||
627 | if (!DIL) | ||||||
628 | return getOrCreateDefaultSrcLocStr(SrcLocStrSize); | ||||||
629 | StringRef FileName = M.getName(); | ||||||
630 | if (DIFile *DIF = DIL->getFile()) | ||||||
631 | if (Optional<StringRef> Source = DIF->getSource()) | ||||||
632 | FileName = *Source; | ||||||
633 | StringRef Function = DIL->getScope()->getSubprogram()->getName(); | ||||||
634 | if (Function.empty() && F) | ||||||
635 | Function = F->getName(); | ||||||
636 | return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(), | ||||||
637 | DIL->getColumn(), SrcLocStrSize); | ||||||
638 | } | ||||||
639 | |||||||
640 | Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc, | ||||||
641 | uint32_t &SrcLocStrSize) { | ||||||
642 | return getOrCreateSrcLocStr(Loc.DL, SrcLocStrSize, | ||||||
643 | Loc.IP.getBlock()->getParent()); | ||||||
644 | } | ||||||
645 | |||||||
646 | Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) { | ||||||
647 | return Builder.CreateCall( | ||||||
648 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident, | ||||||
649 | "omp_global_thread_num"); | ||||||
650 | } | ||||||
651 | |||||||
652 | OpenMPIRBuilder::InsertPointTy | ||||||
653 | OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK, | ||||||
654 | bool ForceSimpleCall, bool CheckCancelFlag) { | ||||||
655 | if (!updateToLocation(Loc)) | ||||||
656 | return Loc.IP; | ||||||
657 | return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag); | ||||||
658 | } | ||||||
659 | |||||||
660 | OpenMPIRBuilder::InsertPointTy | ||||||
661 | OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind, | ||||||
662 | bool ForceSimpleCall, bool CheckCancelFlag) { | ||||||
663 | // Build call __kmpc_cancel_barrier(loc, thread_id) or | ||||||
664 | // __kmpc_barrier(loc, thread_id); | ||||||
665 | |||||||
666 | IdentFlag BarrierLocFlags; | ||||||
667 | switch (Kind) { | ||||||
668 | case OMPD_for: | ||||||
669 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR; | ||||||
670 | break; | ||||||
671 | case OMPD_sections: | ||||||
672 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS; | ||||||
673 | break; | ||||||
674 | case OMPD_single: | ||||||
675 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE; | ||||||
676 | break; | ||||||
677 | case OMPD_barrier: | ||||||
678 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL; | ||||||
679 | break; | ||||||
680 | default: | ||||||
681 | BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL; | ||||||
682 | break; | ||||||
683 | } | ||||||
684 | |||||||
685 | uint32_t SrcLocStrSize; | ||||||
686 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
687 | Value *Args[] = { | ||||||
688 | getOrCreateIdent(SrcLocStr, SrcLocStrSize, BarrierLocFlags), | ||||||
689 | getOrCreateThreadID(getOrCreateIdent(SrcLocStr, SrcLocStrSize))}; | ||||||
690 | |||||||
691 | // If we are in a cancellable parallel region, barriers are cancellation | ||||||
692 | // points. | ||||||
693 | // TODO: Check why we would force simple calls or to ignore the cancel flag. | ||||||
694 | bool UseCancelBarrier = | ||||||
695 | !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel); | ||||||
696 | |||||||
697 | Value *Result = | ||||||
698 | Builder.CreateCall(getOrCreateRuntimeFunctionPtr( | ||||||
699 | UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier | ||||||
700 | : OMPRTL___kmpc_barrier), | ||||||
701 | Args); | ||||||
702 | |||||||
703 | if (UseCancelBarrier && CheckCancelFlag) | ||||||
704 | emitCancelationCheckImpl(Result, OMPD_parallel); | ||||||
705 | |||||||
706 | return Builder.saveIP(); | ||||||
707 | } | ||||||
708 | |||||||
709 | OpenMPIRBuilder::InsertPointTy | ||||||
710 | OpenMPIRBuilder::createCancel(const LocationDescription &Loc, | ||||||
711 | Value *IfCondition, | ||||||
712 | omp::Directive CanceledDirective) { | ||||||
713 | if (!updateToLocation(Loc)) | ||||||
714 | return Loc.IP; | ||||||
715 | |||||||
716 | // LLVM utilities like blocks with terminators. | ||||||
717 | auto *UI = Builder.CreateUnreachable(); | ||||||
718 | |||||||
719 | Instruction *ThenTI = UI, *ElseTI = nullptr; | ||||||
720 | if (IfCondition) | ||||||
721 | SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI); | ||||||
722 | Builder.SetInsertPoint(ThenTI); | ||||||
723 | |||||||
724 | Value *CancelKind = nullptr; | ||||||
725 | switch (CanceledDirective) { | ||||||
726 | #define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \ | ||||||
727 | case DirectiveEnum: \ | ||||||
728 | CancelKind = Builder.getInt32(Value); \ | ||||||
729 | break; | ||||||
730 | #include "llvm/Frontend/OpenMP/OMPKinds.def" | ||||||
731 | default: | ||||||
732 | llvm_unreachable("Unknown cancel kind!")::llvm::llvm_unreachable_internal("Unknown cancel kind!", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp" , 732); | ||||||
733 | } | ||||||
734 | |||||||
735 | uint32_t SrcLocStrSize; | ||||||
736 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
737 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
738 | Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind}; | ||||||
739 | Value *Result = Builder.CreateCall( | ||||||
740 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args); | ||||||
741 | auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) { | ||||||
742 | if (CanceledDirective == OMPD_parallel) { | ||||||
743 | IRBuilder<>::InsertPointGuard IPG(Builder); | ||||||
744 | Builder.restoreIP(IP); | ||||||
745 | createBarrier(LocationDescription(Builder.saveIP(), Loc.DL), | ||||||
746 | omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false, | ||||||
747 | /* CheckCancelFlag */ false); | ||||||
748 | } | ||||||
749 | }; | ||||||
750 | |||||||
751 | // The actual cancel logic is shared with others, e.g., cancel_barriers. | ||||||
752 | emitCancelationCheckImpl(Result, CanceledDirective, ExitCB); | ||||||
753 | |||||||
754 | // Update the insertion point and remove the terminator we introduced. | ||||||
755 | Builder.SetInsertPoint(UI->getParent()); | ||||||
756 | UI->eraseFromParent(); | ||||||
757 | |||||||
758 | return Builder.saveIP(); | ||||||
759 | } | ||||||
760 | |||||||
761 | void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag, | ||||||
762 | omp::Directive CanceledDirective, | ||||||
763 | FinalizeCallbackTy ExitCB) { | ||||||
764 | assert(isLastFinalizationInfoCancellable(CanceledDirective) &&(static_cast <bool> (isLastFinalizationInfoCancellable( CanceledDirective) && "Unexpected cancellation!") ? void (0) : __assert_fail ("isLastFinalizationInfoCancellable(CanceledDirective) && \"Unexpected cancellation!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 765, __extension__ __PRETTY_FUNCTION__)) | ||||||
765 | "Unexpected cancellation!")(static_cast <bool> (isLastFinalizationInfoCancellable( CanceledDirective) && "Unexpected cancellation!") ? void (0) : __assert_fail ("isLastFinalizationInfoCancellable(CanceledDirective) && \"Unexpected cancellation!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 765, __extension__ __PRETTY_FUNCTION__)); | ||||||
766 | |||||||
767 | // For a cancel barrier we create two new blocks. | ||||||
768 | BasicBlock *BB = Builder.GetInsertBlock(); | ||||||
769 | BasicBlock *NonCancellationBlock; | ||||||
770 | if (Builder.GetInsertPoint() == BB->end()) { | ||||||
771 | // TODO: This branch will not be needed once we moved to the | ||||||
772 | // OpenMPIRBuilder codegen completely. | ||||||
773 | NonCancellationBlock = BasicBlock::Create( | ||||||
774 | BB->getContext(), BB->getName() + ".cont", BB->getParent()); | ||||||
775 | } else { | ||||||
776 | NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint()); | ||||||
777 | BB->getTerminator()->eraseFromParent(); | ||||||
778 | Builder.SetInsertPoint(BB); | ||||||
779 | } | ||||||
780 | BasicBlock *CancellationBlock = BasicBlock::Create( | ||||||
781 | BB->getContext(), BB->getName() + ".cncl", BB->getParent()); | ||||||
782 | |||||||
783 | // Jump to them based on the return value. | ||||||
784 | Value *Cmp = Builder.CreateIsNull(CancelFlag); | ||||||
785 | Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock, | ||||||
786 | /* TODO weight */ nullptr, nullptr); | ||||||
787 | |||||||
788 | // From the cancellation block we finalize all variables and go to the | ||||||
789 | // post finalization block that is known to the FiniCB callback. | ||||||
790 | Builder.SetInsertPoint(CancellationBlock); | ||||||
791 | if (ExitCB) | ||||||
792 | ExitCB(Builder.saveIP()); | ||||||
793 | auto &FI = FinalizationStack.back(); | ||||||
794 | FI.FiniCB(Builder.saveIP()); | ||||||
795 | |||||||
796 | // The continuation block is where code generation continues. | ||||||
797 | Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin()); | ||||||
798 | } | ||||||
799 | |||||||
800 | IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel( | ||||||
801 | const LocationDescription &Loc, InsertPointTy OuterAllocaIP, | ||||||
802 | BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, | ||||||
803 | FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, | ||||||
804 | omp::ProcBindKind ProcBind, bool IsCancellable) { | ||||||
805 | assert(!isConflictIP(Loc.IP, OuterAllocaIP) && "IPs must not be ambiguous")(static_cast <bool> (!isConflictIP(Loc.IP, OuterAllocaIP ) && "IPs must not be ambiguous") ? void (0) : __assert_fail ("!isConflictIP(Loc.IP, OuterAllocaIP) && \"IPs must not be ambiguous\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 805, __extension__ __PRETTY_FUNCTION__)); | ||||||
806 | |||||||
807 | if (!updateToLocation(Loc)) | ||||||
808 | return Loc.IP; | ||||||
809 | |||||||
810 | uint32_t SrcLocStrSize; | ||||||
811 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
812 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
813 | Value *ThreadID = getOrCreateThreadID(Ident); | ||||||
814 | |||||||
815 | if (NumThreads) { | ||||||
816 | // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads) | ||||||
817 | Value *Args[] = { | ||||||
818 | Ident, ThreadID, | ||||||
819 | Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)}; | ||||||
820 | Builder.CreateCall( | ||||||
821 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args); | ||||||
822 | } | ||||||
823 | |||||||
824 | if (ProcBind != OMP_PROC_BIND_default) { | ||||||
825 | // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind) | ||||||
826 | Value *Args[] = { | ||||||
827 | Ident, ThreadID, | ||||||
828 | ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)}; | ||||||
829 | Builder.CreateCall( | ||||||
830 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args); | ||||||
831 | } | ||||||
832 | |||||||
833 | BasicBlock *InsertBB = Builder.GetInsertBlock(); | ||||||
834 | Function *OuterFn = InsertBB->getParent(); | ||||||
835 | |||||||
836 | // Save the outer alloca block because the insertion iterator may get | ||||||
837 | // invalidated and we still need this later. | ||||||
838 | BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock(); | ||||||
839 | |||||||
840 | // Vector to remember instructions we used only during the modeling but which | ||||||
841 | // we want to delete at the end. | ||||||
842 | SmallVector<Instruction *, 4> ToBeDeleted; | ||||||
843 | |||||||
844 | // Change the location to the outer alloca insertion point to create and | ||||||
845 | // initialize the allocas we pass into the parallel region. | ||||||
846 | Builder.restoreIP(OuterAllocaIP); | ||||||
847 | AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr"); | ||||||
848 | AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr"); | ||||||
849 | |||||||
850 | // If there is an if condition we actually use the TIDAddr and ZeroAddr in the | ||||||
851 | // program, otherwise we only need them for modeling purposes to get the | ||||||
852 | // associated arguments in the outlined function. In the former case, | ||||||
853 | // initialize the allocas properly, in the latter case, delete them later. | ||||||
854 | if (IfCondition) { | ||||||
855 | Builder.CreateStore(Constant::getNullValue(Int32), TIDAddr); | ||||||
856 | Builder.CreateStore(Constant::getNullValue(Int32), ZeroAddr); | ||||||
857 | } else { | ||||||
858 | ToBeDeleted.push_back(TIDAddr); | ||||||
859 | ToBeDeleted.push_back(ZeroAddr); | ||||||
860 | } | ||||||
861 | |||||||
862 | // Create an artificial insertion point that will also ensure the blocks we | ||||||
863 | // are about to split are not degenerated. | ||||||
864 | auto *UI = new UnreachableInst(Builder.getContext(), InsertBB); | ||||||
865 | |||||||
866 | Instruction *ThenTI = UI, *ElseTI = nullptr; | ||||||
867 | if (IfCondition) | ||||||
868 | SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI); | ||||||
869 | |||||||
870 | BasicBlock *ThenBB = ThenTI->getParent(); | ||||||
871 | BasicBlock *PRegEntryBB = ThenBB->splitBasicBlock(ThenTI, "omp.par.entry"); | ||||||
872 | BasicBlock *PRegBodyBB = | ||||||
873 | PRegEntryBB->splitBasicBlock(ThenTI, "omp.par.region"); | ||||||
874 | BasicBlock *PRegPreFiniBB = | ||||||
875 | PRegBodyBB->splitBasicBlock(ThenTI, "omp.par.pre_finalize"); | ||||||
876 | BasicBlock *PRegExitBB = | ||||||
877 | PRegPreFiniBB->splitBasicBlock(ThenTI, "omp.par.exit"); | ||||||
878 | |||||||
879 | auto FiniCBWrapper = [&](InsertPointTy IP) { | ||||||
880 | // Hide "open-ended" blocks from the given FiniCB by setting the right jump | ||||||
881 | // target to the region exit block. | ||||||
882 | if (IP.getBlock()->end() == IP.getPoint()) { | ||||||
883 | IRBuilder<>::InsertPointGuard IPG(Builder); | ||||||
884 | Builder.restoreIP(IP); | ||||||
885 | Instruction *I = Builder.CreateBr(PRegExitBB); | ||||||
886 | IP = InsertPointTy(I->getParent(), I->getIterator()); | ||||||
887 | } | ||||||
888 | assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 &&(static_cast <bool> (IP.getBlock()->getTerminator()-> getNumSuccessors() == 1 && IP.getBlock()->getTerminator ()->getSuccessor(0) == PRegExitBB && "Unexpected insertion point for finalization call!" ) ? void (0) : __assert_fail ("IP.getBlock()->getTerminator()->getNumSuccessors() == 1 && IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB && \"Unexpected insertion point for finalization call!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 890, __extension__ __PRETTY_FUNCTION__)) | ||||||
889 | IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB &&(static_cast <bool> (IP.getBlock()->getTerminator()-> getNumSuccessors() == 1 && IP.getBlock()->getTerminator ()->getSuccessor(0) == PRegExitBB && "Unexpected insertion point for finalization call!" ) ? void (0) : __assert_fail ("IP.getBlock()->getTerminator()->getNumSuccessors() == 1 && IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB && \"Unexpected insertion point for finalization call!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 890, __extension__ __PRETTY_FUNCTION__)) | ||||||
890 | "Unexpected insertion point for finalization call!")(static_cast <bool> (IP.getBlock()->getTerminator()-> getNumSuccessors() == 1 && IP.getBlock()->getTerminator ()->getSuccessor(0) == PRegExitBB && "Unexpected insertion point for finalization call!" ) ? void (0) : __assert_fail ("IP.getBlock()->getTerminator()->getNumSuccessors() == 1 && IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB && \"Unexpected insertion point for finalization call!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 890, __extension__ __PRETTY_FUNCTION__)); | ||||||
891 | return FiniCB(IP); | ||||||
892 | }; | ||||||
893 | |||||||
894 | FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable}); | ||||||
895 | |||||||
896 | // Generate the privatization allocas in the block that will become the entry | ||||||
897 | // of the outlined function. | ||||||
898 | Builder.SetInsertPoint(PRegEntryBB->getTerminator()); | ||||||
899 | InsertPointTy InnerAllocaIP = Builder.saveIP(); | ||||||
900 | |||||||
901 | AllocaInst *PrivTIDAddr = | ||||||
902 | Builder.CreateAlloca(Int32, nullptr, "tid.addr.local"); | ||||||
903 | Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid"); | ||||||
904 | |||||||
905 | // Add some fake uses for OpenMP provided arguments. | ||||||
906 | ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use")); | ||||||
907 | Instruction *ZeroAddrUse = | ||||||
908 | Builder.CreateLoad(Int32, ZeroAddr, "zero.addr.use"); | ||||||
909 | ToBeDeleted.push_back(ZeroAddrUse); | ||||||
910 | |||||||
911 | // ThenBB | ||||||
912 | // | | ||||||
913 | // V | ||||||
914 | // PRegionEntryBB <- Privatization allocas are placed here. | ||||||
915 | // | | ||||||
916 | // V | ||||||
917 | // PRegionBodyBB <- BodeGen is invoked here. | ||||||
918 | // | | ||||||
919 | // V | ||||||
920 | // PRegPreFiniBB <- The block we will start finalization from. | ||||||
921 | // | | ||||||
922 | // V | ||||||
923 | // PRegionExitBB <- A common exit to simplify block collection. | ||||||
924 | // | ||||||
925 | |||||||
926 | LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Before body codegen: " << *OuterFn << "\n"; } } while (false); | ||||||
927 | |||||||
928 | // Let the caller create the body. | ||||||
929 | assert(BodyGenCB && "Expected body generation callback!")(static_cast <bool> (BodyGenCB && "Expected body generation callback!" ) ? void (0) : __assert_fail ("BodyGenCB && \"Expected body generation callback!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 929, __extension__ __PRETTY_FUNCTION__)); | ||||||
930 | InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin()); | ||||||
931 | BodyGenCB(InnerAllocaIP, CodeGenIP, *PRegPreFiniBB); | ||||||
932 | |||||||
933 | LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "After body codegen: " << *OuterFn << "\n"; } } while (false); | ||||||
934 | |||||||
935 | FunctionCallee RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call); | ||||||
936 | if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) { | ||||||
937 | if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) { | ||||||
938 | llvm::LLVMContext &Ctx = F->getContext(); | ||||||
939 | MDBuilder MDB(Ctx); | ||||||
940 | // Annotate the callback behavior of the __kmpc_fork_call: | ||||||
941 | // - The callback callee is argument number 2 (microtask). | ||||||
942 | // - The first two arguments of the callback callee are unknown (-1). | ||||||
943 | // - All variadic arguments to the __kmpc_fork_call are passed to the | ||||||
944 | // callback callee. | ||||||
945 | F->addMetadata( | ||||||
946 | llvm::LLVMContext::MD_callback, | ||||||
947 | *llvm::MDNode::get( | ||||||
948 | Ctx, {MDB.createCallbackEncoding(2, {-1, -1}, | ||||||
949 | /* VarArgsArePassed */ true)})); | ||||||
950 | } | ||||||
951 | } | ||||||
952 | |||||||
953 | OutlineInfo OI; | ||||||
954 | OI.PostOutlineCB = [=](Function &OutlinedFn) { | ||||||
955 | // Add some known attributes. | ||||||
956 | OutlinedFn.addParamAttr(0, Attribute::NoAlias); | ||||||
957 | OutlinedFn.addParamAttr(1, Attribute::NoAlias); | ||||||
958 | OutlinedFn.addFnAttr(Attribute::NoUnwind); | ||||||
959 | OutlinedFn.addFnAttr(Attribute::NoRecurse); | ||||||
960 | |||||||
961 | assert(OutlinedFn.arg_size() >= 2 &&(static_cast <bool> (OutlinedFn.arg_size() >= 2 && "Expected at least tid and bounded tid as arguments") ? void (0) : __assert_fail ("OutlinedFn.arg_size() >= 2 && \"Expected at least tid and bounded tid as arguments\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 962, __extension__ __PRETTY_FUNCTION__)) | ||||||
962 | "Expected at least tid and bounded tid as arguments")(static_cast <bool> (OutlinedFn.arg_size() >= 2 && "Expected at least tid and bounded tid as arguments") ? void (0) : __assert_fail ("OutlinedFn.arg_size() >= 2 && \"Expected at least tid and bounded tid as arguments\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 962, __extension__ __PRETTY_FUNCTION__)); | ||||||
963 | unsigned NumCapturedVars = | ||||||
964 | OutlinedFn.arg_size() - /* tid & bounded tid */ 2; | ||||||
965 | |||||||
966 | CallInst *CI = cast<CallInst>(OutlinedFn.user_back()); | ||||||
967 | CI->getParent()->setName("omp_parallel"); | ||||||
968 | Builder.SetInsertPoint(CI); | ||||||
969 | |||||||
970 | // Build call __kmpc_fork_call(Ident, n, microtask, var1, .., varn); | ||||||
971 | Value *ForkCallArgs[] = { | ||||||
972 | Ident, Builder.getInt32(NumCapturedVars), | ||||||
973 | Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)}; | ||||||
974 | |||||||
975 | SmallVector<Value *, 16> RealArgs; | ||||||
976 | RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs)); | ||||||
977 | RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end()); | ||||||
978 | |||||||
979 | Builder.CreateCall(RTLFn, RealArgs); | ||||||
980 | |||||||
981 | LLVM_DEBUG(dbgs() << "With fork_call placed: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "With fork_call placed: " << *Builder.GetInsertBlock()->getParent() << "\n" ; } } while (false) | ||||||
982 | << *Builder.GetInsertBlock()->getParent() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "With fork_call placed: " << *Builder.GetInsertBlock()->getParent() << "\n" ; } } while (false); | ||||||
983 | |||||||
984 | InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end()); | ||||||
985 | |||||||
986 | // Initialize the local TID stack location with the argument value. | ||||||
987 | Builder.SetInsertPoint(PrivTID); | ||||||
988 | Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin(); | ||||||
989 | Builder.CreateStore(Builder.CreateLoad(Int32, OutlinedAI), PrivTIDAddr); | ||||||
990 | |||||||
991 | // If no "if" clause was present we do not need the call created during | ||||||
992 | // outlining, otherwise we reuse it in the serialized parallel region. | ||||||
993 | if (!ElseTI) { | ||||||
994 | CI->eraseFromParent(); | ||||||
995 | } else { | ||||||
996 | |||||||
997 | // If an "if" clause was present we are now generating the serialized | ||||||
998 | // version into the "else" branch. | ||||||
999 | Builder.SetInsertPoint(ElseTI); | ||||||
1000 | |||||||
1001 | // Build calls __kmpc_serialized_parallel(&Ident, GTid); | ||||||
1002 | Value *SerializedParallelCallArgs[] = {Ident, ThreadID}; | ||||||
1003 | Builder.CreateCall( | ||||||
1004 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_serialized_parallel), | ||||||
1005 | SerializedParallelCallArgs); | ||||||
1006 | |||||||
1007 | // OutlinedFn(>id, &zero, CapturedStruct); | ||||||
1008 | CI->removeFromParent(); | ||||||
1009 | Builder.Insert(CI); | ||||||
1010 | |||||||
1011 | // __kmpc_end_serialized_parallel(&Ident, GTid); | ||||||
1012 | Value *EndArgs[] = {Ident, ThreadID}; | ||||||
1013 | Builder.CreateCall( | ||||||
1014 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_serialized_parallel), | ||||||
1015 | EndArgs); | ||||||
1016 | |||||||
1017 | LLVM_DEBUG(dbgs() << "With serialized parallel region: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "With serialized parallel region: " << *Builder.GetInsertBlock()->getParent() << "\n" ; } } while (false) | ||||||
1018 | << *Builder.GetInsertBlock()->getParent() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "With serialized parallel region: " << *Builder.GetInsertBlock()->getParent() << "\n" ; } } while (false); | ||||||
1019 | } | ||||||
1020 | |||||||
1021 | for (Instruction *I : ToBeDeleted) | ||||||
1022 | I->eraseFromParent(); | ||||||
1023 | }; | ||||||
1024 | |||||||
1025 | // Adjust the finalization stack, verify the adjustment, and call the | ||||||
1026 | // finalize function a last time to finalize values between the pre-fini | ||||||
1027 | // block and the exit block if we left the parallel "the normal way". | ||||||
1028 | auto FiniInfo = FinalizationStack.pop_back_val(); | ||||||
1029 | (void)FiniInfo; | ||||||
1030 | assert(FiniInfo.DK == OMPD_parallel &&(static_cast <bool> (FiniInfo.DK == OMPD_parallel && "Unexpected finalization stack state!") ? void (0) : __assert_fail ("FiniInfo.DK == OMPD_parallel && \"Unexpected finalization stack state!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1031, __extension__ __PRETTY_FUNCTION__)) | ||||||
1031 | "Unexpected finalization stack state!")(static_cast <bool> (FiniInfo.DK == OMPD_parallel && "Unexpected finalization stack state!") ? void (0) : __assert_fail ("FiniInfo.DK == OMPD_parallel && \"Unexpected finalization stack state!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1031, __extension__ __PRETTY_FUNCTION__)); | ||||||
1032 | |||||||
1033 | Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator(); | ||||||
1034 | |||||||
1035 | InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator()); | ||||||
1036 | FiniCB(PreFiniIP); | ||||||
1037 | |||||||
1038 | OI.OuterAllocaBB = OuterAllocaBlock; | ||||||
1039 | OI.EntryBB = PRegEntryBB; | ||||||
1040 | OI.ExitBB = PRegExitBB; | ||||||
1041 | |||||||
1042 | SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet; | ||||||
1043 | SmallVector<BasicBlock *, 32> Blocks; | ||||||
1044 | OI.collectBlocks(ParallelRegionBlockSet, Blocks); | ||||||
1045 | |||||||
1046 | // Ensure a single exit node for the outlined region by creating one. | ||||||
1047 | // We might have multiple incoming edges to the exit now due to finalizations, | ||||||
1048 | // e.g., cancel calls that cause the control flow to leave the region. | ||||||
1049 | BasicBlock *PRegOutlinedExitBB = PRegExitBB; | ||||||
1050 | PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt()); | ||||||
1051 | PRegOutlinedExitBB->setName("omp.par.outlined.exit"); | ||||||
1052 | Blocks.push_back(PRegOutlinedExitBB); | ||||||
1053 | |||||||
1054 | CodeExtractorAnalysisCache CEAC(*OuterFn); | ||||||
1055 | CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr, | ||||||
1056 | /* AggregateArgs */ false, | ||||||
1057 | /* BlockFrequencyInfo */ nullptr, | ||||||
1058 | /* BranchProbabilityInfo */ nullptr, | ||||||
1059 | /* AssumptionCache */ nullptr, | ||||||
1060 | /* AllowVarArgs */ true, | ||||||
1061 | /* AllowAlloca */ true, | ||||||
1062 | /* AllocationBlock */ OuterAllocaBlock, | ||||||
1063 | /* Suffix */ ".omp_par"); | ||||||
1064 | |||||||
1065 | // Find inputs to, outputs from the code region. | ||||||
1066 | BasicBlock *CommonExit = nullptr; | ||||||
1067 | SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands; | ||||||
1068 | Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit); | ||||||
1069 | Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands); | ||||||
1070 | |||||||
1071 | LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Before privatization: " << *OuterFn << "\n"; } } while (false); | ||||||
1072 | |||||||
1073 | FunctionCallee TIDRTLFn = | ||||||
1074 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num); | ||||||
1075 | |||||||
1076 | auto PrivHelper = [&](Value &V) { | ||||||
1077 | if (&V == TIDAddr || &V == ZeroAddr) { | ||||||
1078 | OI.ExcludeArgsFromAggregate.push_back(&V); | ||||||
1079 | return; | ||||||
1080 | } | ||||||
1081 | |||||||
1082 | SetVector<Use *> Uses; | ||||||
1083 | for (Use &U : V.uses()) | ||||||
1084 | if (auto *UserI = dyn_cast<Instruction>(U.getUser())) | ||||||
1085 | if (ParallelRegionBlockSet.count(UserI->getParent())) | ||||||
1086 | Uses.insert(&U); | ||||||
1087 | |||||||
1088 | // __kmpc_fork_call expects extra arguments as pointers. If the input | ||||||
1089 | // already has a pointer type, everything is fine. Otherwise, store the | ||||||
1090 | // value onto stack and load it back inside the to-be-outlined region. This | ||||||
1091 | // will ensure only the pointer will be passed to the function. | ||||||
1092 | // FIXME: if there are more than 15 trailing arguments, they must be | ||||||
1093 | // additionally packed in a struct. | ||||||
1094 | Value *Inner = &V; | ||||||
1095 | if (!V.getType()->isPointerTy()) { | ||||||
1096 | IRBuilder<>::InsertPointGuard Guard(Builder); | ||||||
1097 | LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { llvm::dbgs() << "Forwarding input as pointer: " << V << "\n"; } } while (false); | ||||||
1098 | |||||||
1099 | Builder.restoreIP(OuterAllocaIP); | ||||||
1100 | Value *Ptr = | ||||||
1101 | Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded"); | ||||||
1102 | |||||||
1103 | // Store to stack at end of the block that currently branches to the entry | ||||||
1104 | // block of the to-be-outlined region. | ||||||
1105 | Builder.SetInsertPoint(InsertBB, | ||||||
1106 | InsertBB->getTerminator()->getIterator()); | ||||||
1107 | Builder.CreateStore(&V, Ptr); | ||||||
1108 | |||||||
1109 | // Load back next to allocations in the to-be-outlined region. | ||||||
1110 | Builder.restoreIP(InnerAllocaIP); | ||||||
1111 | Inner = Builder.CreateLoad(V.getType(), Ptr); | ||||||
1112 | } | ||||||
1113 | |||||||
1114 | Value *ReplacementValue = nullptr; | ||||||
1115 | CallInst *CI = dyn_cast<CallInst>(&V); | ||||||
1116 | if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) { | ||||||
1117 | ReplacementValue = PrivTID; | ||||||
1118 | } else { | ||||||
1119 | Builder.restoreIP( | ||||||
1120 | PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue)); | ||||||
1121 | assert(ReplacementValue &&(static_cast <bool> (ReplacementValue && "Expected copy/create callback to set replacement value!" ) ? void (0) : __assert_fail ("ReplacementValue && \"Expected copy/create callback to set replacement value!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1122, __extension__ __PRETTY_FUNCTION__)) | ||||||
1122 | "Expected copy/create callback to set replacement value!")(static_cast <bool> (ReplacementValue && "Expected copy/create callback to set replacement value!" ) ? void (0) : __assert_fail ("ReplacementValue && \"Expected copy/create callback to set replacement value!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1122, __extension__ __PRETTY_FUNCTION__)); | ||||||
1123 | if (ReplacementValue == &V) | ||||||
1124 | return; | ||||||
1125 | } | ||||||
1126 | |||||||
1127 | for (Use *UPtr : Uses) | ||||||
1128 | UPtr->set(ReplacementValue); | ||||||
1129 | }; | ||||||
1130 | |||||||
1131 | // Reset the inner alloca insertion as it will be used for loading the values | ||||||
1132 | // wrapped into pointers before passing them into the to-be-outlined region. | ||||||
1133 | // Configure it to insert immediately after the fake use of zero address so | ||||||
1134 | // that they are available in the generated body and so that the | ||||||
1135 | // OpenMP-related values (thread ID and zero address pointers) remain leading | ||||||
1136 | // in the argument list. | ||||||
1137 | InnerAllocaIP = IRBuilder<>::InsertPoint( | ||||||
1138 | ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator()); | ||||||
1139 | |||||||
1140 | // Reset the outer alloca insertion point to the entry of the relevant block | ||||||
1141 | // in case it was invalidated. | ||||||
1142 | OuterAllocaIP = IRBuilder<>::InsertPoint( | ||||||
1143 | OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt()); | ||||||
1144 | |||||||
1145 | for (Value *Input : Inputs) { | ||||||
1146 | LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Captured input: " << *Input << "\n"; } } while (false); | ||||||
1147 | PrivHelper(*Input); | ||||||
1148 | } | ||||||
1149 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { for (Value *Output : Outputs) do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType( "openmp-ir-builder")) { dbgs() << "Captured output: " << *Output << "\n"; } } while (false); }; } } while (false ) | ||||||
1150 | for (Value *Output : Outputs)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { for (Value *Output : Outputs) do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType( "openmp-ir-builder")) { dbgs() << "Captured output: " << *Output << "\n"; } } while (false); }; } } while (false ) | ||||||
1151 | LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { for (Value *Output : Outputs) do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType( "openmp-ir-builder")) { dbgs() << "Captured output: " << *Output << "\n"; } } while (false); }; } } while (false ) | ||||||
1152 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { for (Value *Output : Outputs) do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType( "openmp-ir-builder")) { dbgs() << "Captured output: " << *Output << "\n"; } } while (false); }; } } while (false ); | ||||||
1153 | assert(Outputs.empty() &&(static_cast <bool> (Outputs.empty() && "OpenMP outlining should not produce live-out values!" ) ? void (0) : __assert_fail ("Outputs.empty() && \"OpenMP outlining should not produce live-out values!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1154, __extension__ __PRETTY_FUNCTION__)) | ||||||
1154 | "OpenMP outlining should not produce live-out values!")(static_cast <bool> (Outputs.empty() && "OpenMP outlining should not produce live-out values!" ) ? void (0) : __assert_fail ("Outputs.empty() && \"OpenMP outlining should not produce live-out values!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1154, __extension__ __PRETTY_FUNCTION__)); | ||||||
1155 | |||||||
1156 | LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "After privatization: " << *OuterFn << "\n"; } } while (false); | ||||||
1157 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() << " PBR: " << BB->getName() << "\n"; }; } } while (false) | ||||||
1158 | for (auto *BB : Blocks)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() << " PBR: " << BB->getName() << "\n"; }; } } while (false) | ||||||
1159 | dbgs() << " PBR: " << BB->getName() << "\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() << " PBR: " << BB->getName() << "\n"; }; } } while (false) | ||||||
1160 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { for (auto *BB : Blocks) dbgs() << " PBR: " << BB->getName() << "\n"; }; } } while (false); | ||||||
1161 | |||||||
1162 | // Register the outlined info. | ||||||
1163 | addOutlineInfo(std::move(OI)); | ||||||
1164 | |||||||
1165 | InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end()); | ||||||
1166 | UI->eraseFromParent(); | ||||||
1167 | |||||||
1168 | return AfterIP; | ||||||
1169 | } | ||||||
1170 | |||||||
1171 | void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) { | ||||||
1172 | // Build call void __kmpc_flush(ident_t *loc) | ||||||
1173 | uint32_t SrcLocStrSize; | ||||||
1174 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
1175 | Value *Args[] = {getOrCreateIdent(SrcLocStr, SrcLocStrSize)}; | ||||||
1176 | |||||||
1177 | Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args); | ||||||
1178 | } | ||||||
1179 | |||||||
1180 | void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) { | ||||||
1181 | if (!updateToLocation(Loc)) | ||||||
1182 | return; | ||||||
1183 | emitFlush(Loc); | ||||||
1184 | } | ||||||
1185 | |||||||
1186 | void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) { | ||||||
1187 | // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 | ||||||
1188 | // global_tid); | ||||||
1189 | uint32_t SrcLocStrSize; | ||||||
1190 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
1191 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
1192 | Value *Args[] = {Ident, getOrCreateThreadID(Ident)}; | ||||||
1193 | |||||||
1194 | // Ignore return result until untied tasks are supported. | ||||||
1195 | Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait), | ||||||
1196 | Args); | ||||||
1197 | } | ||||||
1198 | |||||||
1199 | void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) { | ||||||
1200 | if (!updateToLocation(Loc)) | ||||||
1201 | return; | ||||||
1202 | emitTaskwaitImpl(Loc); | ||||||
1203 | } | ||||||
1204 | |||||||
1205 | void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) { | ||||||
1206 | // Build call __kmpc_omp_taskyield(loc, thread_id, 0); | ||||||
1207 | uint32_t SrcLocStrSize; | ||||||
1208 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
1209 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
1210 | Constant *I32Null = ConstantInt::getNullValue(Int32); | ||||||
1211 | Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null}; | ||||||
1212 | |||||||
1213 | Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield), | ||||||
1214 | Args); | ||||||
1215 | } | ||||||
1216 | |||||||
1217 | void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) { | ||||||
1218 | if (!updateToLocation(Loc)) | ||||||
1219 | return; | ||||||
1220 | emitTaskyieldImpl(Loc); | ||||||
1221 | } | ||||||
1222 | |||||||
1223 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections( | ||||||
1224 | const LocationDescription &Loc, InsertPointTy AllocaIP, | ||||||
1225 | ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB, | ||||||
1226 | FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) { | ||||||
1227 | assert(!isConflictIP(AllocaIP, Loc.IP) && "Dedicated IP allocas required")(static_cast <bool> (!isConflictIP(AllocaIP, Loc.IP) && "Dedicated IP allocas required") ? void (0) : __assert_fail ( "!isConflictIP(AllocaIP, Loc.IP) && \"Dedicated IP allocas required\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1227, __extension__ __PRETTY_FUNCTION__)); | ||||||
1228 | |||||||
1229 | if (!updateToLocation(Loc)) | ||||||
1230 | return Loc.IP; | ||||||
1231 | |||||||
1232 | auto FiniCBWrapper = [&](InsertPointTy IP) { | ||||||
1233 | if (IP.getBlock()->end() != IP.getPoint()) | ||||||
1234 | return FiniCB(IP); | ||||||
1235 | // This must be done otherwise any nested constructs using FinalizeOMPRegion | ||||||
1236 | // will fail because that function requires the Finalization Basic Block to | ||||||
1237 | // have a terminator, which is already removed by EmitOMPRegionBody. | ||||||
1238 | // IP is currently at cancelation block. | ||||||
1239 | // We need to backtrack to the condition block to fetch | ||||||
1240 | // the exit block and create a branch from cancelation | ||||||
1241 | // to exit block. | ||||||
1242 | IRBuilder<>::InsertPointGuard IPG(Builder); | ||||||
1243 | Builder.restoreIP(IP); | ||||||
1244 | auto *CaseBB = IP.getBlock()->getSinglePredecessor(); | ||||||
1245 | auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor(); | ||||||
1246 | auto *ExitBB = CondBB->getTerminator()->getSuccessor(1); | ||||||
1247 | Instruction *I = Builder.CreateBr(ExitBB); | ||||||
1248 | IP = InsertPointTy(I->getParent(), I->getIterator()); | ||||||
1249 | return FiniCB(IP); | ||||||
1250 | }; | ||||||
1251 | |||||||
1252 | FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable}); | ||||||
1253 | |||||||
1254 | // Each section is emitted as a switch case | ||||||
1255 | // Each finalization callback is handled from clang.EmitOMPSectionDirective() | ||||||
1256 | // -> OMP.createSection() which generates the IR for each section | ||||||
1257 | // Iterate through all sections and emit a switch construct: | ||||||
1258 | // switch (IV) { | ||||||
1259 | // case 0: | ||||||
1260 | // <SectionStmt[0]>; | ||||||
1261 | // break; | ||||||
1262 | // ... | ||||||
1263 | // case <NumSection> - 1: | ||||||
1264 | // <SectionStmt[<NumSection> - 1]>; | ||||||
1265 | // break; | ||||||
1266 | // } | ||||||
1267 | // ... | ||||||
1268 | // section_loop.after: | ||||||
1269 | // <FiniCB>; | ||||||
1270 | auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) { | ||||||
1271 | auto *CurFn = CodeGenIP.getBlock()->getParent(); | ||||||
1272 | auto *ForIncBB = CodeGenIP.getBlock()->getSingleSuccessor(); | ||||||
1273 | auto *ForExitBB = CodeGenIP.getBlock() | ||||||
1274 | ->getSinglePredecessor() | ||||||
1275 | ->getTerminator() | ||||||
1276 | ->getSuccessor(1); | ||||||
1277 | SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, ForIncBB); | ||||||
1278 | Builder.restoreIP(CodeGenIP); | ||||||
1279 | unsigned CaseNumber = 0; | ||||||
1280 | for (auto SectionCB : SectionCBs) { | ||||||
1281 | auto *CaseBB = BasicBlock::Create(M.getContext(), | ||||||
1282 | "omp_section_loop.body.case", CurFn); | ||||||
1283 | SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB); | ||||||
1284 | Builder.SetInsertPoint(CaseBB); | ||||||
1285 | SectionCB(InsertPointTy(), Builder.saveIP(), *ForExitBB); | ||||||
1286 | CaseNumber++; | ||||||
1287 | } | ||||||
1288 | // remove the existing terminator from body BB since there can be no | ||||||
1289 | // terminators after switch/case | ||||||
1290 | CodeGenIP.getBlock()->getTerminator()->eraseFromParent(); | ||||||
1291 | }; | ||||||
1292 | // Loop body ends here | ||||||
1293 | // LowerBound, UpperBound, and STride for createCanonicalLoop | ||||||
1294 | Type *I32Ty = Type::getInt32Ty(M.getContext()); | ||||||
1295 | Value *LB = ConstantInt::get(I32Ty, 0); | ||||||
1296 | Value *UB = ConstantInt::get(I32Ty, SectionCBs.size()); | ||||||
1297 | Value *ST = ConstantInt::get(I32Ty, 1); | ||||||
1298 | llvm::CanonicalLoopInfo *LoopInfo = createCanonicalLoop( | ||||||
1299 | Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop"); | ||||||
1300 | Builder.SetInsertPoint(AllocaIP.getBlock()->getTerminator()); | ||||||
1301 | AllocaIP = Builder.saveIP(); | ||||||
1302 | InsertPointTy AfterIP = | ||||||
1303 | applyStaticWorkshareLoop(Loc.DL, LoopInfo, AllocaIP, !IsNowait); | ||||||
1304 | BasicBlock *LoopAfterBB = AfterIP.getBlock(); | ||||||
1305 | Instruction *SplitPos = LoopAfterBB->getTerminator(); | ||||||
1306 | if (!isa_and_nonnull<BranchInst>(SplitPos)) | ||||||
1307 | SplitPos = new UnreachableInst(Builder.getContext(), LoopAfterBB); | ||||||
1308 | // ExitBB after LoopAfterBB because LoopAfterBB is used for FinalizationCB, | ||||||
1309 | // which requires a BB with branch | ||||||
1310 | BasicBlock *ExitBB = | ||||||
1311 | LoopAfterBB->splitBasicBlock(SplitPos, "omp_sections.end"); | ||||||
1312 | SplitPos->eraseFromParent(); | ||||||
1313 | |||||||
1314 | // Apply the finalization callback in LoopAfterBB | ||||||
1315 | auto FiniInfo = FinalizationStack.pop_back_val(); | ||||||
1316 | assert(FiniInfo.DK == OMPD_sections &&(static_cast <bool> (FiniInfo.DK == OMPD_sections && "Unexpected finalization stack state!") ? void (0) : __assert_fail ("FiniInfo.DK == OMPD_sections && \"Unexpected finalization stack state!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1317, __extension__ __PRETTY_FUNCTION__)) | ||||||
1317 | "Unexpected finalization stack state!")(static_cast <bool> (FiniInfo.DK == OMPD_sections && "Unexpected finalization stack state!") ? void (0) : __assert_fail ("FiniInfo.DK == OMPD_sections && \"Unexpected finalization stack state!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1317, __extension__ __PRETTY_FUNCTION__)); | ||||||
1318 | Builder.SetInsertPoint(LoopAfterBB->getTerminator()); | ||||||
1319 | FiniInfo.FiniCB(Builder.saveIP()); | ||||||
1320 | Builder.SetInsertPoint(ExitBB); | ||||||
1321 | |||||||
1322 | return Builder.saveIP(); | ||||||
1323 | } | ||||||
1324 | |||||||
1325 | OpenMPIRBuilder::InsertPointTy | ||||||
1326 | OpenMPIRBuilder::createSection(const LocationDescription &Loc, | ||||||
1327 | BodyGenCallbackTy BodyGenCB, | ||||||
1328 | FinalizeCallbackTy FiniCB) { | ||||||
1329 | if (!updateToLocation(Loc)) | ||||||
1330 | return Loc.IP; | ||||||
1331 | |||||||
1332 | auto FiniCBWrapper = [&](InsertPointTy IP) { | ||||||
1333 | if (IP.getBlock()->end() != IP.getPoint()) | ||||||
1334 | return FiniCB(IP); | ||||||
1335 | // This must be done otherwise any nested constructs using FinalizeOMPRegion | ||||||
1336 | // will fail because that function requires the Finalization Basic Block to | ||||||
1337 | // have a terminator, which is already removed by EmitOMPRegionBody. | ||||||
1338 | // IP is currently at cancelation block. | ||||||
1339 | // We need to backtrack to the condition block to fetch | ||||||
1340 | // the exit block and create a branch from cancelation | ||||||
1341 | // to exit block. | ||||||
1342 | IRBuilder<>::InsertPointGuard IPG(Builder); | ||||||
1343 | Builder.restoreIP(IP); | ||||||
1344 | auto *CaseBB = Loc.IP.getBlock(); | ||||||
1345 | auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor(); | ||||||
1346 | auto *ExitBB = CondBB->getTerminator()->getSuccessor(1); | ||||||
1347 | Instruction *I = Builder.CreateBr(ExitBB); | ||||||
1348 | IP = InsertPointTy(I->getParent(), I->getIterator()); | ||||||
1349 | return FiniCB(IP); | ||||||
1350 | }; | ||||||
1351 | |||||||
1352 | Directive OMPD = Directive::OMPD_sections; | ||||||
1353 | // Since we are using Finalization Callback here, HasFinalize | ||||||
1354 | // and IsCancellable have to be true | ||||||
1355 | return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper, | ||||||
1356 | /*Conditional*/ false, /*hasFinalize*/ true, | ||||||
1357 | /*IsCancellable*/ true); | ||||||
1358 | } | ||||||
1359 | |||||||
1360 | /// Create a function with a unique name and a "void (i8*, i8*)" signature in | ||||||
1361 | /// the given module and return it. | ||||||
1362 | Function *getFreshReductionFunc(Module &M) { | ||||||
1363 | Type *VoidTy = Type::getVoidTy(M.getContext()); | ||||||
1364 | Type *Int8PtrTy = Type::getInt8PtrTy(M.getContext()); | ||||||
1365 | auto *FuncTy = | ||||||
1366 | FunctionType::get(VoidTy, {Int8PtrTy, Int8PtrTy}, /* IsVarArg */ false); | ||||||
1367 | return Function::Create(FuncTy, GlobalVariable::InternalLinkage, | ||||||
1368 | M.getDataLayout().getDefaultGlobalsAddressSpace(), | ||||||
1369 | ".omp.reduction.func", &M); | ||||||
1370 | } | ||||||
1371 | |||||||
1372 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createReductions( | ||||||
1373 | const LocationDescription &Loc, InsertPointTy AllocaIP, | ||||||
1374 | ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait) { | ||||||
1375 | for (const ReductionInfo &RI : ReductionInfos) { | ||||||
1376 | (void)RI; | ||||||
1377 | assert(RI.Variable && "expected non-null variable")(static_cast <bool> (RI.Variable && "expected non-null variable" ) ? void (0) : __assert_fail ("RI.Variable && \"expected non-null variable\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1377, __extension__ __PRETTY_FUNCTION__)); | ||||||
1378 | assert(RI.PrivateVariable && "expected non-null private variable")(static_cast <bool> (RI.PrivateVariable && "expected non-null private variable" ) ? void (0) : __assert_fail ("RI.PrivateVariable && \"expected non-null private variable\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1378, __extension__ __PRETTY_FUNCTION__)); | ||||||
1379 | assert(RI.ReductionGen && "expected non-null reduction generator callback")(static_cast <bool> (RI.ReductionGen && "expected non-null reduction generator callback" ) ? void (0) : __assert_fail ("RI.ReductionGen && \"expected non-null reduction generator callback\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1379, __extension__ __PRETTY_FUNCTION__)); | ||||||
1380 | assert(RI.Variable->getType() == RI.PrivateVariable->getType() &&(static_cast <bool> (RI.Variable->getType() == RI.PrivateVariable ->getType() && "expected variables and their private equivalents to have the same " "type") ? void (0) : __assert_fail ("RI.Variable->getType() == RI.PrivateVariable->getType() && \"expected variables and their private equivalents to have the same \" \"type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1382, __extension__ __PRETTY_FUNCTION__)) | ||||||
1381 | "expected variables and their private equivalents to have the same "(static_cast <bool> (RI.Variable->getType() == RI.PrivateVariable ->getType() && "expected variables and their private equivalents to have the same " "type") ? void (0) : __assert_fail ("RI.Variable->getType() == RI.PrivateVariable->getType() && \"expected variables and their private equivalents to have the same \" \"type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1382, __extension__ __PRETTY_FUNCTION__)) | ||||||
1382 | "type")(static_cast <bool> (RI.Variable->getType() == RI.PrivateVariable ->getType() && "expected variables and their private equivalents to have the same " "type") ? void (0) : __assert_fail ("RI.Variable->getType() == RI.PrivateVariable->getType() && \"expected variables and their private equivalents to have the same \" \"type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1382, __extension__ __PRETTY_FUNCTION__)); | ||||||
1383 | assert(RI.Variable->getType()->isPointerTy() &&(static_cast <bool> (RI.Variable->getType()->isPointerTy () && "expected variables to be pointers") ? void (0) : __assert_fail ("RI.Variable->getType()->isPointerTy() && \"expected variables to be pointers\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1384, __extension__ __PRETTY_FUNCTION__)) | ||||||
1384 | "expected variables to be pointers")(static_cast <bool> (RI.Variable->getType()->isPointerTy () && "expected variables to be pointers") ? void (0) : __assert_fail ("RI.Variable->getType()->isPointerTy() && \"expected variables to be pointers\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1384, __extension__ __PRETTY_FUNCTION__)); | ||||||
1385 | } | ||||||
1386 | |||||||
1387 | if (!updateToLocation(Loc)) | ||||||
1388 | return InsertPointTy(); | ||||||
1389 | |||||||
1390 | BasicBlock *InsertBlock = Loc.IP.getBlock(); | ||||||
1391 | BasicBlock *ContinuationBlock = | ||||||
1392 | InsertBlock->splitBasicBlock(Loc.IP.getPoint(), "reduce.finalize"); | ||||||
1393 | InsertBlock->getTerminator()->eraseFromParent(); | ||||||
1394 | |||||||
1395 | // Create and populate array of type-erased pointers to private reduction | ||||||
1396 | // values. | ||||||
1397 | unsigned NumReductions = ReductionInfos.size(); | ||||||
1398 | Type *RedArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumReductions); | ||||||
1399 | Builder.restoreIP(AllocaIP); | ||||||
1400 | Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array"); | ||||||
1401 | |||||||
1402 | Builder.SetInsertPoint(InsertBlock, InsertBlock->end()); | ||||||
1403 | |||||||
1404 | for (auto En : enumerate(ReductionInfos)) { | ||||||
1405 | unsigned Index = En.index(); | ||||||
1406 | const ReductionInfo &RI = En.value(); | ||||||
1407 | Value *RedArrayElemPtr = Builder.CreateConstInBoundsGEP2_64( | ||||||
1408 | RedArrayTy, RedArray, 0, Index, "red.array.elem." + Twine(Index)); | ||||||
1409 | Value *Casted = | ||||||
1410 | Builder.CreateBitCast(RI.PrivateVariable, Builder.getInt8PtrTy(), | ||||||
1411 | "private.red.var." + Twine(Index) + ".casted"); | ||||||
1412 | Builder.CreateStore(Casted, RedArrayElemPtr); | ||||||
1413 | } | ||||||
1414 | |||||||
1415 | // Emit a call to the runtime function that orchestrates the reduction. | ||||||
1416 | // Declare the reduction function in the process. | ||||||
1417 | Function *Func = Builder.GetInsertBlock()->getParent(); | ||||||
1418 | Module *Module = Func->getParent(); | ||||||
1419 | Value *RedArrayPtr = | ||||||
1420 | Builder.CreateBitCast(RedArray, Builder.getInt8PtrTy(), "red.array.ptr"); | ||||||
1421 | uint32_t SrcLocStrSize; | ||||||
1422 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
1423 | bool CanGenerateAtomic = | ||||||
1424 | llvm::all_of(ReductionInfos, [](const ReductionInfo &RI) { | ||||||
1425 | return RI.AtomicReductionGen; | ||||||
1426 | }); | ||||||
1427 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize, | ||||||
1428 | CanGenerateAtomic | ||||||
1429 | ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE | ||||||
1430 | : IdentFlag(0)); | ||||||
1431 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
1432 | Constant *NumVariables = Builder.getInt32(NumReductions); | ||||||
1433 | const DataLayout &DL = Module->getDataLayout(); | ||||||
1434 | unsigned RedArrayByteSize = DL.getTypeStoreSize(RedArrayTy); | ||||||
1435 | Constant *RedArraySize = Builder.getInt64(RedArrayByteSize); | ||||||
1436 | Function *ReductionFunc = getFreshReductionFunc(*Module); | ||||||
1437 | Value *Lock = getOMPCriticalRegionLock(".reduction"); | ||||||
1438 | Function *ReduceFunc = getOrCreateRuntimeFunctionPtr( | ||||||
1439 | IsNoWait ? RuntimeFunction::OMPRTL___kmpc_reduce_nowait | ||||||
1440 | : RuntimeFunction::OMPRTL___kmpc_reduce); | ||||||
1441 | CallInst *ReduceCall = | ||||||
1442 | Builder.CreateCall(ReduceFunc, | ||||||
1443 | {Ident, ThreadId, NumVariables, RedArraySize, | ||||||
1444 | RedArrayPtr, ReductionFunc, Lock}, | ||||||
1445 | "reduce"); | ||||||
1446 | |||||||
1447 | // Create final reduction entry blocks for the atomic and non-atomic case. | ||||||
1448 | // Emit IR that dispatches control flow to one of the blocks based on the | ||||||
1449 | // reduction supporting the atomic mode. | ||||||
1450 | BasicBlock *NonAtomicRedBlock = | ||||||
1451 | BasicBlock::Create(Module->getContext(), "reduce.switch.nonatomic", Func); | ||||||
1452 | BasicBlock *AtomicRedBlock = | ||||||
1453 | BasicBlock::Create(Module->getContext(), "reduce.switch.atomic", Func); | ||||||
1454 | SwitchInst *Switch = | ||||||
1455 | Builder.CreateSwitch(ReduceCall, ContinuationBlock, /* NumCases */ 2); | ||||||
1456 | Switch->addCase(Builder.getInt32(1), NonAtomicRedBlock); | ||||||
1457 | Switch->addCase(Builder.getInt32(2), AtomicRedBlock); | ||||||
1458 | |||||||
1459 | // Populate the non-atomic reduction using the elementwise reduction function. | ||||||
1460 | // This loads the elements from the global and private variables and reduces | ||||||
1461 | // them before storing back the result to the global variable. | ||||||
1462 | Builder.SetInsertPoint(NonAtomicRedBlock); | ||||||
1463 | for (auto En : enumerate(ReductionInfos)) { | ||||||
1464 | const ReductionInfo &RI = En.value(); | ||||||
1465 | Type *ValueType = RI.ElementType; | ||||||
1466 | Value *RedValue = Builder.CreateLoad(ValueType, RI.Variable, | ||||||
1467 | "red.value." + Twine(En.index())); | ||||||
1468 | Value *PrivateRedValue = | ||||||
1469 | Builder.CreateLoad(ValueType, RI.PrivateVariable, | ||||||
1470 | "red.private.value." + Twine(En.index())); | ||||||
1471 | Value *Reduced; | ||||||
1472 | Builder.restoreIP( | ||||||
1473 | RI.ReductionGen(Builder.saveIP(), RedValue, PrivateRedValue, Reduced)); | ||||||
1474 | if (!Builder.GetInsertBlock()) | ||||||
1475 | return InsertPointTy(); | ||||||
1476 | Builder.CreateStore(Reduced, RI.Variable); | ||||||
1477 | } | ||||||
1478 | Function *EndReduceFunc = getOrCreateRuntimeFunctionPtr( | ||||||
1479 | IsNoWait ? RuntimeFunction::OMPRTL___kmpc_end_reduce_nowait | ||||||
1480 | : RuntimeFunction::OMPRTL___kmpc_end_reduce); | ||||||
1481 | Builder.CreateCall(EndReduceFunc, {Ident, ThreadId, Lock}); | ||||||
1482 | Builder.CreateBr(ContinuationBlock); | ||||||
1483 | |||||||
1484 | // Populate the atomic reduction using the atomic elementwise reduction | ||||||
1485 | // function. There are no loads/stores here because they will be happening | ||||||
1486 | // inside the atomic elementwise reduction. | ||||||
1487 | Builder.SetInsertPoint(AtomicRedBlock); | ||||||
1488 | if (CanGenerateAtomic) { | ||||||
1489 | for (const ReductionInfo &RI : ReductionInfos) { | ||||||
1490 | Builder.restoreIP(RI.AtomicReductionGen(Builder.saveIP(), RI.ElementType, | ||||||
1491 | RI.Variable, RI.PrivateVariable)); | ||||||
1492 | if (!Builder.GetInsertBlock()) | ||||||
1493 | return InsertPointTy(); | ||||||
1494 | } | ||||||
1495 | Builder.CreateBr(ContinuationBlock); | ||||||
1496 | } else { | ||||||
1497 | Builder.CreateUnreachable(); | ||||||
1498 | } | ||||||
1499 | |||||||
1500 | // Populate the outlined reduction function using the elementwise reduction | ||||||
1501 | // function. Partial values are extracted from the type-erased array of | ||||||
1502 | // pointers to private variables. | ||||||
1503 | BasicBlock *ReductionFuncBlock = | ||||||
1504 | BasicBlock::Create(Module->getContext(), "", ReductionFunc); | ||||||
1505 | Builder.SetInsertPoint(ReductionFuncBlock); | ||||||
1506 | Value *LHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(0), | ||||||
1507 | RedArrayTy->getPointerTo()); | ||||||
1508 | Value *RHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(1), | ||||||
1509 | RedArrayTy->getPointerTo()); | ||||||
1510 | for (auto En : enumerate(ReductionInfos)) { | ||||||
1511 | const ReductionInfo &RI = En.value(); | ||||||
1512 | Value *LHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64( | ||||||
1513 | RedArrayTy, LHSArrayPtr, 0, En.index()); | ||||||
1514 | Value *LHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), LHSI8PtrPtr); | ||||||
1515 | Value *LHSPtr = Builder.CreateBitCast(LHSI8Ptr, RI.Variable->getType()); | ||||||
1516 | Value *LHS = Builder.CreateLoad(RI.ElementType, LHSPtr); | ||||||
1517 | Value *RHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64( | ||||||
1518 | RedArrayTy, RHSArrayPtr, 0, En.index()); | ||||||
1519 | Value *RHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), RHSI8PtrPtr); | ||||||
1520 | Value *RHSPtr = | ||||||
1521 | Builder.CreateBitCast(RHSI8Ptr, RI.PrivateVariable->getType()); | ||||||
1522 | Value *RHS = Builder.CreateLoad(RI.ElementType, RHSPtr); | ||||||
1523 | Value *Reduced; | ||||||
1524 | Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced)); | ||||||
1525 | if (!Builder.GetInsertBlock()) | ||||||
1526 | return InsertPointTy(); | ||||||
1527 | Builder.CreateStore(Reduced, LHSPtr); | ||||||
1528 | } | ||||||
1529 | Builder.CreateRetVoid(); | ||||||
1530 | |||||||
1531 | Builder.SetInsertPoint(ContinuationBlock); | ||||||
1532 | return Builder.saveIP(); | ||||||
1533 | } | ||||||
1534 | |||||||
1535 | OpenMPIRBuilder::InsertPointTy | ||||||
1536 | OpenMPIRBuilder::createMaster(const LocationDescription &Loc, | ||||||
1537 | BodyGenCallbackTy BodyGenCB, | ||||||
1538 | FinalizeCallbackTy FiniCB) { | ||||||
1539 | |||||||
1540 | if (!updateToLocation(Loc)) | ||||||
1541 | return Loc.IP; | ||||||
1542 | |||||||
1543 | Directive OMPD = Directive::OMPD_master; | ||||||
1544 | uint32_t SrcLocStrSize; | ||||||
1545 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
1546 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
1547 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
1548 | Value *Args[] = {Ident, ThreadId}; | ||||||
1549 | |||||||
1550 | Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master); | ||||||
1551 | Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); | ||||||
1552 | |||||||
1553 | Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master); | ||||||
1554 | Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); | ||||||
1555 | |||||||
1556 | return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | ||||||
1557 | /*Conditional*/ true, /*hasFinalize*/ true); | ||||||
1558 | } | ||||||
1559 | |||||||
1560 | OpenMPIRBuilder::InsertPointTy | ||||||
1561 | OpenMPIRBuilder::createMasked(const LocationDescription &Loc, | ||||||
1562 | BodyGenCallbackTy BodyGenCB, | ||||||
1563 | FinalizeCallbackTy FiniCB, Value *Filter) { | ||||||
1564 | if (!updateToLocation(Loc)) | ||||||
1565 | return Loc.IP; | ||||||
1566 | |||||||
1567 | Directive OMPD = Directive::OMPD_masked; | ||||||
1568 | uint32_t SrcLocStrSize; | ||||||
1569 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
1570 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
1571 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
1572 | Value *Args[] = {Ident, ThreadId, Filter}; | ||||||
1573 | Value *ArgsEnd[] = {Ident, ThreadId}; | ||||||
1574 | |||||||
1575 | Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked); | ||||||
1576 | Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); | ||||||
1577 | |||||||
1578 | Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked); | ||||||
1579 | Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd); | ||||||
1580 | |||||||
1581 | return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | ||||||
1582 | /*Conditional*/ true, /*hasFinalize*/ true); | ||||||
1583 | } | ||||||
1584 | |||||||
/// Create the control-flow skeleton of a canonical loop counting from 0
/// (exclusive) to \p TripCount with unit step: preheader -> header -> cond
/// -> body -> inc(latch) -> exit -> after. The body block is left empty for
/// the caller to fill; the loop is NOT yet connected to the surrounding CFG
/// (the preheader has no predecessors and "after" has no explicit successor).
CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
    DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
    BasicBlock *PostInsertBefore, const Twine &Name) {
  Module *M = F->getParent();
  LLVMContext &Ctx = M->getContext();
  // The induction variable inherits the integer type of the trip count.
  Type *IndVarTy = TripCount->getType();

  // Create the basic block structure. Preheader..body go before
  // PreInsertBefore, inc..after before PostInsertBefore, so a caller can
  // nest skeletons by passing different anchors.
  BasicBlock *Preheader =
      BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore);
  BasicBlock *Header =
      BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore);
  BasicBlock *Cond =
      BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore);
  BasicBlock *Body =
      BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore);
  BasicBlock *Latch =
      BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore);
  BasicBlock *Exit =
      BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore);
  BasicBlock *After =
      BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore);

  // Use specified DebugLoc for new instructions.
  Builder.SetCurrentDebugLocation(DL);

  Builder.SetInsertPoint(Preheader);
  Builder.CreateBr(Header);

  // Header materializes the induction variable; it starts at 0 from the
  // preheader and is updated by the latch below.
  Builder.SetInsertPoint(Header);
  PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv");
  IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader);
  Builder.CreateBr(Cond);

  // Loop is taken while iv < TripCount (unsigned comparison; canonical loops
  // interpret the counter as unsigned).
  Builder.SetInsertPoint(Cond);
  Value *Cmp =
      Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp");
  Builder.CreateCondBr(Cmp, Body, Exit);

  Builder.SetInsertPoint(Body);
  Builder.CreateBr(Latch);

  // iv + 1 cannot wrap because iv < TripCount <= UINT_MAX, hence NUW.
  Builder.SetInsertPoint(Latch);
  Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1),
                                  "omp_" + Name + ".next", /*HasNUW=*/true);
  Builder.CreateBr(Header);
  IndVarPHI->addIncoming(Next, Latch);

  Builder.SetInsertPoint(Exit);
  Builder.CreateBr(After);

  // Remember and return the canonical control flow. The CanonicalLoopInfo is
  // stored in the member list so its address stays stable for the caller.
  LoopInfos.emplace_front();
  CanonicalLoopInfo *CL = &LoopInfos.front();

  // NOTE(review): only the four anchor blocks are recorded; the remaining
  // blocks (and the induction PHI) are presumably recovered from them by
  // CanonicalLoopInfo's accessors — confirm against its definition.
  CL->Header = Header;
  CL->Cond = Cond;
  CL->Latch = Latch;
  CL->Exit = Exit;

#ifndef NDEBUG
  CL->assertOK();
#endif
  return CL;
}
1650 | |||||||
1651 | CanonicalLoopInfo * | ||||||
1652 | OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc, | ||||||
1653 | LoopBodyGenCallbackTy BodyGenCB, | ||||||
1654 | Value *TripCount, const Twine &Name) { | ||||||
1655 | BasicBlock *BB = Loc.IP.getBlock(); | ||||||
1656 | BasicBlock *NextBB = BB->getNextNode(); | ||||||
1657 | |||||||
1658 | CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(), | ||||||
1659 | NextBB, NextBB, Name); | ||||||
1660 | BasicBlock *After = CL->getAfter(); | ||||||
1661 | |||||||
1662 | // If location is not set, don't connect the loop. | ||||||
1663 | if (updateToLocation(Loc)) { | ||||||
1664 | // Split the loop at the insertion point: Branch to the preheader and move | ||||||
1665 | // every following instruction to after the loop (the After BB). Also, the | ||||||
1666 | // new successor is the loop's after block. | ||||||
1667 | spliceBB(Builder, After, /*CreateBranch=*/false); | ||||||
1668 | Builder.CreateBr(CL->getPreheader()); | ||||||
1669 | } | ||||||
1670 | |||||||
1671 | // Emit the body content. We do it after connecting the loop to the CFG to | ||||||
1672 | // avoid that the callback encounters degenerate BBs. | ||||||
1673 | BodyGenCB(CL->getBodyIP(), CL->getIndVar()); | ||||||
1674 | |||||||
1675 | #ifndef NDEBUG | ||||||
1676 | CL->assertOK(); | ||||||
1677 | #endif | ||||||
1678 | return CL; | ||||||
1679 | } | ||||||
1680 | |||||||
/// Create a canonical loop for the user-visible range [Start, Stop) (or
/// [Start, Stop] when \p InclusiveStop) with stride \p Step, by first
/// computing an unsigned trip count and then delegating to the trip-count
/// overload. The trip-count computation is emitted at \p ComputeIP when set,
/// otherwise at \p Loc.
CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop(
    const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
    Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
    InsertPointTy ComputeIP, const Twine &Name) {

  // Consider the following difficulties (assuming 8-bit signed integers):
  // * Adding \p Step to the loop counter which passes \p Stop may overflow:
  //     DO I = 1, 100, 50
  // * A \p Step of INT_MIN cannot be normalized to a positive direction:
  //     DO I = 100, 0, -128

  // Start, Stop and Step must be of the same integer type.
  auto *IndVarTy = cast<IntegerType>(Start->getType());
  assert(IndVarTy == Stop->getType() && "Stop type mismatch");
  assert(IndVarTy == Step->getType() && "Step type mismatch");

  // Emit the trip-count computation at the dedicated compute location if one
  // was provided, otherwise inline at the loop location.
  LocationDescription ComputeLoc =
      ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
  updateToLocation(ComputeLoc);

  ConstantInt *Zero = ConstantInt::get(IndVarTy, 0);
  ConstantInt *One = ConstantInt::get(IndVarTy, 1);

  // Like Step, but always positive.
  Value *Incr = Step;

  // Distance between Start and Stop; always positive.
  Value *Span;

  // Condition under which no iteration is executed at all, e.g. because
  // UB < LB.
  Value *ZeroCmp;

  if (IsSigned) {
    // Ensure that increment is positive. If not, negate and invert LB and UB.
    Value *IsNeg = Builder.CreateICmpSLT(Step, Zero);
    Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step);
    Value *LB = Builder.CreateSelect(IsNeg, Stop, Start);
    Value *UB = Builder.CreateSelect(IsNeg, Start, Stop);
    // NSW is justified: when UB < LB the result is unused because ZeroCmp
    // selects a zero trip count below.
    Span = Builder.CreateSub(UB, LB, "", false, true);
    ZeroCmp = Builder.CreateICmp(
        InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB);
  } else {
    // Unsigned case: Step is already non-negative; NUW analogously justified.
    Span = Builder.CreateSub(Stop, Start, "", true);
    ZeroCmp = Builder.CreateICmp(
        InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start);
  }

  // Iteration count assuming at least one iteration executes.
  Value *CountIfLooping;
  if (InclusiveStop) {
    CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One);
  } else {
    // Avoid incrementing past stop since it could overflow.
    Value *CountIfTwo = Builder.CreateAdd(
        Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One);
    // NOTE(review): InclusiveStop is always false on this branch, so the
    // ternary reduces to ICMP_ULE; kept as written to mirror upstream.
    Value *OneCmp = Builder.CreateICmp(
        InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr);
    CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo);
  }
  Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping,
                                          "omp_" + Name + ".tripcount");

  // Map the canonical induction variable (0..TripCount, step 1) back to the
  // user's Start/Step space before handing control to the user body callback.
  auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) {
    Builder.restoreIP(CodeGenIP);
    Value *Span = Builder.CreateMul(IV, Step);
    Value *IndVar = Builder.CreateAdd(Span, Start);
    BodyGenCB(Builder.saveIP(), IndVar);
  };
  // If the trip count was computed at a separate location, the loop itself
  // still goes at the original location; otherwise continue where the
  // computation left off.
  LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP();
  return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name);
}
1752 | |||||||
1753 | // Returns an LLVM function to call for initializing loop bounds using OpenMP | ||||||
1754 | // static scheduling depending on `type`. Only i32 and i64 are supported by the | ||||||
1755 | // runtime. Always interpret integers as unsigned similarly to | ||||||
1756 | // CanonicalLoopInfo. | ||||||
1757 | static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M, | ||||||
1758 | OpenMPIRBuilder &OMPBuilder) { | ||||||
1759 | unsigned Bitwidth = Ty->getIntegerBitWidth(); | ||||||
1760 | if (Bitwidth == 32) | ||||||
1761 | return OMPBuilder.getOrCreateRuntimeFunction( | ||||||
1762 | M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u); | ||||||
1763 | if (Bitwidth == 64) | ||||||
1764 | return OMPBuilder.getOrCreateRuntimeFunction( | ||||||
1765 | M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u); | ||||||
1766 | llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 1766); | ||||||
1767 | } | ||||||
1768 | |||||||
/// Lower a canonical loop to an OpenMP statically-scheduled (unchunked)
/// worksharing loop: allocate the bound slots, call __kmpc_for_static_init_*
/// in the preheader, rebase the loop onto the thread-local [LB, UB] range,
/// call __kmpc_for_static_fini in the exit block, and optionally emit a
/// barrier. The CanonicalLoopInfo is invalidated; the returned insert point
/// is the original loop's "after" position.
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                          InsertPointTy AllocaIP,
                                          bool NeedsBarrier) {
  assert(CLI->isValid() && "Requires a valid canonical loop");
  assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) &&
         "Require dedicated allocate IP");

  // Set up the source location value for OpenMP runtime.
  Builder.restoreIP(CLI->getPreheaderIP());
  Builder.SetCurrentDebugLocation(DL);

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
  Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);

  // Declare useful OpenMP runtime functions.
  Value *IV = CLI->getIndVar();
  Type *IVTy = IV->getType();
  FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this);
  FunctionCallee StaticFini =
      getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);

  // Allocate space for computed loop bounds as expected by the "init"
  // function: last-iteration flag (i32) plus lower/upper bound and stride in
  // the induction-variable type.
  Builder.restoreIP(AllocaIP);
  Type *I32Type = Type::getInt32Ty(M.getContext());
  Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
  Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
  Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
  Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");

  // At the end of the preheader, prepare for calling the "init" function by
  // storing the current loop bounds into the allocated space. A canonical loop
  // always iterates from 0 to trip-count with step 1. Note that "init" expects
  // and produces an inclusive upper bound.
  Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
  Constant *Zero = ConstantInt::get(IVTy, 0);
  Constant *One = ConstantInt::get(IVTy, 1);
  Builder.CreateStore(Zero, PLowerBound);
  Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One);
  Builder.CreateStore(UpperBound, PUpperBound);
  Builder.CreateStore(One, PStride);

  Value *ThreadNum = getOrCreateThreadID(SrcLoc);

  Constant *SchedulingType = ConstantInt::get(
      I32Type, static_cast<int>(OMPScheduleType::UnorderedStatic));

  // Call the "init" function and update the trip count of the loop with the
  // value it produced (the runtime writes this thread's chunk back into the
  // bound slots; trailing One/Zero are the incr and chunk arguments).
  Builder.CreateCall(StaticInit,
                     {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound,
                      PUpperBound, PStride, One, Zero});
  Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound);
  Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound);
  Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound);
  Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One);
  CLI->setTripCount(TripCount);

  // Update all uses of the induction variable except the one in the condition
  // block that compares it with the actual upper bound, and the increment in
  // the latch block.

  CLI->mapIndVar([&](Instruction *OldIV) -> Value * {
    // Rebase the 0-based counter by this thread's lower bound at the top of
    // the body.
    Builder.SetInsertPoint(CLI->getBody(),
                           CLI->getBody()->getFirstInsertionPt());
    Builder.SetCurrentDebugLocation(DL);
    return Builder.CreateAdd(OldIV, LowerBound);
  });

  // In the "exit" block, call the "fini" function.
  Builder.SetInsertPoint(CLI->getExit(),
                         CLI->getExit()->getTerminator()->getIterator());
  Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});

  // Add the barrier if requested.
  if (NeedsBarrier)
    createBarrier(LocationDescription(Builder.saveIP(), DL),
                  omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
                  /* CheckCancelFlag */ false);

  // The loop no longer satisfies the canonical-loop invariants (its trip
  // count and IV uses were rewritten), so invalidate it; callers continue at
  // the after block.
  InsertPointTy AfterIP = CLI->getAfterIP();
  CLI->invalidate();

  return AfterIP;
}
1855 | |||||||
/// Lower a canonical loop to a statically-scheduled chunked worksharing loop:
/// the runtime hands each thread a first chunk and a stride, an outer
/// "dispatch" loop enumerates this thread's chunks, and the original loop is
/// rewired to become the inner per-chunk loop. Bound computations are done in
/// a 32- or 64-bit "internal" type regardless of the original IV width.
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyStaticChunkedWorkshareLoop(
    DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
    bool NeedsBarrier, Value *ChunkSize) {
  assert(CLI->isValid() && "Requires a valid canonical loop");
  assert(ChunkSize && "Chunk size is required");

  LLVMContext &Ctx = CLI->getFunction()->getContext();
  Value *IV = CLI->getIndVar();
  Value *OrigTripCount = CLI->getTripCount();
  Type *IVTy = IV->getType();
  assert(IVTy->getIntegerBitWidth() <= 64 &&
         "Max supported tripcount bitwidth is 64 bits");
  // Widen to the nearest runtime-supported width (i32 or i64) for all bound
  // arithmetic; results are truncated back to IVTy at the end.
  Type *InternalIVTy = IVTy->getIntegerBitWidth() <= 32 ? Type::getInt32Ty(Ctx)
                                                        : Type::getInt64Ty(Ctx);
  Type *I32Type = Type::getInt32Ty(M.getContext());
  Constant *Zero = ConstantInt::get(InternalIVTy, 0);
  Constant *One = ConstantInt::get(InternalIVTy, 1);

  // Declare useful OpenMP runtime functions.
  FunctionCallee StaticInit =
      getKmpcForStaticInitForType(InternalIVTy, M, *this);
  FunctionCallee StaticFini =
      getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);

  // Allocate space for computed loop bounds as expected by the "init" function.
  Builder.restoreIP(AllocaIP);
  Builder.SetCurrentDebugLocation(DL);
  Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
  Value *PLowerBound =
      Builder.CreateAlloca(InternalIVTy, nullptr, "p.lowerbound");
  Value *PUpperBound =
      Builder.CreateAlloca(InternalIVTy, nullptr, "p.upperbound");
  Value *PStride = Builder.CreateAlloca(InternalIVTy, nullptr, "p.stride");

  // Set up the source location value for the OpenMP runtime.
  Builder.restoreIP(CLI->getPreheaderIP());
  Builder.SetCurrentDebugLocation(DL);

  // TODO: Detect overflow in ubsan or max-out with current tripcount.
  Value *CastedChunkSize =
      Builder.CreateZExtOrTrunc(ChunkSize, InternalIVTy, "chunksize");
  Value *CastedTripCount =
      Builder.CreateZExt(OrigTripCount, InternalIVTy, "tripcount");

  Constant *SchedulingType = ConstantInt::get(
      I32Type, static_cast<int>(OMPScheduleType::UnorderedStaticChunked));
  Builder.CreateStore(Zero, PLowerBound);
  Value *OrigUpperBound = Builder.CreateSub(CastedTripCount, One);
  Builder.CreateStore(OrigUpperBound, PUpperBound);
  Builder.CreateStore(One, PStride);

  // Call the "init" function and update the trip count of the loop with the
  // value it produced.
  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
  Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  Value *ThreadNum = getOrCreateThreadID(SrcLoc);
  Builder.CreateCall(StaticInit,
                     {/*loc=*/SrcLoc, /*global_tid=*/ThreadNum,
                      /*schedtype=*/SchedulingType, /*plastiter=*/PLastIter,
                      /*plower=*/PLowerBound, /*pupper=*/PUpperBound,
                      /*pstride=*/PStride, /*incr=*/One,
                      /*chunk=*/CastedChunkSize});

  // Load values written by the "init" function: this thread's first chunk
  // [lb, ub] (inclusive) and the stride between its successive chunks.
  Value *FirstChunkStart =
      Builder.CreateLoad(InternalIVTy, PLowerBound, "omp_firstchunk.lb");
  Value *FirstChunkStop =
      Builder.CreateLoad(InternalIVTy, PUpperBound, "omp_firstchunk.ub");
  Value *FirstChunkEnd = Builder.CreateAdd(FirstChunkStop, One);
  Value *ChunkRange =
      Builder.CreateSub(FirstChunkEnd, FirstChunkStart, "omp_chunk.range");
  Value *NextChunkStride =
      Builder.CreateLoad(InternalIVTy, PStride, "omp_dispatch.stride");

  // Create outer "dispatch" loop for enumerating the chunks.
  BasicBlock *DispatchEnter = splitBB(Builder, true);
  Value *DispatchCounter;
  // NOTE(review): DispatchCounter is only assigned inside the body callback;
  // this relies on createCanonicalLoop always invoking the callback before
  // returning — confirmed by the unconditional BodyGenCB call there.
  CanonicalLoopInfo *DispatchCLI = createCanonicalLoop(
      {Builder.saveIP(), DL},
      [&](InsertPointTy BodyIP, Value *Counter) { DispatchCounter = Counter; },
      FirstChunkStart, CastedTripCount, NextChunkStride,
      /*IsSigned=*/false, /*InclusiveStop=*/false, /*ComputeIP=*/{},
      "dispatch");

  // Remember the BasicBlocks of the dispatch loop we need, then invalidate to
  // not have to preserve the canonical invariant.
  BasicBlock *DispatchBody = DispatchCLI->getBody();
  BasicBlock *DispatchLatch = DispatchCLI->getLatch();
  BasicBlock *DispatchExit = DispatchCLI->getExit();
  BasicBlock *DispatchAfter = DispatchCLI->getAfter();
  DispatchCLI->invalidate();

  // Rewire the original loop to become the chunk loop inside the dispatch loop.
  redirectTo(DispatchAfter, CLI->getAfter(), DL);
  redirectTo(CLI->getExit(), DispatchLatch, DL);
  redirectTo(DispatchBody, DispatchEnter, DL);

  // Prepare the prolog of the chunk loop.
  Builder.restoreIP(CLI->getPreheaderIP());
  Builder.SetCurrentDebugLocation(DL);

  // Compute the number of iterations of the chunk loop: a full ChunkRange,
  // except for the last chunk which may be cut short by the trip count.
  Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
  Value *ChunkEnd = Builder.CreateAdd(DispatchCounter, ChunkRange);
  Value *IsLastChunk =
      Builder.CreateICmpUGE(ChunkEnd, CastedTripCount, "omp_chunk.is_last");
  Value *CountUntilOrigTripCount =
      Builder.CreateSub(CastedTripCount, DispatchCounter);
  Value *ChunkTripCount = Builder.CreateSelect(
      IsLastChunk, CountUntilOrigTripCount, ChunkRange, "omp_chunk.tripcount");
  Value *BackcastedChunkTC =
      Builder.CreateTrunc(ChunkTripCount, IVTy, "omp_chunk.tripcount.trunc");
  CLI->setTripCount(BackcastedChunkTC);

  // Update all uses of the induction variable except the one in the condition
  // block that compares it with the actual upper bound, and the increment in
  // the latch block.
  Value *BackcastedDispatchCounter =
      Builder.CreateTrunc(DispatchCounter, IVTy, "omp_dispatch.iv.trunc");
  CLI->mapIndVar([&](Instruction *) -> Value * {
    // Rebase the chunk-local counter by the chunk's start position.
    Builder.restoreIP(CLI->getBodyIP());
    return Builder.CreateAdd(IV, BackcastedDispatchCounter);
  });

  // In the "exit" block, call the "fini" function.
  Builder.SetInsertPoint(DispatchExit, DispatchExit->getFirstInsertionPt());
  Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});

  // Add the barrier if requested.
  if (NeedsBarrier)
    createBarrier(LocationDescription(Builder.saveIP(), DL), OMPD_for,
                  /*ForceSimpleCall=*/false, /*CheckCancelFlag=*/false);

#ifndef NDEBUG
  // Even though we currently do not support applying additional methods to it,
  // the chunk loop should remain a canonical loop.
  CLI->assertOK();
#endif

  return {DispatchAfter, DispatchAfter->getFirstInsertionPt()};
}
1998 | |||||||
1999 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyWorkshareLoop( | ||||||
2000 | DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, | ||||||
2001 | bool NeedsBarrier, llvm::omp::ScheduleKind SchedKind, | ||||||
2002 | llvm::Value *ChunkSize, bool HasSimdModifier, bool HasMonotonicModifier, | ||||||
2003 | bool HasNonmonotonicModifier, bool HasOrderedClause) { | ||||||
2004 | OMPScheduleType EffectiveScheduleType = computeOpenMPScheduleType( | ||||||
2005 | SchedKind, ChunkSize, HasSimdModifier, HasMonotonicModifier, | ||||||
2006 | HasNonmonotonicModifier, HasOrderedClause); | ||||||
2007 | |||||||
2008 | bool IsOrdered = (EffectiveScheduleType & OMPScheduleType::ModifierOrdered) == | ||||||
2009 | OMPScheduleType::ModifierOrdered; | ||||||
2010 | switch (EffectiveScheduleType & ~OMPScheduleType::ModifierMask) { | ||||||
2011 | case OMPScheduleType::BaseStatic: | ||||||
2012 | assert(!ChunkSize && "No chunk size with static-chunked schedule")(static_cast <bool> (!ChunkSize && "No chunk size with static-chunked schedule" ) ? void (0) : __assert_fail ("!ChunkSize && \"No chunk size with static-chunked schedule\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2012, __extension__ __PRETTY_FUNCTION__)); | ||||||
2013 | if (IsOrdered) | ||||||
2014 | return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType, | ||||||
2015 | NeedsBarrier, ChunkSize); | ||||||
2016 | // FIXME: Monotonicity ignored? | ||||||
2017 | return applyStaticWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier); | ||||||
2018 | |||||||
2019 | case OMPScheduleType::BaseStaticChunked: | ||||||
2020 | if (IsOrdered) | ||||||
2021 | return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType, | ||||||
2022 | NeedsBarrier, ChunkSize); | ||||||
2023 | // FIXME: Monotonicity ignored? | ||||||
2024 | return applyStaticChunkedWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier, | ||||||
2025 | ChunkSize); | ||||||
2026 | |||||||
2027 | case OMPScheduleType::BaseRuntime: | ||||||
2028 | case OMPScheduleType::BaseAuto: | ||||||
2029 | case OMPScheduleType::BaseGreedy: | ||||||
2030 | case OMPScheduleType::BaseBalanced: | ||||||
2031 | case OMPScheduleType::BaseSteal: | ||||||
2032 | case OMPScheduleType::BaseGuidedSimd: | ||||||
2033 | case OMPScheduleType::BaseRuntimeSimd: | ||||||
2034 | assert(!ChunkSize &&(static_cast <bool> (!ChunkSize && "schedule type does not support user-defined chunk sizes" ) ? void (0) : __assert_fail ("!ChunkSize && \"schedule type does not support user-defined chunk sizes\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2035, __extension__ __PRETTY_FUNCTION__)) | ||||||
2035 | "schedule type does not support user-defined chunk sizes")(static_cast <bool> (!ChunkSize && "schedule type does not support user-defined chunk sizes" ) ? void (0) : __assert_fail ("!ChunkSize && \"schedule type does not support user-defined chunk sizes\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2035, __extension__ __PRETTY_FUNCTION__)); | ||||||
2036 | LLVM_FALLTHROUGH[[gnu::fallthrough]]; | ||||||
2037 | case OMPScheduleType::BaseDynamicChunked: | ||||||
2038 | case OMPScheduleType::BaseGuidedChunked: | ||||||
2039 | case OMPScheduleType::BaseGuidedIterativeChunked: | ||||||
2040 | case OMPScheduleType::BaseGuidedAnalyticalChunked: | ||||||
2041 | case OMPScheduleType::BaseStaticBalancedChunked: | ||||||
2042 | return applyDynamicWorkshareLoop(DL, CLI, AllocaIP, EffectiveScheduleType, | ||||||
2043 | NeedsBarrier, ChunkSize); | ||||||
2044 | |||||||
2045 | default: | ||||||
2046 | llvm_unreachable("Unknown/unimplemented schedule kind")::llvm::llvm_unreachable_internal("Unknown/unimplemented schedule kind" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2046); | ||||||
2047 | } | ||||||
2048 | } | ||||||
2049 | |||||||
2050 | /// Returns an LLVM function to call for initializing loop bounds using OpenMP | ||||||
2051 | /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by | ||||||
2052 | /// the runtime. Always interpret integers as unsigned similarly to | ||||||
2053 | /// CanonicalLoopInfo. | ||||||
2054 | static FunctionCallee | ||||||
2055 | getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { | ||||||
2056 | unsigned Bitwidth = Ty->getIntegerBitWidth(); | ||||||
2057 | if (Bitwidth == 32) | ||||||
2058 | return OMPBuilder.getOrCreateRuntimeFunction( | ||||||
2059 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u); | ||||||
2060 | if (Bitwidth == 64) | ||||||
2061 | return OMPBuilder.getOrCreateRuntimeFunction( | ||||||
2062 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u); | ||||||
2063 | llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2063); | ||||||
2064 | } | ||||||
2065 | |||||||
2066 | /// Returns an LLVM function to call for updating the next loop using OpenMP | ||||||
2067 | /// dynamic scheduling depending on `type`. Only i32 and i64 are supported by | ||||||
2068 | /// the runtime. Always interpret integers as unsigned similarly to | ||||||
2069 | /// CanonicalLoopInfo. | ||||||
2070 | static FunctionCallee | ||||||
2071 | getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { | ||||||
2072 | unsigned Bitwidth = Ty->getIntegerBitWidth(); | ||||||
2073 | if (Bitwidth == 32) | ||||||
2074 | return OMPBuilder.getOrCreateRuntimeFunction( | ||||||
2075 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u); | ||||||
2076 | if (Bitwidth == 64) | ||||||
2077 | return OMPBuilder.getOrCreateRuntimeFunction( | ||||||
2078 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u); | ||||||
2079 | llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2079); | ||||||
2080 | } | ||||||
2081 | |||||||
2082 | /// Returns an LLVM function to call for finalizing the dynamic loop using | ||||||
2083 | /// depending on `type`. Only i32 and i64 are supported by the runtime. Always | ||||||
2084 | /// interpret integers as unsigned similarly to CanonicalLoopInfo. | ||||||
2085 | static FunctionCallee | ||||||
2086 | getKmpcForDynamicFiniForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) { | ||||||
2087 | unsigned Bitwidth = Ty->getIntegerBitWidth(); | ||||||
2088 | if (Bitwidth == 32) | ||||||
2089 | return OMPBuilder.getOrCreateRuntimeFunction( | ||||||
2090 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_fini_4u); | ||||||
2091 | if (Bitwidth == 64) | ||||||
2092 | return OMPBuilder.getOrCreateRuntimeFunction( | ||||||
2093 | M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_fini_8u); | ||||||
2094 | llvm_unreachable("unknown OpenMP loop iterator bitwidth")::llvm::llvm_unreachable_internal("unknown OpenMP loop iterator bitwidth" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2094); | ||||||
2095 | } | ||||||
2096 | |||||||
/// Lower \p CLI into a worksharing loop driven by the OpenMP dynamic-dispatch
/// runtime: an outer loop repeatedly calls __kmpc_dispatch_next to obtain
/// [lower, upper] chunks, and the original loop body iterates each chunk.
/// The CanonicalLoopInfo is destroyed in the process and invalidated at the
/// end; callers must not use it afterwards.
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
    DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
    OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk) {
  assert(CLI->isValid() && "Requires a valid canonical loop");
  assert(!isConflictIP(AllocaIP, CLI->getPreheaderIP()) &&
         "Require dedicated allocate IP");
  assert(isValidWorkshareLoopScheduleType(SchedType) &&
         "Require valid schedule type");

  bool Ordered = (SchedType & OMPScheduleType::ModifierOrdered) ==
                 OMPScheduleType::ModifierOrdered;

  // Set up the source location value for OpenMP runtime.
  Builder.SetCurrentDebugLocation(DL);

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize);
  Value *SrcLoc = getOrCreateIdent(SrcLocStr, SrcLocStrSize);

  // Declare useful OpenMP runtime functions.
  Value *IV = CLI->getIndVar();
  Type *IVTy = IV->getType();
  FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this);
  FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this);

  // Allocate space for computed loop bounds as expected by the "init" function.
  Builder.restoreIP(AllocaIP);
  Type *I32Type = Type::getInt32Ty(M.getContext());
  Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
  Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
  Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
  Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");

  // At the end of the preheader, prepare for calling the "init" function by
  // storing the current loop bounds into the allocated space. A canonical loop
  // always iterates from 0 to trip-count with step 1. Note that "init" expects
  // and produces an inclusive upper bound.
  // The runtime works on a one-based [1, tripcount] range here; the loaded
  // lower bound is shifted back to zero-based below (see the "lb" subtraction).
  BasicBlock *PreHeader = CLI->getPreheader();
  Builder.SetInsertPoint(PreHeader->getTerminator());
  Constant *One = ConstantInt::get(IVTy, 1);
  Builder.CreateStore(One, PLowerBound);
  Value *UpperBound = CLI->getTripCount();
  Builder.CreateStore(UpperBound, PUpperBound);
  Builder.CreateStore(One, PStride);

  BasicBlock *Header = CLI->getHeader();
  BasicBlock *Exit = CLI->getExit();
  BasicBlock *Cond = CLI->getCond();
  BasicBlock *Latch = CLI->getLatch();
  InsertPointTy AfterIP = CLI->getAfterIP();

  // The CLI will be "broken" in the code below, as the loop is no longer
  // a valid canonical loop.

  // The runtime requires a chunk argument; 1 is its neutral value here.
  if (!Chunk)
    Chunk = One;

  Value *ThreadNum = getOrCreateThreadID(SrcLoc);

  Constant *SchedulingType =
      ConstantInt::get(I32Type, static_cast<int>(SchedType));

  // Call the "init" function.
  Builder.CreateCall(DynamicInit,
                     {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One,
                      UpperBound, /* step */ One, Chunk});

  // An outer loop around the existing one.
  BasicBlock *OuterCond = BasicBlock::Create(
      PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond",
      PreHeader->getParent());
  // This needs to be 32-bit always, so can't use the IVTy Zero above.
  Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt());
  Value *Res =
      Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter,
                                       PLowerBound, PUpperBound, PStride});
  Constant *Zero32 = ConstantInt::get(I32Type, 0);
  // Non-zero return from dispatch_next means another chunk is available.
  Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32);
  Value *LowerBound =
      Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb");
  Builder.CreateCondBr(MoreWork, Header, Exit);

  // Change PHI-node in loop header to use outer cond rather than preheader,
  // and set IV to the LowerBound.
  // NOTE(review): assumes the IV PHI is the first instruction of the header
  // and that its incoming edge 0 is the preheader edge — holds for loops
  // built by createCanonicalLoop; confirm if headers can carry other PHIs.
  Instruction *Phi = &Header->front();
  auto *PI = cast<PHINode>(Phi);
  PI->setIncomingBlock(0, OuterCond);
  PI->setIncomingValue(0, LowerBound);

  // Then set the pre-header to jump to the OuterCond
  Instruction *Term = PreHeader->getTerminator();
  auto *Br = cast<BranchInst>(Term);
  Br->setSuccessor(0, OuterCond);

  // Modify the inner condition:
  // * Use the UpperBound returned from the DynamicNext call.
  // * jump to the loop outer loop when done with one of the inner loops.
  // NOTE(review): assumes the first insertable instruction of the cond block
  // is the canonical loop's trip-count compare; verify against
  // createLoopSkeleton's block layout.
  Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt());
  UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub");
  Instruction *Comp = &*Builder.GetInsertPoint();
  auto *CI = cast<CmpInst>(Comp);
  CI->setOperand(1, UpperBound);
  // Redirect the inner exit to branch to outer condition.
  Instruction *Branch = &Cond->back();
  auto *BI = cast<BranchInst>(Branch);
  assert(BI->getSuccessor(1) == Exit);
  BI->setSuccessor(1, OuterCond);

  // Call the "fini" function if "ordered" is present in wsloop directive.
  if (Ordered) {
    Builder.SetInsertPoint(&Latch->back());
    FunctionCallee DynamicFini = getKmpcForDynamicFiniForType(IVTy, M, *this);
    Builder.CreateCall(DynamicFini, {SrcLoc, ThreadNum});
  }

  // Add the barrier if requested.
  if (NeedsBarrier) {
    Builder.SetInsertPoint(&Exit->back());
    createBarrier(LocationDescription(Builder.saveIP(), DL),
                  omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
                  /* CheckCancelFlag */ false);
  }

  // The loop no longer has canonical shape; mark the CLI unusable.
  CLI->invalidate();
  return AfterIP;
}
2223 | |||||||
/// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is,
/// after this \p OldTarget will be orphaned.
static void redirectAllPredecessorsTo(BasicBlock *OldTarget,
                                      BasicBlock *NewTarget, DebugLoc DL) {
  // Early-increment iteration is required: redirectTo rewrites each
  // predecessor's terminator, which mutates OldTarget's use list (the very
  // list the predecessors iterator walks) while we traverse it.
  for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget)))
    redirectTo(Pred, NewTarget, DL);
}
2231 | |||||||
2232 | /// Determine which blocks in \p BBs are reachable from outside and remove the | ||||||
2233 | /// ones that are not reachable from the function. | ||||||
2234 | static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) { | ||||||
2235 | SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()}; | ||||||
2236 | auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) { | ||||||
2237 | for (Use &U : BB->uses()) { | ||||||
2238 | auto *UseInst = dyn_cast<Instruction>(U.getUser()); | ||||||
2239 | if (!UseInst) | ||||||
2240 | continue; | ||||||
2241 | if (BBsToErase.count(UseInst->getParent())) | ||||||
2242 | continue; | ||||||
2243 | return true; | ||||||
2244 | } | ||||||
2245 | return false; | ||||||
2246 | }; | ||||||
2247 | |||||||
2248 | while (true) { | ||||||
2249 | bool Changed = false; | ||||||
2250 | for (BasicBlock *BB : make_early_inc_range(BBsToErase)) { | ||||||
2251 | if (HasRemainingUses(BB)) { | ||||||
2252 | BBsToErase.erase(BB); | ||||||
2253 | Changed = true; | ||||||
2254 | } | ||||||
2255 | } | ||||||
2256 | if (!Changed) | ||||||
2257 | break; | ||||||
2258 | } | ||||||
2259 | |||||||
2260 | SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end()); | ||||||
2261 | DeleteDeadBlocks(BBVec); | ||||||
2262 | } | ||||||
2263 | |||||||
/// Collapse the given perfectly-or-imperfectly nested canonical loops into a
/// single canonical loop whose trip count is the product of the input trip
/// counts. The input CanonicalLoopInfos are consumed (invalidated); the
/// returned CanonicalLoopInfo describes the collapsed loop.
CanonicalLoopInfo *
OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
                               InsertPointTy ComputeIP) {
  assert(Loops.size() >= 1 && "At least one loop required");
  size_t NumLoops = Loops.size();

  // Nothing to do if there is already just one loop.
  if (NumLoops == 1)
    return Loops.front();

  CanonicalLoopInfo *Outermost = Loops.front();
  CanonicalLoopInfo *Innermost = Loops.back();
  BasicBlock *OrigPreheader = Outermost->getPreheader();
  BasicBlock *OrigAfter = Outermost->getAfter();
  Function *F = OrigPreheader->getParent();

  // Loop control blocks that may become orphaned later.
  SmallVector<BasicBlock *, 12> OldControlBBs;
  OldControlBBs.reserve(6 * Loops.size());
  for (CanonicalLoopInfo *Loop : Loops)
    Loop->collectControlBlocks(OldControlBBs);

  // Setup the IRBuilder for inserting the trip count computation.
  // If the caller did not provide a dedicated compute location, emit the
  // product into the outermost loop's preheader.
  Builder.SetCurrentDebugLocation(DL);
  if (ComputeIP.isSet())
    Builder.restoreIP(ComputeIP);
  else
    Builder.restoreIP(Outermost->getPreheaderIP());

  // Derive the collapsed loop's trip count.
  // TODO: Find common/largest indvar type.
  Value *CollapsedTripCount = nullptr;
  for (CanonicalLoopInfo *L : Loops) {
    assert(L->isValid() &&
           "All loops to collapse must be valid canonical loops");
    Value *OrigTripCount = L->getTripCount();
    if (!CollapsedTripCount) {
      CollapsedTripCount = OrigTripCount;
      continue;
    }

    // TODO: Enable UndefinedSanitizer to diagnose an overflow here.
    CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount,
                                           {}, /*HasNUW=*/true);
  }

  // Create the collapsed loop control flow.
  CanonicalLoopInfo *Result =
      createLoopSkeleton(DL, CollapsedTripCount, F,
                         OrigPreheader->getNextNode(), OrigAfter, "collapsed");

  // Build the collapsed loop body code.
  // Start with deriving the input loop induction variables from the collapsed
  // one, using a divmod scheme. To preserve the original loops' order, the
  // innermost loop use the least significant bits.
  Builder.restoreIP(Result->getBodyIP());

  Value *Leftover = Result->getIndVar();
  SmallVector<Value *> NewIndVars;
  NewIndVars.resize(NumLoops);
  for (int i = NumLoops - 1; i >= 1; --i) {
    Value *OrigTripCount = Loops[i]->getTripCount();

    // IV_i = Leftover % TripCount_i; Leftover /= TripCount_i.
    Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount);
    NewIndVars[i] = NewIndVar;

    Leftover = Builder.CreateUDiv(Leftover, OrigTripCount);
  }
  // Outermost loop gets all the remaining bits.
  NewIndVars[0] = Leftover;

  // Construct the loop body control flow.
  // We progressively construct the branch structure following in direction of
  // the control flow, from the leading in-between code, the loop nest body, the
  // trailing in-between code, and rejoining the collapsed loop's latch.
  // ContinueBlock and ContinuePred keep track of the source(s) of next edge. If
  // the ContinueBlock is set, continue with that block. If ContinuePred, use
  // its predecessors as sources.
  BasicBlock *ContinueBlock = Result->getBody();
  BasicBlock *ContinuePred = nullptr;
  auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest,
                                                          BasicBlock *NextSrc) {
    if (ContinueBlock)
      redirectTo(ContinueBlock, Dest, DL);
    else
      redirectAllPredecessorsTo(ContinuePred, Dest, DL);

    ContinueBlock = nullptr;
    ContinuePred = NextSrc;
  };

  // The code before the nested loop of each level.
  // Because we are sinking it into the nest, it will be executed more often
  // that the original loop. More sophisticated schemes could keep track of what
  // the in-between code is and instantiate it only once per thread.
  for (size_t i = 0; i < NumLoops - 1; ++i)
    ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader());

  // Connect the loop nest body.
  ContinueWith(Innermost->getBody(), Innermost->getLatch());

  // The code after the nested loop at each level.
  for (size_t i = NumLoops - 1; i > 0; --i)
    ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch());

  // Connect the finished loop to the collapsed loop latch.
  ContinueWith(Result->getLatch(), nullptr);

  // Replace the input loops with the new collapsed loop.
  redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL);
  redirectTo(Result->getAfter(), Outermost->getAfter(), DL);

  // Replace the input loop indvars with the derived ones.
  for (size_t i = 0; i < NumLoops; ++i)
    Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]);

  // Remove unused parts of the input loops.
  removeUnusedBlocksFromParent(OldControlBBs);

  // The inputs' blocks were scavenged above; mark their CLIs unusable.
  for (CanonicalLoopInfo *L : Loops)
    L->invalidate();

#ifndef NDEBUG
  Result->assertOK();
#endif
  return Result;
}
2391 | |||||||
2392 | std::vector<CanonicalLoopInfo *> | ||||||
2393 | OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, | ||||||
2394 | ArrayRef<Value *> TileSizes) { | ||||||
2395 | assert(TileSizes.size() == Loops.size() &&(static_cast <bool> (TileSizes.size() == Loops.size() && "Must pass as many tile sizes as there are loops") ? void (0 ) : __assert_fail ("TileSizes.size() == Loops.size() && \"Must pass as many tile sizes as there are loops\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2396, __extension__ __PRETTY_FUNCTION__)) | ||||||
2396 | "Must pass as many tile sizes as there are loops")(static_cast <bool> (TileSizes.size() == Loops.size() && "Must pass as many tile sizes as there are loops") ? void (0 ) : __assert_fail ("TileSizes.size() == Loops.size() && \"Must pass as many tile sizes as there are loops\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2396, __extension__ __PRETTY_FUNCTION__)); | ||||||
2397 | int NumLoops = Loops.size(); | ||||||
2398 | assert(NumLoops >= 1 && "At least one loop to tile required")(static_cast <bool> (NumLoops >= 1 && "At least one loop to tile required" ) ? void (0) : __assert_fail ("NumLoops >= 1 && \"At least one loop to tile required\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2398, __extension__ __PRETTY_FUNCTION__)); | ||||||
2399 | |||||||
2400 | CanonicalLoopInfo *OutermostLoop = Loops.front(); | ||||||
2401 | CanonicalLoopInfo *InnermostLoop = Loops.back(); | ||||||
2402 | Function *F = OutermostLoop->getBody()->getParent(); | ||||||
2403 | BasicBlock *InnerEnter = InnermostLoop->getBody(); | ||||||
2404 | BasicBlock *InnerLatch = InnermostLoop->getLatch(); | ||||||
2405 | |||||||
2406 | // Loop control blocks that may become orphaned later. | ||||||
2407 | SmallVector<BasicBlock *, 12> OldControlBBs; | ||||||
2408 | OldControlBBs.reserve(6 * Loops.size()); | ||||||
2409 | for (CanonicalLoopInfo *Loop : Loops) | ||||||
2410 | Loop->collectControlBlocks(OldControlBBs); | ||||||
2411 | |||||||
2412 | // Collect original trip counts and induction variable to be accessible by | ||||||
2413 | // index. Also, the structure of the original loops is not preserved during | ||||||
2414 | // the construction of the tiled loops, so do it before we scavenge the BBs of | ||||||
2415 | // any original CanonicalLoopInfo. | ||||||
2416 | SmallVector<Value *, 4> OrigTripCounts, OrigIndVars; | ||||||
2417 | for (CanonicalLoopInfo *L : Loops) { | ||||||
2418 | assert(L->isValid() && "All input loops must be valid canonical loops")(static_cast <bool> (L->isValid() && "All input loops must be valid canonical loops" ) ? void (0) : __assert_fail ("L->isValid() && \"All input loops must be valid canonical loops\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2418, __extension__ __PRETTY_FUNCTION__)); | ||||||
2419 | OrigTripCounts.push_back(L->getTripCount()); | ||||||
2420 | OrigIndVars.push_back(L->getIndVar()); | ||||||
2421 | } | ||||||
2422 | |||||||
2423 | // Collect the code between loop headers. These may contain SSA definitions | ||||||
2424 | // that are used in the loop nest body. To be usable with in the innermost | ||||||
2425 | // body, these BasicBlocks will be sunk into the loop nest body. That is, | ||||||
2426 | // these instructions may be executed more often than before the tiling. | ||||||
2427 | // TODO: It would be sufficient to only sink them into body of the | ||||||
2428 | // corresponding tile loop. | ||||||
2429 | SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode; | ||||||
2430 | for (int i = 0; i < NumLoops - 1; ++i) { | ||||||
2431 | CanonicalLoopInfo *Surrounding = Loops[i]; | ||||||
2432 | CanonicalLoopInfo *Nested = Loops[i + 1]; | ||||||
2433 | |||||||
2434 | BasicBlock *EnterBB = Surrounding->getBody(); | ||||||
2435 | BasicBlock *ExitBB = Nested->getHeader(); | ||||||
2436 | InbetweenCode.emplace_back(EnterBB, ExitBB); | ||||||
2437 | } | ||||||
2438 | |||||||
2439 | // Compute the trip counts of the floor loops. | ||||||
2440 | Builder.SetCurrentDebugLocation(DL); | ||||||
2441 | Builder.restoreIP(OutermostLoop->getPreheaderIP()); | ||||||
2442 | SmallVector<Value *, 4> FloorCount, FloorRems; | ||||||
2443 | for (int i = 0; i < NumLoops; ++i) { | ||||||
2444 | Value *TileSize = TileSizes[i]; | ||||||
2445 | Value *OrigTripCount = OrigTripCounts[i]; | ||||||
2446 | Type *IVType = OrigTripCount->getType(); | ||||||
2447 | |||||||
2448 | Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize); | ||||||
2449 | Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize); | ||||||
2450 | |||||||
2451 | // 0 if tripcount divides the tilesize, 1 otherwise. | ||||||
2452 | // 1 means we need an additional iteration for a partial tile. | ||||||
2453 | // | ||||||
2454 | // Unfortunately we cannot just use the roundup-formula | ||||||
2455 | // (tripcount + tilesize - 1)/tilesize | ||||||
2456 | // because the summation might overflow. We do not want introduce undefined | ||||||
2457 | // behavior when the untiled loop nest did not. | ||||||
2458 | Value *FloorTripOverflow = | ||||||
2459 | Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0)); | ||||||
2460 | |||||||
2461 | FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType); | ||||||
2462 | FloorTripCount = | ||||||
2463 | Builder.CreateAdd(FloorTripCount, FloorTripOverflow, | ||||||
2464 | "omp_floor" + Twine(i) + ".tripcount", true); | ||||||
2465 | |||||||
2466 | // Remember some values for later use. | ||||||
2467 | FloorCount.push_back(FloorTripCount); | ||||||
2468 | FloorRems.push_back(FloorTripRem); | ||||||
2469 | } | ||||||
2470 | |||||||
2471 | // Generate the new loop nest, from the outermost to the innermost. | ||||||
2472 | std::vector<CanonicalLoopInfo *> Result; | ||||||
2473 | Result.reserve(NumLoops * 2); | ||||||
2474 | |||||||
2475 | // The basic block of the surrounding loop that enters the nest generated | ||||||
2476 | // loop. | ||||||
2477 | BasicBlock *Enter = OutermostLoop->getPreheader(); | ||||||
2478 | |||||||
2479 | // The basic block of the surrounding loop where the inner code should | ||||||
2480 | // continue. | ||||||
2481 | BasicBlock *Continue = OutermostLoop->getAfter(); | ||||||
2482 | |||||||
2483 | // Where the next loop basic block should be inserted. | ||||||
2484 | BasicBlock *OutroInsertBefore = InnermostLoop->getExit(); | ||||||
2485 | |||||||
2486 | auto EmbeddNewLoop = | ||||||
2487 | [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore]( | ||||||
2488 | Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * { | ||||||
2489 | CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton( | ||||||
2490 | DL, TripCount, F, InnerEnter, OutroInsertBefore, Name); | ||||||
2491 | redirectTo(Enter, EmbeddedLoop->getPreheader(), DL); | ||||||
2492 | redirectTo(EmbeddedLoop->getAfter(), Continue, DL); | ||||||
2493 | |||||||
2494 | // Setup the position where the next embedded loop connects to this loop. | ||||||
2495 | Enter = EmbeddedLoop->getBody(); | ||||||
2496 | Continue = EmbeddedLoop->getLatch(); | ||||||
2497 | OutroInsertBefore = EmbeddedLoop->getLatch(); | ||||||
2498 | return EmbeddedLoop; | ||||||
2499 | }; | ||||||
2500 | |||||||
2501 | auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts, | ||||||
2502 | const Twine &NameBase) { | ||||||
2503 | for (auto P : enumerate(TripCounts)) { | ||||||
2504 | CanonicalLoopInfo *EmbeddedLoop = | ||||||
2505 | EmbeddNewLoop(P.value(), NameBase + Twine(P.index())); | ||||||
2506 | Result.push_back(EmbeddedLoop); | ||||||
2507 | } | ||||||
2508 | }; | ||||||
2509 | |||||||
2510 | EmbeddNewLoops(FloorCount, "floor"); | ||||||
2511 | |||||||
2512 | // Within the innermost floor loop, emit the code that computes the tile | ||||||
2513 | // sizes. | ||||||
2514 | Builder.SetInsertPoint(Enter->getTerminator()); | ||||||
2515 | SmallVector<Value *, 4> TileCounts; | ||||||
2516 | for (int i = 0; i < NumLoops; ++i) { | ||||||
2517 | CanonicalLoopInfo *FloorLoop = Result[i]; | ||||||
2518 | Value *TileSize = TileSizes[i]; | ||||||
2519 | |||||||
2520 | Value *FloorIsEpilogue = | ||||||
2521 | Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]); | ||||||
2522 | Value *TileTripCount = | ||||||
2523 | Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize); | ||||||
2524 | |||||||
2525 | TileCounts.push_back(TileTripCount); | ||||||
2526 | } | ||||||
2527 | |||||||
2528 | // Create the tile loops. | ||||||
2529 | EmbeddNewLoops(TileCounts, "tile"); | ||||||
2530 | |||||||
2531 | // Insert the inbetween code into the body. | ||||||
2532 | BasicBlock *BodyEnter = Enter; | ||||||
2533 | BasicBlock *BodyEntered = nullptr; | ||||||
2534 | for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) { | ||||||
2535 | BasicBlock *EnterBB = P.first; | ||||||
2536 | BasicBlock *ExitBB = P.second; | ||||||
2537 | |||||||
2538 | if (BodyEnter) | ||||||
2539 | redirectTo(BodyEnter, EnterBB, DL); | ||||||
2540 | else | ||||||
2541 | redirectAllPredecessorsTo(BodyEntered, EnterBB, DL); | ||||||
2542 | |||||||
2543 | BodyEnter = nullptr; | ||||||
2544 | BodyEntered = ExitBB; | ||||||
2545 | } | ||||||
2546 | |||||||
2547 | // Append the original loop nest body into the generated loop nest body. | ||||||
2548 | if (BodyEnter) | ||||||
2549 | redirectTo(BodyEnter, InnerEnter, DL); | ||||||
2550 | else | ||||||
2551 | redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL); | ||||||
2552 | redirectAllPredecessorsTo(InnerLatch, Continue, DL); | ||||||
2553 | |||||||
2554 | // Replace the original induction variable with an induction variable computed | ||||||
2555 | // from the tile and floor induction variables. | ||||||
2556 | Builder.restoreIP(Result.back()->getBodyIP()); | ||||||
2557 | for (int i = 0; i < NumLoops; ++i) { | ||||||
2558 | CanonicalLoopInfo *FloorLoop = Result[i]; | ||||||
2559 | CanonicalLoopInfo *TileLoop = Result[NumLoops + i]; | ||||||
2560 | Value *OrigIndVar = OrigIndVars[i]; | ||||||
2561 | Value *Size = TileSizes[i]; | ||||||
2562 | |||||||
2563 | Value *Scale = | ||||||
2564 | Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true); | ||||||
2565 | Value *Shift = | ||||||
2566 | Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true); | ||||||
2567 | OrigIndVar->replaceAllUsesWith(Shift); | ||||||
2568 | } | ||||||
2569 | |||||||
2570 | // Remove unused parts of the original loops. | ||||||
2571 | removeUnusedBlocksFromParent(OldControlBBs); | ||||||
2572 | |||||||
2573 | for (CanonicalLoopInfo *L : Loops) | ||||||
2574 | L->invalidate(); | ||||||
2575 | |||||||
2576 | #ifndef NDEBUG | ||||||
2577 | for (CanonicalLoopInfo *GenL : Result) | ||||||
2578 | GenL->assertOK(); | ||||||
2579 | #endif | ||||||
2580 | return Result; | ||||||
2581 | } | ||||||
2582 | |||||||
2583 | /// Attach loop metadata \p Properties to the loop described by \p Loop. If the | ||||||
2584 | /// loop already has metadata, the loop properties are appended. | ||||||
2585 | static void addLoopMetadata(CanonicalLoopInfo *Loop, | ||||||
2586 | ArrayRef<Metadata *> Properties) { | ||||||
2587 | assert(Loop->isValid() && "Expecting a valid CanonicalLoopInfo")(static_cast <bool> (Loop->isValid() && "Expecting a valid CanonicalLoopInfo" ) ? void (0) : __assert_fail ("Loop->isValid() && \"Expecting a valid CanonicalLoopInfo\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2587, __extension__ __PRETTY_FUNCTION__)); | ||||||
2588 | |||||||
2589 | // Nothing to do if no property to attach. | ||||||
2590 | if (Properties.empty()) | ||||||
2591 | return; | ||||||
2592 | |||||||
2593 | LLVMContext &Ctx = Loop->getFunction()->getContext(); | ||||||
2594 | SmallVector<Metadata *> NewLoopProperties; | ||||||
2595 | NewLoopProperties.push_back(nullptr); | ||||||
2596 | |||||||
2597 | // If the loop already has metadata, prepend it to the new metadata. | ||||||
2598 | BasicBlock *Latch = Loop->getLatch(); | ||||||
2599 | assert(Latch && "A valid CanonicalLoopInfo must have a unique latch")(static_cast <bool> (Latch && "A valid CanonicalLoopInfo must have a unique latch" ) ? void (0) : __assert_fail ("Latch && \"A valid CanonicalLoopInfo must have a unique latch\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2599, __extension__ __PRETTY_FUNCTION__)); | ||||||
2600 | MDNode *Existing = Latch->getTerminator()->getMetadata(LLVMContext::MD_loop); | ||||||
2601 | if (Existing) | ||||||
2602 | append_range(NewLoopProperties, drop_begin(Existing->operands(), 1)); | ||||||
2603 | |||||||
2604 | append_range(NewLoopProperties, Properties); | ||||||
2605 | MDNode *LoopID = MDNode::getDistinct(Ctx, NewLoopProperties); | ||||||
2606 | LoopID->replaceOperandWith(0, LoopID); | ||||||
2607 | |||||||
2608 | Latch->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopID); | ||||||
2609 | } | ||||||
2610 | |||||||
2611 | /// Attach llvm.access.group metadata to the memref instructions of \p Block | ||||||
2612 | static void addSimdMetadata(BasicBlock *Block, MDNode *AccessGroup, | ||||||
2613 | LoopInfo &LI) { | ||||||
2614 | for (Instruction &I : *Block) { | ||||||
2615 | if (I.mayReadOrWriteMemory()) { | ||||||
2616 | // TODO: This instruction may already have access group from | ||||||
2617 | // other pragmas e.g. #pragma clang loop vectorize. Append | ||||||
2618 | // so that the existing metadata is not overwritten. | ||||||
2619 | I.setMetadata(LLVMContext::MD_access_group, AccessGroup); | ||||||
2620 | } | ||||||
2621 | } | ||||||
2622 | } | ||||||
2623 | |||||||
2624 | void OpenMPIRBuilder::unrollLoopFull(DebugLoc, CanonicalLoopInfo *Loop) { | ||||||
2625 | LLVMContext &Ctx = Builder.getContext(); | ||||||
2626 | addLoopMetadata( | ||||||
2627 | Loop, {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), | ||||||
2628 | MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.full"))}); | ||||||
2629 | } | ||||||
2630 | |||||||
2631 | void OpenMPIRBuilder::unrollLoopHeuristic(DebugLoc, CanonicalLoopInfo *Loop) { | ||||||
2632 | LLVMContext &Ctx = Builder.getContext(); | ||||||
2633 | addLoopMetadata( | ||||||
2634 | Loop, { | ||||||
2635 | MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), | ||||||
2636 | }); | ||||||
2637 | } | ||||||
2638 | |||||||
2639 | void OpenMPIRBuilder::applySimd(DebugLoc, CanonicalLoopInfo *CanonicalLoop) { | ||||||
2640 | LLVMContext &Ctx = Builder.getContext(); | ||||||
2641 | |||||||
2642 | Function *F = CanonicalLoop->getFunction(); | ||||||
2643 | |||||||
2644 | FunctionAnalysisManager FAM; | ||||||
2645 | FAM.registerPass([]() { return DominatorTreeAnalysis(); }); | ||||||
2646 | FAM.registerPass([]() { return LoopAnalysis(); }); | ||||||
2647 | FAM.registerPass([]() { return PassInstrumentationAnalysis(); }); | ||||||
2648 | |||||||
2649 | LoopAnalysis LIA; | ||||||
2650 | LoopInfo &&LI = LIA.run(*F, FAM); | ||||||
2651 | |||||||
2652 | Loop *L = LI.getLoopFor(CanonicalLoop->getHeader()); | ||||||
2653 | |||||||
2654 | SmallSet<BasicBlock *, 8> Reachable; | ||||||
2655 | |||||||
2656 | // Get the basic blocks from the loop in which memref instructions | ||||||
2657 | // can be found. | ||||||
2658 | // TODO: Generalize getting all blocks inside a CanonicalizeLoopInfo, | ||||||
2659 | // preferably without running any passes. | ||||||
2660 | for (BasicBlock *Block : L->getBlocks()) { | ||||||
2661 | if (Block == CanonicalLoop->getCond() || | ||||||
2662 | Block == CanonicalLoop->getHeader()) | ||||||
2663 | continue; | ||||||
2664 | Reachable.insert(Block); | ||||||
2665 | } | ||||||
2666 | |||||||
2667 | // Add access group metadata to memory-access instructions. | ||||||
2668 | MDNode *AccessGroup = MDNode::getDistinct(Ctx, {}); | ||||||
2669 | for (BasicBlock *BB : Reachable) | ||||||
2670 | addSimdMetadata(BB, AccessGroup, LI); | ||||||
2671 | |||||||
2672 | // Use the above access group metadata to create loop level | ||||||
2673 | // metadata, which should be distinct for each loop. | ||||||
2674 | ConstantAsMetadata *BoolConst = | ||||||
2675 | ConstantAsMetadata::get(ConstantInt::getTrue(Type::getInt1Ty(Ctx))); | ||||||
2676 | // TODO: If the loop has existing parallel access metadata, have | ||||||
2677 | // to combine two lists. | ||||||
2678 | addLoopMetadata( | ||||||
2679 | CanonicalLoop, | ||||||
2680 | {MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), | ||||||
2681 | AccessGroup}), | ||||||
2682 | MDNode::get(Ctx, {MDString::get(Ctx, "llvm.loop.vectorize.enable"), | ||||||
2683 | BoolConst})}); | ||||||
2684 | } | ||||||
2685 | |||||||
2686 | /// Create the TargetMachine object to query the backend for optimization | ||||||
2687 | /// preferences. | ||||||
2688 | /// | ||||||
2689 | /// Ideally, this would be passed from the front-end to the OpenMPBuilder, but | ||||||
2690 | /// e.g. Clang does not pass it to its CodeGen layer and creates it only when | ||||||
2691 | /// needed for the LLVM pass pipline. We use some default options to avoid | ||||||
2692 | /// having to pass too many settings from the frontend that probably do not | ||||||
2693 | /// matter. | ||||||
2694 | /// | ||||||
2695 | /// Currently, TargetMachine is only used sometimes by the unrollLoopPartial | ||||||
2696 | /// method. If we are going to use TargetMachine for more purposes, especially | ||||||
2697 | /// those that are sensitive to TargetOptions, RelocModel and CodeModel, it | ||||||
2698 | /// might become be worth requiring front-ends to pass on their TargetMachine, | ||||||
2699 | /// or at least cache it between methods. Note that while fontends such as Clang | ||||||
2700 | /// have just a single main TargetMachine per translation unit, "target-cpu" and | ||||||
2701 | /// "target-features" that determine the TargetMachine are per-function and can | ||||||
2702 | /// be overrided using __attribute__((target("OPTIONS"))). | ||||||
2703 | static std::unique_ptr<TargetMachine> | ||||||
2704 | createTargetMachine(Function *F, CodeGenOpt::Level OptLevel) { | ||||||
2705 | Module *M = F->getParent(); | ||||||
2706 | |||||||
2707 | StringRef CPU = F->getFnAttribute("target-cpu").getValueAsString(); | ||||||
2708 | StringRef Features = F->getFnAttribute("target-features").getValueAsString(); | ||||||
2709 | const std::string &Triple = M->getTargetTriple(); | ||||||
2710 | |||||||
2711 | std::string Error; | ||||||
2712 | const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error); | ||||||
2713 | if (!TheTarget) | ||||||
2714 | return {}; | ||||||
2715 | |||||||
2716 | llvm::TargetOptions Options; | ||||||
2717 | return std::unique_ptr<TargetMachine>(TheTarget->createTargetMachine( | ||||||
2718 | Triple, CPU, Features, Options, /*RelocModel=*/None, /*CodeModel=*/None, | ||||||
2719 | OptLevel)); | ||||||
2720 | } | ||||||
2721 | |||||||
2722 | /// Heuristically determine the best-performant unroll factor for \p CLI. This | ||||||
2723 | /// depends on the target processor. We are re-using the same heuristics as the | ||||||
2724 | /// LoopUnrollPass. | ||||||
2725 | static int32_t computeHeuristicUnrollFactor(CanonicalLoopInfo *CLI) { | ||||||
2726 | Function *F = CLI->getFunction(); | ||||||
2727 | |||||||
2728 | // Assume the user requests the most aggressive unrolling, even if the rest of | ||||||
2729 | // the code is optimized using a lower setting. | ||||||
2730 | CodeGenOpt::Level OptLevel = CodeGenOpt::Aggressive; | ||||||
2731 | std::unique_ptr<TargetMachine> TM = createTargetMachine(F, OptLevel); | ||||||
2732 | |||||||
2733 | FunctionAnalysisManager FAM; | ||||||
2734 | FAM.registerPass([]() { return TargetLibraryAnalysis(); }); | ||||||
2735 | FAM.registerPass([]() { return AssumptionAnalysis(); }); | ||||||
2736 | FAM.registerPass([]() { return DominatorTreeAnalysis(); }); | ||||||
2737 | FAM.registerPass([]() { return LoopAnalysis(); }); | ||||||
2738 | FAM.registerPass([]() { return ScalarEvolutionAnalysis(); }); | ||||||
2739 | FAM.registerPass([]() { return PassInstrumentationAnalysis(); }); | ||||||
2740 | TargetIRAnalysis TIRA; | ||||||
2741 | if (TM) | ||||||
2742 | TIRA = TargetIRAnalysis( | ||||||
2743 | [&](const Function &F) { return TM->getTargetTransformInfo(F); }); | ||||||
2744 | FAM.registerPass([&]() { return TIRA; }); | ||||||
2745 | |||||||
2746 | TargetIRAnalysis::Result &&TTI = TIRA.run(*F, FAM); | ||||||
2747 | ScalarEvolutionAnalysis SEA; | ||||||
2748 | ScalarEvolution &&SE = SEA.run(*F, FAM); | ||||||
2749 | DominatorTreeAnalysis DTA; | ||||||
2750 | DominatorTree &&DT = DTA.run(*F, FAM); | ||||||
2751 | LoopAnalysis LIA; | ||||||
2752 | LoopInfo &&LI = LIA.run(*F, FAM); | ||||||
2753 | AssumptionAnalysis ACT; | ||||||
2754 | AssumptionCache &&AC = ACT.run(*F, FAM); | ||||||
2755 | OptimizationRemarkEmitter ORE{F}; | ||||||
2756 | |||||||
2757 | Loop *L = LI.getLoopFor(CLI->getHeader()); | ||||||
2758 | assert(L && "Expecting CanonicalLoopInfo to be recognized as a loop")(static_cast <bool> (L && "Expecting CanonicalLoopInfo to be recognized as a loop" ) ? void (0) : __assert_fail ("L && \"Expecting CanonicalLoopInfo to be recognized as a loop\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2758, __extension__ __PRETTY_FUNCTION__)); | ||||||
2759 | |||||||
2760 | TargetTransformInfo::UnrollingPreferences UP = | ||||||
2761 | gatherUnrollingPreferences(L, SE, TTI, | ||||||
2762 | /*BlockFrequencyInfo=*/nullptr, | ||||||
2763 | /*ProfileSummaryInfo=*/nullptr, ORE, OptLevel, | ||||||
2764 | /*UserThreshold=*/None, | ||||||
2765 | /*UserCount=*/None, | ||||||
2766 | /*UserAllowPartial=*/true, | ||||||
2767 | /*UserAllowRuntime=*/true, | ||||||
2768 | /*UserUpperBound=*/None, | ||||||
2769 | /*UserFullUnrollMaxCount=*/None); | ||||||
2770 | |||||||
2771 | UP.Force = true; | ||||||
2772 | |||||||
2773 | // Account for additional optimizations taking place before the LoopUnrollPass | ||||||
2774 | // would unroll the loop. | ||||||
2775 | UP.Threshold *= UnrollThresholdFactor; | ||||||
2776 | UP.PartialThreshold *= UnrollThresholdFactor; | ||||||
2777 | |||||||
2778 | // Use normal unroll factors even if the rest of the code is optimized for | ||||||
2779 | // size. | ||||||
2780 | UP.OptSizeThreshold = UP.Threshold; | ||||||
2781 | UP.PartialOptSizeThreshold = UP.PartialThreshold; | ||||||
2782 | |||||||
2783 | LLVM_DEBUG(dbgs() << "Unroll heuristic thresholds:\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Unroll heuristic thresholds:\n" << " Threshold=" << UP.Threshold << "\n" << " PartialThreshold=" << UP.PartialThreshold << "\n" << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n" << " PartialOptSizeThreshold=" << UP.PartialOptSizeThreshold << "\n"; } } while (false) | ||||||
2784 | << " Threshold=" << UP.Threshold << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Unroll heuristic thresholds:\n" << " Threshold=" << UP.Threshold << "\n" << " PartialThreshold=" << UP.PartialThreshold << "\n" << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n" << " PartialOptSizeThreshold=" << UP.PartialOptSizeThreshold << "\n"; } } while (false) | ||||||
2785 | << " PartialThreshold=" << UP.PartialThreshold << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Unroll heuristic thresholds:\n" << " Threshold=" << UP.Threshold << "\n" << " PartialThreshold=" << UP.PartialThreshold << "\n" << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n" << " PartialOptSizeThreshold=" << UP.PartialOptSizeThreshold << "\n"; } } while (false) | ||||||
2786 | << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Unroll heuristic thresholds:\n" << " Threshold=" << UP.Threshold << "\n" << " PartialThreshold=" << UP.PartialThreshold << "\n" << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n" << " PartialOptSizeThreshold=" << UP.PartialOptSizeThreshold << "\n"; } } while (false) | ||||||
2787 | << " PartialOptSizeThreshold="do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Unroll heuristic thresholds:\n" << " Threshold=" << UP.Threshold << "\n" << " PartialThreshold=" << UP.PartialThreshold << "\n" << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n" << " PartialOptSizeThreshold=" << UP.PartialOptSizeThreshold << "\n"; } } while (false) | ||||||
2788 | << UP.PartialOptSizeThreshold << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Unroll heuristic thresholds:\n" << " Threshold=" << UP.Threshold << "\n" << " PartialThreshold=" << UP.PartialThreshold << "\n" << " OptSizeThreshold=" << UP.OptSizeThreshold << "\n" << " PartialOptSizeThreshold=" << UP.PartialOptSizeThreshold << "\n"; } } while (false); | ||||||
2789 | |||||||
2790 | // Disable peeling. | ||||||
2791 | TargetTransformInfo::PeelingPreferences PP = | ||||||
2792 | gatherPeelingPreferences(L, SE, TTI, | ||||||
2793 | /*UserAllowPeeling=*/false, | ||||||
2794 | /*UserAllowProfileBasedPeeling=*/false, | ||||||
2795 | /*UnrollingSpecficValues=*/false); | ||||||
2796 | |||||||
2797 | SmallPtrSet<const Value *, 32> EphValues; | ||||||
2798 | CodeMetrics::collectEphemeralValues(L, &AC, EphValues); | ||||||
2799 | |||||||
2800 | // Assume that reads and writes to stack variables can be eliminated by | ||||||
2801 | // Mem2Reg, SROA or LICM. That is, don't count them towards the loop body's | ||||||
2802 | // size. | ||||||
2803 | for (BasicBlock *BB : L->blocks()) { | ||||||
2804 | for (Instruction &I : *BB) { | ||||||
2805 | Value *Ptr; | ||||||
2806 | if (auto *Load = dyn_cast<LoadInst>(&I)) { | ||||||
2807 | Ptr = Load->getPointerOperand(); | ||||||
2808 | } else if (auto *Store = dyn_cast<StoreInst>(&I)) { | ||||||
2809 | Ptr = Store->getPointerOperand(); | ||||||
2810 | } else | ||||||
2811 | continue; | ||||||
2812 | |||||||
2813 | Ptr = Ptr->stripPointerCasts(); | ||||||
2814 | |||||||
2815 | if (auto *Alloca = dyn_cast<AllocaInst>(Ptr)) { | ||||||
2816 | if (Alloca->getParent() == &F->getEntryBlock()) | ||||||
2817 | EphValues.insert(&I); | ||||||
2818 | } | ||||||
2819 | } | ||||||
2820 | } | ||||||
2821 | |||||||
2822 | unsigned NumInlineCandidates; | ||||||
2823 | bool NotDuplicatable; | ||||||
2824 | bool Convergent; | ||||||
2825 | unsigned LoopSize = | ||||||
2826 | ApproximateLoopSize(L, NumInlineCandidates, NotDuplicatable, Convergent, | ||||||
2827 | TTI, EphValues, UP.BEInsns); | ||||||
2828 | LLVM_DEBUG(dbgs() << "Estimated loop size is " << LoopSize << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Estimated loop size is " << LoopSize << "\n"; } } while (false); | ||||||
2829 | |||||||
2830 | // Loop is not unrollable if the loop contains certain instructions. | ||||||
2831 | if (NotDuplicatable || Convergent) { | ||||||
2832 | LLVM_DEBUG(dbgs() << "Loop not considered unrollable\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Loop not considered unrollable\n" ; } } while (false); | ||||||
2833 | return 1; | ||||||
2834 | } | ||||||
2835 | |||||||
2836 | // TODO: Determine trip count of \p CLI if constant, computeUnrollCount might | ||||||
2837 | // be able to use it. | ||||||
2838 | int TripCount = 0; | ||||||
2839 | int MaxTripCount = 0; | ||||||
2840 | bool MaxOrZero = false; | ||||||
2841 | unsigned TripMultiple = 0; | ||||||
2842 | |||||||
2843 | bool UseUpperBound = false; | ||||||
2844 | computeUnrollCount(L, TTI, DT, &LI, SE, EphValues, &ORE, TripCount, | ||||||
2845 | MaxTripCount, MaxOrZero, TripMultiple, LoopSize, UP, PP, | ||||||
2846 | UseUpperBound); | ||||||
2847 | unsigned Factor = UP.Count; | ||||||
2848 | LLVM_DEBUG(dbgs() << "Suggesting unroll factor of " << Factor << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { dbgs() << "Suggesting unroll factor of " << Factor << "\n"; } } while (false); | ||||||
2849 | |||||||
2850 | // This function returns 1 to signal to not unroll a loop. | ||||||
2851 | if (Factor == 0) | ||||||
2852 | return 1; | ||||||
2853 | return Factor; | ||||||
2854 | } | ||||||
2855 | |||||||
2856 | void OpenMPIRBuilder::unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, | ||||||
2857 | int32_t Factor, | ||||||
2858 | CanonicalLoopInfo **UnrolledCLI) { | ||||||
2859 | assert(Factor >= 0 && "Unroll factor must not be negative")(static_cast <bool> (Factor >= 0 && "Unroll factor must not be negative" ) ? void (0) : __assert_fail ("Factor >= 0 && \"Unroll factor must not be negative\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2859, __extension__ __PRETTY_FUNCTION__)); | ||||||
2860 | |||||||
2861 | Function *F = Loop->getFunction(); | ||||||
2862 | LLVMContext &Ctx = F->getContext(); | ||||||
2863 | |||||||
2864 | // If the unrolled loop is not used for another loop-associated directive, it | ||||||
2865 | // is sufficient to add metadata for the LoopUnrollPass. | ||||||
2866 | if (!UnrolledCLI) { | ||||||
2867 | SmallVector<Metadata *, 2> LoopMetadata; | ||||||
2868 | LoopMetadata.push_back( | ||||||
2869 | MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable"))); | ||||||
2870 | |||||||
2871 | if (Factor >= 1) { | ||||||
2872 | ConstantAsMetadata *FactorConst = ConstantAsMetadata::get( | ||||||
2873 | ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor))); | ||||||
2874 | LoopMetadata.push_back(MDNode::get( | ||||||
2875 | Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})); | ||||||
2876 | } | ||||||
2877 | |||||||
2878 | addLoopMetadata(Loop, LoopMetadata); | ||||||
2879 | return; | ||||||
2880 | } | ||||||
2881 | |||||||
2882 | // Heuristically determine the unroll factor. | ||||||
2883 | if (Factor == 0) | ||||||
2884 | Factor = computeHeuristicUnrollFactor(Loop); | ||||||
2885 | |||||||
2886 | // No change required with unroll factor 1. | ||||||
2887 | if (Factor == 1) { | ||||||
2888 | *UnrolledCLI = Loop; | ||||||
2889 | return; | ||||||
2890 | } | ||||||
2891 | |||||||
2892 | assert(Factor >= 2 &&(static_cast <bool> (Factor >= 2 && "unrolling only makes sense with a factor of 2 or larger" ) ? void (0) : __assert_fail ("Factor >= 2 && \"unrolling only makes sense with a factor of 2 or larger\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2893, __extension__ __PRETTY_FUNCTION__)) | ||||||
2893 | "unrolling only makes sense with a factor of 2 or larger")(static_cast <bool> (Factor >= 2 && "unrolling only makes sense with a factor of 2 or larger" ) ? void (0) : __assert_fail ("Factor >= 2 && \"unrolling only makes sense with a factor of 2 or larger\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2893, __extension__ __PRETTY_FUNCTION__)); | ||||||
2894 | |||||||
2895 | Type *IndVarTy = Loop->getIndVarType(); | ||||||
2896 | |||||||
2897 | // Apply partial unrolling by tiling the loop by the unroll-factor, then fully | ||||||
2898 | // unroll the inner loop. | ||||||
2899 | Value *FactorVal = | ||||||
2900 | ConstantInt::get(IndVarTy, APInt(IndVarTy->getIntegerBitWidth(), Factor, | ||||||
2901 | /*isSigned=*/false)); | ||||||
2902 | std::vector<CanonicalLoopInfo *> LoopNest = | ||||||
2903 | tileLoops(DL, {Loop}, {FactorVal}); | ||||||
2904 | assert(LoopNest.size() == 2 && "Expect 2 loops after tiling")(static_cast <bool> (LoopNest.size() == 2 && "Expect 2 loops after tiling" ) ? void (0) : __assert_fail ("LoopNest.size() == 2 && \"Expect 2 loops after tiling\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 2904, __extension__ __PRETTY_FUNCTION__)); | ||||||
2905 | *UnrolledCLI = LoopNest[0]; | ||||||
2906 | CanonicalLoopInfo *InnerLoop = LoopNest[1]; | ||||||
2907 | |||||||
2908 | // LoopUnrollPass can only fully unroll loops with constant trip count. | ||||||
2909 | // Unroll by the unroll factor with a fallback epilog for the remainder | ||||||
2910 | // iterations if necessary. | ||||||
2911 | ConstantAsMetadata *FactorConst = ConstantAsMetadata::get( | ||||||
2912 | ConstantInt::get(Type::getInt32Ty(Ctx), APInt(32, Factor))); | ||||||
2913 | addLoopMetadata( | ||||||
2914 | InnerLoop, | ||||||
2915 | {MDNode::get(Ctx, MDString::get(Ctx, "llvm.loop.unroll.enable")), | ||||||
2916 | MDNode::get( | ||||||
2917 | Ctx, {MDString::get(Ctx, "llvm.loop.unroll.count"), FactorConst})}); | ||||||
2918 | |||||||
2919 | #ifndef NDEBUG | ||||||
2920 | (*UnrolledCLI)->assertOK(); | ||||||
2921 | #endif | ||||||
2922 | } | ||||||
2923 | |||||||
2924 | OpenMPIRBuilder::InsertPointTy | ||||||
2925 | OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc, | ||||||
2926 | llvm::Value *BufSize, llvm::Value *CpyBuf, | ||||||
2927 | llvm::Value *CpyFn, llvm::Value *DidIt) { | ||||||
2928 | if (!updateToLocation(Loc)) | ||||||
2929 | return Loc.IP; | ||||||
2930 | |||||||
2931 | uint32_t SrcLocStrSize; | ||||||
2932 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
2933 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
2934 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
2935 | |||||||
2936 | llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt); | ||||||
2937 | |||||||
2938 | Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD}; | ||||||
2939 | |||||||
2940 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate); | ||||||
2941 | Builder.CreateCall(Fn, Args); | ||||||
2942 | |||||||
2943 | return Builder.saveIP(); | ||||||
2944 | } | ||||||
2945 | |||||||
2946 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSingle( | ||||||
2947 | const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, | ||||||
2948 | FinalizeCallbackTy FiniCB, bool IsNowait, llvm::Value *DidIt) { | ||||||
2949 | |||||||
2950 | if (!updateToLocation(Loc)) | ||||||
2951 | return Loc.IP; | ||||||
2952 | |||||||
2953 | // If needed (i.e. not null), initialize `DidIt` with 0 | ||||||
2954 | if (DidIt) { | ||||||
2955 | Builder.CreateStore(Builder.getInt32(0), DidIt); | ||||||
2956 | } | ||||||
2957 | |||||||
2958 | Directive OMPD = Directive::OMPD_single; | ||||||
2959 | uint32_t SrcLocStrSize; | ||||||
2960 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
2961 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
2962 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
2963 | Value *Args[] = {Ident, ThreadId}; | ||||||
2964 | |||||||
2965 | Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single); | ||||||
2966 | Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args); | ||||||
2967 | |||||||
2968 | Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single); | ||||||
2969 | Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); | ||||||
2970 | |||||||
2971 | // generates the following: | ||||||
2972 | // if (__kmpc_single()) { | ||||||
2973 | // .... single region ... | ||||||
2974 | // __kmpc_end_single | ||||||
2975 | // } | ||||||
2976 | // __kmpc_barrier | ||||||
2977 | |||||||
2978 | EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | ||||||
2979 | /*Conditional*/ true, | ||||||
2980 | /*hasFinalize*/ true); | ||||||
2981 | if (!IsNowait) | ||||||
2982 | createBarrier(LocationDescription(Builder.saveIP(), Loc.DL), | ||||||
2983 | omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false, | ||||||
2984 | /* CheckCancelFlag */ false); | ||||||
2985 | return Builder.saveIP(); | ||||||
2986 | } | ||||||
2987 | |||||||
2988 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical( | ||||||
2989 | const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, | ||||||
2990 | FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) { | ||||||
2991 | |||||||
2992 | if (!updateToLocation(Loc)) | ||||||
2993 | return Loc.IP; | ||||||
2994 | |||||||
2995 | Directive OMPD = Directive::OMPD_critical; | ||||||
2996 | uint32_t SrcLocStrSize; | ||||||
2997 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
2998 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
2999 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
3000 | Value *LockVar = getOMPCriticalRegionLock(CriticalName); | ||||||
3001 | Value *Args[] = {Ident, ThreadId, LockVar}; | ||||||
3002 | |||||||
3003 | SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args)); | ||||||
3004 | Function *RTFn = nullptr; | ||||||
3005 | if (HintInst) { | ||||||
3006 | // Add Hint to entry Args and create call | ||||||
3007 | EnterArgs.push_back(HintInst); | ||||||
3008 | RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint); | ||||||
3009 | } else { | ||||||
3010 | RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical); | ||||||
3011 | } | ||||||
3012 | Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs); | ||||||
3013 | |||||||
3014 | Function *ExitRTLFn = | ||||||
3015 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical); | ||||||
3016 | Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args); | ||||||
3017 | |||||||
3018 | return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | ||||||
3019 | /*Conditional*/ false, /*hasFinalize*/ true); | ||||||
3020 | } | ||||||
3021 | |||||||
3022 | OpenMPIRBuilder::InsertPointTy | ||||||
3023 | OpenMPIRBuilder::createOrderedDepend(const LocationDescription &Loc, | ||||||
3024 | InsertPointTy AllocaIP, unsigned NumLoops, | ||||||
3025 | ArrayRef<llvm::Value *> StoreValues, | ||||||
3026 | const Twine &Name, bool IsDependSource) { | ||||||
3027 | for (size_t I = 0; I < StoreValues.size(); I++) | ||||||
3028 | assert(StoreValues[I]->getType()->isIntegerTy(64) &&(static_cast <bool> (StoreValues[I]->getType()->isIntegerTy (64) && "OpenMP runtime requires depend vec with i64 type" ) ? void (0) : __assert_fail ("StoreValues[I]->getType()->isIntegerTy(64) && \"OpenMP runtime requires depend vec with i64 type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3029, __extension__ __PRETTY_FUNCTION__)) | ||||||
3029 | "OpenMP runtime requires depend vec with i64 type")(static_cast <bool> (StoreValues[I]->getType()->isIntegerTy (64) && "OpenMP runtime requires depend vec with i64 type" ) ? void (0) : __assert_fail ("StoreValues[I]->getType()->isIntegerTy(64) && \"OpenMP runtime requires depend vec with i64 type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3029, __extension__ __PRETTY_FUNCTION__)); | ||||||
3030 | |||||||
3031 | if (!updateToLocation(Loc)) | ||||||
3032 | return Loc.IP; | ||||||
3033 | |||||||
3034 | // Allocate space for vector and generate alloc instruction. | ||||||
3035 | auto *ArrI64Ty = ArrayType::get(Int64, NumLoops); | ||||||
3036 | Builder.restoreIP(AllocaIP); | ||||||
3037 | AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI64Ty, nullptr, Name); | ||||||
3038 | ArgsBase->setAlignment(Align(8)); | ||||||
3039 | Builder.restoreIP(Loc.IP); | ||||||
3040 | |||||||
3041 | // Store the index value with offset in depend vector. | ||||||
3042 | for (unsigned I = 0; I < NumLoops; ++I) { | ||||||
3043 | Value *DependAddrGEPIter = Builder.CreateInBoundsGEP( | ||||||
3044 | ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(I)}); | ||||||
3045 | StoreInst *STInst = Builder.CreateStore(StoreValues[I], DependAddrGEPIter); | ||||||
3046 | STInst->setAlignment(Align(8)); | ||||||
3047 | } | ||||||
3048 | |||||||
3049 | Value *DependBaseAddrGEP = Builder.CreateInBoundsGEP( | ||||||
3050 | ArrI64Ty, ArgsBase, {Builder.getInt64(0), Builder.getInt64(0)}); | ||||||
3051 | |||||||
3052 | uint32_t SrcLocStrSize; | ||||||
3053 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
3054 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
3055 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
3056 | Value *Args[] = {Ident, ThreadId, DependBaseAddrGEP}; | ||||||
3057 | |||||||
3058 | Function *RTLFn = nullptr; | ||||||
3059 | if (IsDependSource) | ||||||
3060 | RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_post); | ||||||
3061 | else | ||||||
3062 | RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_doacross_wait); | ||||||
3063 | Builder.CreateCall(RTLFn, Args); | ||||||
3064 | |||||||
3065 | return Builder.saveIP(); | ||||||
3066 | } | ||||||
3067 | |||||||
3068 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createOrderedThreadsSimd( | ||||||
3069 | const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, | ||||||
3070 | FinalizeCallbackTy FiniCB, bool IsThreads) { | ||||||
3071 | if (!updateToLocation(Loc)) | ||||||
3072 | return Loc.IP; | ||||||
3073 | |||||||
3074 | Directive OMPD = Directive::OMPD_ordered; | ||||||
3075 | Instruction *EntryCall = nullptr; | ||||||
3076 | Instruction *ExitCall = nullptr; | ||||||
3077 | |||||||
3078 | if (IsThreads) { | ||||||
3079 | uint32_t SrcLocStrSize; | ||||||
3080 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
3081 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
3082 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
3083 | Value *Args[] = {Ident, ThreadId}; | ||||||
3084 | |||||||
3085 | Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_ordered); | ||||||
3086 | EntryCall = Builder.CreateCall(EntryRTLFn, Args); | ||||||
3087 | |||||||
3088 | Function *ExitRTLFn = | ||||||
3089 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_ordered); | ||||||
3090 | ExitCall = Builder.CreateCall(ExitRTLFn, Args); | ||||||
3091 | } | ||||||
3092 | |||||||
3093 | return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB, | ||||||
3094 | /*Conditional*/ false, /*hasFinalize*/ true); | ||||||
3095 | } | ||||||
3096 | |||||||
3097 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion( | ||||||
3098 | Directive OMPD, Instruction *EntryCall, Instruction *ExitCall, | ||||||
3099 | BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional, | ||||||
3100 | bool HasFinalize, bool IsCancellable) { | ||||||
3101 | |||||||
3102 | if (HasFinalize) | ||||||
3103 | FinalizationStack.push_back({FiniCB, OMPD, IsCancellable}); | ||||||
3104 | |||||||
3105 | // Create inlined region's entry and body blocks, in preparation | ||||||
3106 | // for conditional creation | ||||||
3107 | BasicBlock *EntryBB = Builder.GetInsertBlock(); | ||||||
3108 | Instruction *SplitPos = EntryBB->getTerminator(); | ||||||
3109 | if (!isa_and_nonnull<BranchInst>(SplitPos)) | ||||||
3110 | SplitPos = new UnreachableInst(Builder.getContext(), EntryBB); | ||||||
3111 | BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end"); | ||||||
3112 | BasicBlock *FiniBB = | ||||||
3113 | EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize"); | ||||||
3114 | |||||||
3115 | Builder.SetInsertPoint(EntryBB->getTerminator()); | ||||||
3116 | emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional); | ||||||
3117 | |||||||
3118 | // generate body | ||||||
3119 | BodyGenCB(/* AllocaIP */ InsertPointTy(), | ||||||
3120 | /* CodeGenIP */ Builder.saveIP(), *FiniBB); | ||||||
3121 | |||||||
3122 | // If we didn't emit a branch to FiniBB during body generation, it means | ||||||
3123 | // FiniBB is unreachable (e.g. while(1);). stop generating all the | ||||||
3124 | // unreachable blocks, and remove anything we are not going to use. | ||||||
3125 | auto SkipEmittingRegion = FiniBB->hasNPredecessors(0); | ||||||
3126 | if (SkipEmittingRegion) { | ||||||
3127 | FiniBB->eraseFromParent(); | ||||||
3128 | ExitCall->eraseFromParent(); | ||||||
3129 | // Discard finalization if we have it. | ||||||
3130 | if (HasFinalize) { | ||||||
3131 | assert(!FinalizationStack.empty() &&(static_cast <bool> (!FinalizationStack.empty() && "Unexpected finalization stack state!") ? void (0) : __assert_fail ("!FinalizationStack.empty() && \"Unexpected finalization stack state!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3132, __extension__ __PRETTY_FUNCTION__)) | ||||||
3132 | "Unexpected finalization stack state!")(static_cast <bool> (!FinalizationStack.empty() && "Unexpected finalization stack state!") ? void (0) : __assert_fail ("!FinalizationStack.empty() && \"Unexpected finalization stack state!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3132, __extension__ __PRETTY_FUNCTION__)); | ||||||
3133 | FinalizationStack.pop_back(); | ||||||
3134 | } | ||||||
3135 | } else { | ||||||
3136 | // emit exit call and do any needed finalization. | ||||||
3137 | auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt()); | ||||||
3138 | assert(FiniBB->getTerminator()->getNumSuccessors() == 1 &&(static_cast <bool> (FiniBB->getTerminator()->getNumSuccessors () == 1 && FiniBB->getTerminator()->getSuccessor (0) == ExitBB && "Unexpected control flow graph state!!" ) ? void (0) : __assert_fail ("FiniBB->getTerminator()->getNumSuccessors() == 1 && FiniBB->getTerminator()->getSuccessor(0) == ExitBB && \"Unexpected control flow graph state!!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3140, __extension__ __PRETTY_FUNCTION__)) | ||||||
3139 | FiniBB->getTerminator()->getSuccessor(0) == ExitBB &&(static_cast <bool> (FiniBB->getTerminator()->getNumSuccessors () == 1 && FiniBB->getTerminator()->getSuccessor (0) == ExitBB && "Unexpected control flow graph state!!" ) ? void (0) : __assert_fail ("FiniBB->getTerminator()->getNumSuccessors() == 1 && FiniBB->getTerminator()->getSuccessor(0) == ExitBB && \"Unexpected control flow graph state!!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3140, __extension__ __PRETTY_FUNCTION__)) | ||||||
3140 | "Unexpected control flow graph state!!")(static_cast <bool> (FiniBB->getTerminator()->getNumSuccessors () == 1 && FiniBB->getTerminator()->getSuccessor (0) == ExitBB && "Unexpected control flow graph state!!" ) ? void (0) : __assert_fail ("FiniBB->getTerminator()->getNumSuccessors() == 1 && FiniBB->getTerminator()->getSuccessor(0) == ExitBB && \"Unexpected control flow graph state!!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3140, __extension__ __PRETTY_FUNCTION__)); | ||||||
3141 | emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize); | ||||||
3142 | assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB &&(static_cast <bool> (FiniBB->getUniquePredecessor()-> getUniqueSuccessor() == FiniBB && "Unexpected Control Flow State!" ) ? void (0) : __assert_fail ("FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB && \"Unexpected Control Flow State!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3143, __extension__ __PRETTY_FUNCTION__)) | ||||||
3143 | "Unexpected Control Flow State!")(static_cast <bool> (FiniBB->getUniquePredecessor()-> getUniqueSuccessor() == FiniBB && "Unexpected Control Flow State!" ) ? void (0) : __assert_fail ("FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB && \"Unexpected Control Flow State!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3143, __extension__ __PRETTY_FUNCTION__)); | ||||||
3144 | MergeBlockIntoPredecessor(FiniBB); | ||||||
3145 | } | ||||||
3146 | |||||||
3147 | // If we are skipping the region of a non conditional, remove the exit | ||||||
3148 | // block, and clear the builder's insertion point. | ||||||
3149 | assert(SplitPos->getParent() == ExitBB &&(static_cast <bool> (SplitPos->getParent() == ExitBB && "Unexpected Insertion point location!") ? void (0 ) : __assert_fail ("SplitPos->getParent() == ExitBB && \"Unexpected Insertion point location!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3150, __extension__ __PRETTY_FUNCTION__)) | ||||||
3150 | "Unexpected Insertion point location!")(static_cast <bool> (SplitPos->getParent() == ExitBB && "Unexpected Insertion point location!") ? void (0 ) : __assert_fail ("SplitPos->getParent() == ExitBB && \"Unexpected Insertion point location!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3150, __extension__ __PRETTY_FUNCTION__)); | ||||||
3151 | if (!Conditional && SkipEmittingRegion) { | ||||||
3152 | ExitBB->eraseFromParent(); | ||||||
3153 | Builder.ClearInsertionPoint(); | ||||||
3154 | } else { | ||||||
3155 | auto merged = MergeBlockIntoPredecessor(ExitBB); | ||||||
3156 | BasicBlock *ExitPredBB = SplitPos->getParent(); | ||||||
3157 | auto InsertBB = merged ? ExitPredBB : ExitBB; | ||||||
3158 | if (!isa_and_nonnull<BranchInst>(SplitPos)) | ||||||
3159 | SplitPos->eraseFromParent(); | ||||||
3160 | Builder.SetInsertPoint(InsertBB); | ||||||
3161 | } | ||||||
3162 | |||||||
3163 | return Builder.saveIP(); | ||||||
3164 | } | ||||||
3165 | |||||||
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry(
    Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) {
  // if nothing to do, Return current insertion point.
  if (!Conditional || !EntryCall)
    return Builder.saveIP();

  // Conditional entry: the region body runs only when the runtime entry
  // call returned non-zero, otherwise control falls through to ExitBB.
  BasicBlock *EntryBB = Builder.GetInsertBlock();
  Value *CallBool = Builder.CreateIsNotNull(EntryCall);
  // ThenBB is created detached; the UnreachableInst is a placeholder
  // terminator so the block stays well-formed until the real terminator
  // is moved in below.
  auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body");
  auto *UI = new UnreachableInst(Builder.getContext(), ThenBB);

  // Emit thenBB and set the Builder's insertion point there for
  // body generation next. Place the block after the current block.
  Function *CurFn = EntryBB->getParent();
  CurFn->getBasicBlockList().insertAfter(EntryBB->getIterator(), ThenBB);

  // Move Entry branch to end of ThenBB, and replace with conditional
  // branch (If-stmt)
  Instruction *EntryBBTI = EntryBB->getTerminator();
  Builder.CreateCondBr(CallBool, ThenBB, ExitBB);
  EntryBBTI->removeFromParent();
  Builder.SetInsertPoint(UI);
  Builder.Insert(EntryBBTI);
  // The moved terminator now ends ThenBB; the placeholder can go.
  UI->eraseFromParent();
  Builder.SetInsertPoint(ThenBB->getTerminator());

  // return an insertion point to ExitBB.
  return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt());
}
3195 | |||||||
3196 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit( | ||||||
3197 | omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall, | ||||||
3198 | bool HasFinalize) { | ||||||
3199 | |||||||
3200 | Builder.restoreIP(FinIP); | ||||||
3201 | |||||||
3202 | // If there is finalization to do, emit it before the exit call | ||||||
3203 | if (HasFinalize) { | ||||||
3204 | assert(!FinalizationStack.empty() &&(static_cast <bool> (!FinalizationStack.empty() && "Unexpected finalization stack state!") ? void (0) : __assert_fail ("!FinalizationStack.empty() && \"Unexpected finalization stack state!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3205, __extension__ __PRETTY_FUNCTION__)) | ||||||
3205 | "Unexpected finalization stack state!")(static_cast <bool> (!FinalizationStack.empty() && "Unexpected finalization stack state!") ? void (0) : __assert_fail ("!FinalizationStack.empty() && \"Unexpected finalization stack state!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3205, __extension__ __PRETTY_FUNCTION__)); | ||||||
3206 | |||||||
3207 | FinalizationInfo Fi = FinalizationStack.pop_back_val(); | ||||||
3208 | assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!")(static_cast <bool> (Fi.DK == OMPD && "Unexpected Directive for Finalization call!" ) ? void (0) : __assert_fail ("Fi.DK == OMPD && \"Unexpected Directive for Finalization call!\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3208, __extension__ __PRETTY_FUNCTION__)); | ||||||
3209 | |||||||
3210 | Fi.FiniCB(FinIP); | ||||||
3211 | |||||||
3212 | BasicBlock *FiniBB = FinIP.getBlock(); | ||||||
3213 | Instruction *FiniBBTI = FiniBB->getTerminator(); | ||||||
3214 | |||||||
3215 | // set Builder IP for call creation | ||||||
3216 | Builder.SetInsertPoint(FiniBBTI); | ||||||
3217 | } | ||||||
3218 | |||||||
3219 | if (!ExitCall) | ||||||
3220 | return Builder.saveIP(); | ||||||
3221 | |||||||
3222 | // place the Exitcall as last instruction before Finalization block terminator | ||||||
3223 | ExitCall->removeFromParent(); | ||||||
3224 | Builder.Insert(ExitCall); | ||||||
3225 | |||||||
3226 | return IRBuilder<>::InsertPoint(ExitCall->getParent(), | ||||||
3227 | ExitCall->getIterator()); | ||||||
3228 | } | ||||||
3229 | |||||||
3230 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks( | ||||||
3231 | InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, | ||||||
3232 | llvm::IntegerType *IntPtrTy, bool BranchtoEnd) { | ||||||
3233 | if (!IP.isSet()) | ||||||
3234 | return IP; | ||||||
3235 | |||||||
3236 | IRBuilder<>::InsertPointGuard IPG(Builder); | ||||||
3237 | |||||||
3238 | // creates the following CFG structure | ||||||
3239 | // OMP_Entry : (MasterAddr != PrivateAddr)? | ||||||
3240 | // F T | ||||||
3241 | // | \ | ||||||
3242 | // | copin.not.master | ||||||
3243 | // | / | ||||||
3244 | // v / | ||||||
3245 | // copyin.not.master.end | ||||||
3246 | // | | ||||||
3247 | // v | ||||||
3248 | // OMP.Entry.Next | ||||||
3249 | |||||||
3250 | BasicBlock *OMP_Entry = IP.getBlock(); | ||||||
3251 | Function *CurFn = OMP_Entry->getParent(); | ||||||
3252 | BasicBlock *CopyBegin = | ||||||
3253 | BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn); | ||||||
3254 | BasicBlock *CopyEnd = nullptr; | ||||||
3255 | |||||||
3256 | // If entry block is terminated, split to preserve the branch to following | ||||||
3257 | // basic block (i.e. OMP.Entry.Next), otherwise, leave everything as is. | ||||||
3258 | if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) { | ||||||
3259 | CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(), | ||||||
3260 | "copyin.not.master.end"); | ||||||
3261 | OMP_Entry->getTerminator()->eraseFromParent(); | ||||||
3262 | } else { | ||||||
3263 | CopyEnd = | ||||||
3264 | BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn); | ||||||
3265 | } | ||||||
3266 | |||||||
3267 | Builder.SetInsertPoint(OMP_Entry); | ||||||
3268 | Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy); | ||||||
3269 | Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy); | ||||||
3270 | Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr); | ||||||
3271 | Builder.CreateCondBr(cmp, CopyBegin, CopyEnd); | ||||||
3272 | |||||||
3273 | Builder.SetInsertPoint(CopyBegin); | ||||||
3274 | if (BranchtoEnd) | ||||||
3275 | Builder.SetInsertPoint(Builder.CreateBr(CopyEnd)); | ||||||
3276 | |||||||
3277 | return Builder.saveIP(); | ||||||
3278 | } | ||||||
3279 | |||||||
3280 | CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc, | ||||||
3281 | Value *Size, Value *Allocator, | ||||||
3282 | std::string Name) { | ||||||
3283 | IRBuilder<>::InsertPointGuard IPG(Builder); | ||||||
3284 | Builder.restoreIP(Loc.IP); | ||||||
3285 | |||||||
3286 | uint32_t SrcLocStrSize; | ||||||
3287 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
3288 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
3289 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
3290 | Value *Args[] = {ThreadId, Size, Allocator}; | ||||||
3291 | |||||||
3292 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc); | ||||||
3293 | |||||||
3294 | return Builder.CreateCall(Fn, Args, Name); | ||||||
3295 | } | ||||||
3296 | |||||||
3297 | CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc, | ||||||
3298 | Value *Addr, Value *Allocator, | ||||||
3299 | std::string Name) { | ||||||
3300 | IRBuilder<>::InsertPointGuard IPG(Builder); | ||||||
3301 | Builder.restoreIP(Loc.IP); | ||||||
3302 | |||||||
3303 | uint32_t SrcLocStrSize; | ||||||
3304 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
3305 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
3306 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
3307 | Value *Args[] = {ThreadId, Addr, Allocator}; | ||||||
3308 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free); | ||||||
3309 | return Builder.CreateCall(Fn, Args, Name); | ||||||
3310 | } | ||||||
3311 | |||||||
3312 | CallInst *OpenMPIRBuilder::createOMPInteropInit( | ||||||
3313 | const LocationDescription &Loc, Value *InteropVar, | ||||||
3314 | omp::OMPInteropType InteropType, Value *Device, Value *NumDependences, | ||||||
3315 | Value *DependenceAddress, bool HaveNowaitClause) { | ||||||
3316 | IRBuilder<>::InsertPointGuard IPG(Builder); | ||||||
3317 | Builder.restoreIP(Loc.IP); | ||||||
3318 | |||||||
3319 | uint32_t SrcLocStrSize; | ||||||
3320 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
3321 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
3322 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
3323 | if (Device == nullptr) | ||||||
3324 | Device = ConstantInt::get(Int32, -1); | ||||||
3325 | Constant *InteropTypeVal = ConstantInt::get(Int64, (int)InteropType); | ||||||
3326 | if (NumDependences == nullptr) { | ||||||
3327 | NumDependences = ConstantInt::get(Int32, 0); | ||||||
3328 | PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext()); | ||||||
3329 | DependenceAddress = ConstantPointerNull::get(PointerTypeVar); | ||||||
3330 | } | ||||||
3331 | Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause); | ||||||
3332 | Value *Args[] = { | ||||||
3333 | Ident, ThreadId, InteropVar, InteropTypeVal, | ||||||
3334 | Device, NumDependences, DependenceAddress, HaveNowaitClauseVal}; | ||||||
3335 | |||||||
3336 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_init); | ||||||
3337 | |||||||
3338 | return Builder.CreateCall(Fn, Args); | ||||||
3339 | } | ||||||
3340 | |||||||
3341 | CallInst *OpenMPIRBuilder::createOMPInteropDestroy( | ||||||
3342 | const LocationDescription &Loc, Value *InteropVar, Value *Device, | ||||||
3343 | Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause) { | ||||||
3344 | IRBuilder<>::InsertPointGuard IPG(Builder); | ||||||
3345 | Builder.restoreIP(Loc.IP); | ||||||
3346 | |||||||
3347 | uint32_t SrcLocStrSize; | ||||||
3348 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
3349 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
3350 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
3351 | if (Device == nullptr) | ||||||
3352 | Device = ConstantInt::get(Int32, -1); | ||||||
3353 | if (NumDependences == nullptr) { | ||||||
3354 | NumDependences = ConstantInt::get(Int32, 0); | ||||||
3355 | PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext()); | ||||||
3356 | DependenceAddress = ConstantPointerNull::get(PointerTypeVar); | ||||||
3357 | } | ||||||
3358 | Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause); | ||||||
3359 | Value *Args[] = { | ||||||
3360 | Ident, ThreadId, InteropVar, Device, | ||||||
3361 | NumDependences, DependenceAddress, HaveNowaitClauseVal}; | ||||||
3362 | |||||||
3363 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_destroy); | ||||||
3364 | |||||||
3365 | return Builder.CreateCall(Fn, Args); | ||||||
3366 | } | ||||||
3367 | |||||||
3368 | CallInst *OpenMPIRBuilder::createOMPInteropUse(const LocationDescription &Loc, | ||||||
3369 | Value *InteropVar, Value *Device, | ||||||
3370 | Value *NumDependences, | ||||||
3371 | Value *DependenceAddress, | ||||||
3372 | bool HaveNowaitClause) { | ||||||
3373 | IRBuilder<>::InsertPointGuard IPG(Builder); | ||||||
3374 | Builder.restoreIP(Loc.IP); | ||||||
3375 | uint32_t SrcLocStrSize; | ||||||
3376 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
3377 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
3378 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
3379 | if (Device == nullptr) | ||||||
3380 | Device = ConstantInt::get(Int32, -1); | ||||||
3381 | if (NumDependences == nullptr) { | ||||||
3382 | NumDependences = ConstantInt::get(Int32, 0); | ||||||
3383 | PointerType *PointerTypeVar = Type::getInt8PtrTy(M.getContext()); | ||||||
3384 | DependenceAddress = ConstantPointerNull::get(PointerTypeVar); | ||||||
3385 | } | ||||||
3386 | Value *HaveNowaitClauseVal = ConstantInt::get(Int32, HaveNowaitClause); | ||||||
3387 | Value *Args[] = { | ||||||
3388 | Ident, ThreadId, InteropVar, Device, | ||||||
3389 | NumDependences, DependenceAddress, HaveNowaitClauseVal}; | ||||||
3390 | |||||||
3391 | Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___tgt_interop_use); | ||||||
3392 | |||||||
3393 | return Builder.CreateCall(Fn, Args); | ||||||
3394 | } | ||||||
3395 | |||||||
3396 | CallInst *OpenMPIRBuilder::createCachedThreadPrivate( | ||||||
3397 | const LocationDescription &Loc, llvm::Value *Pointer, | ||||||
3398 | llvm::ConstantInt *Size, const llvm::Twine &Name) { | ||||||
3399 | IRBuilder<>::InsertPointGuard IPG(Builder); | ||||||
3400 | Builder.restoreIP(Loc.IP); | ||||||
3401 | |||||||
3402 | uint32_t SrcLocStrSize; | ||||||
3403 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
3404 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
3405 | Value *ThreadId = getOrCreateThreadID(Ident); | ||||||
3406 | Constant *ThreadPrivateCache = | ||||||
3407 | getOrCreateOMPInternalVariable(Int8PtrPtr, Name); | ||||||
3408 | llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache}; | ||||||
3409 | |||||||
3410 | Function *Fn = | ||||||
3411 | getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached); | ||||||
3412 | |||||||
3413 | return Builder.CreateCall(Fn, Args); | ||||||
3414 | } | ||||||
3415 | |||||||
OpenMPIRBuilder::InsertPointTy
OpenMPIRBuilder::createTargetInit(const LocationDescription &Loc, bool IsSPMD,
                                  bool RequiresFullRuntime) {
  if (!updateToLocation(Loc))
    return Loc.IP;

  uint32_t SrcLocStrSize;
  Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  Constant *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  // Execution mode flag: SPMD vs generic, as an i8.
  ConstantInt *IsSPMDVal = ConstantInt::getSigned(
      IntegerType::getInt8Ty(Int8->getContext()),
      IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC);
  // Generic mode uses the runtime's state machine; SPMD does not.
  ConstantInt *UseGenericStateMachine =
      ConstantInt::getBool(Int32->getContext(), !IsSPMD);
  ConstantInt *RequiresFullRuntimeVal =
      ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime);

  Function *Fn = getOrCreateRuntimeFunctionPtr(
      omp::RuntimeFunction::OMPRTL___kmpc_target_init);

  CallInst *ThreadKind = Builder.CreateCall(
      Fn, {Ident, IsSPMDVal, UseGenericStateMachine, RequiresFullRuntimeVal});

  // Threads for which __kmpc_target_init returns -1 execute the user code;
  // the rest return immediately (worker threads in generic mode).
  Value *ExecUserCode = Builder.CreateICmpEQ(
      ThreadKind, ConstantInt::get(ThreadKind->getType(), -1),
      "exec_user_code");

  // ThreadKind = __kmpc_target_init(...)
  // if (ThreadKind == -1)
  //   user_code
  // else
  //   return;

  // Placeholder terminator used to split off the user-code entry block.
  auto *UI = Builder.CreateUnreachable();
  BasicBlock *CheckBB = UI->getParent();
  BasicBlock *UserCodeEntryBB = CheckBB->splitBasicBlock(UI, "user_code.entry");

  // Non-user-code threads branch here and return from the kernel.
  BasicBlock *WorkerExitBB = BasicBlock::Create(
      CheckBB->getContext(), "worker.exit", CheckBB->getParent());
  Builder.SetInsertPoint(WorkerExitBB);
  Builder.CreateRetVoid();

  // Replace the split's unconditional branch with the conditional dispatch,
  // then drop both the old terminator and the placeholder.
  auto *CheckBBTI = CheckBB->getTerminator();
  Builder.SetInsertPoint(CheckBBTI);
  Builder.CreateCondBr(ExecUserCode, UI->getParent(), WorkerExitBB);

  CheckBBTI->eraseFromParent();
  UI->eraseFromParent();

  // Continue in the "user_code" block, see diagram above and in
  // openmp/libomptarget/deviceRTLs/common/include/target.h .
  return InsertPointTy(UserCodeEntryBB, UserCodeEntryBB->getFirstInsertionPt());
}
3469 | |||||||
3470 | void OpenMPIRBuilder::createTargetDeinit(const LocationDescription &Loc, | ||||||
3471 | bool IsSPMD, | ||||||
3472 | bool RequiresFullRuntime) { | ||||||
3473 | if (!updateToLocation(Loc)) | ||||||
3474 | return; | ||||||
3475 | |||||||
3476 | uint32_t SrcLocStrSize; | ||||||
3477 | Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize); | ||||||
3478 | Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); | ||||||
3479 | ConstantInt *IsSPMDVal = ConstantInt::getSigned( | ||||||
3480 | IntegerType::getInt8Ty(Int8->getContext()), | ||||||
3481 | IsSPMD ? OMP_TGT_EXEC_MODE_SPMD : OMP_TGT_EXEC_MODE_GENERIC); | ||||||
3482 | ConstantInt *RequiresFullRuntimeVal = | ||||||
3483 | ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime); | ||||||
3484 | |||||||
3485 | Function *Fn = getOrCreateRuntimeFunctionPtr( | ||||||
3486 | omp::RuntimeFunction::OMPRTL___kmpc_target_deinit); | ||||||
3487 | |||||||
3488 | Builder.CreateCall(Fn, {Ident, IsSPMDVal, RequiresFullRuntimeVal}); | ||||||
3489 | } | ||||||
3490 | |||||||
3491 | std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts, | ||||||
3492 | StringRef FirstSeparator, | ||||||
3493 | StringRef Separator) { | ||||||
3494 | SmallString<128> Buffer; | ||||||
3495 | llvm::raw_svector_ostream OS(Buffer); | ||||||
3496 | StringRef Sep = FirstSeparator; | ||||||
3497 | for (StringRef Part : Parts) { | ||||||
3498 | OS << Sep << Part; | ||||||
3499 | Sep = Separator; | ||||||
3500 | } | ||||||
3501 | return OS.str().str(); | ||||||
3502 | } | ||||||
3503 | |||||||
3504 | Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable( | ||||||
3505 | llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) { | ||||||
3506 | // TODO: Replace the twine arg with stringref to get rid of the conversion | ||||||
3507 | // logic. However This is taken from current implementation in clang as is. | ||||||
3508 | // Since this method is used in many places exclusively for OMP internal use | ||||||
3509 | // we will keep it as is for temporarily until we move all users to the | ||||||
3510 | // builder and then, if possible, fix it everywhere in one go. | ||||||
3511 | SmallString<256> Buffer; | ||||||
3512 | llvm::raw_svector_ostream Out(Buffer); | ||||||
3513 | Out << Name; | ||||||
3514 | StringRef RuntimeName = Out.str(); | ||||||
3515 | auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first; | ||||||
3516 | if (Elem.second) { | ||||||
3517 | assert(cast<PointerType>(Elem.second->getType())(static_cast <bool> (cast<PointerType>(Elem.second ->getType()) ->isOpaqueOrPointeeTypeMatches(Ty) && "OMP internal variable has different type than requested") ? void (0) : __assert_fail ("cast<PointerType>(Elem.second->getType()) ->isOpaqueOrPointeeTypeMatches(Ty) && \"OMP internal variable has different type than requested\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3519, __extension__ __PRETTY_FUNCTION__)) | ||||||
3518 | ->isOpaqueOrPointeeTypeMatches(Ty) &&(static_cast <bool> (cast<PointerType>(Elem.second ->getType()) ->isOpaqueOrPointeeTypeMatches(Ty) && "OMP internal variable has different type than requested") ? void (0) : __assert_fail ("cast<PointerType>(Elem.second->getType()) ->isOpaqueOrPointeeTypeMatches(Ty) && \"OMP internal variable has different type than requested\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3519, __extension__ __PRETTY_FUNCTION__)) | ||||||
3519 | "OMP internal variable has different type than requested")(static_cast <bool> (cast<PointerType>(Elem.second ->getType()) ->isOpaqueOrPointeeTypeMatches(Ty) && "OMP internal variable has different type than requested") ? void (0) : __assert_fail ("cast<PointerType>(Elem.second->getType()) ->isOpaqueOrPointeeTypeMatches(Ty) && \"OMP internal variable has different type than requested\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3519, __extension__ __PRETTY_FUNCTION__)); | ||||||
3520 | } else { | ||||||
3521 | // TODO: investigate the appropriate linkage type used for the global | ||||||
3522 | // variable for possibly changing that to internal or private, or maybe | ||||||
3523 | // create different versions of the function for different OMP internal | ||||||
3524 | // variables. | ||||||
3525 | Elem.second = new llvm::GlobalVariable( | ||||||
3526 | M, Ty, /*IsConstant*/ false, llvm::GlobalValue::CommonLinkage, | ||||||
3527 | llvm::Constant::getNullValue(Ty), Elem.first(), | ||||||
3528 | /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal, | ||||||
3529 | AddressSpace); | ||||||
3530 | } | ||||||
3531 | |||||||
3532 | return Elem.second; | ||||||
3533 | } | ||||||
3534 | |||||||
3535 | Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) { | ||||||
3536 | std::string Prefix = Twine("gomp_critical_user_", CriticalName).str(); | ||||||
3537 | std::string Name = getNameWithSeparators({Prefix, "var"}, ".", "."); | ||||||
3538 | return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name); | ||||||
3539 | } | ||||||
3540 | |||||||
3541 | GlobalVariable * | ||||||
3542 | OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings, | ||||||
3543 | std::string VarName) { | ||||||
3544 | llvm::Constant *MaptypesArrayInit = | ||||||
3545 | llvm::ConstantDataArray::get(M.getContext(), Mappings); | ||||||
3546 | auto *MaptypesArrayGlobal = new llvm::GlobalVariable( | ||||||
3547 | M, MaptypesArrayInit->getType(), | ||||||
3548 | /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MaptypesArrayInit, | ||||||
3549 | VarName); | ||||||
3550 | MaptypesArrayGlobal->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); | ||||||
3551 | return MaptypesArrayGlobal; | ||||||
3552 | } | ||||||
3553 | |||||||
3554 | void OpenMPIRBuilder::createMapperAllocas(const LocationDescription &Loc, | ||||||
3555 | InsertPointTy AllocaIP, | ||||||
3556 | unsigned NumOperands, | ||||||
3557 | struct MapperAllocas &MapperAllocas) { | ||||||
3558 | if (!updateToLocation(Loc)) | ||||||
3559 | return; | ||||||
3560 | |||||||
3561 | auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands); | ||||||
3562 | auto *ArrI64Ty = ArrayType::get(Int64, NumOperands); | ||||||
3563 | Builder.restoreIP(AllocaIP); | ||||||
3564 | AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI8PtrTy); | ||||||
3565 | AllocaInst *Args = Builder.CreateAlloca(ArrI8PtrTy); | ||||||
3566 | AllocaInst *ArgSizes = Builder.CreateAlloca(ArrI64Ty); | ||||||
3567 | Builder.restoreIP(Loc.IP); | ||||||
3568 | MapperAllocas.ArgsBase = ArgsBase; | ||||||
3569 | MapperAllocas.Args = Args; | ||||||
3570 | MapperAllocas.ArgSizes = ArgSizes; | ||||||
3571 | } | ||||||
3572 | |||||||
3573 | void OpenMPIRBuilder::emitMapperCall(const LocationDescription &Loc, | ||||||
3574 | Function *MapperFunc, Value *SrcLocInfo, | ||||||
3575 | Value *MaptypesArg, Value *MapnamesArg, | ||||||
3576 | struct MapperAllocas &MapperAllocas, | ||||||
3577 | int64_t DeviceID, unsigned NumOperands) { | ||||||
3578 | if (!updateToLocation(Loc)) | ||||||
3579 | return; | ||||||
3580 | |||||||
3581 | auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands); | ||||||
3582 | auto *ArrI64Ty = ArrayType::get(Int64, NumOperands); | ||||||
3583 | Value *ArgsBaseGEP = | ||||||
3584 | Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.ArgsBase, | ||||||
3585 | {Builder.getInt32(0), Builder.getInt32(0)}); | ||||||
3586 | Value *ArgsGEP = | ||||||
3587 | Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.Args, | ||||||
3588 | {Builder.getInt32(0), Builder.getInt32(0)}); | ||||||
3589 | Value *ArgSizesGEP = | ||||||
3590 | Builder.CreateInBoundsGEP(ArrI64Ty, MapperAllocas.ArgSizes, | ||||||
3591 | {Builder.getInt32(0), Builder.getInt32(0)}); | ||||||
3592 | Value *NullPtr = Constant::getNullValue(Int8Ptr->getPointerTo()); | ||||||
3593 | Builder.CreateCall(MapperFunc, | ||||||
3594 | {SrcLocInfo, Builder.getInt64(DeviceID), | ||||||
3595 | Builder.getInt32(NumOperands), ArgsBaseGEP, ArgsGEP, | ||||||
3596 | ArgSizesGEP, MaptypesArg, MapnamesArg, NullPtr}); | ||||||
3597 | } | ||||||
3598 | |||||||
3599 | bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic( | ||||||
3600 | const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) { | ||||||
3601 | assert(!(AO == AtomicOrdering::NotAtomic ||(static_cast <bool> (!(AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered) && "Unexpected Atomic Ordering." ) ? void (0) : __assert_fail ("!(AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered) && \"Unexpected Atomic Ordering.\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3603, __extension__ __PRETTY_FUNCTION__)) | ||||||
3602 | AO == llvm::AtomicOrdering::Unordered) &&(static_cast <bool> (!(AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered) && "Unexpected Atomic Ordering." ) ? void (0) : __assert_fail ("!(AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered) && \"Unexpected Atomic Ordering.\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3603, __extension__ __PRETTY_FUNCTION__)) | ||||||
3603 | "Unexpected Atomic Ordering.")(static_cast <bool> (!(AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered) && "Unexpected Atomic Ordering." ) ? void (0) : __assert_fail ("!(AO == AtomicOrdering::NotAtomic || AO == llvm::AtomicOrdering::Unordered) && \"Unexpected Atomic Ordering.\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3603, __extension__ __PRETTY_FUNCTION__)); | ||||||
3604 | |||||||
3605 | bool Flush = false; | ||||||
3606 | llvm::AtomicOrdering FlushAO = AtomicOrdering::Monotonic; | ||||||
3607 | |||||||
3608 | switch (AK) { | ||||||
3609 | case Read: | ||||||
3610 | if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease || | ||||||
3611 | AO == AtomicOrdering::SequentiallyConsistent) { | ||||||
3612 | FlushAO = AtomicOrdering::Acquire; | ||||||
3613 | Flush = true; | ||||||
3614 | } | ||||||
3615 | break; | ||||||
3616 | case Write: | ||||||
3617 | case Compare: | ||||||
3618 | case Update: | ||||||
3619 | if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease || | ||||||
3620 | AO == AtomicOrdering::SequentiallyConsistent) { | ||||||
3621 | FlushAO = AtomicOrdering::Release; | ||||||
3622 | Flush = true; | ||||||
3623 | } | ||||||
3624 | break; | ||||||
3625 | case Capture: | ||||||
3626 | switch (AO) { | ||||||
3627 | case AtomicOrdering::Acquire: | ||||||
3628 | FlushAO = AtomicOrdering::Acquire; | ||||||
3629 | Flush = true; | ||||||
3630 | break; | ||||||
3631 | case AtomicOrdering::Release: | ||||||
3632 | FlushAO = AtomicOrdering::Release; | ||||||
3633 | Flush = true; | ||||||
3634 | break; | ||||||
3635 | case AtomicOrdering::AcquireRelease: | ||||||
3636 | case AtomicOrdering::SequentiallyConsistent: | ||||||
3637 | FlushAO = AtomicOrdering::AcquireRelease; | ||||||
3638 | Flush = true; | ||||||
3639 | break; | ||||||
3640 | default: | ||||||
3641 | // do nothing - leave silently. | ||||||
3642 | break; | ||||||
3643 | } | ||||||
3644 | } | ||||||
3645 | |||||||
3646 | if (Flush) { | ||||||
3647 | // Currently Flush RT call still doesn't take memory_ordering, so for when | ||||||
3648 | // that happens, this tries to do the resolution of which atomic ordering | ||||||
3649 | // to use with but issue the flush call | ||||||
3650 | // TODO: pass `FlushAO` after memory ordering support is added | ||||||
3651 | (void)FlushAO; | ||||||
3652 | emitFlush(Loc); | ||||||
3653 | } | ||||||
3654 | |||||||
3655 | // for AO == AtomicOrdering::Monotonic and all other case combinations | ||||||
3656 | // do nothing | ||||||
3657 | return Flush; | ||||||
3658 | } | ||||||
3659 | |||||||
3660 | OpenMPIRBuilder::InsertPointTy | ||||||
3661 | OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc, | ||||||
3662 | AtomicOpValue &X, AtomicOpValue &V, | ||||||
3663 | AtomicOrdering AO) { | ||||||
3664 | if (!updateToLocation(Loc)) | ||||||
3665 | return Loc.IP; | ||||||
3666 | |||||||
3667 | Type *XTy = X.Var->getType(); | ||||||
3668 | assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory")(static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3668, __extension__ __PRETTY_FUNCTION__)); | ||||||
3669 | Type *XElemTy = X.ElemTy; | ||||||
3670 | assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||(static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic read expected a scalar type") ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic read expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3672, __extension__ __PRETTY_FUNCTION__)) | ||||||
3671 | XElemTy->isPointerTy()) &&(static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic read expected a scalar type") ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic read expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3672, __extension__ __PRETTY_FUNCTION__)) | ||||||
3672 | "OMP atomic read expected a scalar type")(static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic read expected a scalar type") ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic read expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3672, __extension__ __PRETTY_FUNCTION__)); | ||||||
3673 | |||||||
3674 | Value *XRead = nullptr; | ||||||
3675 | |||||||
3676 | if (XElemTy->isIntegerTy()) { | ||||||
3677 | LoadInst *XLD = | ||||||
3678 | Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read"); | ||||||
3679 | XLD->setAtomic(AO); | ||||||
3680 | XRead = cast<Value>(XLD); | ||||||
3681 | } else { | ||||||
3682 | // We need to bitcast and perform atomic op as integer | ||||||
3683 | unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace(); | ||||||
3684 | IntegerType *IntCastTy = | ||||||
3685 | IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); | ||||||
3686 | Value *XBCast = Builder.CreateBitCast( | ||||||
3687 | X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast"); | ||||||
3688 | LoadInst *XLoad = | ||||||
3689 | Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load"); | ||||||
3690 | XLoad->setAtomic(AO); | ||||||
3691 | if (XElemTy->isFloatingPointTy()) { | ||||||
3692 | XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast"); | ||||||
3693 | } else { | ||||||
3694 | XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast"); | ||||||
3695 | } | ||||||
3696 | } | ||||||
3697 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read); | ||||||
3698 | Builder.CreateStore(XRead, V.Var, V.IsVolatile); | ||||||
3699 | return Builder.saveIP(); | ||||||
3700 | } | ||||||
3701 | |||||||
3702 | OpenMPIRBuilder::InsertPointTy | ||||||
3703 | OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc, | ||||||
3704 | AtomicOpValue &X, Value *Expr, | ||||||
3705 | AtomicOrdering AO) { | ||||||
3706 | if (!updateToLocation(Loc)) | ||||||
3707 | return Loc.IP; | ||||||
3708 | |||||||
3709 | Type *XTy = X.Var->getType(); | ||||||
3710 | assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory")(static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3710, __extension__ __PRETTY_FUNCTION__)); | ||||||
3711 | Type *XElemTy = X.ElemTy; | ||||||
3712 | assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||(static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic write expected a scalar type") ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic write expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3714, __extension__ __PRETTY_FUNCTION__)) | ||||||
3713 | XElemTy->isPointerTy()) &&(static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic write expected a scalar type") ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic write expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3714, __extension__ __PRETTY_FUNCTION__)) | ||||||
3714 | "OMP atomic write expected a scalar type")(static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic write expected a scalar type") ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic write expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3714, __extension__ __PRETTY_FUNCTION__)); | ||||||
3715 | |||||||
3716 | if (XElemTy->isIntegerTy()) { | ||||||
3717 | StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile); | ||||||
3718 | XSt->setAtomic(AO); | ||||||
3719 | } else { | ||||||
3720 | // We need to bitcast and perform atomic op as integers | ||||||
3721 | unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace(); | ||||||
3722 | IntegerType *IntCastTy = | ||||||
3723 | IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); | ||||||
3724 | Value *XBCast = Builder.CreateBitCast( | ||||||
3725 | X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast"); | ||||||
3726 | Value *ExprCast = | ||||||
3727 | Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast"); | ||||||
3728 | StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile); | ||||||
3729 | XSt->setAtomic(AO); | ||||||
3730 | } | ||||||
3731 | |||||||
3732 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write); | ||||||
3733 | return Builder.saveIP(); | ||||||
3734 | } | ||||||
3735 | |||||||
3736 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate( | ||||||
3737 | const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, | ||||||
3738 | Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, | ||||||
3739 | AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr) { | ||||||
3740 | assert(!isConflictIP(Loc.IP, AllocaIP) && "IPs must not be ambiguous")(static_cast <bool> (!isConflictIP(Loc.IP, AllocaIP) && "IPs must not be ambiguous") ? void (0) : __assert_fail ("!isConflictIP(Loc.IP, AllocaIP) && \"IPs must not be ambiguous\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3740, __extension__ __PRETTY_FUNCTION__)); | ||||||
| |||||||
3741 | if (!updateToLocation(Loc)) | ||||||
3742 | return Loc.IP; | ||||||
3743 | |||||||
3744 | LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); ( static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy-> isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type" ) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__ __PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst ::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst:: UMin) && "OpenMP atomic does not support LT or GT operations" ) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__ __PRETTY_FUNCTION__)); }; } } while (false) | ||||||
3745 | Type *XTy = X.Var->getType();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); ( static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy-> isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type" ) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__ __PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst ::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst:: UMin) && "OpenMP atomic does not support LT or GT operations" ) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__ __PRETTY_FUNCTION__)); }; } } while (false) | ||||||
3746 | assert(XTy->isPointerTy() &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); ( static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy-> isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type" ) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__ __PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst ::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst:: UMin) && "OpenMP atomic does not support LT or GT operations" ) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__ __PRETTY_FUNCTION__)); }; } } while (false) | ||||||
3747 | "OMP Atomic expects a pointer to target memory");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); ( static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy-> isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type" ) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__ __PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst ::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst:: UMin) && "OpenMP atomic does not support LT or GT operations" ) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__ __PRETTY_FUNCTION__)); }; } } while (false) | ||||||
3748 | Type *XElemTy = X.ElemTy;do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); ( static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy-> isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type" ) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__ __PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst ::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst:: UMin) && "OpenMP atomic does not support LT or GT operations" ) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__ __PRETTY_FUNCTION__)); }; } } while (false) | ||||||
3749 | assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); ( static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy-> isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type" ) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__ __PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst ::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst:: UMin) && "OpenMP atomic does not support LT or GT operations" ) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__ __PRETTY_FUNCTION__)); }; } } while (false) | ||||||
3750 | XElemTy->isPointerTy()) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); ( static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy-> isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type" ) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__ __PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst ::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst:: UMin) && "OpenMP atomic does not support LT or GT operations" ) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__ __PRETTY_FUNCTION__)); }; } } while (false) | ||||||
3751 | "OMP atomic update expected a scalar type");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); ( static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy-> isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type" ) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__ __PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst ::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst:: UMin) && "OpenMP atomic does not support LT or GT operations" ) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__ __PRETTY_FUNCTION__)); }; } } while (false) | ||||||
3752 | assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); ( static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy-> isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type" ) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__ __PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst ::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst:: UMin) && "OpenMP atomic does not support LT or GT operations" ) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__ __PRETTY_FUNCTION__)); }; } } while (false) | ||||||
3753 | (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) &&do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); ( static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy-> isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type" ) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__ __PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst ::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst:: UMin) && "OpenMP atomic does not support LT or GT operations" ) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__ __PRETTY_FUNCTION__)); }; } } while (false) | ||||||
3754 | "OpenMP atomic does not support LT or GT operations");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); ( static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy-> isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type" ) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__ __PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst ::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst:: UMin) && "OpenMP atomic does not support LT or GT operations" ) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__ __PRETTY_FUNCTION__)); }; } } while (false) | ||||||
3755 | })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("openmp-ir-builder")) { { Type *XTy = X.Var->getType(); ( static_cast <bool> (XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("XTy->isPointerTy() && \"OMP Atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3747, __extension__ __PRETTY_FUNCTION__)); Type *XElemTy = X.ElemTy; (static_cast <bool> ((XElemTy->isFloatingPointTy() || XElemTy-> isIntegerTy() || XElemTy->isPointerTy()) && "OMP atomic update expected a scalar type" ) ? void (0) : __assert_fail ("(XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() || XElemTy->isPointerTy()) && \"OMP atomic update expected a scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3751, __extension__ __PRETTY_FUNCTION__)); (static_cast <bool> ((RMWOp != AtomicRMWInst ::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst:: UMin) && "OpenMP atomic does not support LT or GT operations" ) ? void (0) : __assert_fail ("(RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) && (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) && \"OpenMP atomic does not support LT or GT operations\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3754, __extension__ __PRETTY_FUNCTION__)); }; } } while (false); | ||||||
3756 | |||||||
3757 | emitAtomicUpdate(AllocaIP, X.Var, X.ElemTy, Expr, AO, RMWOp, UpdateOp, | ||||||
3758 | X.IsVolatile, IsXBinopExpr); | ||||||
3759 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update); | ||||||
3760 | return Builder.saveIP(); | ||||||
3761 | } | ||||||
3762 | |||||||
3763 | Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2, | ||||||
3764 | AtomicRMWInst::BinOp RMWOp) { | ||||||
3765 | switch (RMWOp) { | ||||||
3766 | case AtomicRMWInst::Add: | ||||||
3767 | return Builder.CreateAdd(Src1, Src2); | ||||||
3768 | case AtomicRMWInst::Sub: | ||||||
3769 | return Builder.CreateSub(Src1, Src2); | ||||||
3770 | case AtomicRMWInst::And: | ||||||
3771 | return Builder.CreateAnd(Src1, Src2); | ||||||
3772 | case AtomicRMWInst::Nand: | ||||||
3773 | return Builder.CreateNeg(Builder.CreateAnd(Src1, Src2)); | ||||||
3774 | case AtomicRMWInst::Or: | ||||||
3775 | return Builder.CreateOr(Src1, Src2); | ||||||
3776 | case AtomicRMWInst::Xor: | ||||||
3777 | return Builder.CreateXor(Src1, Src2); | ||||||
3778 | case AtomicRMWInst::Xchg: | ||||||
3779 | case AtomicRMWInst::FAdd: | ||||||
3780 | case AtomicRMWInst::FSub: | ||||||
3781 | case AtomicRMWInst::BAD_BINOP: | ||||||
3782 | case AtomicRMWInst::Max: | ||||||
3783 | case AtomicRMWInst::Min: | ||||||
3784 | case AtomicRMWInst::UMax: | ||||||
3785 | case AtomicRMWInst::UMin: | ||||||
3786 | llvm_unreachable("Unsupported atomic update operation")::llvm::llvm_unreachable_internal("Unsupported atomic update operation" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3786); | ||||||
3787 | } | ||||||
3788 | llvm_unreachable("Unsupported atomic update operation")::llvm::llvm_unreachable_internal("Unsupported atomic update operation" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3788); | ||||||
3789 | } | ||||||
3790 | |||||||
3791 | std::pair<Value *, Value *> OpenMPIRBuilder::emitAtomicUpdate( | ||||||
3792 | InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr, | ||||||
3793 | AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, | ||||||
3794 | AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXBinopExpr) { | ||||||
3795 | // TODO: handle the case where XElemTy is not byte-sized or not a power of 2 | ||||||
3796 | // or a complex datatype. | ||||||
3797 | bool emitRMWOp = false; | ||||||
3798 | switch (RMWOp) { | ||||||
3799 | case AtomicRMWInst::Add: | ||||||
3800 | case AtomicRMWInst::And: | ||||||
3801 | case AtomicRMWInst::Nand: | ||||||
3802 | case AtomicRMWInst::Or: | ||||||
3803 | case AtomicRMWInst::Xor: | ||||||
3804 | case AtomicRMWInst::Xchg: | ||||||
3805 | emitRMWOp = XElemTy; | ||||||
3806 | break; | ||||||
3807 | case AtomicRMWInst::Sub: | ||||||
3808 | emitRMWOp = (IsXBinopExpr && XElemTy); | ||||||
3809 | break; | ||||||
3810 | default: | ||||||
3811 | emitRMWOp = false; | ||||||
3812 | } | ||||||
3813 | emitRMWOp &= XElemTy->isIntegerTy(); | ||||||
3814 | |||||||
3815 | std::pair<Value *, Value *> Res; | ||||||
3816 | if (emitRMWOp
| ||||||
3817 | Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO); | ||||||
3818 | // not needed except in case of postfix captures. Generate anyway for | ||||||
3819 | // consistency with the else part. Will be removed with any DCE pass. | ||||||
3820 | // AtomicRMWInst::Xchg does not have a coressponding instruction. | ||||||
3821 | if (RMWOp == AtomicRMWInst::Xchg) | ||||||
3822 | Res.second = Res.first; | ||||||
3823 | else | ||||||
3824 | Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp); | ||||||
3825 | } else { | ||||||
3826 | unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace(); | ||||||
3827 | IntegerType *IntCastTy = | ||||||
3828 | IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); | ||||||
3829 | Value *XBCast = | ||||||
3830 | Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace)); | ||||||
3831 | LoadInst *OldVal = | ||||||
3832 | Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load"); | ||||||
3833 | OldVal->setAtomic(AO); | ||||||
3834 | // CurBB | ||||||
3835 | // | /---\ | ||||||
3836 | // ContBB | | ||||||
3837 | // | \---/ | ||||||
3838 | // ExitBB | ||||||
3839 | BasicBlock *CurBB = Builder.GetInsertBlock(); | ||||||
3840 | Instruction *CurBBTI = CurBB->getTerminator(); | ||||||
3841 | CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable(); | ||||||
3842 | BasicBlock *ExitBB = | ||||||
3843 | CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit"); | ||||||
3844 | BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(), | ||||||
3845 | X->getName() + ".atomic.cont"); | ||||||
3846 | ContBB->getTerminator()->eraseFromParent(); | ||||||
3847 | Builder.restoreIP(AllocaIP); | ||||||
3848 | AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy); | ||||||
3849 | NewAtomicAddr->setName(X->getName() + "x.new.val"); | ||||||
3850 | Builder.SetInsertPoint(ContBB); | ||||||
3851 | llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2); | ||||||
3852 | PHI->addIncoming(OldVal, CurBB); | ||||||
3853 | IntegerType *NewAtomicCastTy = | ||||||
3854 | IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits()); | ||||||
3855 | bool IsIntTy = XElemTy->isIntegerTy(); | ||||||
3856 | Value *NewAtomicIntAddr = | ||||||
3857 | (IsIntTy
| ||||||
3858 | ? NewAtomicAddr | ||||||
3859 | : Builder.CreateBitCast(NewAtomicAddr, | ||||||
3860 | NewAtomicCastTy->getPointerTo(Addrspace)); | ||||||
3861 | Value *OldExprVal = PHI; | ||||||
3862 | if (!IsIntTy
| ||||||
3863 | if (XElemTy->isFloatingPointTy()) { | ||||||
3864 | OldExprVal = Builder.CreateBitCast(PHI, XElemTy, | ||||||
3865 | X->getName() + ".atomic.fltCast"); | ||||||
3866 | } else { | ||||||
3867 | OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy, | ||||||
3868 | X->getName() + ".atomic.ptrCast"); | ||||||
3869 | } | ||||||
3870 | } | ||||||
3871 | |||||||
3872 | Value *Upd = UpdateOp(OldExprVal, Builder); | ||||||
3873 | Builder.CreateStore(Upd, NewAtomicAddr); | ||||||
3874 | LoadInst *DesiredVal = Builder.CreateLoad(IntCastTy, NewAtomicIntAddr); | ||||||
3875 | Value *XAddr = | ||||||
3876 | (IsIntTy
| ||||||
3877 | ? X | ||||||
3878 | : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace)); | ||||||
3879 | AtomicOrdering Failure = | ||||||
3880 | llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); | ||||||
3881 | AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg( | ||||||
3882 | XAddr, PHI, DesiredVal, llvm::MaybeAlign(), AO, Failure); | ||||||
3883 | Result->setVolatile(VolatileX); | ||||||
3884 | Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0); | ||||||
3885 | Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1); | ||||||
3886 | PHI->addIncoming(PreviousVal, Builder.GetInsertBlock()); | ||||||
3887 | Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB); | ||||||
3888 | |||||||
3889 | Res.first = OldExprVal; | ||||||
3890 | Res.second = Upd; | ||||||
3891 | |||||||
3892 | // set Insertion point in exit block | ||||||
3893 | if (UnreachableInst *ExitTI
| ||||||
3894 | dyn_cast<UnreachableInst>(ExitBB->getTerminator())) { | ||||||
3895 | CurBBTI->eraseFromParent(); | ||||||
3896 | Builder.SetInsertPoint(ExitBB); | ||||||
3897 | } else { | ||||||
3898 | Builder.SetInsertPoint(ExitTI); | ||||||
3899 | } | ||||||
3900 | } | ||||||
3901 | |||||||
3902 | return Res; | ||||||
3903 | } | ||||||
3904 | |||||||
OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture(
    const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X,
    AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
    AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
    bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr) {
  // Emit an 'omp atomic capture': atomically update *X.Var and store either
  // the old value (postfix capture) or the updated value (prefix capture)
  // into *V.Var.
  if (!updateToLocation(Loc))
    return Loc.IP;

  // Operand sanity checks, compiled in only for debug builds of this pass.
  LLVM_DEBUG({
    Type *XTy = X.Var->getType();
    assert(XTy->isPointerTy() &&
           "OMP Atomic expects a pointer to target memory");
    Type *XElemTy = X.ElemTy;
    assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
            XElemTy->isPointerTy()) &&
           "OMP atomic capture expected a scalar type");
    assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
           "OpenMP atomic does not support LT or GT operations");
  });

  // If UpdateExpr is 'x' updated with some `expr` not based on 'x',
  // 'x' is simply atomically rewritten with 'expr'.
  AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg);
  std::pair<Value *, Value *> Result =
      emitAtomicUpdate(AllocaIP, X.Var, X.ElemTy, Expr, AO, AtomicOp, UpdateOp,
                       X.IsVolatile, IsXBinopExpr);

  // Result.first is the value of 'x' before the update, Result.second after.
  Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second);
  Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile);

  // An atomic capture may imply a flush depending on the ordering.
  checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture);
  return Builder.saveIP();
}
3938 | |||||||
3939 | OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCompare( | ||||||
3940 | const LocationDescription &Loc, AtomicOpValue &X, Value *E, Value *D, | ||||||
3941 | AtomicOrdering AO, OMPAtomicCompareOp Op, bool IsXBinopExpr) { | ||||||
3942 | if (!updateToLocation(Loc)) | ||||||
3943 | return Loc.IP; | ||||||
3944 | |||||||
3945 | assert(X.Var->getType()->isPointerTy() &&(static_cast <bool> (X.Var->getType()->isPointerTy () && "OMP atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("X.Var->getType()->isPointerTy() && \"OMP atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3946, __extension__ __PRETTY_FUNCTION__)) | ||||||
3946 | "OMP atomic expects a pointer to target memory")(static_cast <bool> (X.Var->getType()->isPointerTy () && "OMP atomic expects a pointer to target memory" ) ? void (0) : __assert_fail ("X.Var->getType()->isPointerTy() && \"OMP atomic expects a pointer to target memory\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3946, __extension__ __PRETTY_FUNCTION__)); | ||||||
3947 | assert((X.ElemTy->isIntegerTy() || X.ElemTy->isPointerTy()) &&(static_cast <bool> ((X.ElemTy->isIntegerTy() || X.ElemTy ->isPointerTy()) && "OMP atomic compare expected a integer scalar type" ) ? void (0) : __assert_fail ("(X.ElemTy->isIntegerTy() || X.ElemTy->isPointerTy()) && \"OMP atomic compare expected a integer scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3948, __extension__ __PRETTY_FUNCTION__)) | ||||||
3948 | "OMP atomic compare expected a integer scalar type")(static_cast <bool> ((X.ElemTy->isIntegerTy() || X.ElemTy ->isPointerTy()) && "OMP atomic compare expected a integer scalar type" ) ? void (0) : __assert_fail ("(X.ElemTy->isIntegerTy() || X.ElemTy->isPointerTy()) && \"OMP atomic compare expected a integer scalar type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3948, __extension__ __PRETTY_FUNCTION__)); | ||||||
3949 | |||||||
3950 | if (Op == OMPAtomicCompareOp::EQ) { | ||||||
3951 | AtomicOrdering Failure = AtomicCmpXchgInst::getStrongestFailureOrdering(AO); | ||||||
3952 | // We don't need the result for now. | ||||||
3953 | (void)Builder.CreateAtomicCmpXchg(X.Var, E, D, MaybeAlign(), AO, Failure); | ||||||
3954 | } else { | ||||||
3955 | assert((Op == OMPAtomicCompareOp::MAX || Op == OMPAtomicCompareOp::MIN) &&(static_cast <bool> ((Op == OMPAtomicCompareOp::MAX || Op == OMPAtomicCompareOp::MIN) && "Op should be either max or min at this point" ) ? void (0) : __assert_fail ("(Op == OMPAtomicCompareOp::MAX || Op == OMPAtomicCompareOp::MIN) && \"Op should be either max or min at this point\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3956, __extension__ __PRETTY_FUNCTION__)) | ||||||
3956 | "Op should be either max or min at this point")(static_cast <bool> ((Op == OMPAtomicCompareOp::MAX || Op == OMPAtomicCompareOp::MIN) && "Op should be either max or min at this point" ) ? void (0) : __assert_fail ("(Op == OMPAtomicCompareOp::MAX || Op == OMPAtomicCompareOp::MIN) && \"Op should be either max or min at this point\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 3956, __extension__ __PRETTY_FUNCTION__)); | ||||||
3957 | |||||||
3958 | // Reverse the ordop as the OpenMP forms are different from LLVM forms. | ||||||
3959 | // Let's take max as example. | ||||||
3960 | // OpenMP form: | ||||||
3961 | // x = x > expr ? expr : x; | ||||||
3962 | // LLVM form: | ||||||
3963 | // *ptr = *ptr > val ? *ptr : val; | ||||||
3964 | // We need to transform to LLVM form. | ||||||
3965 | // x = x <= expr ? x : expr; | ||||||
3966 | AtomicRMWInst::BinOp NewOp; | ||||||
3967 | if (IsXBinopExpr) { | ||||||
3968 | if (X.IsSigned) | ||||||
3969 | NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::Min | ||||||
3970 | : AtomicRMWInst::Max; | ||||||
3971 | else | ||||||
3972 | NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::UMin | ||||||
3973 | : AtomicRMWInst::UMax; | ||||||
3974 | } else { | ||||||
3975 | if (X.IsSigned) | ||||||
3976 | NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::Max | ||||||
3977 | : AtomicRMWInst::Min; | ||||||
3978 | else | ||||||
3979 | NewOp = Op == OMPAtomicCompareOp::MAX ? AtomicRMWInst::UMax | ||||||
3980 | : AtomicRMWInst::UMin; | ||||||
3981 | } | ||||||
3982 | // We dont' need the result for now. | ||||||
3983 | (void)Builder.CreateAtomicRMW(NewOp, X.Var, E, MaybeAlign(), AO); | ||||||
3984 | } | ||||||
3985 | |||||||
3986 | checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Compare); | ||||||
3987 | |||||||
3988 | return Builder.saveIP(); | ||||||
3989 | } | ||||||
3990 | |||||||
3991 | GlobalVariable * | ||||||
3992 | OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names, | ||||||
3993 | std::string VarName) { | ||||||
3994 | llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get( | ||||||
3995 | llvm::ArrayType::get( | ||||||
3996 | llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()), | ||||||
3997 | Names); | ||||||
3998 | auto *MapNamesArrayGlobal = new llvm::GlobalVariable( | ||||||
3999 | M, MapNamesArrayInit->getType(), | ||||||
4000 | /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MapNamesArrayInit, | ||||||
4001 | VarName); | ||||||
4002 | return MapNamesArrayGlobal; | ||||||
4003 | } | ||||||
4004 | |||||||
4005 | // Create all simple and struct types exposed by the runtime and remember | ||||||
4006 | // the llvm::PointerTypes of them for easy access later. | ||||||
// Populate the builder's cached type members (and their pointer-type twins)
// by expanding the OMP_*_TYPE entries of OMPKinds.def below.
void OpenMPIRBuilder::initializeTypes(Module &M) {
  LLVMContext &Ctx = M.getContext();
  StructType *T;
// Plain types: assign the given initializer expression directly.
#define OMP_TYPE(VarName, InitValue) VarName = InitValue;
// Array types: create the array plus an unqualified pointer to it.
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize)                             \
  VarName##Ty = ArrayType::get(ElemTy, ArraySize);                             \
  VarName##PtrTy = PointerType::getUnqual(VarName##Ty);
// Function types: build the signature plus a pointer-to-function type.
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...)                  \
  VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg);            \
  VarName##Ptr = PointerType::getUnqual(VarName);
// Struct types: reuse an identically named struct if the context already has
// one (e.g. from previously parsed IR), otherwise create it.
#define OMP_STRUCT_TYPE(VarName, StructName, ...)                              \
  T = StructType::getTypeByName(Ctx, StructName);                              \
  if (!T)                                                                      \
    T = StructType::create(Ctx, {__VA_ARGS__}, StructName);                    \
  VarName = T;                                                                 \
  VarName##Ptr = PointerType::getUnqual(T);
#include "llvm/Frontend/OpenMP/OMPKinds.def"
}
4025 | |||||||
4026 | void OpenMPIRBuilder::OutlineInfo::collectBlocks( | ||||||
4027 | SmallPtrSetImpl<BasicBlock *> &BlockSet, | ||||||
4028 | SmallVectorImpl<BasicBlock *> &BlockVector) { | ||||||
4029 | SmallVector<BasicBlock *, 32> Worklist; | ||||||
4030 | BlockSet.insert(EntryBB); | ||||||
4031 | BlockSet.insert(ExitBB); | ||||||
4032 | |||||||
4033 | Worklist.push_back(EntryBB); | ||||||
4034 | while (!Worklist.empty()) { | ||||||
4035 | BasicBlock *BB = Worklist.pop_back_val(); | ||||||
4036 | BlockVector.push_back(BB); | ||||||
4037 | for (BasicBlock *SuccBB : successors(BB)) | ||||||
4038 | if (BlockSet.insert(SuccBB).second) | ||||||
4039 | Worklist.push_back(SuccBB); | ||||||
4040 | } | ||||||
4041 | } | ||||||
4042 | |||||||
4043 | void CanonicalLoopInfo::collectControlBlocks( | ||||||
4044 | SmallVectorImpl<BasicBlock *> &BBs) { | ||||||
4045 | // We only count those BBs as control block for which we do not need to | ||||||
4046 | // reverse the CFG, i.e. not the loop body which can contain arbitrary control | ||||||
4047 | // flow. For consistency, this also means we do not add the Body block, which | ||||||
4048 | // is just the entry to the body code. | ||||||
4049 | BBs.reserve(BBs.size() + 6); | ||||||
4050 | BBs.append({getPreheader(), Header, Cond, Latch, Exit, getAfter()}); | ||||||
4051 | } | ||||||
4052 | |||||||
4053 | BasicBlock *CanonicalLoopInfo::getPreheader() const { | ||||||
4054 | assert(isValid() && "Requires a valid canonical loop")(static_cast <bool> (isValid() && "Requires a valid canonical loop" ) ? void (0) : __assert_fail ("isValid() && \"Requires a valid canonical loop\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4054, __extension__ __PRETTY_FUNCTION__)); | ||||||
4055 | for (BasicBlock *Pred : predecessors(Header)) { | ||||||
4056 | if (Pred != Latch) | ||||||
4057 | return Pred; | ||||||
4058 | } | ||||||
4059 | llvm_unreachable("Missing preheader")::llvm::llvm_unreachable_internal("Missing preheader", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp" , 4059); | ||||||
4060 | } | ||||||
4061 | |||||||
4062 | void CanonicalLoopInfo::setTripCount(Value *TripCount) { | ||||||
4063 | assert(isValid() && "Requires a valid canonical loop")(static_cast <bool> (isValid() && "Requires a valid canonical loop" ) ? void (0) : __assert_fail ("isValid() && \"Requires a valid canonical loop\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4063, __extension__ __PRETTY_FUNCTION__)); | ||||||
4064 | |||||||
4065 | Instruction *CmpI = &getCond()->front(); | ||||||
4066 | assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount")(static_cast <bool> (isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount") ? void (0) : __assert_fail ("isa<CmpInst>(CmpI) && \"First inst must compare IV with TripCount\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4066, __extension__ __PRETTY_FUNCTION__)); | ||||||
4067 | CmpI->setOperand(1, TripCount); | ||||||
4068 | |||||||
4069 | #ifndef NDEBUG | ||||||
4070 | assertOK(); | ||||||
4071 | #endif | ||||||
4072 | } | ||||||
4073 | |||||||
4074 | void CanonicalLoopInfo::mapIndVar( | ||||||
4075 | llvm::function_ref<Value *(Instruction *)> Updater) { | ||||||
4076 | assert(isValid() && "Requires a valid canonical loop")(static_cast <bool> (isValid() && "Requires a valid canonical loop" ) ? void (0) : __assert_fail ("isValid() && \"Requires a valid canonical loop\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4076, __extension__ __PRETTY_FUNCTION__)); | ||||||
4077 | |||||||
4078 | Instruction *OldIV = getIndVar(); | ||||||
4079 | |||||||
4080 | // Record all uses excluding those introduced by the updater. Uses by the | ||||||
4081 | // CanonicalLoopInfo itself to keep track of the number of iterations are | ||||||
4082 | // excluded. | ||||||
4083 | SmallVector<Use *> ReplacableUses; | ||||||
4084 | for (Use &U : OldIV->uses()) { | ||||||
4085 | auto *User = dyn_cast<Instruction>(U.getUser()); | ||||||
4086 | if (!User) | ||||||
4087 | continue; | ||||||
4088 | if (User->getParent() == getCond()) | ||||||
4089 | continue; | ||||||
4090 | if (User->getParent() == getLatch()) | ||||||
4091 | continue; | ||||||
4092 | ReplacableUses.push_back(&U); | ||||||
4093 | } | ||||||
4094 | |||||||
4095 | // Run the updater that may introduce new uses | ||||||
4096 | Value *NewIV = Updater(OldIV); | ||||||
4097 | |||||||
4098 | // Replace the old uses with the value returned by the updater. | ||||||
4099 | for (Use *U : ReplacableUses) | ||||||
4100 | U->set(NewIV); | ||||||
4101 | |||||||
4102 | #ifndef NDEBUG | ||||||
4103 | assertOK(); | ||||||
4104 | #endif | ||||||
4105 | } | ||||||
4106 | |||||||
4107 | void CanonicalLoopInfo::assertOK() const { | ||||||
4108 | #ifndef NDEBUG | ||||||
4109 | // No constraints if this object currently does not describe a loop. | ||||||
4110 | if (!isValid()) | ||||||
4111 | return; | ||||||
4112 | |||||||
4113 | BasicBlock *Preheader = getPreheader(); | ||||||
4114 | BasicBlock *Body = getBody(); | ||||||
4115 | BasicBlock *After = getAfter(); | ||||||
4116 | |||||||
4117 | // Verify standard control-flow we use for OpenMP loops. | ||||||
4118 | assert(Preheader)(static_cast <bool> (Preheader) ? void (0) : __assert_fail ("Preheader", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4118 , __extension__ __PRETTY_FUNCTION__)); | ||||||
4119 | assert(isa<BranchInst>(Preheader->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Preheader-> getTerminator()) && "Preheader must terminate with unconditional branch" ) ? void (0) : __assert_fail ("isa<BranchInst>(Preheader->getTerminator()) && \"Preheader must terminate with unconditional branch\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4120, __extension__ __PRETTY_FUNCTION__)) | ||||||
4120 | "Preheader must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Preheader-> getTerminator()) && "Preheader must terminate with unconditional branch" ) ? void (0) : __assert_fail ("isa<BranchInst>(Preheader->getTerminator()) && \"Preheader must terminate with unconditional branch\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4120, __extension__ __PRETTY_FUNCTION__)); | ||||||
4121 | assert(Preheader->getSingleSuccessor() == Header &&(static_cast <bool> (Preheader->getSingleSuccessor() == Header && "Preheader must jump to header") ? void (0) : __assert_fail ("Preheader->getSingleSuccessor() == Header && \"Preheader must jump to header\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4122, __extension__ __PRETTY_FUNCTION__)) | ||||||
4122 | "Preheader must jump to header")(static_cast <bool> (Preheader->getSingleSuccessor() == Header && "Preheader must jump to header") ? void (0) : __assert_fail ("Preheader->getSingleSuccessor() == Header && \"Preheader must jump to header\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4122, __extension__ __PRETTY_FUNCTION__)); | ||||||
4123 | |||||||
4124 | assert(Header)(static_cast <bool> (Header) ? void (0) : __assert_fail ("Header", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4124 , __extension__ __PRETTY_FUNCTION__)); | ||||||
4125 | assert(isa<BranchInst>(Header->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Header->getTerminator ()) && "Header must terminate with unconditional branch" ) ? void (0) : __assert_fail ("isa<BranchInst>(Header->getTerminator()) && \"Header must terminate with unconditional branch\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4126, __extension__ __PRETTY_FUNCTION__)) | ||||||
4126 | "Header must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Header->getTerminator ()) && "Header must terminate with unconditional branch" ) ? void (0) : __assert_fail ("isa<BranchInst>(Header->getTerminator()) && \"Header must terminate with unconditional branch\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4126, __extension__ __PRETTY_FUNCTION__)); | ||||||
4127 | assert(Header->getSingleSuccessor() == Cond &&(static_cast <bool> (Header->getSingleSuccessor() == Cond && "Header must jump to exiting block") ? void ( 0) : __assert_fail ("Header->getSingleSuccessor() == Cond && \"Header must jump to exiting block\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4128, __extension__ __PRETTY_FUNCTION__)) | ||||||
4128 | "Header must jump to exiting block")(static_cast <bool> (Header->getSingleSuccessor() == Cond && "Header must jump to exiting block") ? void ( 0) : __assert_fail ("Header->getSingleSuccessor() == Cond && \"Header must jump to exiting block\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4128, __extension__ __PRETTY_FUNCTION__)); | ||||||
4129 | |||||||
4130 | assert(Cond)(static_cast <bool> (Cond) ? void (0) : __assert_fail ( "Cond", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4130, __extension__ __PRETTY_FUNCTION__)); | ||||||
4131 | assert(Cond->getSinglePredecessor() == Header &&(static_cast <bool> (Cond->getSinglePredecessor() == Header && "Exiting block only reachable from header" ) ? void (0) : __assert_fail ("Cond->getSinglePredecessor() == Header && \"Exiting block only reachable from header\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4132, __extension__ __PRETTY_FUNCTION__)) | ||||||
4132 | "Exiting block only reachable from header")(static_cast <bool> (Cond->getSinglePredecessor() == Header && "Exiting block only reachable from header" ) ? void (0) : __assert_fail ("Cond->getSinglePredecessor() == Header && \"Exiting block only reachable from header\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4132, __extension__ __PRETTY_FUNCTION__)); | ||||||
4133 | |||||||
4134 | assert(isa<BranchInst>(Cond->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Cond->getTerminator ()) && "Exiting block must terminate with conditional branch" ) ? void (0) : __assert_fail ("isa<BranchInst>(Cond->getTerminator()) && \"Exiting block must terminate with conditional branch\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4135, __extension__ __PRETTY_FUNCTION__)) | ||||||
4135 | "Exiting block must terminate with conditional branch")(static_cast <bool> (isa<BranchInst>(Cond->getTerminator ()) && "Exiting block must terminate with conditional branch" ) ? void (0) : __assert_fail ("isa<BranchInst>(Cond->getTerminator()) && \"Exiting block must terminate with conditional branch\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4135, __extension__ __PRETTY_FUNCTION__)); | ||||||
4136 | assert(size(successors(Cond)) == 2 &&(static_cast <bool> (size(successors(Cond)) == 2 && "Exiting block must have two successors") ? void (0) : __assert_fail ("size(successors(Cond)) == 2 && \"Exiting block must have two successors\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4137, __extension__ __PRETTY_FUNCTION__)) | ||||||
4137 | "Exiting block must have two successors")(static_cast <bool> (size(successors(Cond)) == 2 && "Exiting block must have two successors") ? void (0) : __assert_fail ("size(successors(Cond)) == 2 && \"Exiting block must have two successors\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4137, __extension__ __PRETTY_FUNCTION__)); | ||||||
4138 | assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body &&(static_cast <bool> (cast<BranchInst>(Cond->getTerminator ())->getSuccessor(0) == Body && "Exiting block's first successor jump to the body" ) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body && \"Exiting block's first successor jump to the body\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4139, __extension__ __PRETTY_FUNCTION__)) | ||||||
4139 | "Exiting block's first successor jump to the body")(static_cast <bool> (cast<BranchInst>(Cond->getTerminator ())->getSuccessor(0) == Body && "Exiting block's first successor jump to the body" ) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body && \"Exiting block's first successor jump to the body\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4139, __extension__ __PRETTY_FUNCTION__)); | ||||||
4140 | assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit &&(static_cast <bool> (cast<BranchInst>(Cond->getTerminator ())->getSuccessor(1) == Exit && "Exiting block's second successor must exit the loop" ) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit && \"Exiting block's second successor must exit the loop\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4141, __extension__ __PRETTY_FUNCTION__)) | ||||||
4141 | "Exiting block's second successor must exit the loop")(static_cast <bool> (cast<BranchInst>(Cond->getTerminator ())->getSuccessor(1) == Exit && "Exiting block's second successor must exit the loop" ) ? void (0) : __assert_fail ("cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit && \"Exiting block's second successor must exit the loop\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4141, __extension__ __PRETTY_FUNCTION__)); | ||||||
4142 | |||||||
4143 | assert(Body)(static_cast <bool> (Body) ? void (0) : __assert_fail ( "Body", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4143, __extension__ __PRETTY_FUNCTION__)); | ||||||
4144 | assert(Body->getSinglePredecessor() == Cond &&(static_cast <bool> (Body->getSinglePredecessor() == Cond && "Body only reachable from exiting block") ? void (0) : __assert_fail ("Body->getSinglePredecessor() == Cond && \"Body only reachable from exiting block\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4145, __extension__ __PRETTY_FUNCTION__)) | ||||||
4145 | "Body only reachable from exiting block")(static_cast <bool> (Body->getSinglePredecessor() == Cond && "Body only reachable from exiting block") ? void (0) : __assert_fail ("Body->getSinglePredecessor() == Cond && \"Body only reachable from exiting block\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4145, __extension__ __PRETTY_FUNCTION__)); | ||||||
4146 | assert(!isa<PHINode>(Body->front()))(static_cast <bool> (!isa<PHINode>(Body->front ())) ? void (0) : __assert_fail ("!isa<PHINode>(Body->front())" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4146, __extension__ __PRETTY_FUNCTION__)); | ||||||
4147 | |||||||
4148 | assert(Latch)(static_cast <bool> (Latch) ? void (0) : __assert_fail ( "Latch", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4148, __extension__ __PRETTY_FUNCTION__)); | ||||||
4149 | assert(isa<BranchInst>(Latch->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Latch->getTerminator ()) && "Latch must terminate with unconditional branch" ) ? void (0) : __assert_fail ("isa<BranchInst>(Latch->getTerminator()) && \"Latch must terminate with unconditional branch\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4150, __extension__ __PRETTY_FUNCTION__)) | ||||||
4150 | "Latch must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Latch->getTerminator ()) && "Latch must terminate with unconditional branch" ) ? void (0) : __assert_fail ("isa<BranchInst>(Latch->getTerminator()) && \"Latch must terminate with unconditional branch\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4150, __extension__ __PRETTY_FUNCTION__)); | ||||||
4151 | assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header")(static_cast <bool> (Latch->getSingleSuccessor() == Header && "Latch must jump to header") ? void (0) : __assert_fail ("Latch->getSingleSuccessor() == Header && \"Latch must jump to header\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4151, __extension__ __PRETTY_FUNCTION__)); | ||||||
4152 | // TODO: To support simple redirecting of the end of the body code that has | ||||||
4153 | // multiple; introduce another auxiliary basic block like preheader and after. | ||||||
4154 | assert(Latch->getSinglePredecessor() != nullptr)(static_cast <bool> (Latch->getSinglePredecessor() != nullptr) ? void (0) : __assert_fail ("Latch->getSinglePredecessor() != nullptr" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4154, __extension__ __PRETTY_FUNCTION__)); | ||||||
4155 | assert(!isa<PHINode>(Latch->front()))(static_cast <bool> (!isa<PHINode>(Latch->front ())) ? void (0) : __assert_fail ("!isa<PHINode>(Latch->front())" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4155, __extension__ __PRETTY_FUNCTION__)); | ||||||
4156 | |||||||
4157 | assert(Exit)(static_cast <bool> (Exit) ? void (0) : __assert_fail ( "Exit", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4157, __extension__ __PRETTY_FUNCTION__)); | ||||||
4158 | assert(isa<BranchInst>(Exit->getTerminator()) &&(static_cast <bool> (isa<BranchInst>(Exit->getTerminator ()) && "Exit block must terminate with unconditional branch" ) ? void (0) : __assert_fail ("isa<BranchInst>(Exit->getTerminator()) && \"Exit block must terminate with unconditional branch\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4159, __extension__ __PRETTY_FUNCTION__)) | ||||||
4159 | "Exit block must terminate with unconditional branch")(static_cast <bool> (isa<BranchInst>(Exit->getTerminator ()) && "Exit block must terminate with unconditional branch" ) ? void (0) : __assert_fail ("isa<BranchInst>(Exit->getTerminator()) && \"Exit block must terminate with unconditional branch\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4159, __extension__ __PRETTY_FUNCTION__)); | ||||||
4160 | assert(Exit->getSingleSuccessor() == After &&(static_cast <bool> (Exit->getSingleSuccessor() == After && "Exit block must jump to after block") ? void (0) : __assert_fail ("Exit->getSingleSuccessor() == After && \"Exit block must jump to after block\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4161, __extension__ __PRETTY_FUNCTION__)) | ||||||
4161 | "Exit block must jump to after block")(static_cast <bool> (Exit->getSingleSuccessor() == After && "Exit block must jump to after block") ? void (0) : __assert_fail ("Exit->getSingleSuccessor() == After && \"Exit block must jump to after block\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4161, __extension__ __PRETTY_FUNCTION__)); | ||||||
4162 | |||||||
4163 | assert(After)(static_cast <bool> (After) ? void (0) : __assert_fail ( "After", "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4163, __extension__ __PRETTY_FUNCTION__)); | ||||||
4164 | assert(After->getSinglePredecessor() == Exit &&(static_cast <bool> (After->getSinglePredecessor() == Exit && "After block only reachable from exit block" ) ? void (0) : __assert_fail ("After->getSinglePredecessor() == Exit && \"After block only reachable from exit block\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4165, __extension__ __PRETTY_FUNCTION__)) | ||||||
4165 | "After block only reachable from exit block")(static_cast <bool> (After->getSinglePredecessor() == Exit && "After block only reachable from exit block" ) ? void (0) : __assert_fail ("After->getSinglePredecessor() == Exit && \"After block only reachable from exit block\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4165, __extension__ __PRETTY_FUNCTION__)); | ||||||
4166 | assert(After->empty() || !isa<PHINode>(After->front()))(static_cast <bool> (After->empty() || !isa<PHINode >(After->front())) ? void (0) : __assert_fail ("After->empty() || !isa<PHINode>(After->front())" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4166, __extension__ __PRETTY_FUNCTION__)); | ||||||
4167 | |||||||
4168 | Instruction *IndVar = getIndVar(); | ||||||
4169 | assert(IndVar && "Canonical induction variable not found?")(static_cast <bool> (IndVar && "Canonical induction variable not found?" ) ? void (0) : __assert_fail ("IndVar && \"Canonical induction variable not found?\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4169, __extension__ __PRETTY_FUNCTION__)); | ||||||
4170 | assert(isa<IntegerType>(IndVar->getType()) &&(static_cast <bool> (isa<IntegerType>(IndVar-> getType()) && "Induction variable must be an integer" ) ? void (0) : __assert_fail ("isa<IntegerType>(IndVar->getType()) && \"Induction variable must be an integer\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4171, __extension__ __PRETTY_FUNCTION__)) | ||||||
4171 | "Induction variable must be an integer")(static_cast <bool> (isa<IntegerType>(IndVar-> getType()) && "Induction variable must be an integer" ) ? void (0) : __assert_fail ("isa<IntegerType>(IndVar->getType()) && \"Induction variable must be an integer\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4171, __extension__ __PRETTY_FUNCTION__)); | ||||||
4172 | assert(cast<PHINode>(IndVar)->getParent() == Header &&(static_cast <bool> (cast<PHINode>(IndVar)->getParent () == Header && "Induction variable must be a PHI in the loop header" ) ? void (0) : __assert_fail ("cast<PHINode>(IndVar)->getParent() == Header && \"Induction variable must be a PHI in the loop header\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4173, __extension__ __PRETTY_FUNCTION__)) | ||||||
4173 | "Induction variable must be a PHI in the loop header")(static_cast <bool> (cast<PHINode>(IndVar)->getParent () == Header && "Induction variable must be a PHI in the loop header" ) ? void (0) : __assert_fail ("cast<PHINode>(IndVar)->getParent() == Header && \"Induction variable must be a PHI in the loop header\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4173, __extension__ __PRETTY_FUNCTION__)); | ||||||
4174 | assert(cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader)(static_cast <bool> (cast<PHINode>(IndVar)->getIncomingBlock (0) == Preheader) ? void (0) : __assert_fail ("cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4174, __extension__ __PRETTY_FUNCTION__)); | ||||||
4175 | assert((static_cast <bool> (cast<ConstantInt>(cast<PHINode >(IndVar)->getIncomingValue(0))->isZero()) ? void (0 ) : __assert_fail ("cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero()" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4176, __extension__ __PRETTY_FUNCTION__)) | ||||||
4176 | cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero())(static_cast <bool> (cast<ConstantInt>(cast<PHINode >(IndVar)->getIncomingValue(0))->isZero()) ? void (0 ) : __assert_fail ("cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero()" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4176, __extension__ __PRETTY_FUNCTION__)); | ||||||
4177 | assert(cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch)(static_cast <bool> (cast<PHINode>(IndVar)->getIncomingBlock (1) == Latch) ? void (0) : __assert_fail ("cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4177, __extension__ __PRETTY_FUNCTION__)); | ||||||
4178 | |||||||
4179 | auto *NextIndVar = cast<PHINode>(IndVar)->getIncomingValue(1); | ||||||
4180 | assert(cast<Instruction>(NextIndVar)->getParent() == Latch)(static_cast <bool> (cast<Instruction>(NextIndVar )->getParent() == Latch) ? void (0) : __assert_fail ("cast<Instruction>(NextIndVar)->getParent() == Latch" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4180, __extension__ __PRETTY_FUNCTION__)); | ||||||
4181 | assert(cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add)(static_cast <bool> (cast<BinaryOperator>(NextIndVar )->getOpcode() == BinaryOperator::Add) ? void (0) : __assert_fail ("cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4181, __extension__ __PRETTY_FUNCTION__)); | ||||||
4182 | assert(cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar)(static_cast <bool> (cast<BinaryOperator>(NextIndVar )->getOperand(0) == IndVar) ? void (0) : __assert_fail ("cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4182, __extension__ __PRETTY_FUNCTION__)); | ||||||
4183 | assert(cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1))(static_cast <bool> (cast<ConstantInt>(cast<BinaryOperator >(NextIndVar)->getOperand(1)) ->isOne()) ? void (0) : __assert_fail ("cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1)) ->isOne()" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4184, __extension__ __PRETTY_FUNCTION__)) | ||||||
4184 | ->isOne())(static_cast <bool> (cast<ConstantInt>(cast<BinaryOperator >(NextIndVar)->getOperand(1)) ->isOne()) ? void (0) : __assert_fail ("cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1)) ->isOne()" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4184, __extension__ __PRETTY_FUNCTION__)); | ||||||
4185 | |||||||
4186 | Value *TripCount = getTripCount(); | ||||||
4187 | assert(TripCount && "Loop trip count not found?")(static_cast <bool> (TripCount && "Loop trip count not found?" ) ? void (0) : __assert_fail ("TripCount && \"Loop trip count not found?\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4187, __extension__ __PRETTY_FUNCTION__)); | ||||||
4188 | assert(IndVar->getType() == TripCount->getType() &&(static_cast <bool> (IndVar->getType() == TripCount-> getType() && "Trip count and induction variable must have the same type" ) ? void (0) : __assert_fail ("IndVar->getType() == TripCount->getType() && \"Trip count and induction variable must have the same type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4189, __extension__ __PRETTY_FUNCTION__)) | ||||||
4189 | "Trip count and induction variable must have the same type")(static_cast <bool> (IndVar->getType() == TripCount-> getType() && "Trip count and induction variable must have the same type" ) ? void (0) : __assert_fail ("IndVar->getType() == TripCount->getType() && \"Trip count and induction variable must have the same type\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4189, __extension__ __PRETTY_FUNCTION__)); | ||||||
4190 | |||||||
4191 | auto *CmpI = cast<CmpInst>(&Cond->front()); | ||||||
4192 | assert(CmpI->getPredicate() == CmpInst::ICMP_ULT &&(static_cast <bool> (CmpI->getPredicate() == CmpInst ::ICMP_ULT && "Exit condition must be a signed less-than comparison" ) ? void (0) : __assert_fail ("CmpI->getPredicate() == CmpInst::ICMP_ULT && \"Exit condition must be a signed less-than comparison\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4193, __extension__ __PRETTY_FUNCTION__)) | ||||||
4193 | "Exit condition must be a signed less-than comparison")(static_cast <bool> (CmpI->getPredicate() == CmpInst ::ICMP_ULT && "Exit condition must be a signed less-than comparison" ) ? void (0) : __assert_fail ("CmpI->getPredicate() == CmpInst::ICMP_ULT && \"Exit condition must be a signed less-than comparison\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4193, __extension__ __PRETTY_FUNCTION__)); | ||||||
4194 | assert(CmpI->getOperand(0) == IndVar &&(static_cast <bool> (CmpI->getOperand(0) == IndVar && "Exit condition must compare the induction variable") ? void (0) : __assert_fail ("CmpI->getOperand(0) == IndVar && \"Exit condition must compare the induction variable\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4195, __extension__ __PRETTY_FUNCTION__)) | ||||||
4195 | "Exit condition must compare the induction variable")(static_cast <bool> (CmpI->getOperand(0) == IndVar && "Exit condition must compare the induction variable") ? void (0) : __assert_fail ("CmpI->getOperand(0) == IndVar && \"Exit condition must compare the induction variable\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4195, __extension__ __PRETTY_FUNCTION__)); | ||||||
4196 | assert(CmpI->getOperand(1) == TripCount &&(static_cast <bool> (CmpI->getOperand(1) == TripCount && "Exit condition must compare with the trip count" ) ? void (0) : __assert_fail ("CmpI->getOperand(1) == TripCount && \"Exit condition must compare with the trip count\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4197, __extension__ __PRETTY_FUNCTION__)) | ||||||
4197 | "Exit condition must compare with the trip count")(static_cast <bool> (CmpI->getOperand(1) == TripCount && "Exit condition must compare with the trip count" ) ? void (0) : __assert_fail ("CmpI->getOperand(1) == TripCount && \"Exit condition must compare with the trip count\"" , "llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp", 4197, __extension__ __PRETTY_FUNCTION__)); | ||||||
4198 | #endif | ||||||
4199 | } | ||||||
4200 | |||||||
4201 | void CanonicalLoopInfo::invalidate() { | ||||||
4202 | Header = nullptr; | ||||||
4203 | Cond = nullptr; | ||||||
4204 | Latch = nullptr; | ||||||
4205 | Exit = nullptr; | ||||||
4206 | } |
1 | //===- llvm/Type.h - Classes for handling data types ------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains the declaration of the Type class. For more "Type" |
10 | // stuff, look in DerivedTypes.h. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_IR_TYPE_H |
15 | #define LLVM_IR_TYPE_H |
16 | |
17 | #include "llvm/ADT/ArrayRef.h" |
18 | #include "llvm/Support/CBindingWrapping.h" |
19 | #include "llvm/Support/Casting.h" |
20 | #include "llvm/Support/Compiler.h" |
21 | #include "llvm/Support/ErrorHandling.h" |
22 | #include "llvm/Support/TypeSize.h" |
23 | #include <cassert> |
24 | #include <cstdint> |
25 | #include <iterator> |
26 | |
27 | namespace llvm { |
28 | |
29 | class IntegerType; |
30 | struct fltSemantics; |
31 | class LLVMContext; |
32 | class PointerType; |
33 | class raw_ostream; |
34 | class StringRef; |
35 | template <typename PtrType> class SmallPtrSetImpl; |
36 | |
37 | /// The instances of the Type class are immutable: once they are created, |
38 | /// they are never changed. Also note that only one instance of a particular |
39 | /// type is ever created. Thus seeing if two types are equal is a matter of |
40 | /// doing a trivial pointer comparison. To enforce that no two equal instances |
41 | /// are created, Type instances can only be created via static factory methods |
42 | /// in class Type and in derived classes. Once allocated, Types are never |
43 | /// free'd. |
44 | /// |
45 | class Type { |
46 | public: |
47 | //===--------------------------------------------------------------------===// |
48 | /// Definitions of all of the base types for the Type system. Based on this |
49 | /// value, you can cast to a class defined in DerivedTypes.h. |
50 | /// Note: If you add an element to this, you need to add an element to the |
51 | /// Type::getPrimitiveType function, or else things will break! |
52 | /// Also update LLVMTypeKind and LLVMGetTypeKind () in the C binding. |
53 | /// |
54 | enum TypeID { |
55 | // PrimitiveTypes |
56 | HalfTyID = 0, ///< 16-bit floating point type |
57 | BFloatTyID, ///< 16-bit floating point type (7-bit significand) |
58 | FloatTyID, ///< 32-bit floating point type |
59 | DoubleTyID, ///< 64-bit floating point type |
60 | X86_FP80TyID, ///< 80-bit floating point type (X87) |
61 | FP128TyID, ///< 128-bit floating point type (112-bit significand) |
62 | PPC_FP128TyID, ///< 128-bit floating point type (two 64-bits, PowerPC) |
63 | VoidTyID, ///< type with no size |
64 | LabelTyID, ///< Labels |
65 | MetadataTyID, ///< Metadata |
66 | X86_MMXTyID, ///< MMX vectors (64 bits, X86 specific) |
67 | X86_AMXTyID, ///< AMX vectors (8192 bits, X86 specific) |
68 | TokenTyID, ///< Tokens |
69 | |
70 | // Derived types... see DerivedTypes.h file. |
71 | IntegerTyID, ///< Arbitrary bit width integers |
72 | FunctionTyID, ///< Functions |
73 | PointerTyID, ///< Pointers |
74 | StructTyID, ///< Structures |
75 | ArrayTyID, ///< Arrays |
76 | FixedVectorTyID, ///< Fixed width SIMD vector type |
77 | ScalableVectorTyID ///< Scalable SIMD vector type |
78 | }; |
79 | |
80 | private: |
81 | /// This refers to the LLVMContext in which this type was uniqued. |
82 | LLVMContext &Context; |
83 | |
84 | TypeID ID : 8; // The current base type of this type. |
85 | unsigned SubclassData : 24; // Space for subclasses to store data. |
86 | // Note that this should be synchronized with |
87 | // MAX_INT_BITS value in IntegerType class. |
88 | |
89 | protected: |
90 | friend class LLVMContextImpl; |
91 | |
92 | explicit Type(LLVMContext &C, TypeID tid) |
93 | : Context(C), ID(tid), SubclassData(0) {} |
94 | ~Type() = default; |
95 | |
96 | unsigned getSubclassData() const { return SubclassData; } |
97 | |
98 | void setSubclassData(unsigned val) { |
99 | SubclassData = val; |
100 | // Ensure we don't have any accidental truncation. |
101 | assert(getSubclassData() == val && "Subclass data too large for field")(static_cast <bool> (getSubclassData() == val && "Subclass data too large for field") ? void (0) : __assert_fail ("getSubclassData() == val && \"Subclass data too large for field\"" , "llvm/include/llvm/IR/Type.h", 101, __extension__ __PRETTY_FUNCTION__ )); |
102 | } |
103 | |
104 | /// Keeps track of how many Type*'s there are in the ContainedTys list. |
105 | unsigned NumContainedTys = 0; |
106 | |
107 | /// A pointer to the array of Types contained by this Type. For example, this |
108 | /// includes the arguments of a function type, the elements of a structure, |
109 | /// the pointee of a pointer, the element type of an array, etc. This pointer |
110 | /// may be 0 for types that don't contain other types (Integer, Double, |
111 | /// Float). |
112 | Type * const *ContainedTys = nullptr; |
113 | |
114 | public: |
115 | /// Print the current type. |
116 | /// Omit the type details if \p NoDetails == true. |
117 | /// E.g., let %st = type { i32, i16 } |
118 | /// When \p NoDetails is true, we only print %st. |
119 | /// Put differently, \p NoDetails prints the type as if |
120 | /// inlined with the operands when printing an instruction. |
121 | void print(raw_ostream &O, bool IsForDebug = false, |
122 | bool NoDetails = false) const; |
123 | |
124 | void dump() const; |
125 | |
126 | /// Return the LLVMContext in which this type was uniqued. |
127 | LLVMContext &getContext() const { return Context; } |
128 | |
129 | //===--------------------------------------------------------------------===// |
130 | // Accessors for working with types. |
131 | // |
132 | |
133 | /// Return the type id for the type. This will return one of the TypeID enum |
134 | /// elements defined above. |
135 | TypeID getTypeID() const { return ID; } |
136 | |
137 | /// Return true if this is 'void'. |
138 | bool isVoidTy() const { return getTypeID() == VoidTyID; } |
139 | |
140 | /// Return true if this is 'half', a 16-bit IEEE fp type. |
141 | bool isHalfTy() const { return getTypeID() == HalfTyID; } |
142 | |
143 | /// Return true if this is 'bfloat', a 16-bit bfloat type. |
144 | bool isBFloatTy() const { return getTypeID() == BFloatTyID; } |
145 | |
146 | /// Return true if this is 'float', a 32-bit IEEE fp type. |
147 | bool isFloatTy() const { return getTypeID() == FloatTyID; } |
148 | |
149 | /// Return true if this is 'double', a 64-bit IEEE fp type. |
150 | bool isDoubleTy() const { return getTypeID() == DoubleTyID; } |
151 | |
152 | /// Return true if this is x86 long double. |
153 | bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; } |
154 | |
155 | /// Return true if this is 'fp128'. |
156 | bool isFP128Ty() const { return getTypeID() == FP128TyID; } |
157 | |
158 | /// Return true if this is powerpc long double. |
159 | bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; } |
160 | |
161 | /// Return true if this is one of the six floating-point types |
162 | bool isFloatingPointTy() const { |
163 | return getTypeID() == HalfTyID || getTypeID() == BFloatTyID || |
164 | getTypeID() == FloatTyID || getTypeID() == DoubleTyID || |
165 | getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID || |
166 | getTypeID() == PPC_FP128TyID; |
167 | } |
168 | |
169 | const fltSemantics &getFltSemantics() const; |
170 | |
171 | /// Return true if this is X86 MMX. |
172 | bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; } |
173 | |
174 | /// Return true if this is X86 AMX. |
175 | bool isX86_AMXTy() const { return getTypeID() == X86_AMXTyID; } |
176 | |
177 | /// Return true if this is a FP type or a vector of FP. |
178 | bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); } |
179 | |
180 | /// Return true if this is 'label'. |
181 | bool isLabelTy() const { return getTypeID() == LabelTyID; } |
182 | |
183 | /// Return true if this is 'metadata'. |
184 | bool isMetadataTy() const { return getTypeID() == MetadataTyID; } |
185 | |
186 | /// Return true if this is 'token'. |
187 | bool isTokenTy() const { return getTypeID() == TokenTyID; } |
188 | |
189 | /// True if this is an instance of IntegerType. |
190 | bool isIntegerTy() const { return getTypeID() == IntegerTyID; } |
191 | |
192 | /// Return true if this is an IntegerType of the given width. |
193 | bool isIntegerTy(unsigned Bitwidth) const; |
194 | |
195 | /// Return true if this is an integer type or a vector of integer types. |
196 | bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); } |
197 | |
198 | /// Return true if this is an integer type or a vector of integer types of |
199 | /// the given width. |
200 | bool isIntOrIntVectorTy(unsigned BitWidth) const { |
201 | return getScalarType()->isIntegerTy(BitWidth); |
202 | } |
203 | |
204 | /// Return true if this is an integer type or a pointer type. |
205 | bool isIntOrPtrTy() const { return isIntegerTy() || isPointerTy(); } |
206 | |
207 | /// True if this is an instance of FunctionType. |
208 | bool isFunctionTy() const { return getTypeID() == FunctionTyID; } |
209 | |
210 | /// True if this is an instance of StructType. |
211 | bool isStructTy() const { return getTypeID() == StructTyID; } |
212 | |
213 | /// True if this is an instance of ArrayType. |
214 | bool isArrayTy() const { return getTypeID() == ArrayTyID; } |
215 | |
216 | /// True if this is an instance of PointerType. |
217 | bool isPointerTy() const { return getTypeID() == PointerTyID; } |
218 | |
219 | /// True if this is an instance of an opaque PointerType. |
220 | bool isOpaquePointerTy() const; |
221 | |
222 | /// Return true if this is a pointer type or a vector of pointer types. |
223 | bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); } |
224 | |
225 | /// True if this is an instance of VectorType. |
226 | inline bool isVectorTy() const { |
227 | return getTypeID() == ScalableVectorTyID || getTypeID() == FixedVectorTyID; |
228 | } |
229 | |
230 | /// Return true if this type could be converted with a lossless BitCast to |
231 | /// type 'Ty'. For example, i8* to i32*. BitCasts are valid for types of the |
232 | /// same size only where no re-interpretation of the bits is done. |
233 | /// Determine if this type could be losslessly bitcast to Ty |
234 | bool canLosslesslyBitCastTo(Type *Ty) const; |
235 | |
236 | /// Return true if this type is empty, that is, it has no elements or all of |
237 | /// its elements are empty. |
238 | bool isEmptyTy() const; |
239 | |
240 | /// Return true if the type is "first class", meaning it is a valid type for a |
241 | /// Value. |
242 | bool isFirstClassType() const { |
243 | return getTypeID() != FunctionTyID && getTypeID() != VoidTyID; |
244 | } |
245 | |
246 | /// Return true if the type is a valid type for a register in codegen. This |
247 | /// includes all first-class types except struct and array types. |
248 | bool isSingleValueType() const { |
249 | return isFloatingPointTy() || isX86_MMXTy() || isIntegerTy() || |
250 | isPointerTy() || isVectorTy() || isX86_AMXTy(); |
251 | } |
252 | |
253 | /// Return true if the type is an aggregate type. This means it is valid as |
254 | /// the first operand of an insertvalue or extractvalue instruction. This |
255 | /// includes struct and array types, but does not include vector types. |
256 | bool isAggregateType() const { |
257 | return getTypeID() == StructTyID || getTypeID() == ArrayTyID; |
258 | } |
259 | |
260 | /// Return true if it makes sense to take the size of this type. To get the |
261 | /// actual size for a particular target, it is reasonable to use the |
262 | /// DataLayout subsystem to do this. |
263 | bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const { |
264 | // If it's a primitive, it is always sized. |
265 | if (getTypeID() == IntegerTyID || isFloatingPointTy() || |
266 | getTypeID() == PointerTyID || getTypeID() == X86_MMXTyID || |
267 | getTypeID() == X86_AMXTyID) |
268 | return true; |
269 | // If it is not something that can have a size (e.g. a function or label), |
270 | // it doesn't have a size. |
271 | if (getTypeID() != StructTyID && getTypeID() != ArrayTyID && !isVectorTy()) |
272 | return false; |
273 | // Otherwise we have to try harder to decide. |
274 | return isSizedDerivedType(Visited); |
275 | } |
276 | |
277 | /// Return the basic size of this type if it is a primitive type. These are |
278 | /// fixed by LLVM and are not target-dependent. |
279 | /// This will return zero if the type does not have a size or is not a |
280 | /// primitive type. |
281 | /// |
282 | /// If this is a scalable vector type, the scalable property will be set and |
283 | /// the runtime size will be a positive integer multiple of the base size. |
284 | /// |
285 | /// Note that this may not reflect the size of memory allocated for an |
286 | /// instance of the type or the number of bytes that are written when an |
287 | /// instance of the type is stored to memory. The DataLayout class provides |
288 | /// additional query functions to provide this information. |
289 | /// |
290 | TypeSize getPrimitiveSizeInBits() const LLVM_READONLY__attribute__((__pure__)); |
291 | |
292 | /// If this is a vector type, return the getPrimitiveSizeInBits value for the |
293 | /// element type. Otherwise return the getPrimitiveSizeInBits value for this |
294 | /// type. |
295 | unsigned getScalarSizeInBits() const LLVM_READONLY__attribute__((__pure__)); |
296 | |
297 | /// Return the width of the mantissa of this type. This is only valid on |
298 | /// floating-point types. If the FP type does not have a stable mantissa (e.g. |
299 | /// ppc long double), this method returns -1. |
300 | int getFPMantissaWidth() const; |
301 | |
302 | /// Return whether the type is IEEE compatible, as defined by the eponymous |
303 | /// method in APFloat. |
304 | bool isIEEE() const; |
305 | |
306 | /// If this is a vector type, return the element type, otherwise return |
307 | /// 'this'. |
308 | inline Type *getScalarType() const { |
309 | if (isVectorTy()) |
310 | return getContainedType(0); |
311 | return const_cast<Type *>(this); |
312 | } |
313 | |
314 | //===--------------------------------------------------------------------===// |
315 | // Type Iteration support. |
316 | // |
317 | using subtype_iterator = Type * const *; |
318 | |
319 | subtype_iterator subtype_begin() const { return ContainedTys; } |
320 | subtype_iterator subtype_end() const { return &ContainedTys[NumContainedTys];} |
321 | ArrayRef<Type*> subtypes() const { |
322 | return makeArrayRef(subtype_begin(), subtype_end()); |
323 | } |
324 | |
325 | using subtype_reverse_iterator = std::reverse_iterator<subtype_iterator>; |
326 | |
327 | subtype_reverse_iterator subtype_rbegin() const { |
328 | return subtype_reverse_iterator(subtype_end()); |
329 | } |
330 | subtype_reverse_iterator subtype_rend() const { |
331 | return subtype_reverse_iterator(subtype_begin()); |
332 | } |
333 | |
/// This method is used to implement the type iterator (defined at the end of
/// the file). For derived types, this returns the types 'contained' in the
/// derived type.
/// \param i index into the contained-type array; must be less than
///          getNumContainedTypes().
Type *getContainedType(unsigned i) const {
  assert(i < NumContainedTys && "Index out of range!");
  return ContainedTys[i];
}
341 | |
342 | /// Return the number of types in the derived type. |
343 | unsigned getNumContainedTypes() const { return NumContainedTys; } |
344 | |
345 | //===--------------------------------------------------------------------===// |
346 | // Helper methods corresponding to subclass methods. This forces a cast to |
347 | // the specified subclass and calls its accessor. "getArrayNumElements" (for |
348 | // example) is shorthand for cast<ArrayType>(Ty)->getNumElements(). This is |
349 | // only intended to cover the core methods that are frequently used, helper |
350 | // methods should not be added here. |
351 | |
352 | inline unsigned getIntegerBitWidth() const; |
353 | |
354 | inline Type *getFunctionParamType(unsigned i) const; |
355 | inline unsigned getFunctionNumParams() const; |
356 | inline bool isFunctionVarArg() const; |
357 | |
358 | inline StringRef getStructName() const; |
359 | inline unsigned getStructNumElements() const; |
360 | inline Type *getStructElementType(unsigned N) const; |
361 | |
362 | inline uint64_t getArrayNumElements() const; |
363 | |
/// Return the element type of this array type.
/// Only valid to call when getTypeID() == ArrayTyID (checked in debug builds).
Type *getArrayElementType() const {
  assert(getTypeID() == ArrayTyID);
  // Arrays store their single element type as contained type 0.
  return ContainedTys[0];
}
368 | |
/// This method is deprecated without replacement. Pointer element types are
/// not available with opaque pointers.
[[deprecated("Deprecated without replacement, see "
             "https://llvm.org/docs/OpaquePointers.html for context and "
             "migration instructions")]]
Type *getPointerElementType() const {
  // Thin forwarder kept only until the opaque-pointer transition completes.
  return getNonOpaquePointerElementType();
}
377 | |
/// Only use this method in code that is not reachable with opaque pointers,
/// or part of deprecated methods that will be removed as part of the opaque
/// pointers transition.
Type *getNonOpaquePointerElementType() const {
  assert(getTypeID() == PointerTyID);
  // Opaque pointers have no contained types; typed pointers have exactly one.
  assert(NumContainedTys &&
         "Attempting to get element type of opaque pointer");
  return ContainedTys[0];
}
387 | |
388 | /// Given vector type, change the element type, |
389 | /// whilst keeping the old number of elements. |
390 | /// For non-vectors simply returns \p EltTy. |
391 | inline Type *getWithNewType(Type *EltTy) const; |
392 | |
393 | /// Given an integer or vector type, change the lane bitwidth to NewBitwidth, |
394 | /// whilst keeping the old number of lanes. |
395 | inline Type *getWithNewBitWidth(unsigned NewBitWidth) const; |
396 | |
397 | /// Given scalar/vector integer type, returns a type with elements twice as |
398 | /// wide as in the original type. For vectors, preserves element count. |
399 | inline Type *getExtendedType() const; |
400 | |
401 | /// Get the address space of this pointer or pointer vector type. |
402 | inline unsigned getPointerAddressSpace() const; |
403 | |
404 | //===--------------------------------------------------------------------===// |
405 | // Static members exported by the Type class itself. Useful for getting |
406 | // instances of Type. |
407 | // |
408 | |
409 | /// Return a type based on an identifier. |
410 | static Type *getPrimitiveType(LLVMContext &C, TypeID IDNumber); |
411 | |
412 | //===--------------------------------------------------------------------===// |
413 | // These are the builtin types that are always available. |
414 | // |
415 | static Type *getVoidTy(LLVMContext &C); |
416 | static Type *getLabelTy(LLVMContext &C); |
417 | static Type *getHalfTy(LLVMContext &C); |
418 | static Type *getBFloatTy(LLVMContext &C); |
419 | static Type *getFloatTy(LLVMContext &C); |
420 | static Type *getDoubleTy(LLVMContext &C); |
421 | static Type *getMetadataTy(LLVMContext &C); |
422 | static Type *getX86_FP80Ty(LLVMContext &C); |
423 | static Type *getFP128Ty(LLVMContext &C); |
424 | static Type *getPPC_FP128Ty(LLVMContext &C); |
425 | static Type *getX86_MMXTy(LLVMContext &C); |
426 | static Type *getX86_AMXTy(LLVMContext &C); |
427 | static Type *getTokenTy(LLVMContext &C); |
428 | static IntegerType *getIntNTy(LLVMContext &C, unsigned N); |
429 | static IntegerType *getInt1Ty(LLVMContext &C); |
430 | static IntegerType *getInt8Ty(LLVMContext &C); |
431 | static IntegerType *getInt16Ty(LLVMContext &C); |
432 | static IntegerType *getInt32Ty(LLVMContext &C); |
433 | static IntegerType *getInt64Ty(LLVMContext &C); |
434 | static IntegerType *getInt128Ty(LLVMContext &C); |
435 | template <typename ScalarTy> static Type *getScalarTy(LLVMContext &C) { |
436 | int noOfBits = sizeof(ScalarTy) * CHAR_BIT8; |
437 | if (std::is_integral<ScalarTy>::value) { |
438 | return (Type*) Type::getIntNTy(C, noOfBits); |
439 | } else if (std::is_floating_point<ScalarTy>::value) { |
440 | switch (noOfBits) { |
441 | case 32: |
442 | return Type::getFloatTy(C); |
443 | case 64: |
444 | return Type::getDoubleTy(C); |
445 | } |
446 | } |
447 | llvm_unreachable("Unsupported type in Type::getScalarTy")::llvm::llvm_unreachable_internal("Unsupported type in Type::getScalarTy" , "llvm/include/llvm/IR/Type.h", 447); |
448 | } |
449 | static Type *getFloatingPointTy(LLVMContext &C, const fltSemantics &S); |
450 | |
451 | //===--------------------------------------------------------------------===// |
452 | // Convenience methods for getting pointer types with one of the above builtin |
453 | // types as pointee. |
454 | // |
455 | static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0); |
456 | static PointerType *getBFloatPtrTy(LLVMContext &C, unsigned AS = 0); |
457 | static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0); |
458 | static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0); |
459 | static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0); |
460 | static PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0); |
461 | static PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0); |
462 | static PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0); |
463 | static PointerType *getX86_AMXPtrTy(LLVMContext &C, unsigned AS = 0); |
464 | static PointerType *getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS = 0); |
465 | static PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0); |
466 | static PointerType *getInt8PtrTy(LLVMContext &C, unsigned AS = 0); |
467 | static PointerType *getInt16PtrTy(LLVMContext &C, unsigned AS = 0); |
468 | static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0); |
469 | static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0); |
470 | |
471 | /// Return a pointer to the current type. This is equivalent to |
472 | /// PointerType::get(Foo, AddrSpace). |
473 | /// TODO: Remove this after opaque pointer transition is complete. |
474 | PointerType *getPointerTo(unsigned AddrSpace = 0) const; |
475 | |
476 | private: |
477 | /// Derived types like structures and arrays are sized iff all of the members |
478 | /// of the type are sized as well. Since asking for their size is relatively |
479 | /// uncommon, move this operation out-of-line. |
480 | bool isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited = nullptr) const; |
481 | }; |
482 | |
// Printing of types: stream insertion forwards to Type::print so a type can
// be emitted with the usual `OS << Ty` idiom.
inline raw_ostream &operator<<(raw_ostream &OS, const Type &T) {
  T.print(OS);
  return OS;
}
488 | |
// allow isa<PointerType>(x) to work without DerivedTypes.h included.
// The check needs only the TypeID tag, so the PointerType definition is not
// required here.
template <> struct isa_impl<PointerType, Type> {
  static inline bool doit(const Type &Ty) {
    return Ty.getTypeID() == Type::PointerTyID;
  }
};
495 | |
496 | // Create wrappers for C Binding types (see CBindingWrapping.h). |
497 | DEFINE_ISA_CONVERSION_FUNCTIONS(Type, LLVMTypeRef)inline Type *unwrap(LLVMTypeRef P) { return reinterpret_cast< Type*>(P); } inline LLVMTypeRef wrap(const Type *P) { return reinterpret_cast<LLVMTypeRef>(const_cast<Type*>( P)); } template<typename T> inline T *unwrap(LLVMTypeRef P) { return cast<T>(unwrap(P)); } |
498 | |
/* Specialized opaque type conversions.
 */
// Reinterpret an array of C-API type handles as an array of Type pointers
// (the handles are opaque wrappers around Type*).
inline Type **unwrap(LLVMTypeRef* Tys) {
  return reinterpret_cast<Type**>(Tys);
}
504 | |
// Inverse of unwrap above: expose an array of Type pointers through the C API.
inline LLVMTypeRef *wrap(Type **Tys) {
  return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys));
}
508 | |
509 | } // end namespace llvm |
510 | |
511 | #endif // LLVM_IR_TYPE_H |
1 | //===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file defines the IRBuilder class, which is used as a convenient way | |||
10 | // to create LLVM instructions with a consistent and simplified interface. | |||
11 | // | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #ifndef LLVM_IR_IRBUILDER_H | |||
15 | #define LLVM_IR_IRBUILDER_H | |||
16 | ||||
17 | #include "llvm-c/Types.h" | |||
18 | #include "llvm/ADT/ArrayRef.h" | |||
19 | #include "llvm/ADT/None.h" | |||
20 | #include "llvm/ADT/STLExtras.h" | |||
21 | #include "llvm/ADT/StringRef.h" | |||
22 | #include "llvm/ADT/Twine.h" | |||
23 | #include "llvm/IR/BasicBlock.h" | |||
24 | #include "llvm/IR/Constant.h" | |||
25 | #include "llvm/IR/ConstantFolder.h" | |||
26 | #include "llvm/IR/Constants.h" | |||
27 | #include "llvm/IR/DataLayout.h" | |||
28 | #include "llvm/IR/DebugLoc.h" | |||
29 | #include "llvm/IR/DerivedTypes.h" | |||
30 | #include "llvm/IR/FPEnv.h" | |||
31 | #include "llvm/IR/Function.h" | |||
32 | #include "llvm/IR/GlobalVariable.h" | |||
33 | #include "llvm/IR/InstrTypes.h" | |||
34 | #include "llvm/IR/Instruction.h" | |||
35 | #include "llvm/IR/Instructions.h" | |||
36 | #include "llvm/IR/Intrinsics.h" | |||
37 | #include "llvm/IR/LLVMContext.h" | |||
38 | #include "llvm/IR/Module.h" | |||
39 | #include "llvm/IR/Operator.h" | |||
40 | #include "llvm/IR/Type.h" | |||
41 | #include "llvm/IR/Value.h" | |||
42 | #include "llvm/IR/ValueHandle.h" | |||
43 | #include "llvm/Support/AtomicOrdering.h" | |||
44 | #include "llvm/Support/CBindingWrapping.h" | |||
45 | #include "llvm/Support/Casting.h" | |||
46 | #include <cassert> | |||
47 | #include <cstdint> | |||
48 | #include <functional> | |||
49 | #include <utility> | |||
50 | ||||
51 | namespace llvm { | |||
52 | ||||
53 | class APInt; | |||
54 | class Use; | |||
55 | ||||
56 | /// This provides the default implementation of the IRBuilder | |||
57 | /// 'InsertHelper' method that is called whenever an instruction is created by | |||
58 | /// IRBuilder and needs to be inserted. | |||
59 | /// | |||
60 | /// By default, this inserts the instruction at the insertion point. | |||
61 | class IRBuilderDefaultInserter { | |||
62 | public: | |||
63 | virtual ~IRBuilderDefaultInserter(); | |||
64 | ||||
65 | virtual void InsertHelper(Instruction *I, const Twine &Name, | |||
66 | BasicBlock *BB, | |||
67 | BasicBlock::iterator InsertPt) const { | |||
68 | if (BB) BB->getInstList().insert(InsertPt, I); | |||
69 | I->setName(Name); | |||
70 | } | |||
71 | }; | |||
72 | ||||
/// Provides an 'InsertHelper' that calls a user-provided callback after
/// performing the default insertion.
class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
  // Invoked with each instruction after it has been inserted and named.
  std::function<void(Instruction *)> Callback;

public:
  ~IRBuilderCallbackInserter() override;

  IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
      : Callback(std::move(Callback)) {}

  void InsertHelper(Instruction *I, const Twine &Name,
                    BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const override {
    // Default insertion first, so the callback observes an already inserted
    // and named instruction.
    IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
    Callback(I);
  }
};
91 | ||||
92 | /// Common base class shared among various IRBuilders. | |||
93 | class IRBuilderBase { | |||
94 | /// Pairs of (metadata kind, MDNode *) that should be added to all newly | |||
95 | /// created instructions, like !dbg metadata. | |||
96 | SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy; | |||
97 | ||||
98 | /// Add or update the an entry (Kind, MD) to MetadataToCopy, if \p MD is not | |||
99 | /// null. If \p MD is null, remove the entry with \p Kind. | |||
100 | void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) { | |||
101 | if (!MD) { | |||
102 | erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) { | |||
103 | return KV.first == Kind; | |||
104 | }); | |||
105 | return; | |||
106 | } | |||
107 | ||||
108 | for (auto &KV : MetadataToCopy) | |||
109 | if (KV.first == Kind) { | |||
110 | KV.second = MD; | |||
111 | return; | |||
112 | } | |||
113 | ||||
114 | MetadataToCopy.emplace_back(Kind, MD); | |||
115 | } | |||
116 | ||||
117 | protected: | |||
118 | BasicBlock *BB; | |||
119 | BasicBlock::iterator InsertPt; | |||
120 | LLVMContext &Context; | |||
121 | const IRBuilderFolder &Folder; | |||
122 | const IRBuilderDefaultInserter &Inserter; | |||
123 | ||||
124 | MDNode *DefaultFPMathTag; | |||
125 | FastMathFlags FMF; | |||
126 | ||||
127 | bool IsFPConstrained = false; | |||
128 | fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict; | |||
129 | RoundingMode DefaultConstrainedRounding = RoundingMode::Dynamic; | |||
130 | ||||
131 | ArrayRef<OperandBundleDef> DefaultOperandBundles; | |||
132 | ||||
133 | public: | |||
/// Construct a builder that folds via \p Folder, inserts via \p Inserter,
/// and stores \p FPMathTag / \p OpBundles as the defaults for instructions
/// it creates. The builder starts with no insertion point.
IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
              const IRBuilderDefaultInserter &Inserter, MDNode *FPMathTag,
              ArrayRef<OperandBundleDef> OpBundles)
    : Context(context), Folder(Folder), Inserter(Inserter),
      DefaultFPMathTag(FPMathTag), DefaultOperandBundles(OpBundles) {
  // BB and InsertPt are initialized here (null block, empty iterator)
  // rather than in the initializer list.
  ClearInsertionPoint();
}
141 | ||||
/// Insert and return the specified instruction.
/// The instruction is placed via the configured Inserter at (BB, InsertPt)
/// and then receives every metadata entry registered in MetadataToCopy.
template<typename InstTy>
InstTy *Insert(InstTy *I, const Twine &Name = "") const {
  Inserter.InsertHelper(I, Name, BB, InsertPt);
  AddMetadataToInst(I);
  return I;
}
149 | ||||
/// No-op overload to handle constants: constants are never inserted into a
/// block, so the value is returned unchanged.
Constant *Insert(Constant *C, const Twine& = "") const {
  return C;
}
154 | ||||
/// Dispatching overload: instructions are inserted normally; any other value
/// must be a constant and is passed through untouched.
Value *Insert(Value *V, const Twine &Name = "") const {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return Insert(I, Name);
  assert(isa<Constant>(V));
  return V;
}
161 | ||||
162 | //===--------------------------------------------------------------------===// | |||
163 | // Builder configuration methods | |||
164 | //===--------------------------------------------------------------------===// | |||
165 | ||||
/// Clear the insertion point: created instructions will not be
/// inserted into a block (a null BB makes the default inserter skip the
/// actual insertion).
void ClearInsertionPoint() {
  BB = nullptr;
  InsertPt = BasicBlock::iterator();
}
172 | ||||
173 | BasicBlock *GetInsertBlock() const { return BB; } | |||
174 | BasicBlock::iterator GetInsertPoint() const { return InsertPt; } | |||
175 | LLVMContext &getContext() const { return Context; } | |||
176 | ||||
/// This specifies that created instructions should be appended to the
/// end of the specified block. \p TheBB must be non-null; it is
/// dereferenced to obtain its end iterator.
void SetInsertPoint(BasicBlock *TheBB) {
  BB = TheBB;
  InsertPt = BB->end();
}
183 | ||||
184 | /// This specifies that created instructions should be inserted before | |||
185 | /// the specified instruction. | |||
186 | void SetInsertPoint(Instruction *I) { | |||
187 | BB = I->getParent(); | |||
| ||||
188 | InsertPt = I->getIterator(); | |||
189 | assert(InsertPt != BB->end() && "Can't read debug loc from end()")(static_cast <bool> (InsertPt != BB->end() && "Can't read debug loc from end()") ? void (0) : __assert_fail ("InsertPt != BB->end() && \"Can't read debug loc from end()\"" , "llvm/include/llvm/IR/IRBuilder.h", 189, __extension__ __PRETTY_FUNCTION__ )); | |||
190 | SetCurrentDebugLocation(I->getDebugLoc()); | |||
191 | } | |||
192 | ||||
/// This specifies that created instructions should be inserted at the
/// specified point. When \p IP is not the block's end iterator, the builder
/// also adopts the debug location of the instruction at \p IP.
void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
  BB = TheBB;
  InsertPt = IP;
  if (IP != TheBB->end())
    SetCurrentDebugLocation(IP->getDebugLoc());
}
201 | ||||
/// Set location information used by debugging information.
/// If \p L has no underlying MDNode, the !dbg entry is removed from
/// MetadataToCopy instead (see AddOrRemoveMetadataToCopy).
void SetCurrentDebugLocation(DebugLoc L) {
  AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
}
206 | ||||
/// Collect metadata with IDs \p MetadataKinds from \p Src which should be
/// added to all created instructions. Entries present in MetadataToCopy but
/// not on \p Src will be dropped from MetadataToCopy.
void CollectMetadataToCopy(Instruction *Src,
                           ArrayRef<unsigned> MetadataKinds) {
  for (unsigned K : MetadataKinds)
    // getMetadata returns null when Src lacks kind K, which removes any
    // stale entry of that kind (see AddOrRemoveMetadataToCopy).
    AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
}
215 | ||||
216 | /// Get location information used by debugging information. | |||
217 | DebugLoc getCurrentDebugLocation() const; | |||
218 | ||||
219 | /// If this builder has a current debug location, set it on the | |||
220 | /// specified instruction. | |||
221 | void SetInstDebugLocation(Instruction *I) const; | |||
222 | ||||
223 | /// Add all entries in MetadataToCopy to \p I. | |||
224 | void AddMetadataToInst(Instruction *I) const { | |||
225 | for (auto &KV : MetadataToCopy) | |||
226 | I->setMetadata(KV.first, KV.second); | |||
227 | } | |||
228 | ||||
229 | /// Get the return type of the current function that we're emitting | |||
230 | /// into. | |||
231 | Type *getCurrentFunctionReturnType() const; | |||
232 | ||||
/// InsertPoint - A saved insertion point: a (block, iterator) pair that can
/// be handed back to restoreIP() later.
class InsertPoint {
  BasicBlock *Block = nullptr; // Null block means "not set".
  BasicBlock::iterator Point;

public:
  /// Creates a new insertion point which doesn't point to anything.
  InsertPoint() = default;

  /// Creates a new insertion point at the given location.
  InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
      : Block(InsertBlock), Point(InsertPoint) {}

  /// Returns true if this insert point is set.
  bool isSet() const { return (Block != nullptr); }

  BasicBlock *getBlock() const { return Block; }
  BasicBlock::iterator getPoint() const { return Point; }
};
252 | ||||
253 | /// Returns the current insert point. | |||
254 | InsertPoint saveIP() const { | |||
255 | return InsertPoint(GetInsertBlock(), GetInsertPoint()); | |||
256 | } | |||
257 | ||||
258 | /// Returns the current insert point, clearing it in the process. | |||
259 | InsertPoint saveAndClearIP() { | |||
260 | InsertPoint IP(GetInsertBlock(), GetInsertPoint()); | |||
261 | ClearInsertionPoint(); | |||
262 | return IP; | |||
263 | } | |||
264 | ||||
/// Sets the current insert point to a previously-saved location.
/// An unset InsertPoint clears the builder's insertion point instead.
void restoreIP(InsertPoint IP) {
  if (IP.isSet())
    SetInsertPoint(IP.getBlock(), IP.getPoint());
  else
    ClearInsertionPoint();
}
272 | ||||
273 | /// Get the floating point math metadata being used. | |||
274 | MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; } | |||
275 | ||||
276 | /// Get the flags to be applied to created floating point ops | |||
277 | FastMathFlags getFastMathFlags() const { return FMF; } | |||
278 | ||||
279 | FastMathFlags &getFastMathFlags() { return FMF; } | |||
280 | ||||
281 | /// Clear the fast-math flags. | |||
282 | void clearFastMathFlags() { FMF.clear(); } | |||
283 | ||||
284 | /// Set the floating point math metadata to be used. | |||
285 | void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; } | |||
286 | ||||
287 | /// Set the fast-math flags to be used with generated fp-math operators | |||
288 | void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; } | |||
289 | ||||
290 | /// Enable/Disable use of constrained floating point math. When | |||
291 | /// enabled the CreateF<op>() calls instead create constrained | |||
292 | /// floating point intrinsic calls. Fast math flags are unaffected | |||
293 | /// by this setting. | |||
294 | void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; } | |||
295 | ||||
296 | /// Query for the use of constrained floating point math | |||
297 | bool getIsFPConstrained() { return IsFPConstrained; } | |||
298 | ||||
/// Set the exception handling to be used with constrained floating point.
/// Debug builds reject values with no known string representation.
void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
#ifndef NDEBUG
  Optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(NewExcept);
  assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
#endif
  DefaultConstrainedExcept = NewExcept;
}
307 | ||||
/// Set the rounding mode handling to be used with constrained floating point.
/// Debug builds reject values with no known string representation.
void setDefaultConstrainedRounding(RoundingMode NewRounding) {
#ifndef NDEBUG
  Optional<StringRef> RoundingStr = convertRoundingModeToStr(NewRounding);
  assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
#endif
  DefaultConstrainedRounding = NewRounding;
}
316 | ||||
317 | /// Get the exception handling used with constrained floating point | |||
318 | fp::ExceptionBehavior getDefaultConstrainedExcept() { | |||
319 | return DefaultConstrainedExcept; | |||
320 | } | |||
321 | ||||
322 | /// Get the rounding mode handling used with constrained floating point | |||
323 | RoundingMode getDefaultConstrainedRounding() { | |||
324 | return DefaultConstrainedRounding; | |||
325 | } | |||
326 | ||||
327 | void setConstrainedFPFunctionAttr() { | |||
328 | assert(BB && "Must have a basic block to set any function attributes!")(static_cast <bool> (BB && "Must have a basic block to set any function attributes!" ) ? void (0) : __assert_fail ("BB && \"Must have a basic block to set any function attributes!\"" , "llvm/include/llvm/IR/IRBuilder.h", 328, __extension__ __PRETTY_FUNCTION__ )); | |||
329 | ||||
330 | Function *F = BB->getParent(); | |||
331 | if (!F->hasFnAttribute(Attribute::StrictFP)) { | |||
332 | F->addFnAttr(Attribute::StrictFP); | |||
333 | } | |||
334 | } | |||
335 | ||||
336 | void setConstrainedFPCallAttr(CallBase *I) { | |||
337 | I->addFnAttr(Attribute::StrictFP); | |||
338 | } | |||
339 | ||||
340 | void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) { | |||
341 | DefaultOperandBundles = OpBundles; | |||
342 | } | |||
343 | ||||
344 | //===--------------------------------------------------------------------===// | |||
345 | // RAII helpers. | |||
346 | //===--------------------------------------------------------------------===// | |||
347 | ||||
348 | // RAII object that stores the current insertion point and restores it | |||
349 | // when the object is destroyed. This includes the debug location. | |||
350 | class InsertPointGuard { | |||
351 | IRBuilderBase &Builder; | |||
352 | AssertingVH<BasicBlock> Block; | |||
353 | BasicBlock::iterator Point; | |||
354 | DebugLoc DbgLoc; | |||
355 | ||||
356 | public: | |||
357 | InsertPointGuard(IRBuilderBase &B) | |||
358 | : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()), | |||
359 | DbgLoc(B.getCurrentDebugLocation()) {} | |||
360 | ||||
361 | InsertPointGuard(const InsertPointGuard &) = delete; | |||
362 | InsertPointGuard &operator=(const InsertPointGuard &) = delete; | |||
363 | ||||
364 | ~InsertPointGuard() { | |||
365 | Builder.restoreIP(InsertPoint(Block, Point)); | |||
366 | Builder.SetCurrentDebugLocation(DbgLoc); | |||
367 | } | |||
368 | }; | |||
369 | ||||
370 | // RAII object that stores the current fast math settings and restores | |||
371 | // them when the object is destroyed. | |||
372 | class FastMathFlagGuard { | |||
373 | IRBuilderBase &Builder; | |||
374 | FastMathFlags FMF; | |||
375 | MDNode *FPMathTag; | |||
376 | bool IsFPConstrained; | |||
377 | fp::ExceptionBehavior DefaultConstrainedExcept; | |||
378 | RoundingMode DefaultConstrainedRounding; | |||
379 | ||||
380 | public: | |||
381 | FastMathFlagGuard(IRBuilderBase &B) | |||
382 | : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag), | |||
383 | IsFPConstrained(B.IsFPConstrained), | |||
384 | DefaultConstrainedExcept(B.DefaultConstrainedExcept), | |||
385 | DefaultConstrainedRounding(B.DefaultConstrainedRounding) {} | |||
386 | ||||
387 | FastMathFlagGuard(const FastMathFlagGuard &) = delete; | |||
388 | FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete; | |||
389 | ||||
390 | ~FastMathFlagGuard() { | |||
391 | Builder.FMF = FMF; | |||
392 | Builder.DefaultFPMathTag = FPMathTag; | |||
393 | Builder.IsFPConstrained = IsFPConstrained; | |||
394 | Builder.DefaultConstrainedExcept = DefaultConstrainedExcept; | |||
395 | Builder.DefaultConstrainedRounding = DefaultConstrainedRounding; | |||
396 | } | |||
397 | }; | |||
398 | ||||
399 | // RAII object that stores the current default operand bundles and restores | |||
400 | // them when the object is destroyed. | |||
401 | class OperandBundlesGuard { | |||
402 | IRBuilderBase &Builder; | |||
403 | ArrayRef<OperandBundleDef> DefaultOperandBundles; | |||
404 | ||||
405 | public: | |||
406 | OperandBundlesGuard(IRBuilderBase &B) | |||
407 | : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {} | |||
408 | ||||
409 | OperandBundlesGuard(const OperandBundlesGuard &) = delete; | |||
410 | OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete; | |||
411 | ||||
412 | ~OperandBundlesGuard() { | |||
413 | Builder.DefaultOperandBundles = DefaultOperandBundles; | |||
414 | } | |||
415 | }; | |||
416 | ||||
417 | ||||
418 | //===--------------------------------------------------------------------===// | |||
419 | // Miscellaneous creation methods. | |||
420 | //===--------------------------------------------------------------------===// | |||
421 | ||||
422 | /// Make a new global variable with initializer type i8* | |||
423 | /// | |||
424 | /// Make a new global variable with an initializer that has array of i8 type | |||
425 | /// filled in with the null terminated string value specified. The new global | |||
426 | /// variable will be marked mergable with any others of the same contents. If | |||
427 | /// Name is specified, it is the name of the global variable created. | |||
428 | /// | |||
429 | /// If no module is given via \p M, it is take from the insertion point basic | |||
430 | /// block. | |||
431 | GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "", | |||
432 | unsigned AddressSpace = 0, | |||
433 | Module *M = nullptr); | |||
434 | ||||
435 | /// Get a constant value representing either true or false. | |||
436 | ConstantInt *getInt1(bool V) { | |||
437 | return ConstantInt::get(getInt1Ty(), V); | |||
438 | } | |||
439 | ||||
440 | /// Get the constant value for i1 true. | |||
441 | ConstantInt *getTrue() { | |||
442 | return ConstantInt::getTrue(Context); | |||
443 | } | |||
444 | ||||
445 | /// Get the constant value for i1 false. | |||
446 | ConstantInt *getFalse() { | |||
447 | return ConstantInt::getFalse(Context); | |||
448 | } | |||
449 | ||||
450 | /// Get a constant 8-bit value. | |||
451 | ConstantInt *getInt8(uint8_t C) { | |||
452 | return ConstantInt::get(getInt8Ty(), C); | |||
453 | } | |||
454 | ||||
455 | /// Get a constant 16-bit value. | |||
456 | ConstantInt *getInt16(uint16_t C) { | |||
457 | return ConstantInt::get(getInt16Ty(), C); | |||
458 | } | |||
459 | ||||
460 | /// Get a constant 32-bit value. | |||
461 | ConstantInt *getInt32(uint32_t C) { | |||
462 | return ConstantInt::get(getInt32Ty(), C); | |||
463 | } | |||
464 | ||||
465 | /// Get a constant 64-bit value. | |||
466 | ConstantInt *getInt64(uint64_t C) { | |||
467 | return ConstantInt::get(getInt64Ty(), C); | |||
468 | } | |||
469 | ||||
470 | /// Get a constant N-bit value, zero extended or truncated from | |||
471 | /// a 64-bit value. | |||
472 | ConstantInt *getIntN(unsigned N, uint64_t C) { | |||
473 | return ConstantInt::get(getIntNTy(N), C); | |||
474 | } | |||
475 | ||||
476 | /// Get a constant integer value. | |||
477 | ConstantInt *getInt(const APInt &AI) { | |||
478 | return ConstantInt::get(Context, AI); | |||
479 | } | |||
480 | ||||
481 | //===--------------------------------------------------------------------===// | |||
482 | // Type creation methods | |||
483 | //===--------------------------------------------------------------------===// | |||
484 | ||||
485 | /// Fetch the type representing a single bit | |||
486 | IntegerType *getInt1Ty() { | |||
487 | return Type::getInt1Ty(Context); | |||
488 | } | |||
489 | ||||
490 | /// Fetch the type representing an 8-bit integer. | |||
491 | IntegerType *getInt8Ty() { | |||
492 | return Type::getInt8Ty(Context); | |||
493 | } | |||
494 | ||||
495 | /// Fetch the type representing a 16-bit integer. | |||
496 | IntegerType *getInt16Ty() { | |||
497 | return Type::getInt16Ty(Context); | |||
498 | } | |||
499 | ||||
500 | /// Fetch the type representing a 32-bit integer. | |||
501 | IntegerType *getInt32Ty() { | |||
502 | return Type::getInt32Ty(Context); | |||
503 | } | |||
504 | ||||
505 | /// Fetch the type representing a 64-bit integer. | |||
506 | IntegerType *getInt64Ty() { | |||
507 | return Type::getInt64Ty(Context); | |||
508 | } | |||
509 | ||||
510 | /// Fetch the type representing a 128-bit integer. | |||
511 | IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); } | |||
512 | ||||
513 | /// Fetch the type representing an N-bit integer. | |||
514 | IntegerType *getIntNTy(unsigned N) { | |||
515 | return Type::getIntNTy(Context, N); | |||
516 | } | |||
517 | ||||
518 | /// Fetch the type representing a 16-bit floating point value. | |||
519 | Type *getHalfTy() { | |||
520 | return Type::getHalfTy(Context); | |||
521 | } | |||
522 | ||||
523 | /// Fetch the type representing a 16-bit brain floating point value. | |||
524 | Type *getBFloatTy() { | |||
525 | return Type::getBFloatTy(Context); | |||
526 | } | |||
527 | ||||
528 | /// Fetch the type representing a 32-bit floating point value. | |||
529 | Type *getFloatTy() { | |||
530 | return Type::getFloatTy(Context); | |||
531 | } | |||
532 | ||||
533 | /// Fetch the type representing a 64-bit floating point value. | |||
534 | Type *getDoubleTy() { | |||
535 | return Type::getDoubleTy(Context); | |||
536 | } | |||
537 | ||||
538 | /// Fetch the type representing void. | |||
539 | Type *getVoidTy() { | |||
540 | return Type::getVoidTy(Context); | |||
541 | } | |||
542 | ||||
543 | /// Fetch the type representing a pointer to an 8-bit integer value. | |||
544 | PointerType *getInt8PtrTy(unsigned AddrSpace = 0) { | |||
545 | return Type::getInt8PtrTy(Context, AddrSpace); | |||
546 | } | |||
547 | ||||
548 | /// Fetch the type representing a pointer to an integer value. | |||
549 | IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) { | |||
550 | return DL.getIntPtrType(Context, AddrSpace); | |||
551 | } | |||
552 | ||||
553 | //===--------------------------------------------------------------------===// | |||
554 | // Intrinsic creation methods | |||
555 | //===--------------------------------------------------------------------===// | |||
556 | ||||
557 | /// Create and insert a memset to the specified pointer and the | |||
558 | /// specified value. | |||
559 | /// | |||
560 | /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is | |||
561 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
562 | /// and noalias tags. | |||
563 | CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, | |||
564 | MaybeAlign Align, bool isVolatile = false, | |||
565 | MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr, | |||
566 | MDNode *NoAliasTag = nullptr) { | |||
567 | return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile, | |||
568 | TBAATag, ScopeTag, NoAliasTag); | |||
569 | } | |||
570 | ||||
571 | CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align, | |||
572 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
573 | MDNode *ScopeTag = nullptr, | |||
574 | MDNode *NoAliasTag = nullptr); | |||
575 | ||||
576 | /// Create and insert an element unordered-atomic memset of the region of | |||
577 | /// memory starting at the given pointer to the given value. | |||
578 | /// | |||
579 | /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is | |||
580 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
581 | /// and noalias tags. | |||
582 | CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val, | |||
583 | uint64_t Size, Align Alignment, | |||
584 | uint32_t ElementSize, | |||
585 | MDNode *TBAATag = nullptr, | |||
586 | MDNode *ScopeTag = nullptr, | |||
587 | MDNode *NoAliasTag = nullptr) { | |||
588 | return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size), | |||
589 | Align(Alignment), ElementSize, | |||
590 | TBAATag, ScopeTag, NoAliasTag); | |||
591 | } | |||
592 | ||||
593 | CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val, | |||
594 | Value *Size, Align Alignment, | |||
595 | uint32_t ElementSize, | |||
596 | MDNode *TBAATag = nullptr, | |||
597 | MDNode *ScopeTag = nullptr, | |||
598 | MDNode *NoAliasTag = nullptr); | |||
599 | ||||
600 | /// Create and insert a memcpy between the specified pointers. | |||
601 | /// | |||
602 | /// If the pointers aren't i8*, they will be converted. If a TBAA tag is | |||
603 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
604 | /// and noalias tags. | |||
605 | CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
606 | MaybeAlign SrcAlign, uint64_t Size, | |||
607 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
608 | MDNode *TBAAStructTag = nullptr, | |||
609 | MDNode *ScopeTag = nullptr, | |||
610 | MDNode *NoAliasTag = nullptr) { | |||
611 | return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size), | |||
612 | isVolatile, TBAATag, TBAAStructTag, ScopeTag, | |||
613 | NoAliasTag); | |||
614 | } | |||
615 | ||||
616 | CallInst *CreateMemTransferInst( | |||
617 | Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
618 | MaybeAlign SrcAlign, Value *Size, bool isVolatile = false, | |||
619 | MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr, | |||
620 | MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr); | |||
621 | ||||
622 | CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
623 | MaybeAlign SrcAlign, Value *Size, | |||
624 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
625 | MDNode *TBAAStructTag = nullptr, | |||
626 | MDNode *ScopeTag = nullptr, | |||
627 | MDNode *NoAliasTag = nullptr) { | |||
628 | return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src, | |||
629 | SrcAlign, Size, isVolatile, TBAATag, | |||
630 | TBAAStructTag, ScopeTag, NoAliasTag); | |||
631 | } | |||
632 | ||||
633 | CallInst * | |||
634 | CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
635 | MaybeAlign SrcAlign, Value *Size, bool IsVolatile = false, | |||
636 | MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr, | |||
637 | MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr); | |||
638 | ||||
639 | /// Create and insert an element unordered-atomic memcpy between the | |||
640 | /// specified pointers. | |||
641 | /// | |||
642 | /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively. | |||
643 | /// | |||
644 | /// If the pointers aren't i8*, they will be converted. If a TBAA tag is | |||
645 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
646 | /// and noalias tags. | |||
647 | CallInst *CreateElementUnorderedAtomicMemCpy( | |||
648 | Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, | |||
649 | uint32_t ElementSize, MDNode *TBAATag = nullptr, | |||
650 | MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr, | |||
651 | MDNode *NoAliasTag = nullptr); | |||
652 | ||||
653 | CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
654 | MaybeAlign SrcAlign, uint64_t Size, | |||
655 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
656 | MDNode *ScopeTag = nullptr, | |||
657 | MDNode *NoAliasTag = nullptr) { | |||
658 | return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size), | |||
659 | isVolatile, TBAATag, ScopeTag, NoAliasTag); | |||
660 | } | |||
661 | ||||
662 | CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
663 | MaybeAlign SrcAlign, Value *Size, | |||
664 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
665 | MDNode *ScopeTag = nullptr, | |||
666 | MDNode *NoAliasTag = nullptr); | |||
667 | ||||
668 | /// \brief Create and insert an element unordered-atomic memmove between the | |||
669 | /// specified pointers. | |||
670 | /// | |||
671 | /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, | |||
672 | /// respectively. | |||
673 | /// | |||
674 | /// If the pointers aren't i8*, they will be converted. If a TBAA tag is | |||
675 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
676 | /// and noalias tags. | |||
677 | CallInst *CreateElementUnorderedAtomicMemMove( | |||
678 | Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, | |||
679 | uint32_t ElementSize, MDNode *TBAATag = nullptr, | |||
680 | MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr, | |||
681 | MDNode *NoAliasTag = nullptr); | |||
682 | ||||
683 | /// Create a sequential vector fadd reduction intrinsic of the source vector. | |||
684 | /// The first parameter is a scalar accumulator value. An unordered reduction | |||
685 | /// can be created by adding the reassoc fast-math flag to the resulting | |||
686 | /// sequential reduction. | |||
687 | CallInst *CreateFAddReduce(Value *Acc, Value *Src); | |||
688 | ||||
689 | /// Create a sequential vector fmul reduction intrinsic of the source vector. | |||
690 | /// The first parameter is a scalar accumulator value. An unordered reduction | |||
691 | /// can be created by adding the reassoc fast-math flag to the resulting | |||
692 | /// sequential reduction. | |||
693 | CallInst *CreateFMulReduce(Value *Acc, Value *Src); | |||
694 | ||||
695 | /// Create a vector int add reduction intrinsic of the source vector. | |||
696 | CallInst *CreateAddReduce(Value *Src); | |||
697 | ||||
698 | /// Create a vector int mul reduction intrinsic of the source vector. | |||
699 | CallInst *CreateMulReduce(Value *Src); | |||
700 | ||||
701 | /// Create a vector int AND reduction intrinsic of the source vector. | |||
702 | CallInst *CreateAndReduce(Value *Src); | |||
703 | ||||
704 | /// Create a vector int OR reduction intrinsic of the source vector. | |||
705 | CallInst *CreateOrReduce(Value *Src); | |||
706 | ||||
707 | /// Create a vector int XOR reduction intrinsic of the source vector. | |||
708 | CallInst *CreateXorReduce(Value *Src); | |||
709 | ||||
710 | /// Create a vector integer max reduction intrinsic of the source | |||
711 | /// vector. | |||
712 | CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false); | |||
713 | ||||
714 | /// Create a vector integer min reduction intrinsic of the source | |||
715 | /// vector. | |||
716 | CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false); | |||
717 | ||||
718 | /// Create a vector float max reduction intrinsic of the source | |||
719 | /// vector. | |||
720 | CallInst *CreateFPMaxReduce(Value *Src); | |||
721 | ||||
722 | /// Create a vector float min reduction intrinsic of the source | |||
723 | /// vector. | |||
724 | CallInst *CreateFPMinReduce(Value *Src); | |||
725 | ||||
726 | /// Create a lifetime.start intrinsic. | |||
727 | /// | |||
728 | /// If the pointer isn't i8* it will be converted. | |||
729 | CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr); | |||
730 | ||||
731 | /// Create a lifetime.end intrinsic. | |||
732 | /// | |||
733 | /// If the pointer isn't i8* it will be converted. | |||
734 | CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr); | |||
735 | ||||
736 | /// Create a call to invariant.start intrinsic. | |||
737 | /// | |||
738 | /// If the pointer isn't i8* it will be converted. | |||
739 | CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr); | |||
740 | ||||
741 | /// Create a call to Masked Load intrinsic | |||
742 | CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, | |||
743 | Value *PassThru = nullptr, const Twine &Name = ""); | |||
744 | ||||
745 | /// Create a call to Masked Store intrinsic | |||
746 | CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, | |||
747 | Value *Mask); | |||
748 | ||||
749 | /// Create a call to Masked Gather intrinsic | |||
750 | CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, | |||
751 | Value *Mask = nullptr, Value *PassThru = nullptr, | |||
752 | const Twine &Name = ""); | |||
753 | ||||
754 | /// Create a call to Masked Scatter intrinsic | |||
755 | CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, | |||
756 | Value *Mask = nullptr); | |||
757 | ||||
758 | /// Create an assume intrinsic call that allows the optimizer to | |||
759 | /// assume that the provided condition will be true. | |||
760 | /// | |||
761 | /// The optional argument \p OpBundles specifies operand bundles that are | |||
762 | /// added to the call instruction. | |||
763 | CallInst *CreateAssumption(Value *Cond, | |||
764 | ArrayRef<OperandBundleDef> OpBundles = llvm::None); | |||
765 | ||||
766 | /// Create a llvm.experimental.noalias.scope.decl intrinsic call. | |||
767 | Instruction *CreateNoAliasScopeDeclaration(Value *Scope); | |||
768 | Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) { | |||
769 | return CreateNoAliasScopeDeclaration( | |||
770 | MetadataAsValue::get(Context, ScopeTag)); | |||
771 | } | |||
772 | ||||
773 | /// Create a call to the experimental.gc.statepoint intrinsic to | |||
774 | /// start a new statepoint sequence. | |||
775 | CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes, | |||
776 | FunctionCallee ActualCallee, | |||
777 | ArrayRef<Value *> CallArgs, | |||
778 | Optional<ArrayRef<Value *>> DeoptArgs, | |||
779 | ArrayRef<Value *> GCArgs, | |||
780 | const Twine &Name = ""); | |||
781 | ||||
782 | /// Create a call to the experimental.gc.statepoint intrinsic to | |||
783 | /// start a new statepoint sequence. | |||
784 | CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes, | |||
785 | FunctionCallee ActualCallee, uint32_t Flags, | |||
786 | ArrayRef<Value *> CallArgs, | |||
787 | Optional<ArrayRef<Use>> TransitionArgs, | |||
788 | Optional<ArrayRef<Use>> DeoptArgs, | |||
789 | ArrayRef<Value *> GCArgs, | |||
790 | const Twine &Name = ""); | |||
791 | ||||
792 | /// Conveninence function for the common case when CallArgs are filled | |||
793 | /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be | |||
794 | /// .get()'ed to get the Value pointer. | |||
795 | CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes, | |||
796 | FunctionCallee ActualCallee, | |||
797 | ArrayRef<Use> CallArgs, | |||
798 | Optional<ArrayRef<Value *>> DeoptArgs, | |||
799 | ArrayRef<Value *> GCArgs, | |||
800 | const Twine &Name = ""); | |||
801 | ||||
802 | /// Create an invoke to the experimental.gc.statepoint intrinsic to | |||
803 | /// start a new statepoint sequence. | |||
804 | InvokeInst * | |||
805 | CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes, | |||
806 | FunctionCallee ActualInvokee, BasicBlock *NormalDest, | |||
807 | BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs, | |||
808 | Optional<ArrayRef<Value *>> DeoptArgs, | |||
809 | ArrayRef<Value *> GCArgs, const Twine &Name = ""); | |||
810 | ||||
811 | /// Create an invoke to the experimental.gc.statepoint intrinsic to | |||
812 | /// start a new statepoint sequence. | |||
813 | InvokeInst *CreateGCStatepointInvoke( | |||
814 | uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee, | |||
815 | BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags, | |||
816 | ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs, | |||
817 | Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs, | |||
818 | const Twine &Name = ""); | |||
819 | ||||
820 | // Convenience function for the common case when CallArgs are filled in using | |||
821 | // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to | |||
822 | // get the Value *. | |||
823 | InvokeInst * | |||
824 | CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes, | |||
825 | FunctionCallee ActualInvokee, BasicBlock *NormalDest, | |||
826 | BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs, | |||
827 | Optional<ArrayRef<Value *>> DeoptArgs, | |||
828 | ArrayRef<Value *> GCArgs, const Twine &Name = ""); | |||
829 | ||||
830 | /// Create a call to the experimental.gc.result intrinsic to extract | |||
831 | /// the result from a call wrapped in a statepoint. | |||
832 | CallInst *CreateGCResult(Instruction *Statepoint, | |||
833 | Type *ResultType, | |||
834 | const Twine &Name = ""); | |||
835 | ||||
836 | /// Create a call to the experimental.gc.relocate intrinsics to | |||
837 | /// project the relocated value of one pointer from the statepoint. | |||
838 | CallInst *CreateGCRelocate(Instruction *Statepoint, | |||
839 | int BaseOffset, | |||
840 | int DerivedOffset, | |||
841 | Type *ResultType, | |||
842 | const Twine &Name = ""); | |||
843 | ||||
844 | /// Create a call to the experimental.gc.pointer.base intrinsic to get the | |||
845 | /// base pointer for the specified derived pointer. | |||
846 | CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = ""); | |||
847 | ||||
848 | /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get | |||
849 | /// the offset of the specified derived pointer from its base. | |||
850 | CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = ""); | |||
851 | ||||
852 | /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale | |||
853 | /// will be the same type as that of \p Scaling. | |||
854 | Value *CreateVScale(Constant *Scaling, const Twine &Name = ""); | |||
855 | ||||
856 | /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...> | |||
857 | Value *CreateStepVector(Type *DstType, const Twine &Name = ""); | |||
858 | ||||
859 | /// Create a call to intrinsic \p ID with 1 operand which is mangled on its | |||
860 | /// type. | |||
861 | CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, | |||
862 | Instruction *FMFSource = nullptr, | |||
863 | const Twine &Name = ""); | |||
864 | ||||
865 | /// Create a call to intrinsic \p ID with 2 operands which is mangled on the | |||
866 | /// first type. | |||
867 | CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, | |||
868 | Instruction *FMFSource = nullptr, | |||
869 | const Twine &Name = ""); | |||
870 | ||||
871 | /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If | |||
872 | /// \p FMFSource is provided, copy fast-math-flags from that instruction to | |||
873 | /// the intrinsic. | |||
874 | CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types, | |||
875 | ArrayRef<Value *> Args, | |||
876 | Instruction *FMFSource = nullptr, | |||
877 | const Twine &Name = ""); | |||
878 | ||||
879 | /// Create call to the minnum intrinsic. | |||
880 | CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
881 | return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name); | |||
882 | } | |||
883 | ||||
884 | /// Create call to the maxnum intrinsic. | |||
885 | CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
886 | return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name); | |||
887 | } | |||
888 | ||||
889 | /// Create call to the minimum intrinsic. | |||
890 | CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
891 | return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name); | |||
892 | } | |||
893 | ||||
894 | /// Create call to the maximum intrinsic. | |||
895 | CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
896 | return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name); | |||
897 | } | |||
898 | ||||
899 | /// Create a call to the arithmetic_fence intrinsic. | |||
900 | CallInst *CreateArithmeticFence(Value *Val, Type *DstType, | |||
901 | const Twine &Name = "") { | |||
902 | return CreateIntrinsic(Intrinsic::arithmetic_fence, DstType, Val, nullptr, | |||
903 | Name); | |||
904 | } | |||
905 | ||||
906 | /// Create a call to the experimental.vector.extract intrinsic. | |||
907 | CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx, | |||
908 | const Twine &Name = "") { | |||
909 | return CreateIntrinsic(Intrinsic::experimental_vector_extract, | |||
910 | {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr, | |||
911 | Name); | |||
912 | } | |||
913 | ||||
914 | /// Create a call to the experimental.vector.insert intrinsic. | |||
915 | CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec, | |||
916 | Value *Idx, const Twine &Name = "") { | |||
917 | return CreateIntrinsic(Intrinsic::experimental_vector_insert, | |||
918 | {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx}, | |||
919 | nullptr, Name); | |||
920 | } | |||
921 | ||||
922 | private: | |||
923 | /// Create a call to a masked intrinsic with given Id. | |||
924 | CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops, | |||
925 | ArrayRef<Type *> OverloadedTypes, | |||
926 | const Twine &Name = ""); | |||
927 | ||||
928 | Value *getCastedInt8PtrValue(Value *Ptr); | |||
929 | ||||
930 | //===--------------------------------------------------------------------===// | |||
931 | // Instruction creation methods: Terminators | |||
932 | //===--------------------------------------------------------------------===// | |||
933 | ||||
934 | private: | |||
935 | /// Helper to add branch weight and unpredictable metadata onto an | |||
936 | /// instruction. | |||
937 | /// \returns The annotated instruction. | |||
938 | template <typename InstTy> | |||
939 | InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) { | |||
940 | if (Weights) | |||
941 | I->setMetadata(LLVMContext::MD_prof, Weights); | |||
942 | if (Unpredictable) | |||
943 | I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable); | |||
944 | return I; | |||
945 | } | |||
946 | ||||
947 | public: | |||
948 | /// Create a 'ret void' instruction. | |||
949 | ReturnInst *CreateRetVoid() { | |||
950 | return Insert(ReturnInst::Create(Context)); | |||
951 | } | |||
952 | ||||
953 | /// Create a 'ret <val>' instruction. | |||
954 | ReturnInst *CreateRet(Value *V) { | |||
955 | return Insert(ReturnInst::Create(Context, V)); | |||
956 | } | |||
957 | ||||
958 | /// Create a sequence of N insertvalue instructions, | |||
959 | /// with one Value from the retVals array each, that build a aggregate | |||
960 | /// return value one value at a time, and a ret instruction to return | |||
961 | /// the resulting aggregate value. | |||
962 | /// | |||
963 | /// This is a convenience function for code that uses aggregate return values | |||
964 | /// as a vehicle for having multiple return values. | |||
965 | ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) { | |||
966 | Value *V = UndefValue::get(getCurrentFunctionReturnType()); | |||
967 | for (unsigned i = 0; i != N; ++i) | |||
968 | V = CreateInsertValue(V, retVals[i], i, "mrv"); | |||
969 | return Insert(ReturnInst::Create(Context, V)); | |||
970 | } | |||
971 | ||||
972 | /// Create an unconditional 'br label X' instruction. | |||
973 | BranchInst *CreateBr(BasicBlock *Dest) { | |||
974 | return Insert(BranchInst::Create(Dest)); | |||
975 | } | |||
976 | ||||
977 | /// Create a conditional 'br Cond, TrueDest, FalseDest' | |||
978 | /// instruction. | |||
979 | BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, | |||
980 | MDNode *BranchWeights = nullptr, | |||
981 | MDNode *Unpredictable = nullptr) { | |||
982 | return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond), | |||
983 | BranchWeights, Unpredictable)); | |||
984 | } | |||
985 | ||||
986 | /// Create a conditional 'br Cond, TrueDest, FalseDest' | |||
987 | /// instruction. Copy branch meta data if available. | |||
988 | BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, | |||
989 | Instruction *MDSrc) { | |||
990 | BranchInst *Br = BranchInst::Create(True, False, Cond); | |||
991 | if (MDSrc) { | |||
992 | unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable, | |||
993 | LLVMContext::MD_make_implicit, LLVMContext::MD_dbg}; | |||
994 | Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4)); | |||
995 | } | |||
996 | return Insert(Br); | |||
997 | } | |||
998 | ||||
999 | /// Create a switch instruction with the specified value, default dest, | |||
1000 | /// and with a hint for the number of cases that will be added (for efficient | |||
1001 | /// allocation). | |||
1002 | SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10, | |||
1003 | MDNode *BranchWeights = nullptr, | |||
1004 | MDNode *Unpredictable = nullptr) { | |||
1005 | return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases), | |||
1006 | BranchWeights, Unpredictable)); | |||
1007 | } | |||
1008 | ||||
1009 | /// Create an indirect branch instruction with the specified address | |||
1010 | /// operand, with an optional hint for the number of destinations that will be | |||
1011 | /// added (for efficient allocation). | |||
1012 | IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) { | |||
1013 | return Insert(IndirectBrInst::Create(Addr, NumDests)); | |||
1014 | } | |||
1015 | ||||
1016 | /// Create an invoke instruction. | |||
1017 | InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee, | |||
1018 | BasicBlock *NormalDest, BasicBlock *UnwindDest, | |||
1019 | ArrayRef<Value *> Args, | |||
1020 | ArrayRef<OperandBundleDef> OpBundles, | |||
1021 | const Twine &Name = "") { | |||
1022 | InvokeInst *II = | |||
1023 | InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles); | |||
1024 | if (IsFPConstrained) | |||
1025 | setConstrainedFPCallAttr(II); | |||
1026 | return Insert(II, Name); | |||
1027 | } | |||
1028 | InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee, | |||
1029 | BasicBlock *NormalDest, BasicBlock *UnwindDest, | |||
1030 | ArrayRef<Value *> Args = None, | |||
1031 | const Twine &Name = "") { | |||
1032 | InvokeInst *II = | |||
1033 | InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args); | |||
1034 | if (IsFPConstrained) | |||
1035 | setConstrainedFPCallAttr(II); | |||
1036 | return Insert(II, Name); | |||
1037 | } | |||
1038 | ||||
1039 | InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest, | |||
1040 | BasicBlock *UnwindDest, ArrayRef<Value *> Args, | |||
1041 | ArrayRef<OperandBundleDef> OpBundles, | |||
1042 | const Twine &Name = "") { | |||
1043 | return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(), | |||
1044 | NormalDest, UnwindDest, Args, OpBundles, Name); | |||
1045 | } | |||
1046 | ||||
1047 | InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest, | |||
1048 | BasicBlock *UnwindDest, | |||
1049 | ArrayRef<Value *> Args = None, | |||
1050 | const Twine &Name = "") { | |||
1051 | return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(), | |||
1052 | NormalDest, UnwindDest, Args, Name); | |||
1053 | } | |||
1054 | ||||
1055 | /// \brief Create a callbr instruction. | |||
1056 | CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee, | |||
1057 | BasicBlock *DefaultDest, | |||
1058 | ArrayRef<BasicBlock *> IndirectDests, | |||
1059 | ArrayRef<Value *> Args = None, | |||
1060 | const Twine &Name = "") { | |||
1061 | return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, | |||
1062 | Args), Name); | |||
1063 | } | |||
1064 | CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee, | |||
1065 | BasicBlock *DefaultDest, | |||
1066 | ArrayRef<BasicBlock *> IndirectDests, | |||
1067 | ArrayRef<Value *> Args, | |||
1068 | ArrayRef<OperandBundleDef> OpBundles, | |||
1069 | const Twine &Name = "") { | |||
1070 | return Insert( | |||
1071 | CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args, | |||
1072 | OpBundles), Name); | |||
1073 | } | |||
1074 | ||||
1075 | CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest, | |||
1076 | ArrayRef<BasicBlock *> IndirectDests, | |||
1077 | ArrayRef<Value *> Args = None, | |||
1078 | const Twine &Name = "") { | |||
1079 | return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(), | |||
1080 | DefaultDest, IndirectDests, Args, Name); | |||
1081 | } | |||
1082 | CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest, | |||
1083 | ArrayRef<BasicBlock *> IndirectDests, | |||
1084 | ArrayRef<Value *> Args, | |||
1085 | ArrayRef<OperandBundleDef> OpBundles, | |||
1086 | const Twine &Name = "") { | |||
1087 | return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(), | |||
1088 | DefaultDest, IndirectDests, Args, Name); | |||
1089 | } | |||
1090 | ||||
/// Create a 'resume' instruction that re-raises the in-flight exception \p Exn.
ResumeInst *CreateResume(Value *Exn) {
  return Insert(ResumeInst::Create(Exn));
}
1094 | ||||
/// Create a 'cleanupret' from \p CleanupPad, unwinding to \p UnwindBB
/// (or to the caller when \p UnwindBB is null).
CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
                                    BasicBlock *UnwindBB = nullptr) {
  return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
}
1099 | ||||
/// Create a 'catchswitch' within \p ParentPad, unwinding to \p UnwindBB,
/// with space reserved for \p NumHandlers handlers.
CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
                                   unsigned NumHandlers,
                                   const Twine &Name = "") {
  return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
                Name);
}
1106 | ||||
/// Create a 'catchpad' within \p ParentPad with the given arguments.
CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
                             const Twine &Name = "") {
  return Insert(CatchPadInst::Create(ParentPad, Args), Name);
}
1111 | ||||
/// Create a 'cleanuppad' within \p ParentPad with the given arguments.
CleanupPadInst *CreateCleanupPad(Value *ParentPad,
                                 ArrayRef<Value *> Args = None,
                                 const Twine &Name = "") {
  return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
}
1117 | ||||
/// Create a 'catchret' from \p CatchPad, continuing at \p BB.
CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
  return Insert(CatchReturnInst::Create(CatchPad, BB));
}
1121 | ||||
/// Create an 'unreachable' terminator.
UnreachableInst *CreateUnreachable() {
  return Insert(new UnreachableInst(Context));
}
1125 | ||||
1126 | //===--------------------------------------------------------------------===// | |||
1127 | // Instruction creation methods: Binary Operators | |||
1128 | //===--------------------------------------------------------------------===// | |||
1129 | private: | |||
1130 | BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc, | |||
1131 | Value *LHS, Value *RHS, | |||
1132 | const Twine &Name, | |||
1133 | bool HasNUW, bool HasNSW) { | |||
1134 | BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name); | |||
1135 | if (HasNUW) BO->setHasNoUnsignedWrap(); | |||
1136 | if (HasNSW) BO->setHasNoSignedWrap(); | |||
1137 | return BO; | |||
1138 | } | |||
1139 | ||||
/// Attach FP-math attributes to \p I: the !fpmath metadata \p FPMD (falling
/// back to the builder's DefaultFPMathTag when null) and the fast-math
/// flags \p FMF. Returns \p I for chaining.
Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
                        FastMathFlags FMF) const {
  if (!FPMD)
    FPMD = DefaultFPMathTag;
  // Only attach metadata when there is something to attach.
  if (FPMD)
    I->setMetadata(LLVMContext::MD_fpmath, FPMD);
  I->setFastMathFlags(FMF);
  return I;
}
1149 | ||||
/// If both operands are Constants, fold the binary op \p Opc through the
/// folder and insert the result; otherwise return nullptr so the caller
/// emits a real instruction.
Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
                    Value *R, const Twine &Name) const {
  auto *LC = dyn_cast<Constant>(L);
  auto *RC = dyn_cast<Constant>(R);
  return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
}
1156 | ||||
/// Convert a (possibly defaulted) rounding mode into the metadata-string
/// argument expected by constrained-FP intrinsics.
Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
  // Fall back to the builder-wide default when no explicit mode is given.
  RoundingMode UseRounding = DefaultConstrainedRounding;

  if (Rounding.hasValue())
    UseRounding = Rounding.getValue();

  Optional<StringRef> RoundingStr = convertRoundingModeToStr(UseRounding);
  assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
  auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());

  return MetadataAsValue::get(Context, RoundingMDS);
}
1169 | ||||
/// Convert a (possibly defaulted) exception behavior into the
/// metadata-string argument expected by constrained-FP intrinsics.
Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
  // Fall back to the builder-wide default when no explicit behavior is given.
  fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;

  if (Except.hasValue())
    UseExcept = Except.getValue();

  Optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(UseExcept);
  assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
  auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());

  return MetadataAsValue::get(Context, ExceptMDS);
}
1182 | ||||
/// Convert an FP comparison predicate into the metadata-string argument for
/// the constrained fcmp/fcmps intrinsics. FCMP_FALSE/FCMP_TRUE are rejected
/// because the constrained intrinsics have no encoding for them.
Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
  assert(CmpInst::isFPPredicate(Predicate) &&
         Predicate != CmpInst::FCMP_FALSE &&
         Predicate != CmpInst::FCMP_TRUE &&
         "Invalid constrained FP comparison predicate!");

  StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
  auto *PredicateMDS = MDString::get(Context, PredicateStr);

  return MetadataAsValue::get(Context, PredicateMDS);
}
1194 | ||||
1195 | public: | |||
/// Create an integer add of \p LHS and \p RHS, constant-folding when
/// possible, with optional nuw/nsw flags.
Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
                 bool HasNUW = false, bool HasNSW = false) {
  // The folder may fold without inserting any instruction.
  if (auto *V = Folder.FoldAdd(LHS, RHS, HasNUW, HasNSW))
    return V;
  return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
                                 HasNUW, HasNSW);
}

/// Create an add with the no-signed-wrap flag set.
Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateAdd(LHS, RHS, Name, false, true);
}

/// Create an add with the no-unsigned-wrap flag set.
Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateAdd(LHS, RHS, Name, true, false);
}
1211 | ||||
/// Create an integer sub of \p LHS and \p RHS, constant-folding when both
/// operands are constants, with optional nuw/nsw flags.
Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
                 bool HasNUW = false, bool HasNSW = false) {
  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
  return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
                                 HasNUW, HasNSW);
}

/// Create a sub with the no-signed-wrap flag set.
Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateSub(LHS, RHS, Name, false, true);
}

/// Create a sub with the no-unsigned-wrap flag set.
Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateSub(LHS, RHS, Name, true, false);
}
1228 | ||||
/// Create an integer mul of \p LHS and \p RHS, constant-folding when both
/// operands are constants, with optional nuw/nsw flags.
Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
                 bool HasNUW = false, bool HasNSW = false) {
  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
  return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
                                 HasNUW, HasNSW);
}

/// Create a mul with the no-signed-wrap flag set.
Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateMul(LHS, RHS, Name, false, true);
}

/// Create a mul with the no-unsigned-wrap flag set.
Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateMul(LHS, RHS, Name, true, false);
}
1245 | ||||
1246 | Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1247 | bool isExact = false) { | |||
1248 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1249 | if (auto *RC = dyn_cast<Constant>(RHS)) | |||
1250 | return Insert(Folder.CreateUDiv(LC, RC, isExact), Name); | |||
1251 | if (!isExact) | |||
1252 | return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name); | |||
1253 | return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name); | |||
1254 | } | |||
1255 | ||||
1256 | Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1257 | return CreateUDiv(LHS, RHS, Name, true); | |||
1258 | } | |||
1259 | ||||
1260 | Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1261 | bool isExact = false) { | |||
1262 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1263 | if (auto *RC = dyn_cast<Constant>(RHS)) | |||
1264 | return Insert(Folder.CreateSDiv(LC, RC, isExact), Name); | |||
1265 | if (!isExact) | |||
1266 | return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name); | |||
1267 | return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name); | |||
1268 | } | |||
1269 | ||||
1270 | Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1271 | return CreateSDiv(LHS, RHS, Name, true); | |||
1272 | } | |||
1273 | ||||
/// Create an unsigned remainder of \p LHS by \p RHS, constant-folding when
/// possible.
Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
  if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
  return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
}

/// Create a signed remainder of \p LHS by \p RHS, constant-folding when
/// possible.
Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
  if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
  return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
}
1283 | ||||
/// Create a left shift of \p LHS by \p RHS, constant-folding when both
/// operands are constants, with optional nuw/nsw flags.
Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
                 bool HasNUW = false, bool HasNSW = false) {
  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
  return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
                                 HasNUW, HasNSW);
}

/// Convenience overload: shift amount given as an APInt of LHS's type.
Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
                 bool HasNUW = false, bool HasNSW = false) {
  return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                   HasNUW, HasNSW);
}

/// Convenience overload: shift amount given as a plain integer.
Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
                 bool HasNUW = false, bool HasNSW = false) {
  return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                   HasNUW, HasNSW);
}
1304 | ||||
1305 | Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1306 | bool isExact = false) { | |||
1307 | if (auto *LC = dyn_cast<Constant>(LHS)) | |||
1308 | if (auto *RC = dyn_cast<Constant>(RHS)) | |||
1309 | return Insert(Folder.CreateLShr(LC, RC, isExact), Name); | |||
1310 | if (!isExact) | |||
1311 | return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name); | |||
1312 | return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name); | |||
1313 | } | |||
1314 | ||||
1315 | Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "", | |||
1316 | bool isExact = false) { | |||
1317 | return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact); | |||
1318 | } | |||
1319 | ||||
1320 | Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "", | |||
1321 | bool isExact = false) { | |||
1322 | return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact); | |||
1323 | } | |||
1324 | ||||
/// Create an arithmetic right shift of \p LHS by \p RHS, constant-folding
/// when both operands are constants; \p isExact asserts no bits are shifted
/// out.
Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
                  bool isExact = false) {
  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
  if (!isExact)
    return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
  return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
}

/// Convenience overload: shift amount given as an APInt of LHS's type.
Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
                  bool isExact = false) {
  return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
}

/// Convenience overload: shift amount given as a plain integer.
Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
                  bool isExact = false) {
  return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
}
1344 | ||||
1345 | Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1346 | if (auto *V = Folder.FoldAnd(LHS, RHS)) | |||
1347 | return V; | |||
1348 | return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name); | |||
1349 | } | |||
1350 | ||||
1351 | Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") { | |||
1352 | return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name); | |||
1353 | } | |||
1354 | ||||
1355 | Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") { | |||
1356 | return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name); | |||
1357 | } | |||
1358 | ||||
1359 | Value *CreateAnd(ArrayRef<Value*> Ops) { | |||
1360 | assert(!Ops.empty())(static_cast <bool> (!Ops.empty()) ? void (0) : __assert_fail ("!Ops.empty()", "llvm/include/llvm/IR/IRBuilder.h", 1360, __extension__ __PRETTY_FUNCTION__)); | |||
1361 | Value *Accum = Ops[0]; | |||
1362 | for (unsigned i = 1; i < Ops.size(); i++) | |||
1363 | Accum = CreateAnd(Accum, Ops[i]); | |||
1364 | return Accum; | |||
1365 | } | |||
1366 | ||||
1367 | Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") { | |||
1368 | if (auto *V = Folder.FoldOr(LHS, RHS)) | |||
1369 | return V; | |||
1370 | return Insert(BinaryOperator::CreateOr(LHS, RHS), Name); | |||
1371 | } | |||
1372 | ||||
1373 | Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") { | |||
1374 | return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name); | |||
1375 | } | |||
1376 | ||||
1377 | Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") { | |||
1378 | return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name); | |||
1379 | } | |||
1380 | ||||
1381 | Value *CreateOr(ArrayRef<Value*> Ops) { | |||
1382 | assert(!Ops.empty())(static_cast <bool> (!Ops.empty()) ? void (0) : __assert_fail ("!Ops.empty()", "llvm/include/llvm/IR/IRBuilder.h", 1382, __extension__ __PRETTY_FUNCTION__)); | |||
1383 | Value *Accum = Ops[0]; | |||
1384 | for (unsigned i = 1; i < Ops.size(); i++) | |||
1385 | Accum = CreateOr(Accum, Ops[i]); | |||
1386 | return Accum; | |||
1387 | } | |||
1388 | ||||
/// Create a bitwise XOR of \p LHS and \p RHS, constant-folding when
/// possible.
Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
  if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
  return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
}

/// Convenience overload: RHS given as an APInt of LHS's type.
Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
  return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
}

/// Convenience overload: RHS given as a plain integer.
Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
  return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
}
1401 | ||||
/// Create an 'fadd' of \p L and \p R with the builder's default fast-math
/// flags and optional !fpmath metadata \p FPMD. In strict-FP mode the
/// constrained fadd intrinsic is emitted instead.
Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
                  MDNode *FPMD = nullptr) {
  if (IsFPConstrained)
    return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
                                    L, R, nullptr, Name, FPMD);

  // Constant operands fold away without inserting an instruction.
  if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
  Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
  return Insert(I, Name);
}

/// Copy fast-math-flags from an instruction rather than using the builder's
/// default FMF.
/// NOTE(review): \p FMFSource is dereferenced unconditionally on the
/// non-constrained, non-folded path — callers appear required to pass a
/// non-null instruction; confirm against call sites.
Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
                     const Twine &Name = "") {
  if (IsFPConstrained)
    return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
                                    L, R, FMFSource, Name);

  if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
  Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
                              FMFSource->getFastMathFlags());
  return Insert(I, Name);
}
1426 | ||||
/// Create an 'fsub' of \p L and \p R with the builder's default fast-math
/// flags and optional !fpmath metadata \p FPMD. In strict-FP mode the
/// constrained fsub intrinsic is emitted instead.
Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
                  MDNode *FPMD = nullptr) {
  if (IsFPConstrained)
    return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
                                    L, R, nullptr, Name, FPMD);

  if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
  Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
  return Insert(I, Name);
}

/// Copy fast-math-flags from an instruction rather than using the builder's
/// default FMF.
/// NOTE(review): \p FMFSource must be non-null on the non-constrained path.
Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
                     const Twine &Name = "") {
  if (IsFPConstrained)
    return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
                                    L, R, FMFSource, Name);

  if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
  Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
                              FMFSource->getFastMathFlags());
  return Insert(I, Name);
}
1451 | ||||
/// Create an 'fmul' of \p L and \p R with the builder's default fast-math
/// flags and optional !fpmath metadata \p FPMD. In strict-FP mode the
/// constrained fmul intrinsic is emitted instead.
Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
                  MDNode *FPMD = nullptr) {
  if (IsFPConstrained)
    return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
                                    L, R, nullptr, Name, FPMD);

  if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
  Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
  return Insert(I, Name);
}

/// Copy fast-math-flags from an instruction rather than using the builder's
/// default FMF.
/// NOTE(review): \p FMFSource must be non-null on the non-constrained path.
Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
                     const Twine &Name = "") {
  if (IsFPConstrained)
    return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
                                    L, R, FMFSource, Name);

  if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
  Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
                              FMFSource->getFastMathFlags());
  return Insert(I, Name);
}
1476 | ||||
/// Create an 'fdiv' of \p L by \p R with the builder's default fast-math
/// flags and optional !fpmath metadata \p FPMD. In strict-FP mode the
/// constrained fdiv intrinsic is emitted instead.
Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
                  MDNode *FPMD = nullptr) {
  if (IsFPConstrained)
    return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
                                    L, R, nullptr, Name, FPMD);

  if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
  Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
  return Insert(I, Name);
}

/// Copy fast-math-flags from an instruction rather than using the builder's
/// default FMF.
/// NOTE(review): \p FMFSource must be non-null on the non-constrained path.
Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
                     const Twine &Name = "") {
  if (IsFPConstrained)
    return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
                                    L, R, FMFSource, Name);

  if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
  Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
                              FMFSource->getFastMathFlags());
  return Insert(I, Name);
}
1501 | ||||
/// Create an 'frem' of \p L by \p R with the builder's default fast-math
/// flags and optional !fpmath metadata \p FPMD. In strict-FP mode the
/// constrained frem intrinsic is emitted instead.
Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
                  MDNode *FPMD = nullptr) {
  if (IsFPConstrained)
    return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
                                    L, R, nullptr, Name, FPMD);

  if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
  Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
  return Insert(I, Name);
}

/// Copy fast-math-flags from an instruction rather than using the builder's
/// default FMF.
/// NOTE(review): \p FMFSource must be non-null on the non-constrained path.
Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
                     const Twine &Name = "") {
  if (IsFPConstrained)
    return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
                                    L, R, FMFSource, Name);

  if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
  Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
                              FMFSource->getFastMathFlags());
  return Insert(I, Name);
}
1526 | ||||
/// Create a binary operator with opcode \p Opc, constant-folding when
/// possible; FP opcodes additionally receive the builder's fast-math flags
/// and optional !fpmath metadata.
Value *CreateBinOp(Instruction::BinaryOps Opc,
                   Value *LHS, Value *RHS, const Twine &Name = "",
                   MDNode *FPMathTag = nullptr) {
  if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
  Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
  // FP attrs only apply to FP-math operators.
  if (isa<FPMathOperator>(BinOp))
    setFPAttrs(BinOp, FPMathTag, FMF);
  return Insert(BinOp, Name);
}
1536 | ||||
/// Create a short-circuiting logical AND as a select:
/// Cond1 ? Cond2 : false. Both operands must be i1 or vector-of-i1.
Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
  assert(Cond2->getType()->isIntOrIntVectorTy(1));
  return CreateSelect(Cond1, Cond2,
                      ConstantInt::getNullValue(Cond2->getType()), Name);
}

/// Create a short-circuiting logical OR as a select:
/// Cond1 ? true : Cond2. Both operands must be i1 or vector-of-i1.
Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
  assert(Cond2->getType()->isIntOrIntVectorTy(1));
  return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
                      Cond2, Name);
}
1548 | ||||
1549 | // NOTE: this is sequential, non-commutative, ordered reduction! | |||
1550 | Value *CreateLogicalOr(ArrayRef<Value *> Ops) { | |||
1551 | assert(!Ops.empty())(static_cast <bool> (!Ops.empty()) ? void (0) : __assert_fail ("!Ops.empty()", "llvm/include/llvm/IR/IRBuilder.h", 1551, __extension__ __PRETTY_FUNCTION__)); | |||
1552 | Value *Accum = Ops[0]; | |||
1553 | for (unsigned i = 1; i < Ops.size(); i++) | |||
1554 | Accum = CreateLogicalOr(Accum, Ops[i]); | |||
1555 | return Accum; | |||
1556 | } | |||
1557 | ||||
/// Create a call to the constrained-FP intrinsic \p ID for a binary FP
/// operation on \p L and \p R, with optional FMF source, rounding mode and
/// exception behavior. Defined out of line.
CallInst *CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
    const Twine &Name = "", MDNode *FPMathTag = nullptr,
    Optional<RoundingMode> Rounding = None,
    Optional<fp::ExceptionBehavior> Except = None);
1563 | ||||
1564 | Value *CreateNeg(Value *V, const Twine &Name = "", | |||
1565 | bool HasNUW = false, bool HasNSW = false) { | |||
1566 | if (auto *VC = dyn_cast<Constant>(V)) | |||
1567 | return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name); | |||
1568 | BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name); | |||
1569 | if (HasNUW) BO->setHasNoUnsignedWrap(); | |||
1570 | if (HasNSW) BO->setHasNoSignedWrap(); | |||
1571 | return BO; | |||
1572 | } | |||
1573 | ||||
1574 | Value *CreateNSWNeg(Value *V, const Twine &Name = "") { | |||
1575 | return CreateNeg(V, Name, false, true); | |||
1576 | } | |||
1577 | ||||
1578 | Value *CreateNUWNeg(Value *V, const Twine &Name = "") { | |||
1579 | return CreateNeg(V, Name, true, false); | |||
1580 | } | |||
1581 | ||||
/// Create an 'fneg' of \p V with the builder's fast-math flags and optional
/// !fpmath metadata, constant-folding when possible.
Value *CreateFNeg(Value *V, const Twine &Name = "",
                  MDNode *FPMathTag = nullptr) {
  if (auto *VC = dyn_cast<Constant>(V))
    return Insert(Folder.CreateFNeg(VC), Name);
  return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
                Name);
}

/// Copy fast-math-flags from an instruction rather than using the builder's
/// default FMF.
/// NOTE(review): \p FMFSource is dereferenced unconditionally on the
/// non-constant path — callers must pass a non-null instruction.
Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
                     const Twine &Name = "") {
  if (auto *VC = dyn_cast<Constant>(V))
    return Insert(Folder.CreateFNeg(VC), Name);
  return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
                           FMFSource->getFastMathFlags()),
                Name);
}
1600 | ||||
/// Create a bitwise NOT of \p V (xor V, -1), constant-folding when possible.
Value *CreateNot(Value *V, const Twine &Name = "") {
  if (auto *VC = dyn_cast<Constant>(V))
    return Insert(Folder.CreateNot(VC), Name);
  return Insert(BinaryOperator::CreateNot(V), Name);
}
1606 | ||||
/// Create a unary operator with opcode \p Opc, constant-folding when
/// possible; FP opcodes additionally receive the builder's fast-math flags
/// and optional !fpmath metadata.
Value *CreateUnOp(Instruction::UnaryOps Opc,
                  Value *V, const Twine &Name = "",
                  MDNode *FPMathTag = nullptr) {
  if (auto *VC = dyn_cast<Constant>(V))
    return Insert(Folder.CreateUnOp(Opc, VC), Name);
  Instruction *UnOp = UnaryOperator::Create(Opc, V);
  // FP attrs only apply to FP-math operators.
  if (isa<FPMathOperator>(UnOp))
    setFPAttrs(UnOp, FPMathTag, FMF);
  return Insert(UnOp, Name);
}
1617 | ||||
/// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
/// Correct number of operands must be passed accordingly.
/// Defined out of line.
Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                    const Twine &Name = "", MDNode *FPMathTag = nullptr);
1622 | ||||
1623 | //===--------------------------------------------------------------------===// | |||
1624 | // Instruction creation methods: Memory Instructions | |||
1625 | //===--------------------------------------------------------------------===// | |||
1626 | ||||
1627 | AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace, | |||
1628 | Value *ArraySize = nullptr, const Twine &Name = "") { | |||
1629 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
1630 | Align AllocaAlign = DL.getPrefTypeAlign(Ty); | |||
1631 | return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name); | |||
1632 | } | |||
1633 | ||||
1634 | AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr, | |||
1635 | const Twine &Name = "") { | |||
1636 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
1637 | Align AllocaAlign = DL.getPrefTypeAlign(Ty); | |||
1638 | unsigned AddrSpace = DL.getAllocaAddrSpace(); | |||
1639 | return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name); | |||
1640 | } | |||
1641 | ||||
  /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
  /// converting the string to 'bool' for the isVolatile parameter.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
    // MaybeAlign() lets CreateAlignedLoad pick the ABI alignment of Ty.
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
  }

  /// Create a non-volatile load of \p Ty from \p Ptr with ABI alignment.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
  }

  /// Create a load of \p Ty from \p Ptr with ABI alignment and explicit
  /// volatility.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
                       const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
  }
1656 | ||||
  /// Create a store of \p Val to \p Ptr with ABI alignment (MaybeAlign() is
  /// resolved by CreateAlignedStore).
  StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
    return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
  }
1660 | ||||
  /// const char* overload — avoids the string literal binding to the bool
  /// isVolatile parameter of the overload below.
  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                              const char *Name) {
    return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
  }

  /// Create a non-volatile load with an explicit (possibly missing) alignment.
  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                              const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
  }
1670 | ||||
1671 | LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, | |||
1672 | bool isVolatile, const Twine &Name = "") { | |||
1673 | if (!Align) { | |||
1674 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
1675 | Align = DL.getABITypeAlign(Ty); | |||
1676 | } | |||
1677 | return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name); | |||
1678 | } | |||
1679 | ||||
1680 | StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, | |||
1681 | bool isVolatile = false) { | |||
1682 | if (!Align) { | |||
1683 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
1684 | Align = DL.getABITypeAlign(Val->getType()); | |||
1685 | } | |||
1686 | return Insert(new StoreInst(Val, Ptr, isVolatile, *Align)); | |||
1687 | } | |||
  /// Create a fence instruction with the given ordering and synchronization
  /// scope (defaults to system scope).
  FenceInst *CreateFence(AtomicOrdering Ordering,
                         SyncScope::ID SSID = SyncScope::System,
                         const Twine &Name = "") {
    return Insert(new FenceInst(Context, Ordering, SSID), Name);
  }
1693 | ||||
1694 | AtomicCmpXchgInst * | |||
1695 | CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align, | |||
1696 | AtomicOrdering SuccessOrdering, | |||
1697 | AtomicOrdering FailureOrdering, | |||
1698 | SyncScope::ID SSID = SyncScope::System) { | |||
1699 | if (!Align) { | |||
1700 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
1701 | Align = llvm::Align(DL.getTypeStoreSize(New->getType())); | |||
1702 | } | |||
1703 | ||||
1704 | return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering, | |||
1705 | FailureOrdering, SSID)); | |||
1706 | } | |||
1707 | ||||
1708 | AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, | |||
1709 | Value *Val, MaybeAlign Align, | |||
1710 | AtomicOrdering Ordering, | |||
1711 | SyncScope::ID SSID = SyncScope::System) { | |||
1712 | if (!Align) { | |||
1713 | const DataLayout &DL = BB->getModule()->getDataLayout(); | |||
1714 | Align = llvm::Align(DL.getTypeStoreSize(Val->getType())); | |||
1715 | } | |||
1716 | ||||
1717 | return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID)); | |||
1718 | } | |||
1719 | ||||
  /// Create a getelementptr of \p Ty based at \p Ptr with index list
  /// \p IdxList; constant-folds via the folder when possible.
  Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                   const Twine &Name = "") {
    if (auto *V = Folder.FoldGEP(Ty, Ptr, IdxList, /*IsInBounds=*/false))
      return V;
    return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
  }

  /// Same as CreateGEP but marks the result inbounds (UB if the address is
  /// outside the allocated object).
  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                           const Twine &Name = "") {
    if (auto *V = Folder.FoldGEP(Ty, Ptr, IdxList, /*IsInBounds=*/true))
      return V;
    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
  }

  /// Single-index convenience overload of CreateGEP.
  Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
    if (auto *V = Folder.FoldGEP(Ty, Ptr, {Idx}, /*IsInBounds=*/false))
      return V;
    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }

  /// Single-index convenience overload of CreateInBoundsGEP.
  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
                           const Twine &Name = "") {
    if (auto *V = Folder.FoldGEP(Ty, Ptr, {Idx}, /*IsInBounds=*/true))
      return V;
    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }
1746 | ||||
  /// GEP with a single constant i32 index.
  Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
                            const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }

  /// Inbounds GEP with a single constant i32 index.
  Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
                                    const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }

  /// GEP with two constant i32 indices (typical for struct-in-array access).
  Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
                            const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
  }

  /// Inbounds GEP with two constant i32 indices.
  Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
                                    unsigned Idx1, const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
  }
1792 | ||||
  /// GEP with a single constant i64 index.
  Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                            const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }

  /// Inbounds GEP with a single constant i64 index.
  Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                                    const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }

  /// GEP with two constant i64 indices.
  Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
                            const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
  }

  /// Inbounds GEP with two constant i64 indices.
  Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                                    uint64_t Idx1, const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
  }
1838 | ||||
  /// Address of struct field \p Idx: an inbounds GEP {0, Idx} into \p Ty.
  Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
                         const Twine &Name = "") {
    return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
  }
1843 | ||||
  /// Same as CreateGlobalString, but return a pointer with "i8*" type
  /// instead of a pointer to array of i8.
  ///
  /// If no module is given via \p M, it is take from the insertion point basic
  /// block.
  Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
                                  unsigned AddressSpace = 0,
                                  Module *M = nullptr) {
    GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
    // Decay [N x i8]* to i8* via a constant inbounds GEP {0, 0}.
    Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
    Constant *Indices[] = {Zero, Zero};
    return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
                                                  Indices);
  }
1858 | ||||
1859 | //===--------------------------------------------------------------------===// | |||
1860 | // Instruction creation methods: Cast/Conversion Operators | |||
1861 | //===--------------------------------------------------------------------===// | |||
1862 | ||||
  /// Truncate \p V to the narrower integer type \p DestTy.
  Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::Trunc, V, DestTy, Name);
  }

  /// Zero-extend \p V to the wider integer type \p DestTy.
  Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::ZExt, V, DestTy, Name);
  }

  /// Sign-extend \p V to the wider integer type \p DestTy.
  Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::SExt, V, DestTy, Name);
  }
1874 | ||||
1875 | /// Create a ZExt or Trunc from the integer value V to DestTy. Return | |||
1876 | /// the value untouched if the type of V is already DestTy. | |||
1877 | Value *CreateZExtOrTrunc(Value *V, Type *DestTy, | |||
1878 | const Twine &Name = "") { | |||
1879 | assert(V->getType()->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy () && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!" ) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1881, __extension__ __PRETTY_FUNCTION__ )) | |||
1880 | DestTy->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy () && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!" ) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1881, __extension__ __PRETTY_FUNCTION__ )) | |||
1881 | "Can only zero extend/truncate integers!")(static_cast <bool> (V->getType()->isIntOrIntVectorTy () && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!" ) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1881, __extension__ __PRETTY_FUNCTION__ )); | |||
1882 | Type *VTy = V->getType(); | |||
1883 | if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits()) | |||
1884 | return CreateZExt(V, DestTy, Name); | |||
1885 | if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits()) | |||
1886 | return CreateTrunc(V, DestTy, Name); | |||
1887 | return V; | |||
1888 | } | |||
1889 | ||||
1890 | /// Create a SExt or Trunc from the integer value V to DestTy. Return | |||
1891 | /// the value untouched if the type of V is already DestTy. | |||
1892 | Value *CreateSExtOrTrunc(Value *V, Type *DestTy, | |||
1893 | const Twine &Name = "") { | |||
1894 | assert(V->getType()->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy () && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!" ) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1896, __extension__ __PRETTY_FUNCTION__ )) | |||
1895 | DestTy->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy () && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!" ) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1896, __extension__ __PRETTY_FUNCTION__ )) | |||
1896 | "Can only sign extend/truncate integers!")(static_cast <bool> (V->getType()->isIntOrIntVectorTy () && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!" ) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1896, __extension__ __PRETTY_FUNCTION__ )); | |||
1897 | Type *VTy = V->getType(); | |||
1898 | if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits()) | |||
1899 | return CreateSExt(V, DestTy, Name); | |||
1900 | if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits()) | |||
1901 | return CreateTrunc(V, DestTy, Name); | |||
1902 | return V; | |||
1903 | } | |||
1904 | ||||
  /// FP -> unsigned integer. Emits the constrained intrinsic when the builder
  /// is in strict-FP mode.
  Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPToUI, V, DestTy, Name);
  }

  /// FP -> signed integer (constrained intrinsic in strict-FP mode).
  Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPToSI, V, DestTy, Name);
  }

  /// Unsigned integer -> FP (constrained intrinsic in strict-FP mode).
  Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::UIToFP, V, DestTy, Name);
  }

  /// Signed integer -> FP (constrained intrinsic in strict-FP mode).
  Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::SIToFP, V, DestTy, Name);
  }

  /// Truncate to a narrower FP type (constrained intrinsic in strict-FP mode).
  Value *CreateFPTrunc(Value *V, Type *DestTy,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(
          Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
          Name);
    return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
  }

  /// Extend to a wider FP type (constrained intrinsic in strict-FP mode).
  Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPExt, V, DestTy, Name);
  }
1948 | ||||
  /// Pointer -> integer cast.
  Value *CreatePtrToInt(Value *V, Type *DestTy,
                        const Twine &Name = "") {
    return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
  }

  /// Integer -> pointer cast.
  Value *CreateIntToPtr(Value *V, Type *DestTy,
                        const Twine &Name = "") {
    return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
  }

  /// Reinterpret \p V as \p DestTy without changing bits.
  Value *CreateBitCast(Value *V, Type *DestTy,
                       const Twine &Name = "") {
    return CreateCast(Instruction::BitCast, V, DestTy, Name);
  }

  /// Cast a pointer to a different address space.
  Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
                             const Twine &Name = "") {
    return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
  }
1968 | ||||
  /// ZExt if DestTy is wider, bitcast if same width; no-op if types match.
  Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
                             const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
    return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
  }

  /// SExt if DestTy is wider, bitcast if same width; no-op if types match.
  Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
                             const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
    return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
  }

  /// Trunc if DestTy is narrower, bitcast if same width; no-op if types match.
  Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
                              const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
    return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
  }
1995 | ||||
  /// Create the cast \p Op from \p V to \p DestTy; no-op when the types
  /// already match, constant-folds when \p V is a Constant.
  Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
                    const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
    return Insert(CastInst::Create(Op, V, DestTy), Name);
  }

  /// Pointer cast (bitcast / addrspacecast / ptrtoint / inttoptr as
  /// appropriate for the source and destination types).
  Value *CreatePointerCast(Value *V, Type *DestTy,
                           const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
    return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
  }

  /// Pointer cast limited to bitcast or addrspacecast.
  Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
                                             const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;

    if (auto *VC = dyn_cast<Constant>(V)) {
      return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
                    Name);
    }

    return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
                  Name);
  }
2027 | ||||
  /// Integer resize cast: trunc, or zext/sext depending on \p isSigned.
  Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
                       const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
    return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
  }
2036 | ||||
2037 | Value *CreateBitOrPointerCast(Value *V, Type *DestTy, | |||
2038 | const Twine &Name = "") { | |||
2039 | if (V->getType() == DestTy) | |||
2040 | return V; | |||
2041 | if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy()) | |||
2042 | return CreatePtrToInt(V, DestTy, Name); | |||
2043 | if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy()) | |||
2044 | return CreateIntToPtr(V, DestTy, Name); | |||
2045 | ||||
2046 | return CreateBitCast(V, DestTy, Name); | |||
2047 | } | |||
2048 | ||||
  /// FP resize cast (fptrunc or fpext as needed); no-op if types match.
  Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateFPCast(VC, DestTy), Name);
    return Insert(CastInst::CreateFPCast(V, DestTy), Name);
  }
2056 | ||||
  /// Create a constrained-FP cast intrinsic call (defined out of line).
  CallInst *CreateConstrainedFPCast(
      Intrinsic::ID ID, Value *V, Type *DestTy,
      Instruction *FMFSource = nullptr, const Twine &Name = "",
      MDNode *FPMathTag = nullptr,
      Optional<RoundingMode> Rounding = None,
      Optional<fp::ExceptionBehavior> Except = None);

  // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
  // compile time error, instead of converting the string to bool for the
  // isSigned parameter.
  Value *CreateIntCast(Value *, Type *, const char *) = delete;
2068 | ||||
2069 | //===--------------------------------------------------------------------===// | |||
2070 | // Instruction creation methods: Compare Instructions | |||
2071 | //===--------------------------------------------------------------------===// | |||
2072 | ||||
  // Convenience wrappers: one per integer comparison predicate, all
  // delegating to CreateICmp (which constant-folds via the folder).

  Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
  }

  Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
  }

  Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
  }

  Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
  }

  Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
  }

  Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
  }

  Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
  }

  Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
  }

  Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
  }

  Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
  }
2112 | ||||
  // Convenience wrappers: one per FP comparison predicate (ordered O*
  // predicates are false on NaN inputs, unordered U* are true), all
  // delegating to CreateFCmp.

  Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
  }
2182 | ||||
  /// Create an integer comparison with predicate \p P; constant-folds via the
  /// folder when both operands fold.
  Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
                    const Twine &Name = "") {
    if (auto *V = Folder.FoldICmp(P, LHS, RHS))
      return V;
    return Insert(new ICmpInst(P, LHS, RHS), Name);
  }
2189 | ||||
  // Create a quiet floating-point comparison (i.e. one that raises an FP
  // exception only in the case where an input is a signaling NaN).
  // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
  Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
                    const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
  }
2197 | ||||
2198 | Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, | |||
2199 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { | |||
2200 | return CmpInst::isFPPredicate(Pred) | |||
2201 | ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag) | |||
2202 | : CreateICmp(Pred, LHS, RHS, Name); | |||
2203 | } | |||
2204 | ||||
  // Create a signaling floating-point comparison (i.e. one that raises an FP
  // exception whenever an input is any NaN, signaling or quiet).
  // Note that this differs from CreateFCmp only if IsFPConstrained is true.
  Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
                     const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
  }
2212 | ||||
private:
  // Helper routine to create either a signaling or a quiet FP comparison.
  Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
                          const Twine &Name, MDNode *FPMathTag,
                          bool IsSignaling);

public:
  /// Create a constrained-FP comparison intrinsic call (defined out of line).
  CallInst *CreateConstrainedFPCmp(
      Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
      const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2223 | ||||
2224 | //===--------------------------------------------------------------------===// | |||
2225 | // Instruction creation methods: Other Instructions | |||
2226 | //===--------------------------------------------------------------------===// | |||
2227 | ||||
  /// Create an empty PHI node of type \p Ty with space reserved for
  /// \p NumReservedValues incoming edges; FP PHIs receive the builder's
  /// current fast-math flags.
  PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
                     const Twine &Name = "") {
    PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
    if (isa<FPMathOperator>(Phi))
      setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
    return Insert(Phi, Name);
  }
2235 | ||||
  /// Create a call to \p Callee of type \p FTy, using the builder's default
  /// operand bundles; applies strict-FP call attributes and fast-math flags
  /// where relevant.
  CallInst *CreateCall(FunctionType *FTy, Value *Callee,
                       ArrayRef<Value *> Args = None, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(CI);
    if (isa<FPMathOperator>(CI))
      setFPAttrs(CI, FPMathTag, FMF);
    return Insert(CI, Name);
  }

  /// As above, but with explicit operand bundles instead of the defaults.
  CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> OpBundles,
                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(CI);
    if (isa<FPMathOperator>(CI))
      setFPAttrs(CI, FPMathTag, FMF);
    return Insert(CI, Name);
  }

  /// FunctionCallee convenience overload (unpacks type + callee).
  CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
                      FPMathTag);
  }

  /// FunctionCallee convenience overload with explicit operand bundles.
  CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> OpBundles,
                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
                      OpBundles, Name, FPMathTag);
  }
2270 | ||||
  /// Create a constrained-FP intrinsic call (defined out of line).
  CallInst *CreateConstrainedFPCall(
      Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
      Optional<RoundingMode> Rounding = None,
      Optional<fp::ExceptionBehavior> Except = None);

  /// Create a select; may copy profile/unpredictable metadata from
  /// \p MDFrom (defined out of line).
  Value *CreateSelect(Value *C, Value *True, Value *False,
                      const Twine &Name = "", Instruction *MDFrom = nullptr);

  /// Create a va_arg instruction reading a value of type \p Ty from \p List.
  VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
    return Insert(new VAArgInst(List, Ty), Name);
  }
2282 | ||||
  /// Extract element \p Idx from vector \p Vec; constant-folds when both
  /// operands are constants.
  Value *CreateExtractElement(Value *Vec, Value *Idx,
                              const Twine &Name = "") {
    if (auto *VC = dyn_cast<Constant>(Vec))
      if (auto *IC = dyn_cast<Constant>(Idx))
        return Insert(Folder.CreateExtractElement(VC, IC), Name);
    return Insert(ExtractElementInst::Create(Vec, Idx), Name);
  }

  /// Immediate-index convenience overload (index becomes an i64 constant).
  Value *CreateExtractElement(Value *Vec, uint64_t Idx,
                              const Twine &Name = "") {
    return CreateExtractElement(Vec, getInt64(Idx), Name);
  }

  /// Insert \p NewElt at \p Idx into a fresh poison vector of type \p VecTy.
  Value *CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx,
                             const Twine &Name = "") {
    return CreateInsertElement(PoisonValue::get(VecTy), NewElt, Idx, Name);
  }

  /// Immediate-index variant of the poison-vector overload above.
  Value *CreateInsertElement(Type *VecTy, Value *NewElt, uint64_t Idx,
                             const Twine &Name = "") {
    return CreateInsertElement(PoisonValue::get(VecTy), NewElt, Idx, Name);
  }

  /// Insert \p NewElt at \p Idx into \p Vec; constant-folds when all three
  /// operands are constants.
  Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
                             const Twine &Name = "") {
    if (auto *VC = dyn_cast<Constant>(Vec))
      if (auto *NC = dyn_cast<Constant>(NewElt))
        if (auto *IC = dyn_cast<Constant>(Idx))
          return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
    return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
  }

  /// Immediate-index convenience overload (index becomes an i64 constant).
  Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
                             const Twine &Name = "") {
    return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
  }
2319 | ||||
2320 | Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask, | |||
2321 | const Twine &Name = "") { | |||
2322 | SmallVector<int, 16> IntMask; | |||
2323 | ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask); | |||
2324 | return CreateShuffleVector(V1, V2, IntMask, Name); | |||
2325 | } | |||
2326 | ||||
2327 | /// See class ShuffleVectorInst for a description of the mask representation. | |||
2328 | Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask, | |||
2329 | const Twine &Name = "") { | |||
2330 | if (auto *V1C = dyn_cast<Constant>(V1)) | |||
2331 | if (auto *V2C = dyn_cast<Constant>(V2)) | |||
2332 | return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name); | |||
2333 | return Insert(new ShuffleVectorInst(V1, V2, Mask), Name); | |||
2334 | } | |||
2335 | ||||
2336 | /// Create a unary shuffle. The second vector operand of the IR instruction | |||
2337 | /// is poison. | |||
2338 | Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask, | |||
2339 | const Twine &Name = "") { | |||
2340 | return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name); | |||
2341 | } | |||
2342 | ||||
2343 | Value *CreateExtractValue(Value *Agg, | |||
2344 | ArrayRef<unsigned> Idxs, | |||
2345 | const Twine &Name = "") { | |||
2346 | if (auto *AggC = dyn_cast<Constant>(Agg)) | |||
2347 | return Insert(Folder.CreateExtractValue(AggC, Idxs), Name); | |||
2348 | return Insert(ExtractValueInst::Create(Agg, Idxs), Name); | |||
2349 | } | |||
2350 | ||||
2351 | Value *CreateInsertValue(Value *Agg, Value *Val, | |||
2352 | ArrayRef<unsigned> Idxs, | |||
2353 | const Twine &Name = "") { | |||
2354 | if (auto *AggC = dyn_cast<Constant>(Agg)) | |||
2355 | if (auto *ValC = dyn_cast<Constant>(Val)) | |||
2356 | return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name); | |||
2357 | return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name); | |||
2358 | } | |||
2359 | ||||
2360 | LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses, | |||
2361 | const Twine &Name = "") { | |||
2362 | return Insert(LandingPadInst::Create(Ty, NumClauses), Name); | |||
2363 | } | |||
2364 | ||||
2365 | Value *CreateFreeze(Value *V, const Twine &Name = "") { | |||
2366 | return Insert(new FreezeInst(V), Name); | |||
2367 | } | |||
2368 | ||||
2369 | //===--------------------------------------------------------------------===// | |||
2370 | // Utility creation methods | |||
2371 | //===--------------------------------------------------------------------===// | |||
2372 | ||||
2373 | /// Return an i1 value testing if \p Arg is null. | |||
2374 | Value *CreateIsNull(Value *Arg, const Twine &Name = "") { | |||
2375 | return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()), | |||
2376 | Name); | |||
2377 | } | |||
2378 | ||||
2379 | /// Return an i1 value testing if \p Arg is not null. | |||
2380 | Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") { | |||
2381 | return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()), | |||
2382 | Name); | |||
2383 | } | |||
2384 | ||||
/// Return the i64 difference between two pointer values, dividing out
/// the size of the pointed-to objects.
///
/// This is intended to implement C-style pointer subtraction. As such, the
/// pointers must be appropriately aligned for their element types and
/// pointing into the same object. (Defined out of line.)
Value *CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                     const Twine &Name = "");

/// Create a launder.invariant.group intrinsic call. If Ptr type is
/// different from pointer to i8, it's casted to pointer to i8 in the same
/// address space before call and casted back to Ptr type after call.
Value *CreateLaunderInvariantGroup(Value *Ptr);

/// \brief Create a strip.invariant.group intrinsic call. If Ptr type is
/// different from pointer to i8, it's casted to pointer to i8 in the same
/// address space before call and casted back to Ptr type after call.
Value *CreateStripInvariantGroup(Value *Ptr);

/// Return a vector value that contains the vector V reversed.
Value *CreateVectorReverse(Value *V, const Twine &Name = "");

/// Return a vector splice intrinsic if using scalable vectors, otherwise
/// return a shufflevector. If the immediate is positive, a vector is
/// extracted from concat(V1, V2), starting at Imm. If the immediate
/// is negative, we extract -Imm elements from V1 and the remaining
/// elements from V2. Imm is a signed integer in the range
/// -VL <= Imm < VL (where VL is the runtime vector length of the
/// source/result vector)
Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                          const Twine &Name = "");

/// Return a vector value that contains \arg V broadcasted to \p
/// NumElts elements.
Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");

/// Return a vector value that contains \arg V broadcasted to \p
/// EC elements.
Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");

/// Return a value that has been extracted from a larger integer type.
/// NOTE(review): exact interpretation of \p Offset (presumably a byte
/// offset honoring \p DL's endianness) should be confirmed against the
/// out-of-line definition.
Value *CreateExtractInteger(const DataLayout &DL, Value *From,
                            IntegerType *ExtractedTy, uint64_t Offset,
                            const Twine &Name);

/// NOTE(review): the three CreatePreserve*AccessIndex entry points below
/// appear (by name) to emit the llvm.preserve.*.access.index intrinsics used
/// for relocatable field/array accesses; confirm against their out-of-line
/// definitions before relying on that.
Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
                                      unsigned Dimension, unsigned LastIndex,
                                      MDNode *DbgInfo);

Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
                                      MDNode *DbgInfo);

Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
                                       unsigned Index, unsigned FieldIndex,
                                       MDNode *DbgInfo);
2440 | ||||
private:
/// Helper function that creates an assume intrinsic call that
/// represents an alignment assumption on the provided pointer \p PtrValue
/// with offset \p OffsetValue and alignment value \p AlignValue.
/// \p OffsetValue may be null — both public overloads below default their
/// offset argument to nullptr.
CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                          Value *PtrValue, Value *AlignValue,
                                          Value *OffsetValue);

public:
/// Create an assume intrinsic call that represents an alignment
/// assumption on the provided pointer.
///
/// An optional offset can be provided, and if it is provided, the offset
/// must be subtracted from the provided pointer to get the pointer with the
/// specified alignment.
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                    unsigned Alignment,
                                    Value *OffsetValue = nullptr);

/// Create an assume intrinsic call that represents an alignment
/// assumption on the provided pointer.
///
/// An optional offset can be provided, and if it is provided, the offset
/// must be subtracted from the provided pointer to get the pointer with the
/// specified alignment.
///
/// This overload handles the condition where the Alignment is dependent
/// on an existing value rather than a static value.
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                    Value *Alignment,
                                    Value *OffsetValue = nullptr);
2472 | }; | |||
2473 | ||||
2474 | /// This provides a uniform API for creating instructions and inserting | |||
2475 | /// them into a basic block: either at the end of a BasicBlock, or at a specific | |||
2476 | /// iterator location in a block. | |||
2477 | /// | |||
2478 | /// Note that the builder does not expose the full generality of LLVM | |||
2479 | /// instructions. For access to extra instruction properties, use the mutators | |||
2480 | /// (e.g. setVolatile) on the instructions after they have been | |||
2481 | /// created. Convenience state exists to specify fast-math flags and fp-math | |||
2482 | /// tags. | |||
2483 | /// | |||
2484 | /// The first template argument specifies a class to use for creating constants. | |||
2485 | /// This defaults to creating minimally folded constants. The second template | |||
2486 | /// argument allows clients to specify custom insertion hooks that are called on | |||
2487 | /// every newly created insertion. | |||
template <typename FolderTy = ConstantFolder,
          typename InserterTy = IRBuilderDefaultInserter>
class IRBuilder : public IRBuilderBase {
private:
  // Concrete folder/inserter instances owned by this builder; the base class
  // refers to them (see the constructor note below).
  FolderTy Folder;
  InserterTy Inserter;

public:
  // NOTE(review) on every constructor below: `this->Folder` and
  // `this->Inserter` are handed to the base class *before* those members are
  // initialized. This is well-defined only if IRBuilderBase merely captures
  // references/addresses and does not use the objects during construction —
  // confirm against IRBuilderBase's constructor (not visible here). Do not
  // reorder these initializers.
  IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
        Folder(Folder), Inserter(Inserter) {}

  // Default-constructed folder and inserter; no insert point is set.
  explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}

  // Custom folder; inserts at the end of \p TheBB.
  explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
                     MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles), Folder(Folder) {
    SetInsertPoint(TheBB);
  }

  // Inserts at the end of \p TheBB.
  explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(TheBB);
  }

  // Inserts before the instruction \p IP.
  explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(IP);
  }

  // Custom folder; inserts at iterator \p IP within \p TheBB.
  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles), Folder(Folder) {
    SetInsertPoint(TheBB, IP);
  }

  // Inserts at iterator \p IP within \p TheBB.
  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(TheBB, IP);
  }

  /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
  /// or FastMathFlagGuard instead.
  IRBuilder(const IRBuilder &) = delete;

  InserterTy &getInserter() { return Inserter; }
};
2550 | ||||
// Create wrappers for C Binding types (see CBindingWrapping.h).
// This expands to the unwrap()/wrap() helpers that convert between
// IRBuilder<>* and the opaque C-API handle LLVMBuilderRef.
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)inline IRBuilder<> *unwrap(LLVMBuilderRef P) { return reinterpret_cast <IRBuilder<>*>(P); } inline LLVMBuilderRef wrap(const IRBuilder<> *P) { return reinterpret_cast<LLVMBuilderRef >(const_cast<IRBuilder<>*>(P)); }
2553 | ||||
2554 | } // end namespace llvm | |||
2555 | ||||
2556 | #endif // LLVM_IR_IRBUILDER_H |