Bug Summary

File: polly/lib/CodeGen/BlockGenerators.cpp
Warning: line 1164, column 40
Called C++ object pointer is null
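For context, the pattern the checker flags reduces to a dyn_cast-style conversion whose only null check is an assert. Because this build passes -D NDEBUG, the assert is compiled out, so the analyzer assumes the conversion can fail and that the subsequent member call dereferences a null pointer. A minimal stand-alone sketch of the pattern (hypothetical types, not the Polly code itself):

    // Hedged illustration of the diagnosed pattern, not the actual source.
    #include <cassert>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base { int opcode() const { return 42; } };

    int use(Base *B) {
      assert(dynamic_cast<Derived *>(B) && "expected a Derived"); // removed under NDEBUG
      Derived *D = dynamic_cast<Derived *>(B); // may be null once the assert is gone
      return D->opcode();                      // "Called C++ object pointer is null"
    }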

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name BlockGenerators.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/polly/lib -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/polly/lib -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/polly/lib -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/polly/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/polly/lib/External -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/polly/lib/External/pet/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/polly/lib/External/isl/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/polly/lib/External/isl/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/polly/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-long-long -Wno-unused-parameter -Wwrite-strings -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/polly/lib -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. 
-ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/polly/lib/CodeGen/BlockGenerators.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/polly/lib/CodeGen/BlockGenerators.cpp

1//===--- BlockGenerators.cpp - Generate code for statements -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the BlockGenerator and VectorBlockGenerator classes,
10// which generate sequential code and vectorized code for a polyhedral
11// statement, respectively.
12//
13//===----------------------------------------------------------------------===//
14
15#include "polly/CodeGen/BlockGenerators.h"
16#include "polly/CodeGen/IslExprBuilder.h"
17#include "polly/CodeGen/RuntimeDebugBuilder.h"
18#include "polly/Options.h"
19#include "polly/ScopInfo.h"
20#include "polly/Support/ScopHelper.h"
21#include "polly/Support/VirtualInstruction.h"
22#include "llvm/Analysis/LoopInfo.h"
23#include "llvm/Analysis/RegionInfo.h"
24#include "llvm/Analysis/ScalarEvolution.h"
25#include "llvm/Transforms/Utils/BasicBlockUtils.h"
26#include "llvm/Transforms/Utils/Local.h"
27#include "isl/ast.h"
28#include <deque>
29
30using namespace llvm;
31using namespace polly;
32
33static cl::opt<bool> Aligned("enable-polly-aligned",
34 cl::desc("Assumed aligned memory accesses."),
35 cl::Hidden, cl::init(false), cl::ZeroOrMore,
36 cl::cat(PollyCategory));
37
38bool PollyDebugPrinting;
39static cl::opt<bool, true> DebugPrintingX(
40 "polly-codegen-add-debug-printing",
41 cl::desc("Add printf calls that show the values loaded/stored."),
42 cl::location(PollyDebugPrinting), cl::Hidden, cl::init(false),
43 cl::ZeroOrMore, cl::cat(PollyCategory));
44
45static cl::opt<bool> TraceStmts(
46 "polly-codegen-trace-stmts",
47 cl::desc("Add printf calls that print the statement being executed"),
48 cl::Hidden, cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
49
50static cl::opt<bool> TraceScalars(
51 "polly-codegen-trace-scalars",
52 cl::desc("Add printf calls that print the values of all scalar values "
53 "used in a statement. Requires -polly-codegen-trace-stmts."),
54 cl::Hidden, cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
55
56BlockGenerator::BlockGenerator(
57 PollyIRBuilder &B, LoopInfo &LI, ScalarEvolution &SE, DominatorTree &DT,
58 AllocaMapTy &ScalarMap, EscapeUsersAllocaMapTy &EscapeMap,
59 ValueMapT &GlobalMap, IslExprBuilder *ExprBuilder, BasicBlock *StartBlock)
60 : Builder(B), LI(LI), SE(SE), ExprBuilder(ExprBuilder), DT(DT),
61 EntryBB(nullptr), ScalarMap(ScalarMap), EscapeMap(EscapeMap),
62 GlobalMap(GlobalMap), StartBlock(StartBlock) {}
63
64Value *BlockGenerator::trySynthesizeNewValue(ScopStmt &Stmt, Value *Old,
65 ValueMapT &BBMap,
66 LoopToScevMapT &LTS,
67 Loop *L) const {
68 if (!SE.isSCEVable(Old->getType()))
69 return nullptr;
70
71 const SCEV *Scev = SE.getSCEVAtScope(Old, L);
72 if (!Scev)
73 return nullptr;
74
75 if (isa<SCEVCouldNotCompute>(Scev))
76 return nullptr;
77
78 const SCEV *NewScev = SCEVLoopAddRecRewriter::rewrite(Scev, LTS, SE);
79 ValueMapT VTV;
80 VTV.insert(BBMap.begin(), BBMap.end());
81 VTV.insert(GlobalMap.begin(), GlobalMap.end());
82
83 Scop &S = *Stmt.getParent();
84 const DataLayout &DL = S.getFunction().getParent()->getDataLayout();
85 auto IP = Builder.GetInsertPoint();
86
87 assert(IP != Builder.GetInsertBlock()->end() &&
88 "Only instructions can be insert points for SCEVExpander");
89 Value *Expanded =
90 expandCodeFor(S, SE, DL, "polly", NewScev, Old->getType(), &*IP, &VTV,
91 StartBlock->getSinglePredecessor());
92
93 BBMap[Old] = Expanded;
94 return Expanded;
95}
96
97Value *BlockGenerator::getNewValue(ScopStmt &Stmt, Value *Old, ValueMapT &BBMap,
98 LoopToScevMapT &LTS, Loop *L) const {
99
100 auto lookupGlobally = [this](Value *Old) -> Value * {
101 Value *New = GlobalMap.lookup(Old);
102 if (!New)
103 return nullptr;
104
105 // Required by:
106 // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded.ll
107 // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded_different_bb.ll
108 // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded_pass_only_needed.ll
109 // * Isl/CodeGen/OpenMP/invariant_base_pointers_preloaded.ll
110 // * Isl/CodeGen/OpenMP/loop-body-references-outer-values-3.ll
111 // * Isl/CodeGen/OpenMP/single_loop_with_loop_invariant_baseptr.ll
112 // GlobalMap should be a mapping from (value in original SCoP) to (copied
113 // value in generated SCoP), without intermediate mappings, which might
114 // easily require transitiveness as well.
115 if (Value *NewRemapped = GlobalMap.lookup(New))
116 New = NewRemapped;
117
118 // No test case for this code.
119 if (Old->getType()->getScalarSizeInBits() <
120 New->getType()->getScalarSizeInBits())
121 New = Builder.CreateTruncOrBitCast(New, Old->getType());
122
123 return New;
124 };
125
126 Value *New = nullptr;
127 auto VUse = VirtualUse::create(&Stmt, L, Old, true);
128 switch (VUse.getKind()) {
129 case VirtualUse::Block:
130 // BasicBlocks are constants, but the BlockGenerator copies them.
131 New = BBMap.lookup(Old);
132 break;
133
134 case VirtualUse::Constant:
135 // Used by:
136 // * Isl/CodeGen/OpenMP/reference-argument-from-non-affine-region.ll
137 // Constants should not be redefined. In this case, the GlobalMap just
138 // contains a mapping to the same constant, which is unnecessary, but
139 // harmless.
140 if ((New = lookupGlobally(Old)))
141 break;
142
143 assert(!BBMap.count(Old));
144 New = Old;
145 break;
146
147 case VirtualUse::ReadOnly:
148 assert(!GlobalMap.count(Old));
149
150 // Required for:
151 // * Isl/CodeGen/MemAccess/create_arrays.ll
152 // * Isl/CodeGen/read-only-scalars.ll
153 // * ScheduleOptimizer/pattern-matching-based-opts_10.ll
154 // For some reason these reload a read-only value. The reloaded value ends
155 // up in BBMap, but its value should be identical.
156 //
157 // Required for:
158 // * Isl/CodeGen/OpenMP/single_loop_with_param.ll
159 // The parallel subfunctions need to reference the read-only value from the
160 // parent function, this is done by reloading them locally.
161 if ((New = BBMap.lookup(Old)))
162 break;
163
164 New = Old;
165 break;
166
167 case VirtualUse::Synthesizable:
168 // Used by:
169 // * Isl/CodeGen/OpenMP/loop-body-references-outer-values-3.ll
170 // * Isl/CodeGen/OpenMP/recomputed-srem.ll
171 // * Isl/CodeGen/OpenMP/reference-other-bb.ll
172 // * Isl/CodeGen/OpenMP/two-parallel-loops-reference-outer-indvar.ll
173 // For some reason synthesizable values end up in GlobalMap. Their values
174 // are the same as trySynthesizeNewValue would return. The legacy
175 // implementation prioritized GlobalMap, so this is what we do here as well.
176 // Ideally, synthesizable values should not end up in GlobalMap.
177 if ((New = lookupGlobally(Old)))
178 break;
179
180 // Required for:
181 // * Isl/CodeGen/RuntimeDebugBuilder/combine_different_values.ll
182 // * Isl/CodeGen/getNumberOfIterations.ll
183 // * Isl/CodeGen/non_affine_float_compare.ll
184 // * ScheduleOptimizer/pattern-matching-based-opts_10.ll
185 // Ideally, synthesizable values are synthesized by trySynthesizeNewValue,
186 // not precomputed (SCEVExpander has its own caching mechanism).
187 // These tests fail without this, but I think trySynthesizeNewValue would
188 // just re-synthesize the same instructions.
189 if ((New = BBMap.lookup(Old)))
190 break;
191
192 New = trySynthesizeNewValue(Stmt, Old, BBMap, LTS, L);
193 break;
194
195 case VirtualUse::Hoisted:
196 // TODO: Hoisted invariant loads should be found in GlobalMap only, but not
197 // redefined locally (which will be ignored anyway). That is, the following
198 // assertion should apply: assert(!BBMap.count(Old))
199
200 New = lookupGlobally(Old);
201 break;
202
203 case VirtualUse::Intra:
204 case VirtualUse::Inter:
205 assert(!GlobalMap.count(Old) &&
206 "Intra and inter-stmt values are never global");
207 New = BBMap.lookup(Old);
208 break;
209 }
210 assert(New && "Unexpected scalar dependence in region!");
211 return New;
212}
213
214void BlockGenerator::copyInstScalar(ScopStmt &Stmt, Instruction *Inst,
215 ValueMapT &BBMap, LoopToScevMapT &LTS) {
216 // We do not generate debug intrinsics as we did not investigate how to
217 // copy them correctly. At the current state, they just crash the code
218 // generation as the meta-data operands are not correctly copied.
219 if (isa<DbgInfoIntrinsic>(Inst))
220 return;
221
222 Instruction *NewInst = Inst->clone();
223
224 // Replace old operands with the new ones.
225 for (Value *OldOperand : Inst->operands()) {
226 Value *NewOperand =
227 getNewValue(Stmt, OldOperand, BBMap, LTS, getLoopForStmt(Stmt));
228
229 if (!NewOperand) {
230 assert(!isa<StoreInst>(NewInst) &&
231 "Store instructions are always needed!");
232 NewInst->deleteValue();
233 return;
234 }
235
236 NewInst->replaceUsesOfWith(OldOperand, NewOperand);
237 }
238
239 Builder.Insert(NewInst);
240 BBMap[Inst] = NewInst;
241
242 // When copying the instruction onto the Module meant for the GPU,
243 // debug metadata attached to an instruction causes all related
244 // metadata to be pulled into the Module. This includes the DICompileUnit,
245 // which will not be listed in llvm.dbg.cu of the Module since the Module
246 // doesn't contain one. This fails the verification of the Module and the
247 // subsequent generation of the ASM string.
248 if (NewInst->getModule() != Inst->getModule())
249 NewInst->setDebugLoc(llvm::DebugLoc());
250
251 if (!NewInst->getType()->isVoidTy())
252 NewInst->setName("p_" + Inst->getName());
253}
254
255Value *
256BlockGenerator::generateLocationAccessed(ScopStmt &Stmt, MemAccInst Inst,
257 ValueMapT &BBMap, LoopToScevMapT &LTS,
258 isl_id_to_ast_expr *NewAccesses) {
259 const MemoryAccess &MA = Stmt.getArrayAccessFor(Inst);
260 return generateLocationAccessed(
261 Stmt, getLoopForStmt(Stmt),
262 Inst.isNull() ? nullptr : Inst.getPointerOperand(), BBMap, LTS,
263 NewAccesses, MA.getId().release(), MA.getAccessValue()->getType());
264}
265
266Value *BlockGenerator::generateLocationAccessed(
267 ScopStmt &Stmt, Loop *L, Value *Pointer, ValueMapT &BBMap,
268 LoopToScevMapT &LTS, isl_id_to_ast_expr *NewAccesses, __isl_take isl_id *Id,
269 Type *ExpectedType) {
270 isl_ast_expr *AccessExpr = isl_id_to_ast_expr_get(NewAccesses, Id);
271
272 if (AccessExpr) {
273 AccessExpr = isl_ast_expr_address_of(AccessExpr);
274 auto Address = ExprBuilder->create(AccessExpr);
275
276 // Cast the address of this memory access to a pointer type that has the
277 // same element type as the original access, but uses the address space of
278 // the newly generated pointer.
279 auto OldPtrTy = ExpectedType->getPointerTo();
280 auto NewPtrTy = Address->getType();
281 OldPtrTy = PointerType::getWithSamePointeeType(
282 OldPtrTy, NewPtrTy->getPointerAddressSpace());
283
284 if (OldPtrTy != NewPtrTy)
285 Address = Builder.CreateBitOrPointerCast(Address, OldPtrTy);
286 return Address;
287 }
288 assert(
289 Pointer &&
290 "If expression was not generated, must use the original pointer value");
291 return getNewValue(Stmt, Pointer, BBMap, LTS, L);
292}
293
294Value *
295BlockGenerator::getImplicitAddress(MemoryAccess &Access, Loop *L,
296 LoopToScevMapT &LTS, ValueMapT &BBMap,
297 __isl_keep isl_id_to_ast_expr *NewAccesses) {
298 if (Access.isLatestArrayKind())
299 return generateLocationAccessed(*Access.getStatement(), L, nullptr, BBMap,
300 LTS, NewAccesses, Access.getId().release(),
301 Access.getAccessValue()->getType());
302
303 return getOrCreateAlloca(Access);
304}
305
306Loop *BlockGenerator::getLoopForStmt(const ScopStmt &Stmt) const {
307 auto *StmtBB = Stmt.getEntryBlock();
308 return LI.getLoopFor(StmtBB);
309}
310
311Value *BlockGenerator::generateArrayLoad(ScopStmt &Stmt, LoadInst *Load,
312 ValueMapT &BBMap, LoopToScevMapT &LTS,
313 isl_id_to_ast_expr *NewAccesses) {
314 if (Value *PreloadLoad = GlobalMap.lookup(Load))
315 return PreloadLoad;
316
317 Value *NewPointer =
318 generateLocationAccessed(Stmt, Load, BBMap, LTS, NewAccesses);
319 Value *ScalarLoad =
320 Builder.CreateAlignedLoad(Load->getType(), NewPointer, Load->getAlign(),
321 Load->getName() + "_p_scalar_");
322
323 if (PollyDebugPrinting)
324 RuntimeDebugBuilder::createCPUPrinter(Builder, "Load from ", NewPointer,
325 ": ", ScalarLoad, "\n");
326
327 return ScalarLoad;
328}
329
330void BlockGenerator::generateArrayStore(ScopStmt &Stmt, StoreInst *Store,
331 ValueMapT &BBMap, LoopToScevMapT &LTS,
332 isl_id_to_ast_expr *NewAccesses) {
333 MemoryAccess &MA = Stmt.getArrayAccessFor(Store);
334 isl::set AccDom = MA.getAccessRelation().domain();
335 std::string Subject = MA.getId().get_name();
336
337 generateConditionalExecution(Stmt, AccDom, Subject.c_str(), [&, this]() {
338 Value *NewPointer =
339 generateLocationAccessed(Stmt, Store, BBMap, LTS, NewAccesses);
340 Value *ValueOperand = getNewValue(Stmt, Store->getValueOperand(), BBMap,
341 LTS, getLoopForStmt(Stmt));
342
343 if (PollyDebugPrinting)
344 RuntimeDebugBuilder::createCPUPrinter(Builder, "Store to ", NewPointer,
345 ": ", ValueOperand, "\n");
346
347 Builder.CreateAlignedStore(ValueOperand, NewPointer, Store->getAlign());
348 });
349}
350
351bool BlockGenerator::canSyntheziseInStmt(ScopStmt &Stmt, Instruction *Inst) {
352 Loop *L = getLoopForStmt(Stmt);
353 return (Stmt.isBlockStmt() || !Stmt.getRegion()->contains(L)) &&
354 canSynthesize(Inst, *Stmt.getParent(), &SE, L);
355}
356
357void BlockGenerator::copyInstruction(ScopStmt &Stmt, Instruction *Inst,
358 ValueMapT &BBMap, LoopToScevMapT &LTS,
359 isl_id_to_ast_expr *NewAccesses) {
360 // Terminator instructions control the control flow. They are explicitly
361 // expressed in the clast and do not need to be copied.
362 if (Inst->isTerminator())
363 return;
364
365 // Synthesizable statements will be generated on-demand.
366 if (canSyntheziseInStmt(Stmt, Inst))
367 return;
368
369 if (auto *Load = dyn_cast<LoadInst>(Inst)) {
370 Value *NewLoad = generateArrayLoad(Stmt, Load, BBMap, LTS, NewAccesses);
371 // Compute NewLoad before its insertion in BBMap to make the insertion
372 // deterministic.
373 BBMap[Load] = NewLoad;
374 return;
375 }
376
377 if (auto *Store = dyn_cast<StoreInst>(Inst)) {
378 // Identified as redundant by -polly-simplify.
379 if (!Stmt.getArrayAccessOrNULLFor(Store))
380 return;
381
382 generateArrayStore(Stmt, Store, BBMap, LTS, NewAccesses);
383 return;
384 }
385
386 if (auto *PHI = dyn_cast<PHINode>(Inst)) {
387 copyPHIInstruction(Stmt, PHI, BBMap, LTS);
388 return;
389 }
390
391 // Skip some special intrinsics for which we do not adjust the semantics to
392 // the new schedule. All others are handled like every other instruction.
393 if (isIgnoredIntrinsic(Inst))
394 return;
395
396 copyInstScalar(Stmt, Inst, BBMap, LTS);
397}
398
399void BlockGenerator::removeDeadInstructions(BasicBlock *BB, ValueMapT &BBMap) {
400 auto NewBB = Builder.GetInsertBlock();
401 for (auto I = NewBB->rbegin(); I != NewBB->rend(); I++) {
402 Instruction *NewInst = &*I;
403
404 if (!isInstructionTriviallyDead(NewInst))
405 continue;
406
407 for (auto Pair : BBMap)
408 if (Pair.second == NewInst) {
409 BBMap.erase(Pair.first);
410 }
411
412 NewInst->eraseFromParent();
413 I = NewBB->rbegin();
414 }
415}
416
417void BlockGenerator::copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
418 isl_id_to_ast_expr *NewAccesses) {
419 assert(Stmt.isBlockStmt() &&
420 "Only block statements can be copied by the block generator");
421
422 ValueMapT BBMap;
423
424 BasicBlock *BB = Stmt.getBasicBlock();
425 copyBB(Stmt, BB, BBMap, LTS, NewAccesses);
426 removeDeadInstructions(BB, BBMap);
427}
428
429BasicBlock *BlockGenerator::splitBB(BasicBlock *BB) {
430 BasicBlock *CopyBB = SplitBlock(Builder.GetInsertBlock(),
431 &*Builder.GetInsertPoint(), &DT, &LI);
432 CopyBB->setName("polly.stmt." + BB->getName());
433 return CopyBB;
434}
435
436BasicBlock *BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB,
437 ValueMapT &BBMap, LoopToScevMapT &LTS,
438 isl_id_to_ast_expr *NewAccesses) {
439 BasicBlock *CopyBB = splitBB(BB);
440 Builder.SetInsertPoint(&CopyBB->front());
441 generateScalarLoads(Stmt, LTS, BBMap, NewAccesses);
442 generateBeginStmtTrace(Stmt, LTS, BBMap);
443
444 copyBB(Stmt, BB, CopyBB, BBMap, LTS, NewAccesses);
445
446 // After a basic block was copied store all scalars that escape this block in
447 // their alloca.
448 generateScalarStores(Stmt, LTS, BBMap, NewAccesses);
449 return CopyBB;
450}
451
452void BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, BasicBlock *CopyBB,
453 ValueMapT &BBMap, LoopToScevMapT &LTS,
454 isl_id_to_ast_expr *NewAccesses) {
455 EntryBB = &CopyBB->getParent()->getEntryBlock();
456
457 // Block statements and the entry blocks of region statements are code
458 // generated from instruction lists. This allows us to optimize the
459 // instructions that belong to a certain scop statement. As the code
460 // structure of region statements might be arbitrarily complex, optimizing the
461 // instruction list is not yet supported.
462 if (Stmt.isBlockStmt() || (Stmt.isRegionStmt() && Stmt.getEntryBlock() == BB))
463 for (Instruction *Inst : Stmt.getInstructions())
464 copyInstruction(Stmt, Inst, BBMap, LTS, NewAccesses);
465 else
466 for (Instruction &Inst : *BB)
467 copyInstruction(Stmt, &Inst, BBMap, LTS, NewAccesses);
468}
469
470Value *BlockGenerator::getOrCreateAlloca(const MemoryAccess &Access) {
471 assert(!Access.isLatestArrayKind() && "Trying to get alloca for array kind");
472
473 return getOrCreateAlloca(Access.getLatestScopArrayInfo());
474}
475
476Value *BlockGenerator::getOrCreateAlloca(const ScopArrayInfo *Array) {
477 assert(!Array->isArrayKind() && "Trying to get alloca for array kind");
478
479 auto &Addr = ScalarMap[Array];
480
481 if (Addr) {
482 // Allow allocas to be (temporarily) redirected once by adding a new
483 // old-alloca-addr to new-addr mapping to GlobalMap. This functionality
484 // is used for example by the OpenMP code generation where a first use
485 // of a scalar while still in the host code allocates a normal alloca with
486 // getOrCreateAlloca. When the values of this scalar are accessed during
487 // the generation of the parallel subfunction, these values are copied over
488 // to the parallel subfunction and each request for a scalar alloca slot
489 // must be forwarded to the temporary in-subfunction slot. This mapping is
490 // removed when the subfunction has been generated and again normal host
491 // code is generated. Due to the following reasons it is not possible to
492 // perform the GlobalMap lookup right after creating the alloca below, but
493 // instead we need to check GlobalMap at each call to getOrCreateAlloca:
494 //
495 // 1) GlobalMap may be changed multiple times (for each parallel loop),
496 // 2) The temporary mapping is commonly only known after the initial
497 // alloca has already been generated, and
498 // 3) The original alloca value must be restored after leaving the
499 // sub-function.
500 if (Value *NewAddr = GlobalMap.lookup(&*Addr))
501 return NewAddr;
502 return Addr;
503 }
504
505 Type *Ty = Array->getElementType();
506 Value *ScalarBase = Array->getBasePtr();
507 std::string NameExt;
508 if (Array->isPHIKind())
509 NameExt = ".phiops";
510 else
511 NameExt = ".s2a";
512
513 const DataLayout &DL = Builder.GetInsertBlock()->getModule()->getDataLayout();
514
515 Addr =
516 new AllocaInst(Ty, DL.getAllocaAddrSpace(), nullptr,
517 DL.getPrefTypeAlign(Ty), ScalarBase->getName() + NameExt);
518 EntryBB = &Builder.GetInsertBlock()->getParent()->getEntryBlock();
519 Addr->insertBefore(&*EntryBB->getFirstInsertionPt());
520
521 return Addr;
522}
523
524void BlockGenerator::handleOutsideUsers(const Scop &S, ScopArrayInfo *Array) {
525 Instruction *Inst = cast<Instruction>(Array->getBasePtr());
526
527 // If there are escape users we get the alloca for this instruction and put it
528 // in the EscapeMap for later finalization. Lastly, if the instruction was
529 // copied multiple times we already did this and can exit.
530 if (EscapeMap.count(Inst))
531 return;
532
533 EscapeUserVectorTy EscapeUsers;
534 for (User *U : Inst->users()) {
535
536 // Non-instruction user will never escape.
537 Instruction *UI = dyn_cast<Instruction>(U);
538 if (!UI)
539 continue;
540
541 if (S.contains(UI))
542 continue;
543
544 EscapeUsers.push_back(UI);
545 }
546
547 // Exit if no escape uses were found.
548 if (EscapeUsers.empty())
549 return;
550
551 // Get or create an escape alloca for this instruction.
552 auto *ScalarAddr = getOrCreateAlloca(Array);
553
554 // Remember that this instruction has escape uses and the escape alloca.
555 EscapeMap[Inst] = std::make_pair(ScalarAddr, std::move(EscapeUsers));
556}
557
558void BlockGenerator::generateScalarLoads(
559 ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
560 __isl_keep isl_id_to_ast_expr *NewAccesses) {
561 for (MemoryAccess *MA : Stmt) {
562 if (MA->isOriginalArrayKind() || MA->isWrite())
563 continue;
564
565#ifndef NDEBUG
566 auto StmtDom =
567 Stmt.getDomain().intersect_params(Stmt.getParent()->getContext());
568 auto AccDom = MA->getAccessRelation().domain();
569 assert(!StmtDom.is_subset(AccDom).is_false() &&
570 "Scalar must be loaded in all statement instances");
571#endif
572
573 auto *Address =
574 getImplicitAddress(*MA, getLoopForStmt(Stmt), LTS, BBMap, NewAccesses);
575 assert((!isa<Instruction>(Address) ||
576 DT.dominates(cast<Instruction>(Address)->getParent(),
577 Builder.GetInsertBlock())) &&
578 "Domination violation");
579 BBMap[MA->getAccessValue()] = Builder.CreateLoad(
580 MA->getElementType(), Address, Address->getName() + ".reload");
581 }
582}
583
584Value *BlockGenerator::buildContainsCondition(ScopStmt &Stmt,
585 const isl::set &Subdomain) {
586 isl::ast_build AstBuild = Stmt.getAstBuild();
587 isl::set Domain = Stmt.getDomain();
588
589 isl::union_map USchedule = AstBuild.get_schedule();
590 USchedule = USchedule.intersect_domain(Domain);
591
592 assert(!USchedule.is_empty());
593 isl::map Schedule = isl::map::from_union_map(USchedule);
594
595 isl::set ScheduledDomain = Schedule.range();
596 isl::set ScheduledSet = Subdomain.apply(Schedule);
597
598 isl::ast_build RestrictedBuild = AstBuild.restrict(ScheduledDomain);
599
600 isl::ast_expr IsInSet = RestrictedBuild.expr_from(ScheduledSet);
601 Value *IsInSetExpr = ExprBuilder->create(IsInSet.copy());
602 IsInSetExpr = Builder.CreateICmpNE(
603 IsInSetExpr, ConstantInt::get(IsInSetExpr->getType(), 0));
604
605 return IsInSetExpr;
606}
607
608void BlockGenerator::generateConditionalExecution(
609 ScopStmt &Stmt, const isl::set &Subdomain, StringRef Subject,
610 const std::function<void()> &GenThenFunc) {
611 isl::set StmtDom = Stmt.getDomain();
612
613 // If the condition is a tautology, don't generate a condition around the
614 // code.
615 bool IsPartialWrite =
616 !StmtDom.intersect_params(Stmt.getParent()->getContext())
617 .is_subset(Subdomain);
618 if (!IsPartialWrite) {
619 GenThenFunc();
620 return;
621 }
622
623 // Generate the condition.
624 Value *Cond = buildContainsCondition(Stmt, Subdomain);
625
626 // Don't call GenThenFunc if it is never executed. An ast index expression
627 // might not be defined in this case.
628 if (auto *Const = dyn_cast<ConstantInt>(Cond))
629 if (Const->isZero())
630 return;
631
632 BasicBlock *HeadBlock = Builder.GetInsertBlock();
633 StringRef BlockName = HeadBlock->getName();
634
635 // Generate the conditional block.
636 SplitBlockAndInsertIfThen(Cond, &*Builder.GetInsertPoint(), false, nullptr,
637 &DT, &LI);
638 BranchInst *Branch = cast<BranchInst>(HeadBlock->getTerminator());
639 BasicBlock *ThenBlock = Branch->getSuccessor(0);
640 BasicBlock *TailBlock = Branch->getSuccessor(1);
641
642 // Assign descriptive names.
643 if (auto *CondInst = dyn_cast<Instruction>(Cond))
644 CondInst->setName("polly." + Subject + ".cond");
645 ThenBlock->setName(BlockName + "." + Subject + ".partial");
646 TailBlock->setName(BlockName + ".cont");
647
648 // Put the client code into the conditional block and continue in the merge
649 // block afterwards.
650 Builder.SetInsertPoint(ThenBlock, ThenBlock->getFirstInsertionPt());
651 GenThenFunc();
652 Builder.SetInsertPoint(TailBlock, TailBlock->getFirstInsertionPt());
653}
654
655static std::string getInstName(Value *Val) {
656 std::string Result;
657 raw_string_ostream OS(Result);
658 Val->printAsOperand(OS, false);
659 return OS.str();
660}
661
662void BlockGenerator::generateBeginStmtTrace(ScopStmt &Stmt, LoopToScevMapT &LTS,
663 ValueMapT &BBMap) {
664 if (!TraceStmts)
665 return;
666
667 Scop *S = Stmt.getParent();
668 const char *BaseName = Stmt.getBaseName();
669
670 isl::ast_build AstBuild = Stmt.getAstBuild();
671 isl::set Domain = Stmt.getDomain();
672
673 isl::union_map USchedule = AstBuild.get_schedule().intersect_domain(Domain);
674 isl::map Schedule = isl::map::from_union_map(USchedule);
675 assert(Schedule.is_empty().is_false() &&
676 "The stmt must have a valid instance");
677
678 isl::multi_pw_aff ScheduleMultiPwAff =
679 isl::pw_multi_aff::from_map(Schedule.reverse());
680 isl::ast_build RestrictedBuild = AstBuild.restrict(Schedule.range());
681
682 // Sequence of strings to print.
683 SmallVector<llvm::Value *, 8> Values;
684
685 // Print the name of the statement.
686 // TODO: Indent by the depth of the statement instance in the schedule tree.
687 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, BaseName));
688 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, "("));
689
690 // Add the coordinate of the statement instance.
691 int DomDims = ScheduleMultiPwAff.dim(isl::dim::out).release();
692 for (int i = 0; i < DomDims; i += 1) {
693 if (i > 0)
694 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, ","));
695
696 isl::ast_expr IsInSet = RestrictedBuild.expr_from(ScheduleMultiPwAff.at(i));
697 Values.push_back(ExprBuilder->create(IsInSet.copy()));
698 }
699
700 if (TraceScalars) {
701 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, ")"));
702 DenseSet<Instruction *> Encountered;
703
704 // Add the value of each scalar (and the result of PHIs) used in the
705 // statement.
706 // TODO: Values used in region-statements.
707 for (Instruction *Inst : Stmt.insts()) {
708 if (!RuntimeDebugBuilder::isPrintable(Inst->getType()))
709 continue;
710
711 if (isa<PHINode>(Inst)) {
712 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, " "));
713 Values.push_back(RuntimeDebugBuilder::getPrintableString(
714 Builder, getInstName(Inst)));
715 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, "="));
716 Values.push_back(getNewValue(Stmt, Inst, BBMap, LTS,
717 LI.getLoopFor(Inst->getParent())));
718 } else {
719 for (Value *Op : Inst->operand_values()) {
720 // Do not print values that cannot change during the execution of the
721 // SCoP.
722 auto *OpInst = dyn_cast<Instruction>(Op);
723 if (!OpInst)
724 continue;
725 if (!S->contains(OpInst))
726 continue;
727
728 // Print each scalar at most once, and exclude values defined in the
729 // statement itself.
730 if (Encountered.count(OpInst))
731 continue;
732
733 Values.push_back(
734 RuntimeDebugBuilder::getPrintableString(Builder, " "));
735 Values.push_back(RuntimeDebugBuilder::getPrintableString(
736 Builder, getInstName(OpInst)));
737 Values.push_back(
738 RuntimeDebugBuilder::getPrintableString(Builder, "="));
739 Values.push_back(getNewValue(Stmt, OpInst, BBMap, LTS,
740 LI.getLoopFor(Inst->getParent())));
741 Encountered.insert(OpInst);
742 }
743 }
744
745 Encountered.insert(Inst);
746 }
747
748 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, "\n"));
749 } else {
750 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, ")\n"));
751 }
752
753 RuntimeDebugBuilder::createCPUPrinter(Builder, ArrayRef<Value *>(Values));
754}
755
756void BlockGenerator::generateScalarStores(
757 ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
758 __isl_keep isl_id_to_ast_expr *NewAccesses) {
759 Loop *L = LI.getLoopFor(Stmt.getBasicBlock());
760
761 assert(Stmt.isBlockStmt() &&
762 "Region statements need to use the generateScalarStores() function in "
763 "the RegionGenerator");
764
765 for (MemoryAccess *MA : Stmt) {
766 if (MA->isOriginalArrayKind() || MA->isRead())
767 continue;
768
769 isl::set AccDom = MA->getAccessRelation().domain();
770 std::string Subject = MA->getId().get_name();
771
772 generateConditionalExecution(
773 Stmt, AccDom, Subject.c_str(), [&, this, MA]() {
774 Value *Val = MA->getAccessValue();
775 if (MA->isAnyPHIKind()) {
776 assert(MA->getIncoming().size() >= 1 &&
777 "Block statements have exactly one exiting block, or "
778 "multiple but "
779 "with same incoming block and value");
780 assert(std::all_of(MA->getIncoming().begin(),
781 MA->getIncoming().end(),
782 [&](std::pair<BasicBlock *, Value *> p) -> bool {
783 return p.first == Stmt.getBasicBlock();
784 }) &&
785 "Incoming block must be statement's block");
786 Val = MA->getIncoming()[0].second;
787 }
788 auto Address = getImplicitAddress(*MA, getLoopForStmt(Stmt), LTS,
789 BBMap, NewAccesses);
790
791 Val = getNewValue(Stmt, Val, BBMap, LTS, L);
792 assert((!isa<Instruction>(Val) ||
793 DT.dominates(cast<Instruction>(Val)->getParent(),
794 Builder.GetInsertBlock())) &&
795 "Domination violation");
796 assert((!isa<Instruction>(Address) ||
797 DT.dominates(cast<Instruction>(Address)->getParent(),
798 Builder.GetInsertBlock())) &&
799 "Domination violation");
800
801 // The new Val might have a different type than the old Val due to
802 // ScalarEvolution looking through bitcasts.
803 Address = Builder.CreateBitOrPointerCast(
804 Address, Val->getType()->getPointerTo(
805 Address->getType()->getPointerAddressSpace()));
806
807 Builder.CreateStore(Val, Address);
808 });
809 }
810}
811
812void BlockGenerator::createScalarInitialization(Scop &S) {
813 BasicBlock *ExitBB = S.getExit();
814 BasicBlock *PreEntryBB = S.getEnteringBlock();
815
816 Builder.SetInsertPoint(&*StartBlock->begin());
817
818 for (auto &Array : S.arrays()) {
819 if (Array->getNumberOfDimensions() != 0)
820 continue;
821 if (Array->isPHIKind()) {
822 // For PHI nodes, the only values we need to store are the ones that
823 // reach the PHI node from outside the region. In general there should
824 // only be one such incoming edge and this edge should enter through
825 // 'PreEntryBB'.
826 auto PHI = cast<PHINode>(Array->getBasePtr());
827
828 for (auto BI = PHI->block_begin(), BE = PHI->block_end(); BI != BE; BI++)
829 if (!S.contains(*BI) && *BI != PreEntryBB)
830 llvm_unreachable("Incoming edges from outside the scop should always "
831 "come from PreEntryBB");
832
833 int Idx = PHI->getBasicBlockIndex(PreEntryBB);
834 if (Idx < 0)
835 continue;
836
837 Value *ScalarValue = PHI->getIncomingValue(Idx);
838
839 Builder.CreateStore(ScalarValue, getOrCreateAlloca(Array));
840 continue;
841 }
842
843 auto *Inst = dyn_cast<Instruction>(Array->getBasePtr());
844
845 if (Inst && S.contains(Inst))
846 continue;
847
848 // PHI nodes that are not marked as such in their SAI object are either exit
849 // PHI nodes we model as common scalars but without initialization, or
850 // incoming phi nodes that need to be initialized. Check if the first is the
851 // case for Inst and do not create and initialize memory if so.
852 if (auto *PHI = dyn_cast_or_null<PHINode>(Inst))
853 if (!S.hasSingleExitEdge() && PHI->getBasicBlockIndex(ExitBB) >= 0)
854 continue;
855
856 Builder.CreateStore(Array->getBasePtr(), getOrCreateAlloca(Array));
857 }
858}
859
860void BlockGenerator::createScalarFinalization(Scop &S) {
861 // The exit block of the __unoptimized__ region.
862 BasicBlock *ExitBB = S.getExitingBlock();
863 // The merge block __just after__ the region and the optimized region.
864 BasicBlock *MergeBB = S.getExit();
865
866 // The exit block of the __optimized__ region.
867 BasicBlock *OptExitBB = *(pred_begin(MergeBB));
868 if (OptExitBB == ExitBB)
869 OptExitBB = *(++pred_begin(MergeBB));
870
871 Builder.SetInsertPoint(OptExitBB->getTerminator());
872 for (const auto &EscapeMapping : EscapeMap) {
873 // Extract the escaping instruction and the escaping users as well as the
874 // alloca the instruction was demoted to.
875 Instruction *EscapeInst = EscapeMapping.first;
876 const auto &EscapeMappingValue = EscapeMapping.second;
877 const EscapeUserVectorTy &EscapeUsers = EscapeMappingValue.second;
878 auto *ScalarAddr = cast<AllocaInst>(&*EscapeMappingValue.first);
879
880 // Reload the demoted instruction in the optimized version of the SCoP.
881 Value *EscapeInstReload =
882 Builder.CreateLoad(ScalarAddr->getAllocatedType(), ScalarAddr,
883 EscapeInst->getName() + ".final_reload");
884 EscapeInstReload =
885 Builder.CreateBitOrPointerCast(EscapeInstReload, EscapeInst->getType());
886
887 // Create the merge PHI that merges the optimized and unoptimized version.
888 PHINode *MergePHI = PHINode::Create(EscapeInst->getType(), 2,
889 EscapeInst->getName() + ".merge");
890 MergePHI->insertBefore(&*MergeBB->getFirstInsertionPt());
891
892 // Add the respective values to the merge PHI.
893 MergePHI->addIncoming(EscapeInstReload, OptExitBB);
894 MergePHI->addIncoming(EscapeInst, ExitBB);
895
896 // The information of scalar evolution about the escaping instruction needs
897 // to be revoked so the new merged instruction will be used.
898 if (SE.isSCEVable(EscapeInst->getType()))
899 SE.forgetValue(EscapeInst);
900
901 // Replace all uses of the demoted instruction with the merge PHI.
902 for (Instruction *EUser : EscapeUsers)
903 EUser->replaceUsesOfWith(EscapeInst, MergePHI);
904 }
905}
906
907void BlockGenerator::findOutsideUsers(Scop &S) {
908 for (auto &Array : S.arrays()) {
909
910 if (Array->getNumberOfDimensions() != 0)
911 continue;
912
913 if (Array->isPHIKind())
914 continue;
915
916 auto *Inst = dyn_cast<Instruction>(Array->getBasePtr());
917
918 if (!Inst)
919 continue;
920
921 // Scop invariant hoisting moves some of the base pointers out of the scop.
922 // We can ignore these, as the invariant load hoisting already registers the
923 // relevant outside users.
924 if (!S.contains(Inst))
925 continue;
926
927 handleOutsideUsers(S, Array);
928 }
929}
930
931void BlockGenerator::createExitPHINodeMerges(Scop &S) {
932 if (S.hasSingleExitEdge())
933 return;
934
935 auto *ExitBB = S.getExitingBlock();
936 auto *MergeBB = S.getExit();
937 auto *AfterMergeBB = MergeBB->getSingleSuccessor();
938 BasicBlock *OptExitBB = *(pred_begin(MergeBB));
939 if (OptExitBB == ExitBB)
940 OptExitBB = *(++pred_begin(MergeBB));
941
942 Builder.SetInsertPoint(OptExitBB->getTerminator());
943
944 for (auto &SAI : S.arrays()) {
945 auto *Val = SAI->getBasePtr();
946
947 // Only Value-like scalars need a merge PHI. Exit block PHIs receive either
948 // the original PHI's value or the reloaded incoming values from the
949 // generated code. An llvm::Value is merged between the original code's
950 // value or the generated one.
951 if (!SAI->isExitPHIKind())
952 continue;
953
954 PHINode *PHI = dyn_cast<PHINode>(Val);
955 if (!PHI)
956 continue;
957
958 if (PHI->getParent() != AfterMergeBB)
959 continue;
960
961 std::string Name = PHI->getName().str();
962 Value *ScalarAddr = getOrCreateAlloca(SAI);
963 Value *Reload = Builder.CreateLoad(SAI->getElementType(), ScalarAddr,
964 Name + ".ph.final_reload");
965 Reload = Builder.CreateBitOrPointerCast(Reload, PHI->getType());
966 Value *OriginalValue = PHI->getIncomingValueForBlock(MergeBB);
967 assert((!isa<Instruction>(OriginalValue) ||
968 cast<Instruction>(OriginalValue)->getParent() != MergeBB) &&
969 "Original value must no be one we just generated.");
970 auto *MergePHI = PHINode::Create(PHI->getType(), 2, Name + ".ph.merge");
971 MergePHI->insertBefore(&*MergeBB->getFirstInsertionPt());
972 MergePHI->addIncoming(Reload, OptExitBB);
973 MergePHI->addIncoming(OriginalValue, ExitBB);
974 int Idx = PHI->getBasicBlockIndex(MergeBB);
975 PHI->setIncomingValue(Idx, MergePHI);
976 }
977}
978
979void BlockGenerator::invalidateScalarEvolution(Scop &S) {
980 for (auto &Stmt : S)
981 if (Stmt.isCopyStmt())
982 continue;
983 else if (Stmt.isBlockStmt())
984 for (auto &Inst : *Stmt.getBasicBlock())
985 SE.forgetValue(&Inst);
986 else if (Stmt.isRegionStmt())
987 for (auto *BB : Stmt.getRegion()->blocks())
988 for (auto &Inst : *BB)
989 SE.forgetValue(&Inst);
990 else
991 llvm_unreachable("Unexpected statement type found");
992
993 // Invalidate SCEV of loops surrounding the EscapeUsers.
994 for (const auto &EscapeMapping : EscapeMap) {
995 const EscapeUserVectorTy &EscapeUsers = EscapeMapping.second.second;
996 for (Instruction *EUser : EscapeUsers) {
997 if (Loop *L = LI.getLoopFor(EUser->getParent()))
998 while (L) {
999 SE.forgetLoop(L);
1000 L = L->getParentLoop();
1001 }
1002 }
1003 }
1004}
1005
1006void BlockGenerator::finalizeSCoP(Scop &S) {
1007 findOutsideUsers(S);
1008 createScalarInitialization(S);
1009 createExitPHINodeMerges(S);
1010 createScalarFinalization(S);
1011 invalidateScalarEvolution(S);
1012}
1013
1014VectorBlockGenerator::VectorBlockGenerator(BlockGenerator &BlockGen,
1015 std::vector<LoopToScevMapT> &VLTS,
1016 isl_map *Schedule)
1017 : BlockGenerator(BlockGen), VLTS(VLTS), Schedule(Schedule) {
1018 assert(Schedule && "No statement domain provided");
1019}
1020
1021Value *VectorBlockGenerator::getVectorValue(ScopStmt &Stmt, Value *Old,
1022 ValueMapT &VectorMap,
1023 VectorValueMapT &ScalarMaps,
1024 Loop *L) {
1025 if (Value *NewValue = VectorMap.lookup(Old))
1026 return NewValue;
1027
1028 int Width = getVectorWidth();
1029
1030 Value *Vector = UndefValue::get(FixedVectorType::get(Old->getType(), Width));
1031
1032 for (int Lane = 0; Lane < Width; Lane++)
1033 Vector = Builder.CreateInsertElement(
1034 Vector, getNewValue(Stmt, Old, ScalarMaps[Lane], VLTS[Lane], L),
1035 Builder.getInt32(Lane));
1036
1037 VectorMap[Old] = Vector;
1038
1039 return Vector;
1040}
1041
1042Value *VectorBlockGenerator::generateStrideOneLoad(
1043 ScopStmt &Stmt, LoadInst *Load, VectorValueMapT &ScalarMaps,
1044 __isl_keep isl_id_to_ast_expr *NewAccesses, bool NegativeStride = false) {
1045 unsigned VectorWidth = getVectorWidth();
1046 Type *VectorType = FixedVectorType::get(Load->getType(), VectorWidth);
1047 Type *VectorPtrType =
1048 PointerType::get(VectorType, Load->getPointerAddressSpace());
1049 unsigned Offset = NegativeStride ? VectorWidth - 1 : 0;
1050
1051 Value *NewPointer = generateLocationAccessed(Stmt, Load, ScalarMaps[Offset],
1052 VLTS[Offset], NewAccesses);
1053 Value *VectorPtr =
1054 Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
1055 LoadInst *VecLoad = Builder.CreateLoad(VectorType, VectorPtr,
1056 Load->getName() + "_p_vec_full");
1057 if (!Aligned)
1058 VecLoad->setAlignment(Align(8));
1059
1060 if (NegativeStride) {
1061 SmallVector<Constant *, 16> Indices;
1062 for (int i = VectorWidth - 1; i >= 0; i--)
1063 Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i));
1064 Constant *SV = llvm::ConstantVector::get(Indices);
1065 Value *RevVecLoad = Builder.CreateShuffleVector(
1066 VecLoad, VecLoad, SV, Load->getName() + "_reverse");
1067 return RevVecLoad;
1068 }
1069
1070 return VecLoad;
1071}
1072
1073Value *VectorBlockGenerator::generateStrideZeroLoad(
1074 ScopStmt &Stmt, LoadInst *Load, ValueMapT &BBMap,
1075 __isl_keep isl_id_to_ast_expr *NewAccesses) {
1076 Type *VectorType = FixedVectorType::get(Load->getType(), 1);
1077 Type *VectorPtrType =
1078 PointerType::get(VectorType, Load->getPointerAddressSpace());
1079 Value *NewPointer =
1080 generateLocationAccessed(Stmt, Load, BBMap, VLTS[0], NewAccesses);
1081 Value *VectorPtr = Builder.CreateBitCast(NewPointer, VectorPtrType,
1082 Load->getName() + "_p_vec_p");
1083 LoadInst *ScalarLoad = Builder.CreateLoad(VectorType, VectorPtr,
1084 Load->getName() + "_p_splat_one");
1085
1086 if (!Aligned)
1087 ScalarLoad->setAlignment(Align(8));
1088
1089 Constant *SplatVector = Constant::getNullValue(
1090 FixedVectorType::get(Builder.getInt32Ty(), getVectorWidth()));
1091
1092 Value *VectorLoad = Builder.CreateShuffleVector(
1093 ScalarLoad, ScalarLoad, SplatVector, Load->getName() + "_p_splat");
1094 return VectorLoad;
1095}
1096
1097Value *VectorBlockGenerator::generateUnknownStrideLoad(
1098 ScopStmt &Stmt, LoadInst *Load, VectorValueMapT &ScalarMaps,
1099 __isl_keep isl_id_to_ast_expr *NewAccesses) {
1100 int VectorWidth = getVectorWidth();
1101 Type *ElemTy = Load->getType();
1102 auto *FVTy = FixedVectorType::get(ElemTy, VectorWidth);
1103
1104 Value *Vector = UndefValue::get(FVTy);
1105
1106 for (int i = 0; i < VectorWidth; i++) {
1107 Value *NewPointer = generateLocationAccessed(Stmt, Load, ScalarMaps[i],
1108 VLTS[i], NewAccesses);
1109 Value *ScalarLoad =
1110 Builder.CreateLoad(ElemTy, NewPointer, Load->getName() + "_p_scalar_");
1111 Vector = Builder.CreateInsertElement(
1112 Vector, ScalarLoad, Builder.getInt32(i), Load->getName() + "_p_vec_");
1113 }
1114
1115 return Vector;
1116}
1117
1118void VectorBlockGenerator::generateLoad(
1119 ScopStmt &Stmt, LoadInst *Load, ValueMapT &VectorMap,
1120 VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
1121 if (Value *PreloadLoad = GlobalMap.lookup(Load)) {
1122 VectorMap[Load] = Builder.CreateVectorSplat(getVectorWidth(), PreloadLoad,
1123 Load->getName() + "_p");
1124 return;
1125 }
1126
1127 if (!VectorType::isValidElementType(Load->getType())) {
1128 for (int i = 0; i < getVectorWidth(); i++)
1129 ScalarMaps[i][Load] =
1130 generateArrayLoad(Stmt, Load, ScalarMaps[i], VLTS[i], NewAccesses);
1131 return;
1132 }
1133
1134 const MemoryAccess &Access = Stmt.getArrayAccessFor(Load);
1135
1136 // Make sure we have scalar values available to access the pointer to
1137 // the data location.
1138 extractScalarValues(Load, VectorMap, ScalarMaps);
1139
1140 Value *NewLoad;
1141 if (Access.isStrideZero(isl::manage_copy(Schedule)))
1142 NewLoad = generateStrideZeroLoad(Stmt, Load, ScalarMaps[0], NewAccesses);
1143 else if (Access.isStrideOne(isl::manage_copy(Schedule)))
1144 NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, NewAccesses);
1145 else if (Access.isStrideX(isl::manage_copy(Schedule), -1))
1146 NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, NewAccesses, true);
1147 else
1148 NewLoad = generateUnknownStrideLoad(Stmt, Load, ScalarMaps, NewAccesses);
1149
1150 VectorMap[Load] = NewLoad;
1151}
1152
1153void VectorBlockGenerator::copyUnaryInst(ScopStmt &Stmt, UnaryInstruction *Inst,
1154 ValueMapT &VectorMap,
1155 VectorValueMapT &ScalarMaps) {
1156 int VectorWidth = getVectorWidth();
1157 Value *NewOperand = getVectorValue(Stmt, Inst->getOperand(0), VectorMap,
1158 ScalarMaps, getLoopForStmt(Stmt));
1159
1160 assert(isa<CastInst>(Inst) && "Can not generate vector code for instruction");
1161
1162 const CastInst *Cast = dyn_cast<CastInst>(Inst);
  12. Assuming 'Inst' is not a 'CastInst'
  13. 'Cast' initialized to a null pointer value
1163 auto *DestType = FixedVectorType::get(Inst->getType(), VectorWidth);
1164 VectorMap[Inst] = Builder.CreateCast(Cast->getOpcode(), NewOperand, DestType);
  14. Called C++ object pointer is null
1165}
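One way to address this report, shown here as a hedged sketch rather than the maintainers' actual fix: replace the dyn_cast<> that follows the assert with cast<>, which checks the type itself and never returns null, so the invariant no longer depends on an assert that vanishes under NDEBUG.

    // Illustrative rewrite of copyUnaryInst (a sketch, not necessarily the upstream patch).
    void VectorBlockGenerator::copyUnaryInst(ScopStmt &Stmt, UnaryInstruction *Inst,
                                             ValueMapT &VectorMap,
                                             VectorValueMapT &ScalarMaps) {
      int VectorWidth = getVectorWidth();
      Value *NewOperand = getVectorValue(Stmt, Inst->getOperand(0), VectorMap,
                                         ScalarMaps, getLoopForStmt(Stmt));

      auto *Cast = cast<CastInst>(Inst); // cast<> asserts the type; no null path remains
      auto *DestType = FixedVectorType::get(Inst->getType(), VectorWidth);
      VectorMap[Inst] = Builder.CreateCast(Cast->getOpcode(), NewOperand, DestType);
    }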
1166
1167void VectorBlockGenerator::copyBinaryInst(ScopStmt &Stmt, BinaryOperator *Inst,
1168 ValueMapT &VectorMap,
1169 VectorValueMapT &ScalarMaps) {
1170 Loop *L = getLoopForStmt(Stmt);
1171 Value *OpZero = Inst->getOperand(0);
1172 Value *OpOne = Inst->getOperand(1);
1173
1174 Value *NewOpZero, *NewOpOne;
1175 NewOpZero = getVectorValue(Stmt, OpZero, VectorMap, ScalarMaps, L);
1176 NewOpOne = getVectorValue(Stmt, OpOne, VectorMap, ScalarMaps, L);
1177
1178 Value *NewInst = Builder.CreateBinOp(Inst->getOpcode(), NewOpZero, NewOpOne,
1179 Inst->getName() + "p_vec");
1180 VectorMap[Inst] = NewInst;
1181}
1182
1183void VectorBlockGenerator::copyStore(
1184 ScopStmt &Stmt, StoreInst *Store, ValueMapT &VectorMap,
1185 VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
1186 const MemoryAccess &Access = Stmt.getArrayAccessFor(Store);
1187
1188 Value *Vector = getVectorValue(Stmt, Store->getValueOperand(), VectorMap,
1189 ScalarMaps, getLoopForStmt(Stmt));
1190
1191 // Make sure we have scalar values available to access the pointer to
1192 // the data location.
1193 extractScalarValues(Store, VectorMap, ScalarMaps);
1194
1195 if (Access.isStrideOne(isl::manage_copy(Schedule))) {
1196 Type *VectorType = FixedVectorType::get(Store->getValueOperand()->getType(),
1197 getVectorWidth());
1198 Type *VectorPtrType =
1199 PointerType::get(VectorType, Store->getPointerAddressSpace());
1200 Value *NewPointer = generateLocationAccessed(Stmt, Store, ScalarMaps[0],
1201 VLTS[0], NewAccesses);
1202
1203 Value *VectorPtr =
1204 Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
1205 StoreInst *Store = Builder.CreateStore(Vector, VectorPtr);
1206
1207 if (!Aligned)
1208 Store->setAlignment(Align(8));
1209 } else {
1210 for (unsigned i = 0; i < ScalarMaps.size(); i++) {
1211 Value *Scalar = Builder.CreateExtractElement(Vector, Builder.getInt32(i));
1212 Value *NewPointer = generateLocationAccessed(Stmt, Store, ScalarMaps[i],
1213 VLTS[i], NewAccesses);
1214 Builder.CreateStore(Scalar, NewPointer);
1215 }
1216 }
1217}
1218
1219bool VectorBlockGenerator::hasVectorOperands(const Instruction *Inst,
1220 ValueMapT &VectorMap) {
1221 for (Value *Operand : Inst->operands())
1222 if (VectorMap.count(Operand))
1223 return true;
1224 return false;
1225}
1226
1227bool VectorBlockGenerator::extractScalarValues(const Instruction *Inst,
1228 ValueMapT &VectorMap,
1229 VectorValueMapT &ScalarMaps) {
1230 bool HasVectorOperand = false;
1231 int VectorWidth = getVectorWidth();
1232
1233 for (Value *Operand : Inst->operands()) {
1234 ValueMapT::iterator VecOp = VectorMap.find(Operand);
1235
1236 if (VecOp == VectorMap.end())
1237 continue;
1238
1239 HasVectorOperand = true;
1240 Value *NewVector = VecOp->second;
1241
1242 for (int i = 0; i < VectorWidth; ++i) {
1243 ValueMapT &SM = ScalarMaps[i];
1244
1245 // If there is one scalar extracted, all scalar elements should have
1246 // already been extracted by the code here. So no need to check for the
1247 // existence of all of them.
1248 if (SM.count(Operand))
1249 break;
1250
1251 SM[Operand] =
1252 Builder.CreateExtractElement(NewVector, Builder.getInt32(i));
1253 }
1254 }
1255
1256 return HasVectorOperand;
1257}
1258
1259void VectorBlockGenerator::copyInstScalarized(
1260 ScopStmt &Stmt, Instruction *Inst, ValueMapT &VectorMap,
1261 VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
1262 bool HasVectorOperand;
1263 int VectorWidth = getVectorWidth();
1264
1265 HasVectorOperand = extractScalarValues(Inst, VectorMap, ScalarMaps);
1266
1267 for (int VectorLane = 0; VectorLane < getVectorWidth(); VectorLane++)
1268 BlockGenerator::copyInstruction(Stmt, Inst, ScalarMaps[VectorLane],
1269 VLTS[VectorLane], NewAccesses);
1270
1271 if (!VectorType::isValidElementType(Inst->getType()) || !HasVectorOperand)
1272 return;
1273
1274 // Make the result available as vector value.
1275 auto *FVTy = FixedVectorType::get(Inst->getType(), VectorWidth);
1276 Value *Vector = UndefValue::get(FVTy);
1277
1278 for (int i = 0; i < VectorWidth; i++)
1279 Vector = Builder.CreateInsertElement(Vector, ScalarMaps[i][Inst],
1280 Builder.getInt32(i));
1281
1282 VectorMap[Inst] = Vector;
1283}
1284
1285int VectorBlockGenerator::getVectorWidth() { return VLTS.size(); }
1286
1287void VectorBlockGenerator::copyInstruction(
1288 ScopStmt &Stmt, Instruction *Inst, ValueMapT &VectorMap,
1289 VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
1290 // Terminator instructions control the control flow. They are explicitly
1291 // expressed in the clast and do not need to be copied.
1292 if (Inst->isTerminator())
  2. Taking false branch
1293 return;
1294
1295 if (canSyntheziseInStmt(Stmt, Inst))
  3. Taking false branch
1296 return;
1297
1298 if (auto *Load = dyn_cast<LoadInst>(Inst)) {
  4. Assuming 'Inst' is not a 'LoadInst'
  4.1. 'Load' is null
  5. Taking false branch
1299 generateLoad(Stmt, Load, VectorMap, ScalarMaps, NewAccesses);
1300 return;
1301 }
1302
1303 if (hasVectorOperands(Inst, VectorMap)) {
  6. Taking true branch
1304 if (auto *Store = dyn_cast<StoreInst>(Inst)) {
  7. Assuming 'Inst' is not a 'StoreInst'
  7.1. 'Store' is null
  8. Taking false branch
1305 // Identified as redundant by -polly-simplify.
1306 if (!Stmt.getArrayAccessOrNULLFor(Store))
1307 return;
1308
1309 copyStore(Stmt, Store, VectorMap, ScalarMaps, NewAccesses);
1310 return;
1311 }
1312
1313 if (auto *Unary = dyn_cast<UnaryInstruction>(Inst)) {
  9. Assuming 'Inst' is a 'UnaryInstruction'
  9.1. 'Unary' is non-null
  10. Taking true branch
1314 copyUnaryInst(Stmt, Unary, VectorMap, ScalarMaps);
  11. Calling 'VectorBlockGenerator::copyUnaryInst'
1315 return;
1316 }
1317
1318 if (auto *Binary = dyn_cast<BinaryOperator>(Inst)) {
1319 copyBinaryInst(Stmt, Binary, VectorMap, ScalarMaps);
1320 return;
1321 }
1322
1323 // Fallthrough: We generate scalar instructions, if we don't know how to
1324 // generate vector code.
1325 }
1326
1327 copyInstScalarized(Stmt, Inst, VectorMap, ScalarMaps, NewAccesses);
1328}
1329
1330void VectorBlockGenerator::generateScalarVectorLoads(
1331 ScopStmt &Stmt, ValueMapT &VectorBlockMap) {
1332 for (MemoryAccess *MA : Stmt) {
1333 if (MA->isArrayKind() || MA->isWrite())
1334 continue;
1335
1336 auto *Address = getOrCreateAlloca(*MA);
1337 Type *VectorType = FixedVectorType::get(MA->getElementType(), 1);
1338 Type *VectorPtrType = PointerType::get(
1339 VectorType, Address->getType()->getPointerAddressSpace());
1340 Value *VectorPtr = Builder.CreateBitCast(Address, VectorPtrType,
1341 Address->getName() + "_p_vec_p");
1342 auto *Val = Builder.CreateLoad(VectorType, VectorPtr,
1343 Address->getName() + ".reload");
1344 Constant *SplatVector = Constant::getNullValue(
1345 FixedVectorType::get(Builder.getInt32Ty(), getVectorWidth()));
1346
1347 Value *VectorVal = Builder.CreateShuffleVector(
1348 Val, Val, SplatVector, Address->getName() + "_p_splat");
1349 VectorBlockMap[MA->getAccessValue()] = VectorVal;
1350 }
1351}
1352
1353void VectorBlockGenerator::verifyNoScalarStores(ScopStmt &Stmt) {
1354 for (MemoryAccess *MA : Stmt) {
1355 if (MA->isArrayKind() || MA->isRead())
1356 continue;
1357
1358 llvm_unreachable("Scalar stores not expected in vector loop");
1359 }
1360}
1361
1362void VectorBlockGenerator::copyStmt(
1363 ScopStmt &Stmt, __isl_keep isl_id_to_ast_expr *NewAccesses) {
1364 assert(Stmt.isBlockStmt() &&
1365 "TODO: Only block statements can be copied by the vector block "
1366 "generator");
1367
1368 BasicBlock *BB = Stmt.getBasicBlock();
1369 BasicBlock *CopyBB = SplitBlock(Builder.GetInsertBlock(),
1370 &*Builder.GetInsertPoint(), &DT, &LI);
1371 CopyBB->setName("polly.stmt." + BB->getName());
1372 Builder.SetInsertPoint(&CopyBB->front());
1373
1374 // Create two maps that store the mapping from the original instructions of
1375 // the old basic block to their copies in the new basic block. Those maps
1376 // are basic block local.
1377 //
1378 // As vector code generation is supported there is one map for scalar values
1379 // and one for vector values.
1380 //
1381 // In case we just do scalar code generation, the vectorMap is not used and
1382 // the scalarMap has just one dimension, which contains the mapping.
1383 //
1384 // In case vector code generation is done, an instruction may either appear
1385 // once in the vector map (as it computes >vectorwidth< values at a time),
1386 // or, if the values are calculated using scalar operations, it appears once
1387 // in every dimension of the scalarMap. (See the sketch after this function.)
1388 VectorValueMapT ScalarBlockMap(getVectorWidth());
1389 ValueMapT VectorBlockMap;
1390
1391 generateScalarVectorLoads(Stmt, VectorBlockMap);
1392
1393 for (Instruction *Inst : Stmt.getInstructions())
1394 copyInstruction(Stmt, Inst, VectorBlockMap, ScalarBlockMap, NewAccesses);
1 Calling 'VectorBlockGenerator::copyInstruction'
1395
1396 verifyNoScalarStores(Stmt);
1397}
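
To make the scalar-map/vector-map lookup described in the comment above concrete, the following is a minimal, simplified sketch; the names Value, ValueMapT, VectorValueMapT and lookupLane are illustrative stand-ins, not the real Polly definitions. An instruction either has one entry in the vector map (one value covering all lanes) or one entry per lane in the scalar maps.

#include <map>
#include <vector>

namespace sketch {
struct Value {};                                    // stand-in for llvm::Value
using ValueMapT = std::map<const Value *, Value *>; // simplified block-local map
using VectorValueMapT = std::vector<ValueMapT>;     // one scalar map per lane

// Find the copy of Old for a given lane: a single vector value serves all
// lanes; otherwise each lane has its own scalar copy.
inline Value *lookupLane(const Value *Old, const ValueMapT &VectorMap,
                         const VectorValueMapT &ScalarMaps, unsigned Lane) {
  auto VecIt = VectorMap.find(Old);
  if (VecIt != VectorMap.end())
    return VecIt->second;                           // vector-wide value
  auto LaneIt = ScalarMaps[Lane].find(Old);
  return LaneIt != ScalarMaps[Lane].end() ? LaneIt->second : nullptr;
}
} // namespace sketch
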
1398
1399BasicBlock *RegionGenerator::repairDominance(BasicBlock *BB,
1400 BasicBlock *BBCopy) {
1401
1402 BasicBlock *BBIDom = DT.getNode(BB)->getIDom()->getBlock();
1403 BasicBlock *BBCopyIDom = EndBlockMap.lookup(BBIDom);
1404
1405 if (BBCopyIDom)
1406 DT.changeImmediateDominator(BBCopy, BBCopyIDom);
1407
1408 return StartBlockMap.lookup(BBIDom);
1409}
1410
1411// This is to determine whether an llvm::Value (defined in @p BB) is usable when
1412// leaving a subregion. The straight-forward DT.dominates(BB, R->getExitBlock())
1413// does not work in cases where the exit block has edges from outside the
1414 // region. In that case the llvm::Value would never be usable in the exit
1415 // block. The RegionGenerator however creates a new exit block ('ExitBBCopy')
1416// for the subregion's exiting edges only. We need to determine whether an
1417// llvm::Value is usable in there. We do this by checking whether it dominates
1418// all exiting blocks individually.
1419static bool isDominatingSubregionExit(const DominatorTree &DT, Region *R,
1420 BasicBlock *BB) {
1421 for (auto ExitingBB : predecessors(R->getExit())) {
1422 // Check for non-subregion incoming edges.
1423 if (!R->contains(ExitingBB))
1424 continue;
1425
1426 if (!DT.dominates(BB, ExitingBB))
1427 return false;
1428 }
1429
1430 return true;
1431}
1432
1433// Find the direct dominator of the subregion's exit block if the subregion was
1434// simplified.
1435static BasicBlock *findExitDominator(DominatorTree &DT, Region *R) {
1436 BasicBlock *Common = nullptr;
1437 for (auto ExitingBB : predecessors(R->getExit())) {
1438 // Check for non-subregion incoming edges.
1439 if (!R->contains(ExitingBB))
1440 continue;
1441
1442 // First exiting edge.
1443 if (!Common) {
1444 Common = ExitingBB;
1445 continue;
1446 }
1447
1448 Common = DT.findNearestCommonDominator(Common, ExitingBB);
1449 }
1450
1451 assert(Common && R->contains(Common))(static_cast<void> (0));
1452 return Common;
1453}
1454
1455void RegionGenerator::copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
1456 isl_id_to_ast_expr *IdToAstExp) {
1457 assert(Stmt.isRegionStmt() &&(static_cast<void> (0))
1458 "Only region statements can be copied by the region generator")(static_cast<void> (0));
1459
1460 // Forget all old mappings.
1461 StartBlockMap.clear();
1462 EndBlockMap.clear();
1463 RegionMaps.clear();
1464 IncompletePHINodeMap.clear();
1465
1466 // Collection of all values related to this subregion.
1467 ValueMapT ValueMap;
1468
1469 // The region represented by the statement.
1470 Region *R = Stmt.getRegion();
1471
1472 // Create a dedicated entry for the region where we can reload all demoted
1473 // inputs.
1474 BasicBlock *EntryBB = R->getEntry();
1475 BasicBlock *EntryBBCopy = SplitBlock(Builder.GetInsertBlock(),
1476 &*Builder.GetInsertPoint(), &DT, &LI);
1477 EntryBBCopy->setName("polly.stmt." + EntryBB->getName() + ".entry");
1478 Builder.SetInsertPoint(&EntryBBCopy->front());
1479
1480 ValueMapT &EntryBBMap = RegionMaps[EntryBBCopy];
1481 generateScalarLoads(Stmt, LTS, EntryBBMap, IdToAstExp);
1482 generateBeginStmtTrace(Stmt, LTS, EntryBBMap);
1483
1484 for (auto PI = pred_begin(EntryBB), PE = pred_end(EntryBB); PI != PE; ++PI)
1485 if (!R->contains(*PI)) {
1486 StartBlockMap[*PI] = EntryBBCopy;
1487 EndBlockMap[*PI] = EntryBBCopy;
1488 }
1489
1490 // Iterate over all blocks in the region in a breadth-first search.
1491 std::deque<BasicBlock *> Blocks;
1492 SmallSetVector<BasicBlock *, 8> SeenBlocks;
1493 Blocks.push_back(EntryBB);
1494 SeenBlocks.insert(EntryBB);
1495
1496 while (!Blocks.empty()) {
1497 BasicBlock *BB = Blocks.front();
1498 Blocks.pop_front();
1499
1500 // First split the block and update dominance information.
1501 BasicBlock *BBCopy = splitBB(BB);
1502 BasicBlock *BBCopyIDom = repairDominance(BB, BBCopy);
1503
1504 // Get the mapping for this block and initialize it with either the scalar
1505 // loads from the generated entering block (which dominates all blocks of
1506 // this subregion) or the maps of the immediate dominator, if part of the
1507 // subregion. The latter necessarily includes the former.
1508 ValueMapT *InitBBMap;
1509 if (BBCopyIDom) {
1510 assert(RegionMaps.count(BBCopyIDom))(static_cast<void> (0));
1511 InitBBMap = &RegionMaps[BBCopyIDom];
1512 } else
1513 InitBBMap = &EntryBBMap;
1514 auto Inserted = RegionMaps.insert(std::make_pair(BBCopy, *InitBBMap));
1515 ValueMapT &RegionMap = Inserted.first->second;
1516
1517 // Copy the block with the BlockGenerator.
1518 Builder.SetInsertPoint(&BBCopy->front());
1519 copyBB(Stmt, BB, BBCopy, RegionMap, LTS, IdToAstExp);
1520
1521 // In order to remap PHI nodes we also store basic block mappings.
1522 StartBlockMap[BB] = BBCopy;
1523 EndBlockMap[BB] = Builder.GetInsertBlock();
1524
1525 // Add values to incomplete PHI nodes waiting for this block to be copied.
1526 for (const PHINodePairTy &PHINodePair : IncompletePHINodeMap[BB])
1527 addOperandToPHI(Stmt, PHINodePair.first, PHINodePair.second, BB, LTS);
1528 IncompletePHINodeMap[BB].clear();
1529
1530 // And continue with new successors inside the region.
1531 for (auto SI = succ_begin(BB), SE = succ_end(BB); SI != SE; SI++)
1532 if (R->contains(*SI) && SeenBlocks.insert(*SI))
1533 Blocks.push_back(*SI);
1534
1535 // Remember value in case it is visible after this subregion.
1536 if (isDominatingSubregionExit(DT, R, BB))
1537 ValueMap.insert(RegionMap.begin(), RegionMap.end());
1538 }
1539
1540 // Now create a new dedicated region exit block and add it to the region map.
1541 BasicBlock *ExitBBCopy = SplitBlock(Builder.GetInsertBlock(),
1542 &*Builder.GetInsertPoint(), &DT, &LI);
1543 ExitBBCopy->setName("polly.stmt." + R->getExit()->getName() + ".exit");
1544 StartBlockMap[R->getExit()] = ExitBBCopy;
1545 EndBlockMap[R->getExit()] = ExitBBCopy;
1546
1547 BasicBlock *ExitDomBBCopy = EndBlockMap.lookup(findExitDominator(DT, R));
1548 assert(ExitDomBBCopy &&(static_cast<void> (0))
1549 "Common exit dominator must be within region; at least the entry node "(static_cast<void> (0))
1550 "must match")(static_cast<void> (0));
1551 DT.changeImmediateDominator(ExitBBCopy, ExitDomBBCopy);
1552
1553 // As the block generator doesn't handle control flow we need to add the
1554 // region control flow by hand after all blocks have been copied.
1555 for (BasicBlock *BB : SeenBlocks) {
1556
1557 BasicBlock *BBCopyStart = StartBlockMap[BB];
1558 BasicBlock *BBCopyEnd = EndBlockMap[BB];
1559 Instruction *TI = BB->getTerminator();
1560 if (isa<UnreachableInst>(TI)) {
1561 while (!BBCopyEnd->empty())
1562 BBCopyEnd->begin()->eraseFromParent();
1563 new UnreachableInst(BBCopyEnd->getContext(), BBCopyEnd);
1564 continue;
1565 }
1566
1567 Instruction *BICopy = BBCopyEnd->getTerminator();
1568
1569 ValueMapT &RegionMap = RegionMaps[BBCopyStart];
1570 RegionMap.insert(StartBlockMap.begin(), StartBlockMap.end());
1571
1572 Builder.SetInsertPoint(BICopy);
1573 copyInstScalar(Stmt, TI, RegionMap, LTS);
1574 BICopy->eraseFromParent();
1575 }
1576
1577 // Add counting PHI nodes to all loops in the region that can be used as
1578 // replacement for SCEVs referring to the old loop.
1579 for (BasicBlock *BB : SeenBlocks) {
1580 Loop *L = LI.getLoopFor(BB);
1581 if (L == nullptr || L->getHeader() != BB || !R->contains(L))
1582 continue;
1583
1584 BasicBlock *BBCopy = StartBlockMap[BB];
1585 Value *NullVal = Builder.getInt32(0);
1586 PHINode *LoopPHI =
1587 PHINode::Create(Builder.getInt32Ty(), 2, "polly.subregion.iv");
1588 Instruction *LoopPHIInc = BinaryOperator::CreateAdd(
1589 LoopPHI, Builder.getInt32(1), "polly.subregion.iv.inc");
1590 LoopPHI->insertBefore(&BBCopy->front());
1591 LoopPHIInc->insertBefore(BBCopy->getTerminator());
1592
1593 for (auto *PredBB : make_range(pred_begin(BB), pred_end(BB))) {
1594 if (!R->contains(PredBB))
1595 continue;
1596 if (L->contains(PredBB))
1597 LoopPHI->addIncoming(LoopPHIInc, EndBlockMap[PredBB]);
1598 else
1599 LoopPHI->addIncoming(NullVal, EndBlockMap[PredBB]);
1600 }
1601
1602 for (auto *PredBBCopy : make_range(pred_begin(BBCopy), pred_end(BBCopy)))
1603 if (LoopPHI->getBasicBlockIndex(PredBBCopy) < 0)
1604 LoopPHI->addIncoming(NullVal, PredBBCopy);
1605
1606 LTS[L] = SE.getUnknown(LoopPHI);
1607 }
1608
1609 // Continue generating code in the exit block.
1610 Builder.SetInsertPoint(&*ExitBBCopy->getFirstInsertionPt());
1611
1612 // Write values visible to other statements.
1613 generateScalarStores(Stmt, LTS, ValueMap, IdToAstExp);
1614 StartBlockMap.clear();
1615 EndBlockMap.clear();
1616 RegionMaps.clear();
1617 IncompletePHINodeMap.clear();
1618}
1619
1620PHINode *RegionGenerator::buildExitPHI(MemoryAccess *MA, LoopToScevMapT &LTS,
1621 ValueMapT &BBMap, Loop *L) {
1622 ScopStmt *Stmt = MA->getStatement();
1623 Region *SubR = Stmt->getRegion();
1624 auto Incoming = MA->getIncoming();
1625
1626 PollyIRBuilder::InsertPointGuard IPGuard(Builder);
1627 PHINode *OrigPHI = cast<PHINode>(MA->getAccessInstruction());
1628 BasicBlock *NewSubregionExit = Builder.GetInsertBlock();
1629
1630 // This can happen if the subregion is simplified after the ScopStmts
1631 // have been created; simplification happens as part of CodeGeneration.
1632 if (OrigPHI->getParent() != SubR->getExit()) {
1633 BasicBlock *FormerExit = SubR->getExitingBlock();
1634 if (FormerExit)
1635 NewSubregionExit = StartBlockMap.lookup(FormerExit);
1636 }
1637
1638 PHINode *NewPHI = PHINode::Create(OrigPHI->getType(), Incoming.size(),
1639 "polly." + OrigPHI->getName(),
1640 NewSubregionExit->getFirstNonPHI());
1641
1642 // Add the incoming values to the PHI.
1643 for (auto &Pair : Incoming) {
1644 BasicBlock *OrigIncomingBlock = Pair.first;
1645 BasicBlock *NewIncomingBlockStart = StartBlockMap.lookup(OrigIncomingBlock);
1646 BasicBlock *NewIncomingBlockEnd = EndBlockMap.lookup(OrigIncomingBlock);
1647 Builder.SetInsertPoint(NewIncomingBlockEnd->getTerminator());
1648 assert(RegionMaps.count(NewIncomingBlockStart))(static_cast<void> (0));
1649 assert(RegionMaps.count(NewIncomingBlockEnd))(static_cast<void> (0));
1650 ValueMapT *LocalBBMap = &RegionMaps[NewIncomingBlockStart];
1651
1652 Value *OrigIncomingValue = Pair.second;
1653 Value *NewIncomingValue =
1654 getNewValue(*Stmt, OrigIncomingValue, *LocalBBMap, LTS, L);
1655 NewPHI->addIncoming(NewIncomingValue, NewIncomingBlockEnd);
1656 }
1657
1658 return NewPHI;
1659}
1660
1661Value *RegionGenerator::getExitScalar(MemoryAccess *MA, LoopToScevMapT &LTS,
1662 ValueMapT &BBMap) {
1663 ScopStmt *Stmt = MA->getStatement();
1664
1665 // TODO: Add some test cases that ensure this is really the right choice.
1666 Loop *L = LI.getLoopFor(Stmt->getRegion()->getExit());
1667
1668 if (MA->isAnyPHIKind()) {
1669 auto Incoming = MA->getIncoming();
1670 assert(!Incoming.empty() &&(static_cast<void> (0))
1671 "PHI WRITEs must have originate from at least one incoming block")(static_cast<void> (0));
1672
1673 // If there is only one incoming value, we do not need to create a PHI.
1674 if (Incoming.size() == 1) {
1675 Value *OldVal = Incoming[0].second;
1676 return getNewValue(*Stmt, OldVal, BBMap, LTS, L);
1677 }
1678
1679 return buildExitPHI(MA, LTS, BBMap, L);
1680 }
1681
1682 // MemoryKind::Value accesses leaving the subregion must dominate the exit
1683 // block; just pass the copied value.
1684 Value *OldVal = MA->getAccessValue();
1685 return getNewValue(*Stmt, OldVal, BBMap, LTS, L);
1686}
1687
1688void RegionGenerator::generateScalarStores(
1689 ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
1690 __isl_keep isl_id_to_ast_expr *NewAccesses) {
1691 assert(Stmt.getRegion() &&(static_cast<void> (0))
1692 "Block statements need to use the generateScalarStores() "(static_cast<void> (0))
1693 "function in the BlockGenerator")(static_cast<void> (0));
1694
1695 // Get the exit scalar values before generating the writes.
1696 // This is necessary because RegionGenerator::getExitScalar may insert
1697 // PHINodes that depend on the region's exiting blocks. But
1698 // BlockGenerator::generateConditionalExecution may insert a new basic block
1699 // such that the current basic block is not a direct successor of the exiting
1700 // blocks anymore. Hence, build the PHINodes while the current block is still
1701 // the direct successor.
1702 SmallDenseMap<MemoryAccess *, Value *> NewExitScalars;
1703 for (MemoryAccess *MA : Stmt) {
1704 if (MA->isOriginalArrayKind() || MA->isRead())
1705 continue;
1706
1707 Value *NewVal = getExitScalar(MA, LTS, BBMap);
1708 NewExitScalars[MA] = NewVal;
1709 }
1710
1711 for (MemoryAccess *MA : Stmt) {
1712 if (MA->isOriginalArrayKind() || MA->isRead())
1713 continue;
1714
1715 isl::set AccDom = MA->getAccessRelation().domain();
1716 std::string Subject = MA->getId().get_name();
1717 generateConditionalExecution(
1718 Stmt, AccDom, Subject.c_str(), [&, this, MA]() {
1719 Value *NewVal = NewExitScalars.lookup(MA);
1720 assert(NewVal && "The exit scalar must be determined before")(static_cast<void> (0));
1721 Value *Address = getImplicitAddress(*MA, getLoopForStmt(Stmt), LTS,
1722 BBMap, NewAccesses);
1723 assert((!isa<Instruction>(NewVal) ||(static_cast<void> (0))
1724 DT.dominates(cast<Instruction>(NewVal)->getParent(),(static_cast<void> (0))
1725 Builder.GetInsertBlock())) &&(static_cast<void> (0))
1726 "Domination violation")(static_cast<void> (0));
1727 assert((!isa<Instruction>(Address) ||(static_cast<void> (0))
1728 DT.dominates(cast<Instruction>(Address)->getParent(),(static_cast<void> (0))
1729 Builder.GetInsertBlock())) &&(static_cast<void> (0))
1730 "Domination violation")(static_cast<void> (0));
1731 Builder.CreateStore(NewVal, Address);
1732 });
1733 }
1734}
1735
1736void RegionGenerator::addOperandToPHI(ScopStmt &Stmt, PHINode *PHI,
1737 PHINode *PHICopy, BasicBlock *IncomingBB,
1738 LoopToScevMapT &LTS) {
1739 // If the incoming block was not yet copied, mark this PHI as incomplete.
1740 // Once the block has been copied, the incoming value will be added.
1741 BasicBlock *BBCopyStart = StartBlockMap[IncomingBB];
1742 BasicBlock *BBCopyEnd = EndBlockMap[IncomingBB];
1743 if (!BBCopyStart) {
1744 assert(!BBCopyEnd)(static_cast<void> (0));
1745 assert(Stmt.represents(IncomingBB) &&(static_cast<void> (0))
1746 "Bad incoming block for PHI in non-affine region")(static_cast<void> (0));
1747 IncompletePHINodeMap[IncomingBB].push_back(std::make_pair(PHI, PHICopy));
1748 return;
1749 }
1750
1751 assert(RegionMaps.count(BBCopyStart) &&(static_cast<void> (0))
1752 "Incoming PHI block did not have a BBMap")(static_cast<void> (0));
1753 ValueMapT &BBCopyMap = RegionMaps[BBCopyStart];
1754
1755 Value *OpCopy = nullptr;
1756
1757 if (Stmt.represents(IncomingBB)) {
1758 Value *Op = PHI->getIncomingValueForBlock(IncomingBB);
1759
1760 // If the current insert block is different from the PHI's incoming block,
1761 // change it; otherwise do not.
1762 auto IP = Builder.GetInsertPoint();
1763 if (IP->getParent() != BBCopyEnd)
1764 Builder.SetInsertPoint(BBCopyEnd->getTerminator());
1765 OpCopy = getNewValue(Stmt, Op, BBCopyMap, LTS, getLoopForStmt(Stmt));
1766 if (IP->getParent() != BBCopyEnd)
1767 Builder.SetInsertPoint(&*IP);
1768 } else {
1769 // All edges from outside the non-affine region become a single edge
1770 // in the new copy of the non-affine region. Make sure to only add the
1771 // corresponding edge the first time we encounter a basic block from
1772 // outside the non-affine region.
1773 if (PHICopy->getBasicBlockIndex(BBCopyEnd) >= 0)
1774 return;
1775
1776 // Get the reloaded value.
1777 OpCopy = getNewValue(Stmt, PHI, BBCopyMap, LTS, getLoopForStmt(Stmt));
1778 }
1779
1780 assert(OpCopy && "Incoming PHI value was not copied properly")(static_cast<void> (0));
1781 PHICopy->addIncoming(OpCopy, BBCopyEnd);
1782}
1783
1784void RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, PHINode *PHI,
1785 ValueMapT &BBMap,
1786 LoopToScevMapT &LTS) {
1787 unsigned NumIncoming = PHI->getNumIncomingValues();
1788 PHINode *PHICopy =
1789 Builder.CreatePHI(PHI->getType(), NumIncoming, "polly." + PHI->getName());
1790 PHICopy->moveBefore(PHICopy->getParent()->getFirstNonPHI());
1791 BBMap[PHI] = PHICopy;
1792
1793 for (BasicBlock *IncomingBB : PHI->blocks())
1794 addOperandToPHI(Stmt, PHI, PHICopy, IncomingBB, LTS);
1795}

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/ADT/SmallVector.h

1//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_SMALLVECTOR_H
14#define LLVM_ADT_SMALLVECTOR_H
15
16#include "llvm/ADT/iterator_range.h"
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/ErrorHandling.h"
19#include "llvm/Support/MemAlloc.h"
20#include "llvm/Support/type_traits.h"
21#include <algorithm>
22#include <cassert>
23#include <cstddef>
24#include <cstdlib>
25#include <cstring>
26#include <functional>
27#include <initializer_list>
28#include <iterator>
29#include <limits>
30#include <memory>
31#include <new>
32#include <type_traits>
33#include <utility>
34
35namespace llvm {
36
37/// This is all the stuff common to all SmallVectors.
38///
39/// The template parameter specifies the type which should be used to hold the
40/// Size and Capacity of the SmallVector, so it can be adjusted.
41/// Using 32 bit size is desirable to shrink the size of the SmallVector.
42/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
43/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
44/// buffering bitcode output - which can exceed 4GB.
45template <class Size_T> class SmallVectorBase {
46protected:
47 void *BeginX;
48 Size_T Size = 0, Capacity;
49
50 /// The maximum value of the Size_T used.
51 static constexpr size_t SizeTypeMax() {
52 return std::numeric_limits<Size_T>::max();
53 }
54
55 SmallVectorBase() = delete;
56 SmallVectorBase(void *FirstEl, size_t TotalCapacity)
57 : BeginX(FirstEl), Capacity(TotalCapacity) {}
58
59 /// This is a helper for \a grow() that's out of line to reduce code
60 /// duplication. This function will report a fatal error if it can't grow at
61 /// least to \p MinSize.
62 void *mallocForGrow(size_t MinSize, size_t TSize, size_t &NewCapacity);
63
64 /// This is an implementation of the grow() method which only works
65 /// on POD-like data types and is out of line to reduce code duplication.
66 /// This function will report a fatal error if it cannot increase capacity.
67 void grow_pod(void *FirstEl, size_t MinSize, size_t TSize);
68
69public:
70 size_t size() const { return Size; }
71 size_t capacity() const { return Capacity; }
72
73 LLVM_NODISCARD[[clang::warn_unused_result]] bool empty() const { return !Size; }
74
75 /// Set the array size to \p N, which the current array must have enough
76 /// capacity for.
77 ///
78 /// This does not construct or destroy any elements in the vector.
79 ///
80 /// Clients can use this in conjunction with capacity() to write past the end
81 /// of the buffer when they know that more elements are available, and only
82 /// update the size later. This avoids the cost of value initializing elements
83 /// which will only be overwritten.
84 void set_size(size_t N) {
85 assert(N <= capacity())(static_cast<void> (0));
86 Size = N;
87 }
88};
89
90template <class T>
91using SmallVectorSizeType =
92 typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
93 uint32_t>::type;
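
As a quick sanity check of the size-type selection above, here is a minimal sketch (assumptions: the header is available and the host has either 4- or 8-byte pointers):

#include <cstdint>
#include <type_traits>
#include "llvm/ADT/SmallVector.h"

// Sub-4-byte element types get a 64-bit size field on 64-bit hosts...
static_assert(sizeof(void *) < 8 ||
                  std::is_same<llvm::SmallVectorSizeType<char>, uint64_t>::value,
              "char elements use a 64-bit size on 64-bit hosts");
// ...while 4-byte-or-larger element types always keep the 32-bit size field.
static_assert(std::is_same<llvm::SmallVectorSizeType<uint32_t>, uint32_t>::value,
              "4-byte elements use a 32-bit size");
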
94
95/// Figure out the offset of the first element.
96template <class T, typename = void> struct SmallVectorAlignmentAndSize {
97 alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof(
98 SmallVectorBase<SmallVectorSizeType<T>>)];
99 alignas(T) char FirstEl[sizeof(T)];
100};
101
102/// This is the part of SmallVectorTemplateBase which does not depend on whether
103/// the type T is a POD. The extra dummy template argument is used by ArrayRef
104/// to avoid unnecessarily requiring T to be complete.
105template <typename T, typename = void>
106class SmallVectorTemplateCommon
107 : public SmallVectorBase<SmallVectorSizeType<T>> {
108 using Base = SmallVectorBase<SmallVectorSizeType<T>>;
109
110 /// Find the address of the first element. For this pointer math to be valid
111 /// with small-size of 0 for T with lots of alignment, it's important that
112 /// SmallVectorStorage is properly-aligned even for small-size of 0.
113 void *getFirstEl() const {
114 return const_cast<void *>(reinterpret_cast<const void *>(
115 reinterpret_cast<const char *>(this) +
116 offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)__builtin_offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
117 }
118 // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
119
120protected:
121 SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
122
123 void grow_pod(size_t MinSize, size_t TSize) {
124 Base::grow_pod(getFirstEl(), MinSize, TSize);
125 }
126
127 /// Return true if this is a smallvector which has not had dynamic
128 /// memory allocated for it.
129 bool isSmall() const { return this->BeginX == getFirstEl(); }
130
131 /// Put this vector in a state of being small.
132 void resetToSmall() {
133 this->BeginX = getFirstEl();
134 this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
135 }
136
137 /// Return true if V is an internal reference to the given range.
138 bool isReferenceToRange(const void *V, const void *First, const void *Last) const {
139 // Use std::less to avoid UB.
140 std::less<> LessThan;
141 return !LessThan(V, First) && LessThan(V, Last);
142 }
143
144 /// Return true if V is an internal reference to this vector.
145 bool isReferenceToStorage(const void *V) const {
146 return isReferenceToRange(V, this->begin(), this->end());
147 }
148
149 /// Return true if First and Last form a valid (possibly empty) range in this
150 /// vector's storage.
151 bool isRangeInStorage(const void *First, const void *Last) const {
152 // Use std::less to avoid UB.
153 std::less<> LessThan;
154 return !LessThan(First, this->begin()) && !LessThan(Last, First) &&
155 !LessThan(this->end(), Last);
156 }
157
158 /// Return true unless Elt will be invalidated by resizing the vector to
159 /// NewSize.
160 bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
161 // Past the end.
162 if (LLVM_LIKELY(!isReferenceToStorage(Elt))__builtin_expect((bool)(!isReferenceToStorage(Elt)), true))
163 return true;
164
165 // Return false if Elt will be destroyed by shrinking.
166 if (NewSize <= this->size())
167 return Elt < this->begin() + NewSize;
168
169 // Return false if we need to grow.
170 return NewSize <= this->capacity();
171 }
172
173 /// Check whether Elt will be invalidated by resizing the vector to NewSize.
174 void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
175 assert(isSafeToReferenceAfterResize(Elt, NewSize) &&(static_cast<void> (0))
176 "Attempting to reference an element of the vector in an operation "(static_cast<void> (0))
177 "that invalidates it")(static_cast<void> (0));
178 }
179
180 /// Check whether Elt will be invalidated by increasing the size of the
181 /// vector by N.
182 void assertSafeToAdd(const void *Elt, size_t N = 1) {
183 this->assertSafeToReferenceAfterResize(Elt, this->size() + N);
184 }
185
186 /// Check whether any part of the range will be invalidated by clearing.
187 void assertSafeToReferenceAfterClear(const T *From, const T *To) {
188 if (From == To)
189 return;
190 this->assertSafeToReferenceAfterResize(From, 0);
191 this->assertSafeToReferenceAfterResize(To - 1, 0);
192 }
193 template <
194 class ItTy,
195 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
196 bool> = false>
197 void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
198
199 /// Check whether any part of the range will be invalidated by growing.
200 void assertSafeToAddRange(const T *From, const T *To) {
201 if (From == To)
202 return;
203 this->assertSafeToAdd(From, To - From);
204 this->assertSafeToAdd(To - 1, To - From);
205 }
206 template <
207 class ItTy,
208 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
209 bool> = false>
210 void assertSafeToAddRange(ItTy, ItTy) {}
211
212 /// Reserve enough space to add one element, and return the updated element
213 /// pointer in case it was a reference to the storage.
214 template <class U>
215 static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt,
216 size_t N) {
217 size_t NewSize = This->size() + N;
218 if (LLVM_LIKELY(NewSize <= This->capacity())__builtin_expect((bool)(NewSize <= This->capacity()), true))
219 return &Elt;
220
221 bool ReferencesStorage = false;
222 int64_t Index = -1;
223 if (!U::TakesParamByValue) {
224 if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))__builtin_expect((bool)(This->isReferenceToStorage(&Elt)), false)) {
225 ReferencesStorage = true;
226 Index = &Elt - This->begin();
227 }
228 }
229 This->grow(NewSize);
230 return ReferencesStorage ? This->begin() + Index : &Elt;
231 }
232
233public:
234 using size_type = size_t;
235 using difference_type = ptrdiff_t;
236 using value_type = T;
237 using iterator = T *;
238 using const_iterator = const T *;
239
240 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
241 using reverse_iterator = std::reverse_iterator<iterator>;
242
243 using reference = T &;
244 using const_reference = const T &;
245 using pointer = T *;
246 using const_pointer = const T *;
247
248 using Base::capacity;
249 using Base::empty;
250 using Base::size;
251
252 // forward iterator creation methods.
253 iterator begin() { return (iterator)this->BeginX; }
254 const_iterator begin() const { return (const_iterator)this->BeginX; }
255 iterator end() { return begin() + size(); }
256 const_iterator end() const { return begin() + size(); }
257
258 // reverse iterator creation methods.
259 reverse_iterator rbegin() { return reverse_iterator(end()); }
260 const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
261 reverse_iterator rend() { return reverse_iterator(begin()); }
262 const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
263
264 size_type size_in_bytes() const { return size() * sizeof(T); }
265 size_type max_size() const {
266 return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
267 }
268
269 size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
270
271 /// Return a pointer to the vector's buffer, even if empty().
272 pointer data() { return pointer(begin()); }
273 /// Return a pointer to the vector's buffer, even if empty().
274 const_pointer data() const { return const_pointer(begin()); }
275
276 reference operator[](size_type idx) {
277 assert(idx < size())(static_cast<void> (0));
278 return begin()[idx];
279 }
280 const_reference operator[](size_type idx) const {
281 assert(idx < size())(static_cast<void> (0));
282 return begin()[idx];
283 }
284
285 reference front() {
286 assert(!empty())(static_cast<void> (0));
287 return begin()[0];
288 }
289 const_reference front() const {
290 assert(!empty())(static_cast<void> (0));
291 return begin()[0];
292 }
293
294 reference back() {
295 assert(!empty())(static_cast<void> (0));
296 return end()[-1];
297 }
298 const_reference back() const {
299 assert(!empty())(static_cast<void> (0));
300 return end()[-1];
301 }
302};
303
304/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
305/// method implementations that are designed to work with non-trivial T's.
306///
307/// We approximate is_trivially_copyable with trivial move/copy construction and
308/// trivial destruction. While the standard doesn't specify that you're allowed
309/// to copy these types with memcpy, there is no way for the type to observe this.
310/// This catches the important case of std::pair<POD, POD>, which is not
311/// trivially assignable.
312template <typename T, bool = (is_trivially_copy_constructible<T>::value) &&
313 (is_trivially_move_constructible<T>::value) &&
314 std::is_trivially_destructible<T>::value>
315class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
316 friend class SmallVectorTemplateCommon<T>;
317
318protected:
319 static constexpr bool TakesParamByValue = false;
320 using ValueParamT = const T &;
321
322 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
323
324 static void destroy_range(T *S, T *E) {
325 while (S != E) {
326 --E;
327 E->~T();
328 }
329 }
330
331 /// Move the range [I, E) into the uninitialized memory starting with "Dest",
332 /// constructing elements as needed.
333 template<typename It1, typename It2>
334 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
335 std::uninitialized_copy(std::make_move_iterator(I),
336 std::make_move_iterator(E), Dest);
337 }
338
339 /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
340 /// constructing elements as needed.
341 template<typename It1, typename It2>
342 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
343 std::uninitialized_copy(I, E, Dest);
344 }
345
346 /// Grow the allocated memory (without initializing new elements), doubling
347 /// the size of the allocated memory. Guarantees space for at least one more
348 /// element, or MinSize more elements if specified.
349 void grow(size_t MinSize = 0);
350
351 /// Create a new allocation big enough for \p MinSize and pass back its size
352 /// in \p NewCapacity. This is the first section of \a grow().
353 T *mallocForGrow(size_t MinSize, size_t &NewCapacity) {
354 return static_cast<T *>(
355 SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow(
356 MinSize, sizeof(T), NewCapacity));
357 }
358
359 /// Move existing elements over to the new allocation \p NewElts, the middle
360 /// section of \a grow().
361 void moveElementsForGrow(T *NewElts);
362
363 /// Transfer ownership of the allocation, finishing up \a grow().
364 void takeAllocationForGrow(T *NewElts, size_t NewCapacity);
365
366 /// Reserve enough space to add one element, and return the updated element
367 /// pointer in case it was a reference to the storage.
368 const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
369 return this->reserveForParamAndGetAddressImpl(this, Elt, N);
370 }
371
372 /// Reserve enough space to add one element, and return the updated element
373 /// pointer in case it was a reference to the storage.
374 T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
375 return const_cast<T *>(
376 this->reserveForParamAndGetAddressImpl(this, Elt, N));
377 }
378
379 static T &&forward_value_param(T &&V) { return std::move(V); }
380 static const T &forward_value_param(const T &V) { return V; }
381
382 void growAndAssign(size_t NumElts, const T &Elt) {
383 // Grow manually in case Elt is an internal reference.
384 size_t NewCapacity;
385 T *NewElts = mallocForGrow(NumElts, NewCapacity);
386 std::uninitialized_fill_n(NewElts, NumElts, Elt);
387 this->destroy_range(this->begin(), this->end());
388 takeAllocationForGrow(NewElts, NewCapacity);
389 this->set_size(NumElts);
390 }
391
392 template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
393 // Grow manually in case one of Args is an internal reference.
394 size_t NewCapacity;
395 T *NewElts = mallocForGrow(0, NewCapacity);
396 ::new ((void *)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...);
397 moveElementsForGrow(NewElts);
398 takeAllocationForGrow(NewElts, NewCapacity);
399 this->set_size(this->size() + 1);
400 return this->back();
401 }
402
403public:
404 void push_back(const T &Elt) {
405 const T *EltPtr = reserveForParamAndGetAddress(Elt);
406 ::new ((void *)this->end()) T(*EltPtr);
407 this->set_size(this->size() + 1);
408 }
409
410 void push_back(T &&Elt) {
411 T *EltPtr = reserveForParamAndGetAddress(Elt);
412 ::new ((void *)this->end()) T(::std::move(*EltPtr));
413 this->set_size(this->size() + 1);
414 }
415
416 void pop_back() {
417 this->set_size(this->size() - 1);
418 this->end()->~T();
419 }
420};
421
422// Define this out-of-line to dissuade the C++ compiler from inlining it.
423template <typename T, bool TriviallyCopyable>
424void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
425 size_t NewCapacity;
426 T *NewElts = mallocForGrow(MinSize, NewCapacity);
427 moveElementsForGrow(NewElts);
428 takeAllocationForGrow(NewElts, NewCapacity);
429}
430
431// Define this out-of-line to dissuade the C++ compiler from inlining it.
432template <typename T, bool TriviallyCopyable>
433void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow(
434 T *NewElts) {
435 // Move the elements over.
436 this->uninitialized_move(this->begin(), this->end(), NewElts);
437
438 // Destroy the original elements.
439 destroy_range(this->begin(), this->end());
440}
441
442// Define this out-of-line to dissuade the C++ compiler from inlining it.
443template <typename T, bool TriviallyCopyable>
444void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow(
445 T *NewElts, size_t NewCapacity) {
446 // If this wasn't grown from the inline copy, deallocate the old space.
447 if (!this->isSmall())
448 free(this->begin());
449
450 this->BeginX = NewElts;
451 this->Capacity = NewCapacity;
452}
453
454/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
455/// method implementations that are designed to work with trivially copyable
456/// T's. This allows using memcpy in place of copy/move construction and
457/// skipping destruction.
458template <typename T>
459class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
460 friend class SmallVectorTemplateCommon<T>;
461
462protected:
463 /// True if it's cheap enough to take parameters by value. Doing so avoids
464 /// overhead related to mitigations for reference invalidation.
465 static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *);
466
467 /// Either const T& or T, depending on whether it's cheap enough to take
468 /// parameters by value.
469 using ValueParamT =
470 typename std::conditional<TakesParamByValue, T, const T &>::type;
471
472 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
473
474 // No need to do a destroy loop for POD's.
475 static void destroy_range(T *, T *) {}
476
477 /// Move the range [I, E) onto the uninitialized memory
478 /// starting with "Dest", constructing elements into it as needed.
479 template<typename It1, typename It2>
480 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
481 // Just do a copy.
482 uninitialized_copy(I, E, Dest);
483 }
484
485 /// Copy the range [I, E) onto the uninitialized memory
486 /// starting with "Dest", constructing elements into it as needed.
487 template<typename It1, typename It2>
488 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
489 // Arbitrary iterator types; just use the basic implementation.
490 std::uninitialized_copy(I, E, Dest);
491 }
492
493 /// Copy the range [I, E) onto the uninitialized memory
494 /// starting with "Dest", constructing elements into it as needed.
495 template <typename T1, typename T2>
496 static void uninitialized_copy(
497 T1 *I, T1 *E, T2 *Dest,
498 std::enable_if_t<std::is_same<typename std::remove_const<T1>::type,
499 T2>::value> * = nullptr) {
500 // Use memcpy for PODs iterated by pointers (which includes SmallVector
501 // iterators): std::uninitialized_copy optimizes to memmove, but we can
502 // use memcpy here. Note that I and E are iterators and thus might be
503 // invalid for memcpy if they are equal.
504 if (I != E)
505 memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
506 }
507
508 /// Double the size of the allocated memory, guaranteeing space for at
509 /// least one more element or MinSize if specified.
510 void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
511
512 /// Reserve enough space to add one element, and return the updated element
513 /// pointer in case it was a reference to the storage.
514 const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
515 return this->reserveForParamAndGetAddressImpl(this, Elt, N);
516 }
517
518 /// Reserve enough space to add one element, and return the updated element
519 /// pointer in case it was a reference to the storage.
520 T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
521 return const_cast<T *>(
522 this->reserveForParamAndGetAddressImpl(this, Elt, N));
523 }
524
525 /// Copy \p V or return a reference, depending on \a ValueParamT.
526 static ValueParamT forward_value_param(ValueParamT V) { return V; }
527
528 void growAndAssign(size_t NumElts, T Elt) {
529 // Elt has been copied in case it's an internal reference, side-stepping
530 // reference invalidation problems without losing the realloc optimization.
531 this->set_size(0);
532 this->grow(NumElts);
533 std::uninitialized_fill_n(this->begin(), NumElts, Elt);
534 this->set_size(NumElts);
535 }
536
537 template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
538 // Use push_back with a copy in case Args has an internal reference,
539 // side-stepping reference invalidation problems without losing the realloc
540 // optimization.
541 push_back(T(std::forward<ArgTypes>(Args)...));
542 return this->back();
543 }
544
545public:
546 void push_back(ValueParamT Elt) {
547 const T *EltPtr = reserveForParamAndGetAddress(Elt);
548 memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T));
549 this->set_size(this->size() + 1);
550 }
551
552 void pop_back() { this->set_size(this->size() - 1); }
553};
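
For orientation, a small usage sketch of the push_back path above (assuming llvm/ADT/SmallVector.h; the SmallVector class itself is defined further down this header): pushing an element of the vector into itself stays valid, because the element is either taken by value (cheap, trivially copyable types) or its address is re-derived by reserveForParamAndGetAddress after a grow.

#include "llvm/ADT/SmallVector.h"

void selfPushBack() {
  llvm::SmallVector<int, 2> V = {1, 2}; // inline storage is already full
  V.push_back(V[0]);                    // may reallocate; the pushed value
                                        // was captured before the grow
  // V now holds {1, 2, 1}.
}
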
554
555/// This class consists of common code factored out of the SmallVector class to
556/// reduce code duplication based on the SmallVector 'N' template parameter.
557template <typename T>
558class SmallVectorImpl : public SmallVectorTemplateBase<T> {
559 using SuperClass = SmallVectorTemplateBase<T>;
560
561public:
562 using iterator = typename SuperClass::iterator;
563 using const_iterator = typename SuperClass::const_iterator;
564 using reference = typename SuperClass::reference;
565 using size_type = typename SuperClass::size_type;
566
567protected:
568 using SmallVectorTemplateBase<T>::TakesParamByValue;
569 using ValueParamT = typename SuperClass::ValueParamT;
570
571 // Default ctor - Initialize to empty.
572 explicit SmallVectorImpl(unsigned N)
573 : SmallVectorTemplateBase<T>(N) {}
574
575public:
576 SmallVectorImpl(const SmallVectorImpl &) = delete;
577
578 ~SmallVectorImpl() {
579 // Subclass has already destructed this vector's elements.
580 // If this wasn't grown from the inline copy, deallocate the old space.
581 if (!this->isSmall())
582 free(this->begin());
583 }
584
585 void clear() {
586 this->destroy_range(this->begin(), this->end());
587 this->Size = 0;
588 }
589
590private:
591 template <bool ForOverwrite> void resizeImpl(size_type N) {
592 if (N < this->size()) {
593 this->pop_back_n(this->size() - N);
594 } else if (N > this->size()) {
595 this->reserve(N);
596 for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
597 if (ForOverwrite)
598 new (&*I) T;
599 else
600 new (&*I) T();
601 this->set_size(N);
602 }
603 }
604
605public:
606 void resize(size_type N) { resizeImpl<false>(N); }
607
608 /// Like resize, but \ref T is POD, so the new values won't be initialized.
609 void resize_for_overwrite(size_type N) { resizeImpl<true>(N); }
610
611 void resize(size_type N, ValueParamT NV) {
612 if (N == this->size())
613 return;
614
615 if (N < this->size()) {
616 this->pop_back_n(this->size() - N);
617 return;
618 }
619
620 // N > this->size(). Defer to append.
621 this->append(N - this->size(), NV);
622 }
623
624 void reserve(size_type N) {
625 if (this->capacity() < N)
626 this->grow(N);
627 }
628
629 void pop_back_n(size_type NumItems) {
630 assert(this->size() >= NumItems)(static_cast<void> (0));
631 this->destroy_range(this->end() - NumItems, this->end());
632 this->set_size(this->size() - NumItems);
633 }
634
635 LLVM_NODISCARD[[clang::warn_unused_result]] T pop_back_val() {
636 T Result = ::std::move(this->back());
637 this->pop_back();
638 return Result;
639 }
640
641 void swap(SmallVectorImpl &RHS);
642
643 /// Add the specified range to the end of the SmallVector.
644 template <typename in_iter,
645 typename = std::enable_if_t<std::is_convertible<
646 typename std::iterator_traits<in_iter>::iterator_category,
647 std::input_iterator_tag>::value>>
648 void append(in_iter in_start, in_iter in_end) {
649 this->assertSafeToAddRange(in_start, in_end);
650 size_type NumInputs = std::distance(in_start, in_end);
651 this->reserve(this->size() + NumInputs);
652 this->uninitialized_copy(in_start, in_end, this->end());
653 this->set_size(this->size() + NumInputs);
654 }
655
656 /// Append \p NumInputs copies of \p Elt to the end.
657 void append(size_type NumInputs, ValueParamT Elt) {
658 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs);
659 std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr);
660 this->set_size(this->size() + NumInputs);
661 }
662
663 void append(std::initializer_list<T> IL) {
664 append(IL.begin(), IL.end());
665 }
666
667 void append(const SmallVectorImpl &RHS) { append(RHS.begin(), RHS.end()); }
668
669 void assign(size_type NumElts, ValueParamT Elt) {
670 // Note that Elt could be an internal reference.
671 if (NumElts > this->capacity()) {
672 this->growAndAssign(NumElts, Elt);
673 return;
674 }
675
676 // Assign over existing elements.
677 std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt);
678 if (NumElts > this->size())
679 std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt);
680 else if (NumElts < this->size())
681 this->destroy_range(this->begin() + NumElts, this->end());
682 this->set_size(NumElts);
683 }
684
685 // FIXME: Consider assigning over existing elements, rather than clearing &
686 // re-initializing them - for all assign(...) variants.
687
688 template <typename in_iter,
689 typename = std::enable_if_t<std::is_convertible<
690 typename std::iterator_traits<in_iter>::iterator_category,
691 std::input_iterator_tag>::value>>
692 void assign(in_iter in_start, in_iter in_end) {
693 this->assertSafeToReferenceAfterClear(in_start, in_end);
694 clear();
695 append(in_start, in_end);
696 }
697
698 void assign(std::initializer_list<T> IL) {
699 clear();
700 append(IL);
701 }
702
703 void assign(const SmallVectorImpl &RHS) { assign(RHS.begin(), RHS.end()); }
704
705 iterator erase(const_iterator CI) {
706 // Just cast away constness because this is a non-const member function.
707 iterator I = const_cast<iterator>(CI);
708
709 assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds.")(static_cast<void> (0));
710
711 iterator N = I;
712 // Shift all elts down one.
713 std::move(I+1, this->end(), I);
714 // Drop the last elt.
715 this->pop_back();
716 return(N);
717 }
718
719 iterator erase(const_iterator CS, const_iterator CE) {
720 // Just cast away constness because this is a non-const member function.
721 iterator S = const_cast<iterator>(CS);
722 iterator E = const_cast<iterator>(CE);
723
724 assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds.")(static_cast<void> (0));
725
726 iterator N = S;
727 // Shift all elts down.
728 iterator I = std::move(E, this->end(), S);
729 // Drop the last elts.
730 this->destroy_range(I, this->end());
731 this->set_size(I - this->begin());
732 return(N);
733 }
734
735private:
736 template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) {
737 // Callers ensure that ArgType is derived from T.
738 static_assert(
739 std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>,
740 T>::value,
741 "ArgType must be derived from T!");
742
743 if (I == this->end()) { // Important special case for empty vector.
744 this->push_back(::std::forward<ArgType>(Elt));
745 return this->end()-1;
746 }
747
748 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.")(static_cast<void> (0));
749
750 // Grow if necessary.
751 size_t Index = I - this->begin();
752 std::remove_reference_t<ArgType> *EltPtr =
753 this->reserveForParamAndGetAddress(Elt);
754 I = this->begin() + Index;
755
756 ::new ((void*) this->end()) T(::std::move(this->back()));
757 // Push everything else over.
758 std::move_backward(I, this->end()-1, this->end());
759 this->set_size(this->size() + 1);
760
761 // If we just moved the element we're inserting, be sure to update
762 // the reference (never happens if TakesParamByValue).
763 static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value,
764 "ArgType must be 'T' when taking by value!");
765 if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end()))
766 ++EltPtr;
767
768 *I = ::std::forward<ArgType>(*EltPtr);
769 return I;
770 }
771
772public:
773 iterator insert(iterator I, T &&Elt) {
774 return insert_one_impl(I, this->forward_value_param(std::move(Elt)));
775 }
776
777 iterator insert(iterator I, const T &Elt) {
778 return insert_one_impl(I, this->forward_value_param(Elt));
779 }
780
781 iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) {
782 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
783 size_t InsertElt = I - this->begin();
784
785 if (I == this->end()) { // Important special case for empty vector.
786 append(NumToInsert, Elt);
787 return this->begin()+InsertElt;
788 }
789
790 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.")(static_cast<void> (0));
791
792 // Ensure there is enough space, and get the (maybe updated) address of
793 // Elt.
794 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert);
795
796 // Uninvalidate the iterator.
797 I = this->begin()+InsertElt;
798
799 // If there are more elements between the insertion point and the end of the
800 // range than there are being inserted, we can use a simple approach to
801 // insertion. Since we already reserved space, we know that this won't
802 // reallocate the vector.
803 if (size_t(this->end()-I) >= NumToInsert) {
804 T *OldEnd = this->end();
805 append(std::move_iterator<iterator>(this->end() - NumToInsert),
806 std::move_iterator<iterator>(this->end()));
807
808 // Copy the existing elements that get replaced.
809 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
810
811 // If we just moved the element we're inserting, be sure to update
812 // the reference (never happens if TakesParamByValue).
813 if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
814 EltPtr += NumToInsert;
815
816 std::fill_n(I, NumToInsert, *EltPtr);
817 return I;
818 }
819
820 // Otherwise, we're inserting more elements than exist already, and we're
821 // not inserting at the end.
822
823 // Move over the elements that we're about to overwrite.
824 T *OldEnd = this->end();
825 this->set_size(this->size() + NumToInsert);
826 size_t NumOverwritten = OldEnd-I;
827 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
828
829 // If we just moved the element we're inserting, be sure to update
830 // the reference (never happens if TakesParamByValue).
831 if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
832 EltPtr += NumToInsert;
833
834 // Replace the overwritten part.
835 std::fill_n(I, NumOverwritten, *EltPtr);
836
837 // Insert the non-overwritten middle part.
838 std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr);
839 return I;
840 }
841
842 template <typename ItTy,
843 typename = std::enable_if_t<std::is_convertible<
844 typename std::iterator_traits<ItTy>::iterator_category,
845 std::input_iterator_tag>::value>>
846 iterator insert(iterator I, ItTy From, ItTy To) {
847 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
848 size_t InsertElt = I - this->begin();
849
850 if (I == this->end()) { // Important special case for empty vector.
851 append(From, To);
852 return this->begin()+InsertElt;
853 }
854
855 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.")(static_cast<void> (0));
856
857 // Check that the reserve that follows doesn't invalidate the iterators.
858 this->assertSafeToAddRange(From, To);
859
860 size_t NumToInsert = std::distance(From, To);
861
862 // Ensure there is enough space.
863 reserve(this->size() + NumToInsert);
864
865 // Uninvalidate the iterator.
866 I = this->begin()+InsertElt;
867
868 // If there are more elements between the insertion point and the end of the
869 // range than there are being inserted, we can use a simple approach to
870 // insertion. Since we already reserved space, we know that this won't
871 // reallocate the vector.
872 if (size_t(this->end()-I) >= NumToInsert) {
873 T *OldEnd = this->end();
874 append(std::move_iterator<iterator>(this->end() - NumToInsert),
875 std::move_iterator<iterator>(this->end()));
876
877 // Copy the existing elements that get replaced.
878 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
879
880 std::copy(From, To, I);
881 return I;
882 }
883
884 // Otherwise, we're inserting more elements than exist already, and we're
885 // not inserting at the end.
886
887 // Move over the elements that we're about to overwrite.
888 T *OldEnd = this->end();
889 this->set_size(this->size() + NumToInsert);
890 size_t NumOverwritten = OldEnd-I;
891 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
892
893 // Replace the overwritten part.
894 for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
895 *J = *From;
896 ++J; ++From;
897 }
898
899 // Insert the non-overwritten middle part.
900 this->uninitialized_copy(From, To, OldEnd);
901 return I;
902 }
903
904 void insert(iterator I, std::initializer_list<T> IL) {
905 insert(I, IL.begin(), IL.end());
906 }
907
908 template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
909 if (LLVM_UNLIKELY(this->size() >= this->capacity())__builtin_expect((bool)(this->size() >= this->capacity()), false))
910 return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...);
911
912 ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
913 this->set_size(this->size() + 1);
914 return this->back();
915 }
916
917 SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
918
919 SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
920
921 bool operator==(const SmallVectorImpl &RHS) const {
922 if (this->size() != RHS.size()) return false;
923 return std::equal(this->begin(), this->end(), RHS.begin());
924 }
925 bool operator!=(const SmallVectorImpl &RHS) const {
926 return !(*this == RHS);
927 }
928
929 bool operator<(const SmallVectorImpl &RHS) const {
930 return std::lexicographical_compare(this->begin(), this->end(),
931 RHS.begin(), RHS.end());
932 }
933};
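
Similarly, a brief sketch of the insert path above (illustrative only): inserting a value that lives in the vector's own storage is supported, since insert_one_impl re-derives the element pointer when the shift or a grow would otherwise invalidate it.

#include "llvm/ADT/SmallVector.h"

void selfInsert() {
  llvm::SmallVector<int, 4> V = {1, 2, 3};
  V.insert(V.begin(), V.back()); // source element shifts during the insert
  // V now holds {3, 1, 2, 3}.
}
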
934
935template <typename T>
936void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
937 if (this == &RHS) return;
938
939 // We can only avoid copying elements if neither vector is small.
940 if (!this->isSmall() && !RHS.isSmall()) {
941 std::swap(this->BeginX, RHS.BeginX);
942 std::swap(this->Size, RHS.Size);
943 std::swap(this->Capacity, RHS.Capacity);
944 return;
945 }
946 this->reserve(RHS.size());
947 RHS.reserve(this->size());
948
949 // Swap the shared elements.
950 size_t NumShared = this->size();
951 if (NumShared > RHS.size()) NumShared = RHS.size();
952 for (size_type i = 0; i != NumShared; ++i)
953 std::swap((*this)[i], RHS[i]);
954
955 // Copy over the extra elts.
956 if (this->size() > RHS.size()) {
957 size_t EltDiff = this->size() - RHS.size();
958 this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
959 RHS.set_size(RHS.size() + EltDiff);
960 this->destroy_range(this->begin()+NumShared, this->end());
961 this->set_size(NumShared);
962 } else if (RHS.size() > this->size()) {
963 size_t EltDiff = RHS.size() - this->size();
964 this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
965 this->set_size(this->size() + EltDiff);
966 this->destroy_range(RHS.begin()+NumShared, RHS.end());
967 RHS.set_size(NumShared);
968 }
969}
970
971template <typename T>
972SmallVectorImpl<T> &SmallVectorImpl<T>::
973 operator=(const SmallVectorImpl<T> &RHS) {
974 // Avoid self-assignment.
975 if (this == &RHS) return *this;
976
977 // If we already have sufficient space, assign the common elements, then
978 // destroy any excess.
979 size_t RHSSize = RHS.size();
980 size_t CurSize = this->size();
981 if (CurSize >= RHSSize) {
982 // Assign common elements.
983 iterator NewEnd;
984 if (RHSSize)
985 NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
986 else
987 NewEnd = this->begin();
988
989 // Destroy excess elements.
990 this->destroy_range(NewEnd, this->end());
991
992 // Trim.
993 this->set_size(RHSSize);
994 return *this;
995 }
996
997 // If we have to grow to have enough elements, destroy the current elements.
998 // This allows us to avoid copying them during the grow.
999 // FIXME: don't do this if they're efficiently moveable.
1000 if (this->capacity() < RHSSize) {
1001 // Destroy current elements.
1002 this->clear();
1003 CurSize = 0;
1004 this->grow(RHSSize);
1005 } else if (CurSize) {
1006 // Otherwise, use assignment for the already-constructed elements.
1007 std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
1008 }
1009
1010 // Copy construct the new elements in place.
1011 this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
1012 this->begin()+CurSize);
1013
1014 // Set end.
1015 this->set_size(RHSSize);
1016 return *this;
1017}
1018
1019template <typename T>
1020SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
1021 // Avoid self-assignment.
1022 if (this == &RHS) return *this;
1023
1024 // If the RHS isn't small, clear this vector and then steal its buffer.
1025 if (!RHS.isSmall()) {
1026 this->destroy_range(this->begin(), this->end());
1027 if (!this->isSmall()) free(this->begin());
1028 this->BeginX = RHS.BeginX;
1029 this->Size = RHS.Size;
1030 this->Capacity = RHS.Capacity;
1031 RHS.resetToSmall();
1032 return *this;
1033 }
1034
1035 // If we already have sufficient space, assign the common elements, then
1036 // destroy any excess.
1037 size_t RHSSize = RHS.size();
1038 size_t CurSize = this->size();
1039 if (CurSize >= RHSSize) {
1040 // Assign common elements.
1041 iterator NewEnd = this->begin();
1042 if (RHSSize)
1043 NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
1044
1045 // Destroy excess elements and trim the bounds.
1046 this->destroy_range(NewEnd, this->end());
1047 this->set_size(RHSSize);
1048
1049 // Clear the RHS.
1050 RHS.clear();
1051
1052 return *this;
1053 }
1054
1055 // If we have to grow to have enough elements, destroy the current elements.
1056 // This allows us to avoid copying them during the grow.
1057 // FIXME: this may not actually make any sense if we can efficiently move
1058 // elements.
1059 if (this->capacity() < RHSSize) {
1060 // Destroy current elements.
1061 this->clear();
1062 CurSize = 0;
1063 this->grow(RHSSize);
1064 } else if (CurSize) {
1065 // Otherwise, use assignment for the already-constructed elements.
1066 std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
1067 }
1068
1069 // Move-construct the new elements in place.
1070 this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
1071 this->begin()+CurSize);
1072
1073 // Set end.
1074 this->set_size(RHSSize);
1075
1076 RHS.clear();
1077 return *this;
1078}
1079
1080/// Storage for the SmallVector elements. This is specialized for the N=0 case
1081/// to avoid allocating unnecessary storage.
1082template <typename T, unsigned N>
1083struct SmallVectorStorage {
1084 alignas(T) char InlineElts[N * sizeof(T)];
1085};
1086
1087/// We need the storage to be properly aligned even for small-size of 0 so that
1088/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
1089/// well-defined.
1090template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {};
1091
1092/// Forward declaration of SmallVector so that
1093/// calculateSmallVectorDefaultInlinedElements can reference
1094/// `sizeof(SmallVector<T, 0>)`.
1095template <typename T, unsigned N> class LLVM_GSL_OWNER SmallVector;
1096
1097/// Helper class for calculating the default number of inline elements for
1098/// `SmallVector<T>`.
1099///
1100/// This should be migrated to a constexpr function when our minimum
1101/// compiler support is enough for multi-statement constexpr functions.
1102template <typename T> struct CalculateSmallVectorDefaultInlinedElements {
1103 // Parameter controlling the default number of inlined elements
1104 // for `SmallVector<T>`.
1105 //
1106 // The default number of inlined elements ensures that
1107 // 1. There is at least one inlined element.
1108 // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless
1109 // it contradicts 1.
1110 static constexpr size_t kPreferredSmallVectorSizeof = 64;
1111
1112 // static_assert that sizeof(T) is not "too big".
1113 //
1114 // Because our policy guarantees at least one inlined element, it is possible
1115 // for an arbitrarily large inlined element to allocate an arbitrarily large
1116 // amount of inline storage. We generally consider it an antipattern for a
1117 // SmallVector to allocate an excessive amount of inline storage, so we want
1118 // to call attention to these cases and make sure that users are making an
1119 // intentional decision if they request a lot of inline storage.
1120 //
1121 // We want this assertion to trigger in pathological cases, but otherwise
1122 // not be too easy to hit. To accomplish that, the cutoff is actually somewhat
1123 // larger than kPreferredSmallVectorSizeof (otherwise,
1124 // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that
1125 // pattern seems useful in practice).
1126 //
1127 // One wrinkle is that this assertion is in theory non-portable, since
1128 // sizeof(T) is in general platform-dependent. However, we don't expect this
1129 // to be much of an issue, because most LLVM development happens on 64-bit
1130 // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for
1131 // 32-bit hosts, dodging the issue. The reverse situation, where development
1132 // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a
1133 // 64-bit host, is expected to be very rare.
1134 static_assert(
1135 sizeof(T) <= 256,
1136 "You are trying to use a default number of inlined elements for "
1137 "`SmallVector<T>` but `sizeof(T)` is really big! Please use an "
1138 "explicit number of inlined elements with `SmallVector<T, N>` to make "
1139 "sure you really want that much inline storage.");
1140
1141 // Discount the size of the header itself when calculating the maximum inline
1142 // bytes.
1143 static constexpr size_t PreferredInlineBytes =
1144 kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>);
1145 static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T);
1146 static constexpr size_t value =
1147 NumElementsThatFit == 0 ? 1 : NumElementsThatFit;
1148};
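For a concrete feel of the arithmetic above, here is a worked example under the assumption of a common 64-bit layout where the header of SmallVector<void *, 0> (pointer plus 32-bit size and capacity fields) is 16 bytes; the exact numbers are platform-dependent, as the comment above notes.

#include "llvm/ADT/SmallVector.h"

// Assumed layout: sizeof(SmallVector<void *, 0>) == 16, so
//   PreferredInlineBytes = 64 - 16 = 48
//   NumElementsThatFit   = 48 / sizeof(void *) = 6
// and SmallVector<void *> defaults to 6 inline elements.
static_assert(sizeof(llvm::SmallVector<void *>) <= 64,
              "the default N keeps SmallVector<void *> within the preferred size");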
1149
1150/// This is a 'vector' (really, a variable-sized array), optimized
1151/// for the case when the array is small. It contains some number of elements
1152/// in-place, which allows it to avoid heap allocation when the actual number of
1153/// elements is below that threshold. This allows normal "small" cases to be
1154/// fast without losing generality for large inputs.
1155///
1156/// \note
1157/// In the absence of a well-motivated choice for the number of inlined
1158/// elements \p N, it is recommended to use \c SmallVector<T> (that is,
1159/// omitting the \p N). This will choose a default number of inlined elements
1160/// reasonable for allocation on the stack (for example, trying to keep \c
1161/// sizeof(SmallVector<T>) around 64 bytes).
1162///
1163/// \warning This does not attempt to be exception safe.
1164///
1165/// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h
1166template <typename T,
1167 unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value>
1168class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
1169 SmallVectorStorage<T, N> {
1170public:
1171 SmallVector() : SmallVectorImpl<T>(N) {}
1172
1173 ~SmallVector() {
1174 // Destroy the constructed elements in the vector.
1175 this->destroy_range(this->begin(), this->end());
1176 }
1177
1178 explicit SmallVector(size_t Size, const T &Value = T())
1179 : SmallVectorImpl<T>(N) {
1180 this->assign(Size, Value);
1181 }
1182
1183 template <typename ItTy,
1184 typename = std::enable_if_t<std::is_convertible<
1185 typename std::iterator_traits<ItTy>::iterator_category,
1186 std::input_iterator_tag>::value>>
1187 SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
1188 this->append(S, E);
1189 }
1190
1191 template <typename RangeTy>
1192 explicit SmallVector(const iterator_range<RangeTy> &R)
1193 : SmallVectorImpl<T>(N) {
1194 this->append(R.begin(), R.end());
1195 }
1196
1197 SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
1198 this->assign(IL);
1199 }
1200
1201 SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
1202 if (!RHS.empty())
1203 SmallVectorImpl<T>::operator=(RHS);
1204 }
1205
1206 SmallVector &operator=(const SmallVector &RHS) {
1207 SmallVectorImpl<T>::operator=(RHS);
1208 return *this;
1209 }
1210
1211 SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
1212 if (!RHS.empty())
1213 SmallVectorImpl<T>::operator=(::std::move(RHS));
1214 }
1215
1216 SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
1217 if (!RHS.empty())
1218 SmallVectorImpl<T>::operator=(::std::move(RHS));
1219 }
1220
1221 SmallVector &operator=(SmallVector &&RHS) {
1222 SmallVectorImpl<T>::operator=(::std::move(RHS));
1223 return *this;
1224 }
1225
1226 SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
1227 SmallVectorImpl<T>::operator=(::std::move(RHS));
1228 return *this;
1229 }
1230
1231 SmallVector &operator=(std::initializer_list<T> IL) {
1232 this->assign(IL);
1233 return *this;
1234 }
1235};
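A minimal usage sketch (the helper collectSquares is hypothetical, not part of the header): with the default N, the first handful of elements live inside the object itself, and pushing past the inline capacity transparently falls back to a heap allocation.

#include "llvm/ADT/SmallVector.h"

llvm::SmallVector<unsigned> collectSquares(unsigned Count) {
  llvm::SmallVector<unsigned> Squares;  // default inline capacity, no heap yet
  for (unsigned I = 0; I != Count; ++I)
    Squares.push_back(I * I);           // spills to the heap only if needed
  return Squares;
}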
1236
1237template <typename T, unsigned N>
1238inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
1239 return X.capacity_in_bytes();
1240}
1241
1242/// Given a range of type R, iterate the entire range and return a
1243/// SmallVector containing the elements of the range. This is useful, for
1244/// example, when you want to iterate a range and then sort the results.
1245template <unsigned Size, typename R>
1246SmallVector<typename std::remove_const<typename std::remove_reference<
1247 decltype(*std::begin(std::declval<R &>()))>::type>::type,
1248 Size>
1249to_vector(R &&Range) {
1250 return {std::begin(Range), std::end(Range)};
1251}
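For instance, one might materialize a lazily computed range into a SmallVector precisely so it can be sorted; a small sketch along those lines follows (the helper sortedCopy is hypothetical, and the inline size of 8 is an arbitrary illustrative choice).

#include "llvm/ADT/SmallVector.h"
#include <algorithm>

template <typename RangeT> auto sortedCopy(RangeT &&Range) {
  auto Vec = llvm::to_vector<8>(Range);  // copy the range's elements out
  std::sort(Vec.begin(), Vec.end());     // now they can be reordered freely
  return Vec;
}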
1252
1253} // end namespace llvm
1254
1255namespace std {
1256
1257 /// Implement std::swap in terms of SmallVector swap.
1258 template<typename T>
1259 inline void
1260 swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
1261 LHS.swap(RHS);
1262 }
1263
1264 /// Implement std::swap in terms of SmallVector swap.
1265 template<typename T, unsigned N>
1266 inline void
1267 swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
1268 LHS.swap(RHS);
1269 }
1270
1271} // end namespace std
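A short sketch of why these overloads matter (the function swapSketch is hypothetical, not part of the header): generic code that follows the usual "using std::swap; swap(a, b);" idiom ends up dispatching to SmallVector's own member swap.

#include "llvm/ADT/SmallVector.h"
#include <utility>

void swapSketch(llvm::SmallVector<int, 8> &A, llvm::SmallVector<int, 8> &B) {
  using std::swap;
  swap(A, B);   // picks the overload above, i.e. A.swap(B)
}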
1272
1273#endif // LLVM_ADT_SMALLVECTOR_H