Bug Summary

File: tools/polly/lib/Analysis/ScopInfo.cpp
Location: line 1420, column 5
Description: Value stored to 'Ty' is never read
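
The report flags a dead store: the value assigned to the local variable 'Ty'
at the reported line is overwritten, or the function returns, before that
value is ever read. A minimal sketch of the pattern, using hypothetical
helper names (this is not the exact statement at line 1420):

  Type *Ty = getSomeType();  // value stored to 'Ty' is never read ...
  Ty = getOtherType();       // ... because it is overwritten here
  use(Ty);                   // only the second value is ever observed

Such stores are harmless at run time but usually indicate leftover or
misplaced logic.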

Annotated Source Code

1//===--------- ScopInfo.cpp - Create Scops from LLVM IR ------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Create a polyhedral description for a static control flow region.
11//
12// The pass creates a polyhedral description of the Scops detected by the Scop
13// detection derived from their LLVM-IR code.
14//
15// This representation is shared among several tools in the polyhedral
16// community, which are e.g. Cloog, Pluto, Loopo, Graphite.
17//
18//===----------------------------------------------------------------------===//
19
20#include "polly/ScopInfo.h"
21#include "polly/LinkAllPasses.h"
22#include "polly/Options.h"
23#include "polly/Support/GICHelper.h"
24#include "polly/Support/SCEVValidator.h"
25#include "polly/Support/ScopHelper.h"
26#include "llvm/ADT/DepthFirstIterator.h"
27#include "llvm/ADT/MapVector.h"
28#include "llvm/ADT/PostOrderIterator.h"
29#include "llvm/ADT/STLExtras.h"
30#include "llvm/ADT/SetVector.h"
31#include "llvm/ADT/Statistic.h"
32#include "llvm/ADT/StringExtras.h"
33#include "llvm/Analysis/AliasAnalysis.h"
34#include "llvm/Analysis/AssumptionCache.h"
35#include "llvm/Analysis/Loads.h"
36#include "llvm/Analysis/LoopInfo.h"
37#include "llvm/Analysis/LoopIterator.h"
38#include "llvm/Analysis/RegionIterator.h"
39#include "llvm/Analysis/ScalarEvolutionExpressions.h"
40#include "llvm/IR/DiagnosticInfo.h"
41#include "llvm/Support/Debug.h"
42#include "isl/aff.h"
43#include "isl/constraint.h"
44#include "isl/local_space.h"
45#include "isl/map.h"
46#include "isl/options.h"
47#include "isl/printer.h"
48#include "isl/schedule.h"
49#include "isl/schedule_node.h"
50#include "isl/set.h"
51#include "isl/union_map.h"
52#include "isl/union_set.h"
53#include "isl/val.h"
54#include <sstream>
55#include <string>
56#include <vector>
57
58using namespace llvm;
59using namespace polly;
60
61#define DEBUG_TYPE "polly-scops"
62
63STATISTIC(ScopFound, "Number of valid Scops");
64STATISTIC(RichScopFound, "Number of Scops containing a loop");
65
66// The maximal number of basic sets we allow to be created during domain
67// construction. More complex scops will result in very high compile time
68// and are also unlikely to result in good code.
69static int const MaxDisjunctionsInDomain = 20;
70
71static cl::opt<bool> PollyRemarksMinimal(
72 "polly-remarks-minimal",
73 cl::desc("Do not emit remarks about assumptions that are known"),
74 cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::cat(PollyCategory));
75
76static cl::opt<bool> ModelReadOnlyScalars(
77 "polly-analyze-read-only-scalars",
78 cl::desc("Model read-only scalar values in the scop description"),
79 cl::Hidden, cl::ZeroOrMore, cl::init(true), cl::cat(PollyCategory));
80
81// Multiplicative reductions can be disabled separately as these kind of
82// operations can overflow easily. Additive reductions and bit operations
83// are in contrast pretty stable.
84static cl::opt<bool> DisableMultiplicativeReductions(
85 "polly-disable-multiplicative-reductions",
86 cl::desc("Disable multiplicative reductions"), cl::Hidden, cl::ZeroOrMore,
87 cl::init(false), cl::cat(PollyCategory));
88
89static cl::opt<unsigned> RunTimeChecksMaxParameters(
90 "polly-rtc-max-parameters",
91 cl::desc("The maximal number of parameters allowed in RTCs."), cl::Hidden,
92 cl::ZeroOrMore, cl::init(8), cl::cat(PollyCategory));
93
94static cl::opt<unsigned> RunTimeChecksMaxArraysPerGroup(
95 "polly-rtc-max-arrays-per-group",
96 cl::desc("The maximal number of arrays to compare in each alias group."),
97 cl::Hidden, cl::ZeroOrMore, cl::init(20), cl::cat(PollyCategory));
98static cl::opt<std::string> UserContextStr(
99 "polly-context", cl::value_desc("isl parameter set"),
100 cl::desc("Provide additional constraints on the context parameters"),
101 cl::init(""), cl::cat(PollyCategory));
102
103static cl::opt<bool> DetectReductions("polly-detect-reductions",
104 cl::desc("Detect and exploit reductions"),
105 cl::Hidden, cl::ZeroOrMore,
106 cl::init(true), cl::cat(PollyCategory));
107
108static cl::opt<bool>
109 IslOnErrorAbort("polly-on-isl-error-abort",
110 cl::desc("Abort if an isl error is encountered"),
111 cl::init(true), cl::cat(PollyCategory));
112
113//===----------------------------------------------------------------------===//
114
115// Create a sequence of two schedules. Either argument may be null and is
116// interpreted as the empty schedule. Can also return null if both schedules are
117// empty.
118static __isl_give isl_schedule *
119combineInSequence(__isl_take isl_schedule *Prev,
120 __isl_take isl_schedule *Succ) {
121 if (!Prev)
122 return Succ;
123 if (!Succ)
124 return Prev;
125
126 return isl_schedule_sequence(Prev, Succ);
127}
128
129static __isl_give isl_set *addRangeBoundsToSet(__isl_take isl_set *S,
130 const ConstantRange &Range,
131 int dim,
132 enum isl_dim_type type) {
133 isl_val *V;
134 isl_ctx *ctx = isl_set_get_ctx(S);
135
136 bool useLowerUpperBound = Range.isSignWrappedSet() && !Range.isFullSet();
137 const auto LB = useLowerUpperBound ? Range.getLower() : Range.getSignedMin();
138 V = isl_valFromAPInt(ctx, LB, true);
139 isl_set *SLB = isl_set_lower_bound_val(isl_set_copy(S), type, dim, V);
140
141 const auto UB = useLowerUpperBound ? Range.getUpper() : Range.getSignedMax();
142 V = isl_valFromAPInt(ctx, UB, true);
143 if (useLowerUpperBound)
144 V = isl_val_sub_ui(V, 1);
145 isl_set *SUB = isl_set_upper_bound_val(S, type, dim, V);
146
147 if (useLowerUpperBound)
148 return isl_set_union(SLB, SUB);
149 else
150 return isl_set_intersect(SLB, SUB);
151}
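// Worked example (sketch, not in the original source): for a sign-wrapped
// i8 range [100, -100), i.e. the values {100, ..., 127, -128, ..., -101},
// we get LB = 100 and UB - 1 = -101, so the result is the union
// { x : x >= 100 } or { x : x <= -101 }. For a regular range such as
// [0, 16) the two bounds intersect to { x : 0 <= x <= 15 }.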
152
153static const ScopArrayInfo *identifyBasePtrOriginSAI(Scop *S, Value *BasePtr) {
154 LoadInst *BasePtrLI = dyn_cast<LoadInst>(BasePtr);
155 if (!BasePtrLI)
156 return nullptr;
157
158 if (!S->getRegion().contains(BasePtrLI))
159 return nullptr;
160
161 ScalarEvolution &SE = *S->getSE();
162
163 auto *OriginBaseSCEV =
164 SE.getPointerBase(SE.getSCEV(BasePtrLI->getPointerOperand()));
165 if (!OriginBaseSCEV)
166 return nullptr;
167
168 auto *OriginBaseSCEVUnknown = dyn_cast<SCEVUnknown>(OriginBaseSCEV);
169 if (!OriginBaseSCEVUnknown)
170 return nullptr;
171
172 return S->getScopArrayInfo(OriginBaseSCEVUnknown->getValue(),
173 ScopArrayInfo::MK_Array);
174}
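// Illustrative case (sketch): for 'double **A', an access A[i][j] first
// loads a row pointer 'double *Row = A[i]' inside the scop and then
// dereferences Row. The load's pointer operand is based on A, so the array
// behind Row gets the ScopArrayInfo of A recorded as its base-pointer origin.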
175
176ScopArrayInfo::ScopArrayInfo(Value *BasePtr, Type *ElementType, isl_ctx *Ctx,
177 ArrayRef<const SCEV *> Sizes, enum MemoryKind Kind,
178 const DataLayout &DL, Scop *S)
179 : BasePtr(BasePtr), ElementType(ElementType), Kind(Kind), DL(DL), S(*S) {
180 std::string BasePtrName =
181 getIslCompatibleName("MemRef_", BasePtr, Kind == MK_PHI ? "__phi" : "");
182 Id = isl_id_alloc(Ctx, BasePtrName.c_str(), this);
183
184 updateSizes(Sizes);
185 BasePtrOriginSAI = identifyBasePtrOriginSAI(S, BasePtr);
186 if (BasePtrOriginSAI)
187 const_cast<ScopArrayInfo *>(BasePtrOriginSAI)->addDerivedSAI(this);
188}
189
190__isl_give isl_space *ScopArrayInfo::getSpace() const {
191 auto *Space =
192 isl_space_set_alloc(isl_id_get_ctx(Id), 0, getNumberOfDimensions());
193 Space = isl_space_set_tuple_id(Space, isl_dim_set, isl_id_copy(Id));
194 return Space;
195}
196
197void ScopArrayInfo::updateElementType(Type *NewElementType) {
198 if (NewElementType == ElementType)
199 return;
200
201 auto OldElementSize = DL.getTypeAllocSizeInBits(ElementType);
202 auto NewElementSize = DL.getTypeAllocSizeInBits(NewElementType);
203
204 if (NewElementSize == OldElementSize || NewElementSize == 0)
205 return;
206
207 if (NewElementSize % OldElementSize == 0 && NewElementSize < OldElementSize) {
208 ElementType = NewElementType;
209 } else {
210 auto GCD = GreatestCommonDivisor64(NewElementSize, OldElementSize);
211 ElementType = IntegerType::get(ElementType->getContext(), GCD);
212 }
213}
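// Worked example (sketch): if the array was first seen with i32 accesses
// (32 bits) and a later access uses i16 (16 bits), the else-branch picks
// GreatestCommonDivisor64(16, 32) == 16, so the canonical element type is
// lowered to i16. Mixing i32 with i64 likewise ends up at the GCD width i32.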
214
215bool ScopArrayInfo::updateSizes(ArrayRef<const SCEV *> NewSizes) {
216 int SharedDims = std::min(NewSizes.size(), DimensionSizes.size());
217 int ExtraDimsNew = NewSizes.size() - SharedDims;
218 int ExtraDimsOld = DimensionSizes.size() - SharedDims;
219 for (int i = 0; i < SharedDims; i++)
220 if (NewSizes[i + ExtraDimsNew] != DimensionSizes[i + ExtraDimsOld])
221 return false;
222
223 if (DimensionSizes.size() >= NewSizes.size())
224 return true;
225
226 DimensionSizes.clear();
227 DimensionSizes.insert(DimensionSizes.begin(), NewSizes.begin(),
228 NewSizes.end());
229 for (isl_pw_aff *Size : DimensionSizesPw)
230 isl_pw_aff_free(Size);
231 DimensionSizesPw.clear();
232 for (const SCEV *Expr : DimensionSizes) {
233 isl_pw_aff *Size = S.getPwAffOnly(Expr);
234 DimensionSizesPw.push_back(Size);
235 }
236 return true;
237}
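// Worked example (sketch): existing sizes {200} are compatible with new
// sizes {100, 200}: only the shared innermost dimension is compared, and it
// matches, so the stored sizes grow to the higher-dimensional view. New
// sizes {100, 300} would mismatch in the shared dimension and return false.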
238
239ScopArrayInfo::~ScopArrayInfo() {
240 isl_id_free(Id);
241 for (isl_pw_aff *Size : DimensionSizesPw)
242 isl_pw_aff_free(Size);
243}
244
245std::string ScopArrayInfo::getName() const { return isl_id_get_name(Id); }
246
247int ScopArrayInfo::getElemSizeInBytes() const {
248 return DL.getTypeAllocSize(ElementType);
249}
250
251__isl_give isl_id *ScopArrayInfo::getBasePtrId() const {
252 return isl_id_copy(Id);
253}
254
255void ScopArrayInfo::dump() const { print(errs()); }
256
257void ScopArrayInfo::print(raw_ostream &OS, bool SizeAsPwAff) const {
258 OS.indent(8) << *getElementType() << " " << getName();
259 if (getNumberOfDimensions() > 0)
260 OS << "[*]";
261 for (unsigned u = 1; u < getNumberOfDimensions(); u++) {
262 OS << "[";
263
264 if (SizeAsPwAff) {
265 auto *Size = getDimensionSizePw(u);
266 OS << " " << Size << " ";
267 isl_pw_aff_free(Size);
268 } else {
269 OS << *getDimensionSize(u);
270 }
271
272 OS << "]";
273 }
274
275 OS << ";";
276
277 if (BasePtrOriginSAI)
278 OS << " [BasePtrOrigin: " << BasePtrOriginSAI->getName() << "]";
279
280 OS << " // Element size " << getElemSizeInBytes() << "\n";
281}
282
283const ScopArrayInfo *
284ScopArrayInfo::getFromAccessFunction(__isl_keep isl_pw_multi_aff *PMA) {
285 isl_id *Id = isl_pw_multi_aff_get_tuple_id(PMA, isl_dim_out);
286  assert(Id && "Output dimension didn't have an ID");
287 return getFromId(Id);
288}
289
290const ScopArrayInfo *ScopArrayInfo::getFromId(isl_id *Id) {
291 void *User = isl_id_get_user(Id);
292 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
293 isl_id_free(Id);
294 return SAI;
295}
296
297void MemoryAccess::wrapConstantDimensions() {
298 auto *SAI = getScopArrayInfo();
299 auto *ArraySpace = SAI->getSpace();
300 auto *Ctx = isl_space_get_ctx(ArraySpace);
301 unsigned DimsArray = SAI->getNumberOfDimensions();
302
303 auto *DivModAff = isl_multi_aff_identity(isl_space_map_from_domain_and_range(
304 isl_space_copy(ArraySpace), isl_space_copy(ArraySpace)));
305 auto *LArraySpace = isl_local_space_from_space(ArraySpace);
306
307 // Begin with last dimension, to iteratively carry into higher dimensions.
308 for (int i = DimsArray - 1; i > 0; i--) {
309 auto *DimSize = SAI->getDimensionSize(i);
310 auto *DimSizeCst = dyn_cast<SCEVConstant>(DimSize);
311
312 // This transformation is not applicable to dimensions with dynamic size.
313 if (!DimSizeCst)
314 continue;
315
316 auto *DimSizeVal = isl_valFromAPInt(Ctx, DimSizeCst->getAPInt(), false);
317 auto *Var = isl_aff_var_on_domain(isl_local_space_copy(LArraySpace),
318 isl_dim_set, i);
319 auto *PrevVar = isl_aff_var_on_domain(isl_local_space_copy(LArraySpace),
320 isl_dim_set, i - 1);
321
322 // Compute: index % size
323 // Modulo must apply in the divide of the previous iteration, if any.
324 auto *Modulo = isl_aff_copy(Var);
325 Modulo = isl_aff_mod_val(Modulo, isl_val_copy(DimSizeVal));
326 Modulo = isl_aff_pullback_multi_aff(Modulo, isl_multi_aff_copy(DivModAff));
327
328 // Compute: floor(index / size)
329 auto *Divide = Var;
330 Divide = isl_aff_div(
331 Divide,
332 isl_aff_val_on_domain(isl_local_space_copy(LArraySpace), DimSizeVal));
333 Divide = isl_aff_floor(Divide);
334 Divide = isl_aff_add(Divide, PrevVar);
335 Divide = isl_aff_pullback_multi_aff(Divide, isl_multi_aff_copy(DivModAff));
336
337 // Apply Modulo and Divide.
338 DivModAff = isl_multi_aff_set_aff(DivModAff, i, Modulo);
339 DivModAff = isl_multi_aff_set_aff(DivModAff, i - 1, Divide);
340 }
341
342 // Apply all modulo/divides on the accesses.
343 AccessRelation =
344 isl_map_apply_range(AccessRelation, isl_map_from_multi_aff(DivModAff));
345 AccessRelation = isl_map_detect_equalities(AccessRelation);
346 isl_local_space_free(LArraySpace);
347}
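// Worked example (sketch): with constant inner size 4, i.e. shape A[*][4],
// an index pair [0][7] becomes [0 + floor(7 / 4)][7 mod 4] = [1][3]; the
// inner dimension keeps index % size and the quotient is carried outward.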
348
349void MemoryAccess::updateDimensionality() {
350 auto *SAI = getScopArrayInfo();
351 auto *ArraySpace = SAI->getSpace();
352 auto *AccessSpace = isl_space_range(isl_map_get_space(AccessRelation));
353 auto *Ctx = isl_space_get_ctx(AccessSpace);
354
355 auto DimsArray = isl_space_dim(ArraySpace, isl_dim_set);
356 auto DimsAccess = isl_space_dim(AccessSpace, isl_dim_set);
357 auto DimsMissing = DimsArray - DimsAccess;
358
359 auto *BB = getStatement()->getEntryBlock();
360 auto &DL = BB->getModule()->getDataLayout();
361 unsigned ArrayElemSize = SAI->getElemSizeInBytes();
362 unsigned ElemBytes = DL.getTypeAllocSize(getElementType());
363
364 auto *Map = isl_map_from_domain_and_range(
365 isl_set_universe(AccessSpace),
366 isl_set_universe(isl_space_copy(ArraySpace)));
367
368 for (unsigned i = 0; i < DimsMissing; i++)
369 Map = isl_map_fix_si(Map, isl_dim_out, i, 0);
370
371 for (unsigned i = DimsMissing; i < DimsArray; i++)
372 Map = isl_map_equate(Map, isl_dim_in, i - DimsMissing, isl_dim_out, i);
373
374 AccessRelation = isl_map_apply_range(AccessRelation, Map);
375
376  // For non-delinearized arrays, divide the access function of the last
377 // subscript by the size of the elements in the array.
378 //
379 // A stride one array access in C expressed as A[i] is expressed in
380 // LLVM-IR as something like A[i * elementsize]. This hides the fact that
381 // two subsequent values of 'i' index two values that are stored next to
382 // each other in memory. By this division we make this characteristic
383 // obvious again. If the base pointer was accessed with offsets not divisible
384  // by the access's element size, we will have chosen a smaller ArrayElemSize
385 // that divides the offsets of all accesses to this base pointer.
386 if (DimsAccess == 1) {
387 isl_val *V = isl_val_int_from_si(Ctx, ArrayElemSize);
388 AccessRelation = isl_map_floordiv_val(AccessRelation, V);
389 }
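  // Worked example (sketch): for 'float *A' accessed as A[i], the single
  // modeled subscript is the offset 4 * i, so the relation
  // { Stmt[i] -> MemRef_A[4i] } is divided by ArrayElemSize == 4, giving
  // { Stmt[i] -> MemRef_A[i] } with the stride-one property visible again.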
390
391 // We currently do this only if we added at least one dimension, which means
392 // some dimension's indices have not been specified, an indicator that some
393 // index values have been added together.
394  // TODO: Investigate general usefulness; the effect on unit tests is to make
395  // index expressions more complicated.
396 if (DimsMissing)
397 wrapConstantDimensions();
398
399 if (!isAffine())
400 computeBoundsOnAccessRelation(ArrayElemSize);
401
402 // Introduce multi-element accesses in case the type loaded by this memory
403 // access is larger than the canonical element type of the array.
404 //
405 // An access ((float *)A)[i] to an array char *A is modeled as
406  // {[i] -> A[o] : 4 i <= o <= 4 i + 3 }
407 if (ElemBytes > ArrayElemSize) {
408    assert(ElemBytes % ArrayElemSize == 0 &&
409           "Loaded element size should be multiple of canonical element size");
410 auto *Map = isl_map_from_domain_and_range(
411 isl_set_universe(isl_space_copy(ArraySpace)),
412 isl_set_universe(isl_space_copy(ArraySpace)));
413 for (unsigned i = 0; i < DimsArray - 1; i++)
414 Map = isl_map_equate(Map, isl_dim_in, i, isl_dim_out, i);
415
416 isl_constraint *C;
417 isl_local_space *LS;
418
419 LS = isl_local_space_from_space(isl_map_get_space(Map));
420 int Num = ElemBytes / getScopArrayInfo()->getElemSizeInBytes();
421
422 C = isl_constraint_alloc_inequality(isl_local_space_copy(LS));
423 C = isl_constraint_set_constant_val(C, isl_val_int_from_si(Ctx, Num - 1));
424 C = isl_constraint_set_coefficient_si(C, isl_dim_in, DimsArray - 1, 1);
425 C = isl_constraint_set_coefficient_si(C, isl_dim_out, DimsArray - 1, -1);
426 Map = isl_map_add_constraint(Map, C);
427
428 C = isl_constraint_alloc_inequality(LS);
429 C = isl_constraint_set_coefficient_si(C, isl_dim_in, DimsArray - 1, -1);
430 C = isl_constraint_set_coefficient_si(C, isl_dim_out, DimsArray - 1, 1);
431 C = isl_constraint_set_constant_val(C, isl_val_int_from_si(Ctx, 0));
432 Map = isl_map_add_constraint(Map, C);
433 AccessRelation = isl_map_apply_range(AccessRelation, Map);
434 }
435
436 isl_space_free(ArraySpace);
437
438 assumeNoOutOfBound();
439}
440
441const std::string
442MemoryAccess::getReductionOperatorStr(MemoryAccess::ReductionType RT) {
443 switch (RT) {
444 case MemoryAccess::RT_NONE:
445    llvm_unreachable("Requested a reduction operator string for a memory "
446                     "access which isn't a reduction");
447 case MemoryAccess::RT_ADD:
448 return "+";
449 case MemoryAccess::RT_MUL:
450 return "*";
451 case MemoryAccess::RT_BOR:
452 return "|";
453 case MemoryAccess::RT_BXOR:
454 return "^";
455 case MemoryAccess::RT_BAND:
456 return "&";
457 }
458  llvm_unreachable("Unknown reduction type");
459 return "";
460}
461
462/// @brief Return the reduction type for a given binary operator
463static MemoryAccess::ReductionType getReductionType(const BinaryOperator *BinOp,
464 const Instruction *Load) {
465 if (!BinOp)
466 return MemoryAccess::RT_NONE;
467 switch (BinOp->getOpcode()) {
468 case Instruction::FAdd:
469 if (!BinOp->hasUnsafeAlgebra())
470 return MemoryAccess::RT_NONE;
471 // Fall through
472 case Instruction::Add:
473 return MemoryAccess::RT_ADD;
474 case Instruction::Or:
475 return MemoryAccess::RT_BOR;
476 case Instruction::Xor:
477 return MemoryAccess::RT_BXOR;
478 case Instruction::And:
479 return MemoryAccess::RT_BAND;
480 case Instruction::FMul:
481 if (!BinOp->hasUnsafeAlgebra())
482 return MemoryAccess::RT_NONE;
483 // Fall through
484 case Instruction::Mul:
485 if (DisableMultiplicativeReductions)
486 return MemoryAccess::RT_NONE;
487 return MemoryAccess::RT_MUL;
488 default:
489 return MemoryAccess::RT_NONE;
490 }
491}
492
493/// @brief Derive the individual index expressions from a GEP instruction
494///
495/// This function optimistically assumes the GEP references into a fixed size
496/// array. If this is actually true, this function returns a list of array
497/// subscript expressions as SCEV as well as a list of integers describing
498/// the size of the individual array dimensions. Both lists have either equal
499/// the size of the individual array dimensions. Both lists either have equal
500/// length, or the size list is one element shorter in case there is no known
501///
502/// @param GEP The GetElementPtr instruction to analyze.
503///
504/// @return A tuple with the subscript expressions and the dimension sizes.
505static std::tuple<std::vector<const SCEV *>, std::vector<int>>
506getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
507 std::vector<const SCEV *> Subscripts;
508 std::vector<int> Sizes;
509
510 Type *Ty = GEP->getPointerOperandType();
511
512 bool DroppedFirstDim = false;
513
514 for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
515
516 const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));
517
518 if (i == 1) {
519 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
520 Ty = PtrTy->getElementType();
521 } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
522 Ty = ArrayTy->getElementType();
523 } else {
524 Subscripts.clear();
525 Sizes.clear();
526 break;
527 }
528 if (auto *Const = dyn_cast<SCEVConstant>(Expr))
529 if (Const->getValue()->isZero()) {
530 DroppedFirstDim = true;
531 continue;
532 }
533 Subscripts.push_back(Expr);
534 continue;
535 }
536
537 auto *ArrayTy = dyn_cast<ArrayType>(Ty);
538 if (!ArrayTy) {
539 Subscripts.clear();
540 Sizes.clear();
541 break;
542 }
543
544 Subscripts.push_back(Expr);
545 if (!(DroppedFirstDim && i == 2))
546 Sizes.push_back(ArrayTy->getNumElements());
547
548 Ty = ArrayTy->getElementType();
549 }
550
551 return std::make_tuple(Subscripts, Sizes);
552}
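// Worked example (hypothetical IR): for a GEP on '[100 x [200 x double]]* %A'
// with indices (0, %i, %j), the leading zero drops the outermost dimension,
// yielding Subscripts = {%i, %j} and Sizes = {200}: one size fewer than
// subscripts, because no extent is needed for the outermost dimension.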
553
554MemoryAccess::~MemoryAccess() {
555 isl_id_free(Id);
556 isl_set_free(InvalidDomain);
557 isl_map_free(AccessRelation);
558 isl_map_free(NewAccessRelation);
559}
560
561const ScopArrayInfo *MemoryAccess::getScopArrayInfo() const {
562 isl_id *ArrayId = getArrayId();
563 void *User = isl_id_get_user(ArrayId);
564 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
565 isl_id_free(ArrayId);
566 return SAI;
567}
568
569__isl_give isl_id *MemoryAccess::getArrayId() const {
570 return isl_map_get_tuple_id(AccessRelation, isl_dim_out);
571}
572
573__isl_give isl_map *MemoryAccess::getAddressFunction() const {
574 return isl_map_lexmin(getAccessRelation());
575}
576
577__isl_give isl_pw_multi_aff *MemoryAccess::applyScheduleToAccessRelation(
578 __isl_take isl_union_map *USchedule) const {
579 isl_map *Schedule, *ScheduledAccRel;
580 isl_union_set *UDomain;
581
582 UDomain = isl_union_set_from_set(getStatement()->getDomain());
583 USchedule = isl_union_map_intersect_domain(USchedule, UDomain);
584 Schedule = isl_map_from_union_map(USchedule);
585 ScheduledAccRel = isl_map_apply_domain(getAddressFunction(), Schedule);
586 return isl_pw_multi_aff_from_map(ScheduledAccRel);
587}
588
589__isl_give isl_map *MemoryAccess::getOriginalAccessRelation() const {
590 return isl_map_copy(AccessRelation);
591}
592
593std::string MemoryAccess::getOriginalAccessRelationStr() const {
594 return stringFromIslObj(AccessRelation);
595}
596
597__isl_give isl_space *MemoryAccess::getOriginalAccessRelationSpace() const {
598 return isl_map_get_space(AccessRelation);
599}
600
601__isl_give isl_map *MemoryAccess::getNewAccessRelation() const {
602 return isl_map_copy(NewAccessRelation);
603}
604
605std::string MemoryAccess::getNewAccessRelationStr() const {
606 return stringFromIslObj(NewAccessRelation);
607}
608
609__isl_give isl_basic_map *
610MemoryAccess::createBasicAccessMap(ScopStmt *Statement) {
611 isl_space *Space = isl_space_set_alloc(Statement->getIslCtx(), 0, 1);
612 Space = isl_space_align_params(Space, Statement->getDomainSpace());
613
614 return isl_basic_map_from_domain_and_range(
615 isl_basic_set_universe(Statement->getDomainSpace()),
616 isl_basic_set_universe(Space));
617}
618
619// Formalize no out-of-bound access assumption
620//
621// When delinearizing array accesses we optimistically assume that the
622// delinearized accesses do not access out of bound locations (the subscript
623// expression of each array evaluates, for each executed statement instance,
624// to a value that is non-negative and strictly smaller than the
625// size of the corresponding dimension). The only exception is the outermost
626// dimension for which we do not need to assume any upper bound. At this point
627// we formalize this assumption to ensure that at code generation time the
628// relevant run-time checks can be generated.
629//
630// To find the set of constraints necessary to avoid out of bound accesses, we
631// first build the set of data locations that are not within array bounds. We
632// then apply the reverse access relation to obtain the set of iterations that
633// may contain invalid accesses and reduce this set of iterations to the ones
634// that are actually executed by intersecting them with the domain of the
635// statement. If we now project out all loop dimensions, we obtain a set of
636// parameters that may cause statement instances to be executed that may
637// possibly yield out of bound memory accesses. The complement of these
638// constraints is the set of constraints that needs to be assumed to ensure such
639// statement instances are never executed.
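// Worked example (sketch): for { Stmt[i, j] -> MemRef_A[i, j + 1] } with
// domain { Stmt[i, j] : 0 <= i < N and 0 <= j < M } and inner dimension
// size 100, the invalid locations are those with j + 1 >= 100. Pulled back
// through the access relation and intersected with the domain, out-of-bound
// instances exist exactly when N >= 1 and M >= 100, so the assumption
// recorded is, up to the empty-domain case, M <= 99.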
640void MemoryAccess::assumeNoOutOfBound() {
641 auto *SAI = getScopArrayInfo();
642 isl_space *Space = isl_space_range(getOriginalAccessRelationSpace());
643 isl_set *Outside = isl_set_empty(isl_space_copy(Space));
644 for (int i = 1, Size = isl_space_dim(Space, isl_dim_set); i < Size; ++i) {
645 isl_local_space *LS = isl_local_space_from_space(isl_space_copy(Space));
646 isl_pw_aff *Var =
647 isl_pw_aff_var_on_domain(isl_local_space_copy(LS), isl_dim_set, i);
648 isl_pw_aff *Zero = isl_pw_aff_zero_on_domain(LS);
649
650 isl_set *DimOutside;
651
652 DimOutside = isl_pw_aff_lt_set(isl_pw_aff_copy(Var), Zero);
653 isl_pw_aff *SizeE = SAI->getDimensionSizePw(i);
654 SizeE = isl_pw_aff_add_dims(SizeE, isl_dim_in,
655 isl_space_dim(Space, isl_dim_set));
656 SizeE = isl_pw_aff_set_tuple_id(SizeE, isl_dim_in,
657 isl_space_get_tuple_id(Space, isl_dim_set));
658
659 DimOutside = isl_set_union(DimOutside, isl_pw_aff_le_set(SizeE, Var));
660
661 Outside = isl_set_union(Outside, DimOutside);
662 }
663
664 Outside = isl_set_apply(Outside, isl_map_reverse(getAccessRelation()));
665 Outside = isl_set_intersect(Outside, Statement->getDomain());
666 Outside = isl_set_params(Outside);
667
668 // Remove divs to avoid the construction of overly complicated assumptions.
669 // Doing so increases the set of parameter combinations that are assumed to
670  // not appear. This is always safe, but may make the resulting run-time check
671 // bail out more often than strictly necessary.
672 Outside = isl_set_remove_divs(Outside);
673 Outside = isl_set_complement(Outside);
674 const auto &Loc = getAccessInstruction()
675 ? getAccessInstruction()->getDebugLoc()
676 : DebugLoc();
677 Statement->getParent()->recordAssumption(INBOUNDS, Outside, Loc,
678 AS_ASSUMPTION);
679 isl_space_free(Space);
680}
681
682void MemoryAccess::buildMemIntrinsicAccessRelation() {
683  assert(isa<MemIntrinsic>(getAccessInstruction()));
684  assert(Subscripts.size() == 2 && Sizes.size() == 0);
685
686 auto *SubscriptPWA = getPwAff(Subscripts[0]);
687 auto *SubscriptMap = isl_map_from_pw_aff(SubscriptPWA);
688
689 isl_map *LengthMap;
690 if (Subscripts[1] == nullptr) {
691 LengthMap = isl_map_universe(isl_map_get_space(SubscriptMap));
692 } else {
693 auto *LengthPWA = getPwAff(Subscripts[1]);
694 LengthMap = isl_map_from_pw_aff(LengthPWA);
695 auto *RangeSpace = isl_space_range(isl_map_get_space(LengthMap));
696 LengthMap = isl_map_apply_range(LengthMap, isl_map_lex_gt(RangeSpace));
697 }
698 LengthMap = isl_map_lower_bound_si(LengthMap, isl_dim_out, 0, 0);
699 LengthMap = isl_map_align_params(LengthMap, isl_map_get_space(SubscriptMap));
700 SubscriptMap =
701 isl_map_align_params(SubscriptMap, isl_map_get_space(LengthMap));
702 LengthMap = isl_map_sum(LengthMap, SubscriptMap);
703 AccessRelation = isl_map_set_tuple_id(LengthMap, isl_dim_in,
704 getStatement()->getDomainId());
705}
706
707void MemoryAccess::computeBoundsOnAccessRelation(unsigned ElementSize) {
708 ScalarEvolution *SE = Statement->getParent()->getSE();
709
710 auto MAI = MemAccInst(getAccessInstruction());
711 if (isa<MemIntrinsic>(MAI))
712 return;
713
714 Value *Ptr = MAI.getPointerOperand();
715 if (!Ptr || !SE->isSCEVable(Ptr->getType()))
716 return;
717
718 auto *PtrSCEV = SE->getSCEV(Ptr);
719 if (isa<SCEVCouldNotCompute>(PtrSCEV))
720 return;
721
722 auto *BasePtrSCEV = SE->getPointerBase(PtrSCEV);
723 if (BasePtrSCEV && !isa<SCEVCouldNotCompute>(BasePtrSCEV))
724 PtrSCEV = SE->getMinusSCEV(PtrSCEV, BasePtrSCEV);
725
726 const ConstantRange &Range = SE->getSignedRange(PtrSCEV);
727 if (Range.isFullSet())
728 return;
729
730 bool isWrapping = Range.isSignWrappedSet();
731 unsigned BW = Range.getBitWidth();
732 const auto One = APInt(BW, 1);
733 const auto LB = isWrapping ? Range.getLower() : Range.getSignedMin();
734 const auto UB = isWrapping ? (Range.getUpper() - One) : Range.getSignedMax();
735
736 auto Min = LB.sdiv(APInt(BW, ElementSize));
737 auto Max = UB.sdiv(APInt(BW, ElementSize)) + One;
738
739 isl_set *AccessRange = isl_map_range(isl_map_copy(AccessRelation));
740 AccessRange =
741 addRangeBoundsToSet(AccessRange, ConstantRange(Min, Max), 0, isl_dim_set);
742 AccessRelation = isl_map_intersect_range(AccessRelation, AccessRange);
743}
744
745__isl_give isl_map *MemoryAccess::foldAccess(__isl_take isl_map *AccessRelation,
746 ScopStmt *Statement) {
747 int Size = Subscripts.size();
748
749 for (int i = Size - 2; i >= 0; --i) {
750 isl_space *Space;
751 isl_map *MapOne, *MapTwo;
752 isl_pw_aff *DimSize = getPwAff(Sizes[i]);
753
754 isl_space *SpaceSize = isl_pw_aff_get_space(DimSize);
755 isl_pw_aff_free(DimSize);
756 isl_id *ParamId = isl_space_get_dim_id(SpaceSize, isl_dim_param, 0);
757
758 Space = isl_map_get_space(AccessRelation);
759 Space = isl_space_map_from_set(isl_space_range(Space));
760 Space = isl_space_align_params(Space, SpaceSize);
761
762 int ParamLocation = isl_space_find_dim_by_id(Space, isl_dim_param, ParamId);
763 isl_id_free(ParamId);
764
765 MapOne = isl_map_universe(isl_space_copy(Space));
766 for (int j = 0; j < Size; ++j)
767 MapOne = isl_map_equate(MapOne, isl_dim_in, j, isl_dim_out, j);
768 MapOne = isl_map_lower_bound_si(MapOne, isl_dim_in, i + 1, 0);
769
770 MapTwo = isl_map_universe(isl_space_copy(Space));
771 for (int j = 0; j < Size; ++j)
772 if (j < i || j > i + 1)
773 MapTwo = isl_map_equate(MapTwo, isl_dim_in, j, isl_dim_out, j);
774
775 isl_local_space *LS = isl_local_space_from_space(Space);
776 isl_constraint *C;
777 C = isl_equality_alloc(isl_local_space_copy(LS));
778 C = isl_constraint_set_constant_si(C, -1);
779 C = isl_constraint_set_coefficient_si(C, isl_dim_in, i, 1);
780 C = isl_constraint_set_coefficient_si(C, isl_dim_out, i, -1);
781 MapTwo = isl_map_add_constraint(MapTwo, C);
782 C = isl_equality_alloc(LS);
783 C = isl_constraint_set_coefficient_si(C, isl_dim_in, i + 1, 1);
784 C = isl_constraint_set_coefficient_si(C, isl_dim_out, i + 1, -1);
785 C = isl_constraint_set_coefficient_si(C, isl_dim_param, ParamLocation, 1);
786 MapTwo = isl_map_add_constraint(MapTwo, C);
787 MapTwo = isl_map_upper_bound_si(MapTwo, isl_dim_in, i + 1, -1);
788
789 MapOne = isl_map_union(MapOne, MapTwo);
790 AccessRelation = isl_map_apply_range(AccessRelation, MapOne);
791 }
792 return AccessRelation;
793}
794
795/// @brief Check if @p Expr is divisible by @p Size.
796static bool isDivisible(const SCEV *Expr, unsigned Size, ScalarEvolution &SE) {
797  assert(Size != 0);
798 if (Size == 1)
799 return true;
800
801 // Only one factor needs to be divisible.
802 if (auto *MulExpr = dyn_cast<SCEVMulExpr>(Expr)) {
803 for (auto *FactorExpr : MulExpr->operands())
804 if (isDivisible(FactorExpr, Size, SE))
805 return true;
806 return false;
807 }
808
809 // For other n-ary expressions (Add, AddRec, Max,...) all operands need
810  // to be divisible.
811 if (auto *NAryExpr = dyn_cast<SCEVNAryExpr>(Expr)) {
812 for (auto *OpExpr : NAryExpr->operands())
813 if (!isDivisible(OpExpr, Size, SE))
814 return false;
815 return true;
816 }
817
818 auto *SizeSCEV = SE.getConstant(Expr->getType(), Size);
819 auto *UDivSCEV = SE.getUDivExpr(Expr, SizeSCEV);
820 auto *MulSCEV = SE.getMulExpr(UDivSCEV, SizeSCEV);
821 return MulSCEV == Expr;
822}
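// Worked example (sketch): (4 * %i + 8) is divisible by 4: the multiply
// contributes the factor 4, and the constant passes the round-trip check
// ((8 /u 4) * 4) == 8. In contrast, (4 * %i + 2) fails because of the
// addend 2, since ((2 /u 4) * 4) == 0 != 2.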
823
824void MemoryAccess::buildAccessRelation(const ScopArrayInfo *SAI) {
825  assert(!AccessRelation && "AccessRelation already built");
826
827 // Initialize the invalid domain which describes all iterations for which the
828 // access relation is not modeled correctly.
829 auto *StmtInvalidDomain = getStatement()->getInvalidDomain();
830 InvalidDomain = isl_set_empty(isl_set_get_space(StmtInvalidDomain));
831 isl_set_free(StmtInvalidDomain);
832
833 isl_ctx *Ctx = isl_id_get_ctx(Id);
834 isl_id *BaseAddrId = SAI->getBasePtrId();
835
836 if (!isAffine()) {
837 if (isa<MemIntrinsic>(getAccessInstruction()))
838 buildMemIntrinsicAccessRelation();
839
840 // We overapproximate non-affine accesses with a possible access to the
841 // whole array. For read accesses it does not make a difference, if an
842 // access must or may happen. However, for write accesses it is important to
843 // differentiate between writes that must happen and writes that may happen.
844 if (!AccessRelation)
845 AccessRelation = isl_map_from_basic_map(createBasicAccessMap(Statement));
846
847 AccessRelation =
848 isl_map_set_tuple_id(AccessRelation, isl_dim_out, BaseAddrId);
849 return;
850 }
851
852 isl_space *Space = isl_space_alloc(Ctx, 0, Statement->getNumIterators(), 0);
853 AccessRelation = isl_map_universe(Space);
854
855 for (int i = 0, Size = Subscripts.size(); i < Size; ++i) {
856 isl_pw_aff *Affine = getPwAff(Subscripts[i]);
857 isl_map *SubscriptMap = isl_map_from_pw_aff(Affine);
858 AccessRelation = isl_map_flat_range_product(AccessRelation, SubscriptMap);
859 }
860
861 if (Sizes.size() >= 1 && !isa<SCEVConstant>(Sizes[0]))
862 AccessRelation = foldAccess(AccessRelation, Statement);
863
864 Space = Statement->getDomainSpace();
865 AccessRelation = isl_map_set_tuple_id(
866 AccessRelation, isl_dim_in, isl_space_get_tuple_id(Space, isl_dim_set));
867 AccessRelation =
868 isl_map_set_tuple_id(AccessRelation, isl_dim_out, BaseAddrId);
869
870 AccessRelation = isl_map_gist_domain(AccessRelation, Statement->getDomain());
871 isl_space_free(Space);
872}
873
874MemoryAccess::MemoryAccess(ScopStmt *Stmt, Instruction *AccessInst,
875 AccessType AccType, Value *BaseAddress,
876 Type *ElementType, bool Affine,
877 ArrayRef<const SCEV *> Subscripts,
878 ArrayRef<const SCEV *> Sizes, Value *AccessValue,
879 ScopArrayInfo::MemoryKind Kind, StringRef BaseName)
880 : Kind(Kind), AccType(AccType), RedType(RT_NONE), Statement(Stmt),
881 InvalidDomain(nullptr), BaseAddr(BaseAddress), BaseName(BaseName),
882 ElementType(ElementType), Sizes(Sizes.begin(), Sizes.end()),
883 AccessInstruction(AccessInst), AccessValue(AccessValue), IsAffine(Affine),
884 Subscripts(Subscripts.begin(), Subscripts.end()), AccessRelation(nullptr),
885 NewAccessRelation(nullptr) {
886 static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
887 const std::string Access = TypeStrings[AccType] + utostr(Stmt->size()) + "_";
888
889 std::string IdName =
890 getIslCompatibleName(Stmt->getBaseName(), Access, BaseName);
891 Id = isl_id_alloc(Stmt->getParent()->getIslCtx(), IdName.c_str(), this);
892}
893
894void MemoryAccess::realignParams() {
895 auto *Ctx = Statement->getParent()->getContext();
896 InvalidDomain = isl_set_gist_params(InvalidDomain, isl_set_copy(Ctx));
897 AccessRelation = isl_map_gist_params(AccessRelation, Ctx);
898}
899
900const std::string MemoryAccess::getReductionOperatorStr() const {
901 return MemoryAccess::getReductionOperatorStr(getReductionType());
902}
903
904__isl_give isl_id *MemoryAccess::getId() const { return isl_id_copy(Id); }
905
906raw_ostream &polly::operator<<(raw_ostream &OS,
907 MemoryAccess::ReductionType RT) {
908 if (RT == MemoryAccess::RT_NONE)
909 OS << "NONE";
910 else
911 OS << MemoryAccess::getReductionOperatorStr(RT);
912 return OS;
913}
914
915void MemoryAccess::print(raw_ostream &OS) const {
916 switch (AccType) {
917 case READ:
918 OS.indent(12) << "ReadAccess :=\t";
919 break;
920 case MUST_WRITE:
921 OS.indent(12) << "MustWriteAccess :=\t";
922 break;
923 case MAY_WRITE:
924 OS.indent(12) << "MayWriteAccess :=\t";
925 break;
926 }
927 OS << "[Reduction Type: " << getReductionType() << "] ";
928 OS << "[Scalar: " << isScalarKind() << "]\n";
929 OS.indent(16) << getOriginalAccessRelationStr() << ";\n";
930 if (hasNewAccessRelation())
931 OS.indent(11) << "new: " << getNewAccessRelationStr() << ";\n";
932}
933
934void MemoryAccess::dump() const { print(errs()); }
935
936__isl_give isl_pw_aff *MemoryAccess::getPwAff(const SCEV *E) {
937 auto *Stmt = getStatement();
938 PWACtx PWAC = Stmt->getParent()->getPwAff(E, Stmt->getEntryBlock());
939 InvalidDomain = isl_set_union(InvalidDomain, PWAC.second);
940 return PWAC.first;
941}
942
943// Create a map in the size of the provided set domain that maps from one
944// element of the provided set domain to another element of the provided
945// set domain.
946// The mapping is limited to all points that are equal in all but the last
947// dimension and for which the last dimension of the input is strictly
948// smaller than the last dimension of the output.
949//
950// getEqualAndLarger(set[i0, i1, ..., iX]):
951//
952// set[i0, i1, ..., iX] -> set[o0, o1, ..., oX]
953// : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1), iX < oX
954//
955static isl_map *getEqualAndLarger(isl_space *setDomain) {
956 isl_space *Space = isl_space_map_from_set(setDomain);
957 isl_map *Map = isl_map_universe(Space);
958 unsigned lastDimension = isl_map_dim(Map, isl_dim_in) - 1;
959
960 // Set all but the last dimension to be equal for the input and output
961 //
962 // input[i0, i1, ..., iX] -> output[o0, o1, ..., oX]
963 // : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1)
964 for (unsigned i = 0; i < lastDimension; ++i)
965 Map = isl_map_equate(Map, isl_dim_in, i, isl_dim_out, i);
966
967  // Set the last dimension of the input to be strictly smaller than the
968 // last dimension of the output.
969 //
970 // input[?,?,?,...,iX] -> output[?,?,?,...,oX] : iX < oX
971 Map = isl_map_order_lt(Map, isl_dim_in, lastDimension, isl_dim_out,
972 lastDimension);
973 return Map;
974}
975
976__isl_give isl_set *
977MemoryAccess::getStride(__isl_take const isl_map *Schedule) const {
978 isl_map *S = const_cast<isl_map *>(Schedule);
979 isl_map *AccessRelation = getAccessRelation();
980 isl_space *Space = isl_space_range(isl_map_get_space(S));
981 isl_map *NextScatt = getEqualAndLarger(Space);
982
983 S = isl_map_reverse(S);
984 NextScatt = isl_map_lexmin(NextScatt);
985
986 NextScatt = isl_map_apply_range(NextScatt, isl_map_copy(S));
987 NextScatt = isl_map_apply_range(NextScatt, isl_map_copy(AccessRelation));
988 NextScatt = isl_map_apply_domain(NextScatt, S);
989 NextScatt = isl_map_apply_domain(NextScatt, AccessRelation);
990
991 isl_set *Deltas = isl_map_deltas(NextScatt);
992 return Deltas;
993}
994
995bool MemoryAccess::isStrideX(__isl_take const isl_map *Schedule,
996 int StrideWidth) const {
997 isl_set *Stride, *StrideX;
998 bool IsStrideX;
999
1000 Stride = getStride(Schedule);
1001 StrideX = isl_set_universe(isl_set_get_space(Stride));
1002 for (unsigned i = 0; i < isl_set_dim(StrideX, isl_dim_set) - 1; i++)
1003 StrideX = isl_set_fix_si(StrideX, isl_dim_set, i, 0);
1004 StrideX = isl_set_fix_si(StrideX, isl_dim_set,
1005 isl_set_dim(StrideX, isl_dim_set) - 1, StrideWidth);
1006 IsStrideX = isl_set_is_subset(Stride, StrideX);
1007
1008 isl_set_free(StrideX);
1009 isl_set_free(Stride);
1010
1011 return IsStrideX;
1012}
1013
1014bool MemoryAccess::isStrideZero(const isl_map *Schedule) const {
1015 return isStrideX(Schedule, 0);
1016}
1017
1018bool MemoryAccess::isStrideOne(const isl_map *Schedule) const {
1019 return isStrideX(Schedule, 1);
1020}
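// Worked example (sketch): under the schedule { Stmt[i] -> [i] }, the access
// { Stmt[i] -> MemRef_A[2i] } yields deltas { [2] }, so isStrideX(..., 2)
// holds but isStrideOne does not; an access to a fixed element, e.g.
// { Stmt[i] -> MemRef_A[0] }, yields deltas { [0] } and is stride zero.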
1021
1022void MemoryAccess::setNewAccessRelation(isl_map *NewAccess) {
1023 isl_map_free(NewAccessRelation);
1024 NewAccessRelation = NewAccess;
1025}
1026
1027//===----------------------------------------------------------------------===//
1028
1029__isl_give isl_map *ScopStmt::getSchedule() const {
1030 isl_set *Domain = getDomain();
1031 if (isl_set_is_empty(Domain)) {
1032 isl_set_free(Domain);
1033 return isl_map_from_aff(
1034 isl_aff_zero_on_domain(isl_local_space_from_space(getDomainSpace())));
1035 }
1036 auto *Schedule = getParent()->getSchedule();
1037 Schedule = isl_union_map_intersect_domain(
1038 Schedule, isl_union_set_from_set(isl_set_copy(Domain)));
1039 if (isl_union_map_is_empty(Schedule)) {
1040 isl_set_free(Domain);
1041 isl_union_map_free(Schedule);
1042 return isl_map_from_aff(
1043 isl_aff_zero_on_domain(isl_local_space_from_space(getDomainSpace())));
1044 }
1045 auto *M = isl_map_from_union_map(Schedule);
1046 M = isl_map_coalesce(M);
1047 M = isl_map_gist_domain(M, Domain);
1048 M = isl_map_coalesce(M);
1049 return M;
1050}
1051
1052__isl_give isl_pw_aff *ScopStmt::getPwAff(const SCEV *E, bool NonNegative) {
1053 PWACtx PWAC = getParent()->getPwAff(E, getEntryBlock(), NonNegative);
1054 InvalidDomain = isl_set_union(InvalidDomain, PWAC.second);
1055 return PWAC.first;
1056}
1057
1058void ScopStmt::restrictDomain(__isl_take isl_set *NewDomain) {
1059  assert(isl_set_is_subset(NewDomain, Domain) &&
1060         "New domain is not a subset of old domain!");
1061 isl_set_free(Domain);
1062 Domain = NewDomain;
1063}
1064
1065void ScopStmt::buildAccessRelations() {
1066 Scop &S = *getParent();
1067 for (MemoryAccess *Access : MemAccs) {
1068 Type *ElementType = Access->getElementType();
1069
1070 ScopArrayInfo::MemoryKind Ty;
1071 if (Access->isPHIKind())
1072 Ty = ScopArrayInfo::MK_PHI;
1073 else if (Access->isExitPHIKind())
1074 Ty = ScopArrayInfo::MK_ExitPHI;
1075 else if (Access->isValueKind())
1076 Ty = ScopArrayInfo::MK_Value;
1077 else
1078 Ty = ScopArrayInfo::MK_Array;
1079
1080 auto *SAI = S.getOrCreateScopArrayInfo(Access->getBaseAddr(), ElementType,
1081 Access->Sizes, Ty);
1082 Access->buildAccessRelation(SAI);
1083 }
1084}
1085
1086void ScopStmt::addAccess(MemoryAccess *Access) {
1087 Instruction *AccessInst = Access->getAccessInstruction();
1088
1089 if (Access->isArrayKind()) {
1090 MemoryAccessList &MAL = InstructionToAccess[AccessInst];
1091 MAL.emplace_front(Access);
1092 } else if (Access->isValueKind() && Access->isWrite()) {
1093 Instruction *AccessVal = cast<Instruction>(Access->getAccessValue());
1094    assert(Parent.getStmtFor(AccessVal) == this);
1095    assert(!ValueWrites.lookup(AccessVal));
1096
1097 ValueWrites[AccessVal] = Access;
1098 } else if (Access->isValueKind() && Access->isRead()) {
1099 Value *AccessVal = Access->getAccessValue();
1100    assert(!ValueReads.lookup(AccessVal));
1101
1102 ValueReads[AccessVal] = Access;
1103 } else if (Access->isAnyPHIKind() && Access->isWrite()) {
1104 PHINode *PHI = cast<PHINode>(Access->getBaseAddr());
1105    assert(!PHIWrites.lookup(PHI));
1106
1107 PHIWrites[PHI] = Access;
1108 }
1109
1110 MemAccs.push_back(Access);
1111}
1112
1113void ScopStmt::realignParams() {
1114 for (MemoryAccess *MA : *this)
1115 MA->realignParams();
1116
1117 auto *Ctx = Parent.getContext();
1118 InvalidDomain = isl_set_gist_params(InvalidDomain, isl_set_copy(Ctx));
1119 Domain = isl_set_gist_params(Domain, Ctx);
1120}
1121
1122/// @brief Add @p BSet to the set @p User if @p BSet is bounded.
1123static isl_stat collectBoundedParts(__isl_take isl_basic_set *BSet,
1124 void *User) {
1125 isl_set **BoundedParts = static_cast<isl_set **>(User);
1126 if (isl_basic_set_is_bounded(BSet))
1127 *BoundedParts = isl_set_union(*BoundedParts, isl_set_from_basic_set(BSet));
1128 else
1129 isl_basic_set_free(BSet);
1130 return isl_stat_ok;
1131}
1132
1133/// @brief Return the bounded parts of @p S.
1134static __isl_give isl_set *collectBoundedParts(__isl_take isl_set *S) {
1135 isl_set *BoundedParts = isl_set_empty(isl_set_get_space(S));
1136 isl_set_foreach_basic_set(S, collectBoundedParts, &BoundedParts);
1137 isl_set_free(S);
1138 return BoundedParts;
1139}
1140
1141/// @brief Compute the (un)bounded parts of @p S w.r.t. dimension @p Dim.
1142///
1143/// @returns A separation of @p S into first an unbounded then a bounded subset,
1144/// both with regards to the dimension @p Dim.
1145static std::pair<__isl_give isl_set *, __isl_give isl_set *>
1146partitionSetParts(__isl_take isl_set *S, unsigned Dim) {
1147
1148 for (unsigned u = 0, e = isl_set_n_dim(S); u < e; u++)
1149 S = isl_set_lower_bound_si(S, isl_dim_set, u, 0);
1150
1151 unsigned NumDimsS = isl_set_n_dim(S);
1152 isl_set *OnlyDimS = isl_set_copy(S);
1153
1154 // Remove dimensions that are greater than Dim as they are not interesting.
1155  assert(NumDimsS >= Dim + 1);
1156 OnlyDimS =
1157 isl_set_project_out(OnlyDimS, isl_dim_set, Dim + 1, NumDimsS - Dim - 1);
1158
1159 // Create artificial parametric upper bounds for dimensions smaller than Dim
1160 // as we are not interested in them.
1161 OnlyDimS = isl_set_insert_dims(OnlyDimS, isl_dim_param, 0, Dim);
1162 for (unsigned u = 0; u < Dim; u++) {
1163 isl_constraint *C = isl_inequality_alloc(
1164 isl_local_space_from_space(isl_set_get_space(OnlyDimS)));
1165 C = isl_constraint_set_coefficient_si(C, isl_dim_param, u, 1);
1166 C = isl_constraint_set_coefficient_si(C, isl_dim_set, u, -1);
1167 OnlyDimS = isl_set_add_constraint(OnlyDimS, C);
1168 }
1169
1170 // Collect all bounded parts of OnlyDimS.
1171 isl_set *BoundedParts = collectBoundedParts(OnlyDimS);
1172
1173 // Create the dimensions greater than Dim again.
1174 BoundedParts = isl_set_insert_dims(BoundedParts, isl_dim_set, Dim + 1,
1175 NumDimsS - Dim - 1);
1176
1177 // Remove the artificial upper bound parameters again.
1178 BoundedParts = isl_set_remove_dims(BoundedParts, isl_dim_param, 0, Dim);
1179
1180 isl_set *UnboundedParts = isl_set_subtract(S, isl_set_copy(BoundedParts));
1181 return std::make_pair(UnboundedParts, BoundedParts);
1182}
1183
1184/// @brief Set the dimension Ids from @p From in @p To.
1185static __isl_give isl_set *setDimensionIds(__isl_keep isl_set *From,
1186 __isl_take isl_set *To) {
1187 for (unsigned u = 0, e = isl_set_n_dim(From); u < e; u++) {
1188 isl_id *DimId = isl_set_get_dim_id(From, isl_dim_set, u);
1189 To = isl_set_set_dim_id(To, isl_dim_set, u, DimId);
1190 }
1191 return To;
1192}
1193
1194/// @brief Create the conditions under which @p L @p Pred @p R is true.
1195static __isl_give isl_set *buildConditionSet(ICmpInst::Predicate Pred,
1196 __isl_take isl_pw_aff *L,
1197 __isl_take isl_pw_aff *R) {
1198 switch (Pred) {
1199 case ICmpInst::ICMP_EQ:
1200 return isl_pw_aff_eq_set(L, R);
1201 case ICmpInst::ICMP_NE:
1202 return isl_pw_aff_ne_set(L, R);
1203 case ICmpInst::ICMP_SLT:
1204 return isl_pw_aff_lt_set(L, R);
1205 case ICmpInst::ICMP_SLE:
1206 return isl_pw_aff_le_set(L, R);
1207 case ICmpInst::ICMP_SGT:
1208 return isl_pw_aff_gt_set(L, R);
1209 case ICmpInst::ICMP_SGE:
1210 return isl_pw_aff_ge_set(L, R);
1211 case ICmpInst::ICMP_ULT:
1212 return isl_pw_aff_lt_set(L, R);
1213 case ICmpInst::ICMP_UGT:
1214 return isl_pw_aff_gt_set(L, R);
1215 case ICmpInst::ICMP_ULE:
1216 return isl_pw_aff_le_set(L, R);
1217 case ICmpInst::ICMP_UGE:
1218 return isl_pw_aff_ge_set(L, R);
1219 default:
1220    llvm_unreachable("Non integer predicate not supported");
1221 }
1222}
1223
1224/// @brief Create the conditions under which @p L @p Pred @p R is true.
1225///
1226/// Helper function that will make sure the dimensions of the result have the
1227/// same isl_id's as the @p Domain.
1228static __isl_give isl_set *buildConditionSet(ICmpInst::Predicate Pred,
1229 __isl_take isl_pw_aff *L,
1230 __isl_take isl_pw_aff *R,
1231 __isl_keep isl_set *Domain) {
1232 isl_set *ConsequenceCondSet = buildConditionSet(Pred, L, R);
1233 return setDimensionIds(Domain, ConsequenceCondSet);
1234}
1235
1236/// @brief Build the condition sets for the switch @p SI in the @p Domain.
1237///
1238/// This will fill @p ConditionSets with the conditions under which control
1239/// will be moved from @p SI to its successors. Hence, @p ConditionSets will
1240/// have as many elements as @p SI has successors.
1241static bool
1242buildConditionSets(ScopStmt &Stmt, SwitchInst *SI, Loop *L,
1243 __isl_keep isl_set *Domain,
1244 SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
1245
1246 Value *Condition = getConditionFromTerminator(SI);
1247  assert(Condition && "No condition for switch");
1248
1249 Scop &S = *Stmt.getParent();
1250 ScalarEvolution &SE = *S.getSE();
1251 isl_pw_aff *LHS, *RHS;
1252 LHS = Stmt.getPwAff(SE.getSCEVAtScope(Condition, L));
1253
1254 unsigned NumSuccessors = SI->getNumSuccessors();
1255 ConditionSets.resize(NumSuccessors);
1256 for (auto &Case : SI->cases()) {
1257 unsigned Idx = Case.getSuccessorIndex();
1258 ConstantInt *CaseValue = Case.getCaseValue();
1259
1260 RHS = Stmt.getPwAff(SE.getSCEV(CaseValue));
1261 isl_set *CaseConditionSet =
1262 buildConditionSet(ICmpInst::ICMP_EQ, isl_pw_aff_copy(LHS), RHS, Domain);
1263 ConditionSets[Idx] = isl_set_coalesce(
1264 isl_set_intersect(CaseConditionSet, isl_set_copy(Domain)));
1265 }
1266
1267  assert(ConditionSets[0] == nullptr && "Default condition set was set");
1268 isl_set *ConditionSetUnion = isl_set_copy(ConditionSets[1]);
1269 for (unsigned u = 2; u < NumSuccessors; u++)
1270 ConditionSetUnion =
1271 isl_set_union(ConditionSetUnion, isl_set_copy(ConditionSets[u]));
1272 ConditionSets[0] = setDimensionIds(
1273 Domain, isl_set_subtract(isl_set_copy(Domain), ConditionSetUnion));
1274
1275 isl_pw_aff_free(LHS);
1276
1277 return true;
1278}
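// Worked example (sketch): for 'switch (c)' with 'case 0' and 'case 5', the
// case successors receive Domain intersected with { c = 0 } and { c = 5 },
// and the default successor (index 0) receives Domain minus the union of
// all case condition sets.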
1279
1280/// @brief Build the condition sets for the branch condition @p Condition in
1281/// the @p Domain.
1282///
1283/// This will fill @p ConditionSets with the conditions under which control
1284/// will be moved from @p TI to its successors. Hence, @p ConditionSets will
1285/// have as many elements as @p TI has successors. If @p TI is nullptr the
1286/// context under which @p Condition is true/false will be returned as the
1287/// new elements of @p ConditionSets.
1288static bool
1289buildConditionSets(ScopStmt &Stmt, Value *Condition, TerminatorInst *TI,
1290 Loop *L, __isl_keep isl_set *Domain,
1291 SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
1292
1293 Scop &S = *Stmt.getParent();
1294 isl_set *ConsequenceCondSet = nullptr;
1295 if (auto *CCond = dyn_cast<ConstantInt>(Condition)) {
1296 if (CCond->isZero())
1297 ConsequenceCondSet = isl_set_empty(isl_set_get_space(Domain));
1298 else
1299 ConsequenceCondSet = isl_set_universe(isl_set_get_space(Domain));
1300 } else if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Condition)) {
1301 auto Opcode = BinOp->getOpcode();
1302    assert(Opcode == Instruction::And || Opcode == Instruction::Or);
1303
1304 bool Valid = buildConditionSets(Stmt, BinOp->getOperand(0), TI, L, Domain,
1305 ConditionSets) &&
1306 buildConditionSets(Stmt, BinOp->getOperand(1), TI, L, Domain,
1307 ConditionSets);
1308 if (!Valid) {
1309 while (!ConditionSets.empty())
1310 isl_set_free(ConditionSets.pop_back_val());
1311 return false;
1312 }
1313
1314 isl_set_free(ConditionSets.pop_back_val());
1315 isl_set *ConsCondPart0 = ConditionSets.pop_back_val();
1316 isl_set_free(ConditionSets.pop_back_val());
1317 isl_set *ConsCondPart1 = ConditionSets.pop_back_val();
1318
1319 if (Opcode == Instruction::And)
1320 ConsequenceCondSet = isl_set_intersect(ConsCondPart0, ConsCondPart1);
1321 else
1322 ConsequenceCondSet = isl_set_union(ConsCondPart0, ConsCondPart1);
1323 } else {
1324 auto *ICond = dyn_cast<ICmpInst>(Condition);
1325    assert(ICond &&
1326           "Condition of exiting branch was neither constant nor ICmp!");
1327
1328 ScalarEvolution &SE = *S.getSE();
1329 isl_pw_aff *LHS, *RHS;
1330 // For unsigned comparisons we assumed the signed bit of neither operand
1331 // to be set. The comparison is equal to a signed comparison under this
1332 // assumption.
1333 bool NonNeg = ICond->isUnsigned();
1334 LHS = Stmt.getPwAff(SE.getSCEVAtScope(ICond->getOperand(0), L), NonNeg);
1335 RHS = Stmt.getPwAff(SE.getSCEVAtScope(ICond->getOperand(1), L), NonNeg);
1336 ConsequenceCondSet =
1337 buildConditionSet(ICond->getPredicate(), LHS, RHS, Domain);
1338 }
1339
1340 // If no terminator was given we are only looking for parameter constraints
1341 // under which @p Condition is true/false.
1342 if (!TI)
1343 ConsequenceCondSet = isl_set_params(ConsequenceCondSet);
1344 assert(ConsequenceCondSet);
1345 ConsequenceCondSet = isl_set_coalesce(
1346 isl_set_intersect(ConsequenceCondSet, isl_set_copy(Domain)));
1347
1348 isl_set *AlternativeCondSet = nullptr;
1349 bool TooComplex =
1350 isl_set_n_basic_set(ConsequenceCondSet) >= MaxDisjunctionsInDomain;
1351
1352 if (!TooComplex) {
1353 AlternativeCondSet = isl_set_subtract(isl_set_copy(Domain),
1354 isl_set_copy(ConsequenceCondSet));
1355 TooComplex =
1356 isl_set_n_basic_set(AlternativeCondSet) >= MaxDisjunctionsInDomain;
1357 }
1358
1359 if (TooComplex) {
1360 S.invalidate(COMPLEXITY, TI ? TI->getDebugLoc() : DebugLoc());
1361 isl_set_free(AlternativeCondSet);
1362 isl_set_free(ConsequenceCondSet);
1363 return false;
1364 }
1365
1366 ConditionSets.push_back(ConsequenceCondSet);
1367 ConditionSets.push_back(isl_set_coalesce(AlternativeCondSet));
1368
1369 return true;
1370}
1371
1372/// @brief Build the condition sets for the terminator @p TI in the @p Domain.
1373///
1374/// This will fill @p ConditionSets with the conditions under which control
1375/// will be moved from @p TI to its successors. Hence, @p ConditionSets will
1376/// have as many elements as @p TI has successors.
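///
/// For a switch, as handled above, the default successor at index 0 receives
/// whatever part of @p Domain is not covered by any case; e.g., with two
/// cases (an illustrative sketch):
///
///   ConditionSets[0] = Domain \ (ConditionSets[1] u ConditionSets[2])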
1377static bool
1378buildConditionSets(ScopStmt &Stmt, TerminatorInst *TI, Loop *L,
1379 __isl_keep isl_set *Domain,
1380 SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
1381
1382 if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
1383 return buildConditionSets(Stmt, SI, L, Domain, ConditionSets);
1384
1385 assert(isa<BranchInst>(TI) && "Terminator was neither branch nor switch.");
1386
1387 if (TI->getNumSuccessors() == 1) {
1388 ConditionSets.push_back(isl_set_copy(Domain));
1389 return true;
1390 }
1391
1392 Value *Condition = getConditionFromTerminator(TI);
1393 assert(Condition && "No condition for Terminator");
1394
1395 return buildConditionSets(Stmt, Condition, TI, L, Domain, ConditionSets);
1396}
1397
1398void ScopStmt::buildDomain() {
1399 isl_id *Id = isl_id_alloc(getIslCtx(), getBaseName(), this);
1400
1401 Domain = getParent()->getDomainConditions(this);
1402 Domain = isl_set_set_tuple_id(Domain, Id);
1403}
1404
1405void ScopStmt::deriveAssumptionsFromGEP(GetElementPtrInst *GEP, LoopInfo &LI) {
1406 isl_ctx *Ctx = Parent.getIslCtx();
1407 isl_local_space *LSpace = isl_local_space_from_space(getDomainSpace());
1408 Type *Ty = GEP->getPointerOperandType();
1409 ScalarEvolution &SE = *Parent.getSE();
1410
1411 // The set of loads that are required to be invariant.
1412 auto &ScopRIL = Parent.getRequiredInvariantLoads();
1413
1414 std::vector<const SCEV *> Subscripts;
1415 std::vector<int> Sizes;
1416
1417 std::tie(Subscripts, Sizes) = getIndexExpressionsFromGEP(GEP, SE);
1418
1419 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
1420 Ty = PtrTy->getElementType();
Value stored to 'Ty' is never read
1421 }
1422
1423 int IndexOffset = Subscripts.size() - Sizes.size();
1424
1425 assert(IndexOffset <= 1 && "Unexpected large index offset");
1426
1427 auto *NotExecuted = isl_set_complement(isl_set_params(getDomain()));
1428 for (size_t i = 0; i < Sizes.size(); i++) {
1429 auto *Expr = Subscripts[i + IndexOffset];
1430 auto Size = Sizes[i];
1431
1432 auto *Scope = LI.getLoopFor(getEntryBlock());
1433 InvariantLoadsSetTy AccessILS;
1434 if (!isAffineExpr(&Parent.getRegion(), Scope, Expr, SE, &AccessILS))
1435 continue;
1436
1437 bool NonAffine = false;
1438 for (LoadInst *LInst : AccessILS)
1439 if (!ScopRIL.count(LInst))
1440 NonAffine = true;
1441
1442 if (NonAffine)
1443 continue;
1444
1445 isl_pw_aff *AccessOffset = getPwAff(Expr);
1446 AccessOffset =
1447 isl_pw_aff_set_tuple_id(AccessOffset, isl_dim_in, getDomainId());
1448
1449 isl_pw_aff *DimSize = isl_pw_aff_from_aff(isl_aff_val_on_domain(
1450 isl_local_space_copy(LSpace), isl_val_int_from_si(Ctx, Size)));
1451
1452 isl_set *OutOfBound = isl_pw_aff_ge_set(AccessOffset, DimSize);
1453 OutOfBound = isl_set_intersect(getDomain(), OutOfBound);
1454 OutOfBound = isl_set_params(OutOfBound);
1455 isl_set *InBound = isl_set_complement(OutOfBound);
1456
1457 // A => B == !A or B
1458 isl_set *InBoundIfExecuted =
1459 isl_set_union(isl_set_copy(NotExecuted), InBound);
1460
1461 InBoundIfExecuted = isl_set_coalesce(InBoundIfExecuted);
1462 Parent.recordAssumption(INBOUNDS, InBoundIfExecuted, GEP->getDebugLoc(),
1463 AS_ASSUMPTION);
1464 }
1465
1466 isl_local_space_free(LSpace);
1467 isl_set_free(NotExecuted);
1468}
1469
1470void ScopStmt::deriveAssumptions(LoopInfo &LI) {
1471 for (auto *MA : *this) {
1472 if (!MA->isArrayKind())
1473 continue;
1474
1475 MemAccInst Acc(MA->getAccessInstruction());
1476 auto *GEP = dyn_cast_or_null<GetElementPtrInst>(Acc.getPointerOperand());
1477
1478 if (GEP)
1479 deriveAssumptionsFromGEP(GEP, LI);
1480 }
1481}
1482
1483void ScopStmt::collectSurroundingLoops() {
1484 for (unsigned u = 0, e = isl_set_n_dim(Domain); u < e; u++) {
1485 isl_id *DimId = isl_set_get_dim_id(Domain, isl_dim_set, u);
1486 NestLoops.push_back(static_cast<Loop *>(isl_id_get_user(DimId)));
1487 isl_id_free(DimId);
1488 }
1489}
1490
1491ScopStmt::ScopStmt(Scop &parent, Region &R)
1492 : Parent(parent), InvalidDomain(nullptr), Domain(nullptr), BB(nullptr),
1493 R(&R), Build(nullptr) {
1494
1495 BaseName = getIslCompatibleName("Stmt_", R.getNameStr(), "");
1496}
1497
1498ScopStmt::ScopStmt(Scop &parent, BasicBlock &bb)
1499 : Parent(parent), InvalidDomain(nullptr), Domain(nullptr), BB(&bb),
1500 R(nullptr), Build(nullptr) {
1501
1502 BaseName = getIslCompatibleName("Stmt_", &bb, "");
1503}
1504
1505void ScopStmt::init(LoopInfo &LI) {
1506 assert(!Domain && "init must be called only once");
1507
1508 buildDomain();
1509 collectSurroundingLoops();
1510 buildAccessRelations();
1511
1512 deriveAssumptions(LI);
1513
1514 if (DetectReductions)
1515 checkForReductions();
1516}
1517
1518/// @brief Collect loads which might form a reduction chain with @p StoreMA
1519///
1520/// Check if the stored value for @p StoreMA is a binary operator with one or
1521/// two loads as operands. If the binary operator is commutative & associative,
1522/// used only once (by @p StoreMA) and its load operands are also used only
1523/// once, we have found a possible reduction chain. It starts at an operand
1524/// load and includes the binary operator and @p StoreMA.
1525///
1526/// Note: We allow only one use to ensure the load and binary operator cannot
1527/// escape this block or into any other store except @p StoreMA.
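///
/// A typical candidate chain (illustrative only) is the code generated for
///
///   sum += A[i];
///
/// i.e. a load of "sum" and a load of "A[i]" (each with a single use), one
/// commutative & associative binary operator combining them, and the store
/// of the result back to "sum" (the store @p StoreMA).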
1528void ScopStmt::collectCandiateReductionLoads(
1529 MemoryAccess *StoreMA, SmallVectorImpl<MemoryAccess *> &Loads) {
1530 auto *Store = dyn_cast<StoreInst>(StoreMA->getAccessInstruction());
1531 if (!Store)
1532 return;
1533
1534 // Skip if there is not exactly one binary operator between the load and the store
1535 auto *BinOp = dyn_cast<BinaryOperator>(Store->getValueOperand());
1536 if (!BinOp)
1537 return;
1538
1539 // Skip if the binary operator has multiple uses
1540 if (BinOp->getNumUses() != 1)
1541 return;
1542
1543 // Skip if the opcode of the binary operator is not commutative/associative
1544 if (!BinOp->isCommutative() || !BinOp->isAssociative())
1545 return;
1546
1547 // Skip if the binary operator is outside the current SCoP
1548 if (BinOp->getParent() != Store->getParent())
1549 return;
1550
1551 // Skip if it is a multiplicative reduction and we disabled them
1552 if (DisableMultiplicativeReductions &&
1553 (BinOp->getOpcode() == Instruction::Mul ||
1554 BinOp->getOpcode() == Instruction::FMul))
1555 return;
1556
1557 // Check the binary operator operands for a candidate load
1558 auto *PossibleLoad0 = dyn_cast<LoadInst>(BinOp->getOperand(0));
1559 auto *PossibleLoad1 = dyn_cast<LoadInst>(BinOp->getOperand(1));
1560 if (!PossibleLoad0 && !PossibleLoad1)
1561 return;
1562
1563 // A load is only a candidate if it cannot escape (thus has only this use)
1564 if (PossibleLoad0 && PossibleLoad0->getNumUses() == 1)
1565 if (PossibleLoad0->getParent() == Store->getParent())
1566 Loads.push_back(&getArrayAccessFor(PossibleLoad0));
1567 if (PossibleLoad1 && PossibleLoad1->getNumUses() == 1)
1568 if (PossibleLoad1->getParent() == Store->getParent())
1569 Loads.push_back(&getArrayAccessFor(PossibleLoad1));
1570}
1571
1572/// @brief Check for reductions in this ScopStmt
1573///
1574/// Iterate over all store memory accesses and check for valid binary reduction
1575/// like chains. For all candidates we check if they have the same base address
1576/// and there are no other accesses which overlap with them. The base address
1577/// check rules out impossible reduction candidates early. The overlap check,
1578/// together with the "only one user" check in collectCandiateReductionLoads,
1579/// guarantees that none of the intermediate results will escape during
1580/// execution of the loop nest. We basically check here that no other memory
1581/// access can access the same memory as the potential reduction.
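///
/// For example (a sketch), in
///
///   for (long i = 0; i < N; i++)
///     sum += A[i];
///
/// the load and store of "sum" form a candidate pair; any further access in
/// the statement whose range can overlap the location of "sum" makes the
/// intersection non-empty and the pair is not marked as reduction like.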
1582void ScopStmt::checkForReductions() {
1583 SmallVector<MemoryAccess *, 2> Loads;
1584 SmallVector<std::pair<MemoryAccess *, MemoryAccess *>, 4> Candidates;
1585
1586 // First collect candidate load-store reduction chains by iterating over all
1587 // stores and collecting possible reduction loads.
1588 for (MemoryAccess *StoreMA : MemAccs) {
1589 if (StoreMA->isRead())
1590 continue;
1591
1592 Loads.clear();
1593 collectCandiateReductionLoads(StoreMA, Loads);
1594 for (MemoryAccess *LoadMA : Loads)
1595 Candidates.push_back(std::make_pair(LoadMA, StoreMA));
1596 }
1597
1598 // Then check each possible candidate pair.
1599 for (const auto &CandidatePair : Candidates) {
1600 bool Valid = true;
1601 isl_map *LoadAccs = CandidatePair.first->getAccessRelation();
1602 isl_map *StoreAccs = CandidatePair.second->getAccessRelation();
1603
1604 // Skip those with obviously unequal base addresses.
1605 if (!isl_map_has_equal_space(LoadAccs, StoreAccs)) {
1606 isl_map_free(LoadAccs);
1607 isl_map_free(StoreAccs);
1608 continue;
1609 }
1610
1611 // Then check whether the remaining accesses overlap with other memory accesses.
1612 isl_map *AllAccsRel = isl_map_union(LoadAccs, StoreAccs);
1613 AllAccsRel = isl_map_intersect_domain(AllAccsRel, getDomain());
1614 isl_set *AllAccs = isl_map_range(AllAccsRel);
1615
1616 for (MemoryAccess *MA : MemAccs) {
1617 if (MA == CandidatePair.first || MA == CandidatePair.second)
1618 continue;
1619
1620 isl_map *AccRel =
1621 isl_map_intersect_domain(MA->getAccessRelation(), getDomain());
1622 isl_set *Accs = isl_map_range(AccRel);
1623
1624 if (isl_set_has_equal_space(AllAccs, Accs) || isl_set_free(Accs)) {
1625 isl_set *OverlapAccs = isl_set_intersect(Accs, isl_set_copy(AllAccs));
1626 Valid = Valid && isl_set_is_empty(OverlapAccs);
1627 isl_set_free(OverlapAccs);
1628 }
1629 }
1630
1631 isl_set_free(AllAccs);
1632 if (!Valid)
1633 continue;
1634
1635 const LoadInst *Load =
1636 dyn_cast<const LoadInst>(CandidatePair.first->getAccessInstruction());
1637 MemoryAccess::ReductionType RT =
1638 getReductionType(dyn_cast<BinaryOperator>(Load->user_back()), Load);
1639
1640 // If no overlapping access was found we mark the load and store as
1641 // reduction like.
1642 CandidatePair.first->markAsReductionLike(RT);
1643 CandidatePair.second->markAsReductionLike(RT);
1644 }
1645}
1646
1647std::string ScopStmt::getDomainStr() const { return stringFromIslObj(Domain); }
1648
1649std::string ScopStmt::getScheduleStr() const {
1650 auto *S = getSchedule();
1651 auto Str = stringFromIslObj(S);
1652 isl_map_free(S);
1653 return Str;
1654}
1655
1656void ScopStmt::setInvalidDomain(__isl_take isl_set *ID) {
1657 isl_set_free(InvalidDomain);
1658 InvalidDomain = ID;
1659}
1660
1661BasicBlock *ScopStmt::getEntryBlock() const {
1662 if (isBlockStmt())
1663 return getBasicBlock();
1664 return getRegion()->getEntry();
1665}
1666
1667unsigned ScopStmt::getNumIterators() const { return NestLoops.size(); }
1668
1669const char *ScopStmt::getBaseName() const { return BaseName.c_str(); }
1670
1671Loop *ScopStmt::getLoopForDimension(unsigned Dimension) const {
1672 return NestLoops[Dimension];
1673}
1674
1675isl_ctx *ScopStmt::getIslCtx() const { return Parent.getIslCtx(); }
1676
1677__isl_give isl_set *ScopStmt::getDomain() const { return isl_set_copy(Domain); }
1678
1679__isl_give isl_space *ScopStmt::getDomainSpace() const {
1680 return isl_set_get_space(Domain);
1681}
1682
1683__isl_give isl_id *ScopStmt::getDomainId() const {
1684 return isl_set_get_tuple_id(Domain);
1685}
1686
1687ScopStmt::~ScopStmt() {
1688 isl_set_free(Domain);
1689 isl_set_free(InvalidDomain);
1690}
1691
1692void ScopStmt::print(raw_ostream &OS) const {
1693 OS << "\t" << getBaseName() << "\n";
1694 OS.indent(12) << "Domain :=\n";
1695
1696 if (Domain) {
1697 OS.indent(16) << getDomainStr() << ";\n";
1698 } else
1699 OS.indent(16) << "n/a\n";
1700
1701 OS.indent(12) << "Schedule :=\n";
1702
1703 if (Domain) {
1704 OS.indent(16) << getScheduleStr() << ";\n";
1705 } else
1706 OS.indent(16) << "n/a\n";
1707
1708 for (MemoryAccess *Access : MemAccs)
1709 Access->print(OS);
1710}
1711
1712void ScopStmt::dump() const { print(dbgs()); }
1713
1714void ScopStmt::removeMemoryAccesses(MemoryAccessList &InvMAs) {
1715 // Remove all memory accesses in @p InvMAs from this statement
1716 // together with all scalar accesses that were caused by them.
1717 // MK_Value READs have no access instruction, hence they would not be removed
1718 // by this function. However, this function is only used for invariant
1719 // LoadInst accesses, whose arguments are always affine and hence
1720 // synthesizable, so there are no MK_Value READ accesses to be removed.
1721 for (MemoryAccess *MA : InvMAs) {
1722 auto Predicate = [&](MemoryAccess *Acc) {
1723 return Acc->getAccessInstruction() == MA->getAccessInstruction();
1724 };
1725 MemAccs.erase(std::remove_if(MemAccs.begin(), MemAccs.end(), Predicate),
1726 MemAccs.end());
1727 InstructionToAccess.erase(MA->getAccessInstruction());
1728 }
1729}
1730
1731//===----------------------------------------------------------------------===//
1732/// Scop class implementation
1733
1734void Scop::setContext(__isl_take isl_set *NewContext) {
1735 NewContext = isl_set_align_params(NewContext, isl_set_get_space(Context));
1736 isl_set_free(Context);
1737 Context = NewContext;
1738}
1739
1740/// @brief Remap parameter values but keep AddRecs valid wrt. invariant loads.
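/// For instance (a sketch): if invariant loads %a and %b read the same
/// address and %b is the chosen class representative, the parameter (4 + %a)
/// is rewritten to (4 + %b), while an AddRec like {%a,+,1}<%loop> is split
/// into its rewritten start value plus the recurrence {0,+,1}<%loop>, so the
/// AddRec structure itself stays intact.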
1741struct SCEVSensitiveParameterRewriter
1742 : public SCEVVisitor<SCEVSensitiveParameterRewriter, const SCEV *> {
1743 ValueToValueMap &VMap;
1744 ScalarEvolution &SE;
1745
1746public:
1747 SCEVSensitiveParameterRewriter(ValueToValueMap &VMap, ScalarEvolution &SE)
1748 : VMap(VMap), SE(SE) {}
1749
1750 static const SCEV *rewrite(const SCEV *E, ScalarEvolution &SE,
1751 ValueToValueMap &VMap) {
1752 SCEVSensitiveParameterRewriter SSPR(VMap, SE);
1753 return SSPR.visit(E);
1754 }
1755
1756 const SCEV *visit(const SCEV *E) {
1757 return SCEVVisitor<SCEVSensitiveParameterRewriter, const SCEV *>::visit(E);
1758 }
1759
1760 const SCEV *visitConstant(const SCEVConstant *E) { return E; }
1761
1762 const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
1763 return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
1764 }
1765
1766 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
1767 return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
1768 }
1769
1770 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
1771 return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
1772 }
1773
1774 const SCEV *visitAddExpr(const SCEVAddExpr *E) {
1775 SmallVector<const SCEV *, 4> Operands;
1776 for (int i = 0, e = E->getNumOperands(); i < e; ++i)
1777 Operands.push_back(visit(E->getOperand(i)));
1778 return SE.getAddExpr(Operands);
1779 }
1780
1781 const SCEV *visitMulExpr(const SCEVMulExpr *E) {
1782 SmallVector<const SCEV *, 4> Operands;
1783 for (int i = 0, e = E->getNumOperands(); i < e; ++i)
1784 Operands.push_back(visit(E->getOperand(i)));
1785 return SE.getMulExpr(Operands);
1786 }
1787
1788 const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
1789 SmallVector<const SCEV *, 4> Operands;
1790 for (int i = 0, e = E->getNumOperands(); i < e; ++i)
1791 Operands.push_back(visit(E->getOperand(i)));
1792 return SE.getSMaxExpr(Operands);
1793 }
1794
1795 const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
1796 SmallVector<const SCEV *, 4> Operands;
1797 for (int i = 0, e = E->getNumOperands(); i < e; ++i)
1798 Operands.push_back(visit(E->getOperand(i)));
1799 return SE.getUMaxExpr(Operands);
1800 }
1801
1802 const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
1803 return SE.getUDivExpr(visit(E->getLHS()), visit(E->getRHS()));
1804 }
1805
1806 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
1807 auto *Start = visit(E->getStart());
1808 auto *AddRec = SE.getAddRecExpr(SE.getConstant(E->getType(), 0),
1809 visit(E->getStepRecurrence(SE)),
1810 E->getLoop(), SCEV::FlagAnyWrap);
1811 return SE.getAddExpr(Start, AddRec);
1812 }
1813
1814 const SCEV *visitUnknown(const SCEVUnknown *E) {
1815 if (auto *NewValue = VMap.lookup(E->getValue()))
1816 return SE.getUnknown(NewValue);
1817 return E;
1818 }
1819};
1820
1821const SCEV *Scop::getRepresentingInvariantLoadSCEV(const SCEV *S) {
1822 return SCEVSensitiveParameterRewriter::rewrite(S, *SE, InvEquivClassVMap);
1823}
1824
1825void Scop::createParameterId(const SCEV *Parameter) {
1826 assert(Parameters.count(Parameter));
1827 assert(!ParameterIds.count(Parameter));
1828
1829 std::string ParameterName = "p_" + std::to_string(getNumParams() - 1);
1830
1831 if (const SCEVUnknown *ValueParameter = dyn_cast<SCEVUnknown>(Parameter)) {
1832 Value *Val = ValueParameter->getValue();
1833
1834 // If this parameter references a specific Value and this value has a name
1835 // we use this name as it is likely to be unique and more useful than just
1836 // a number.
1837 if (Val->hasName())
1838 ParameterName = Val->getName();
1839 else if (LoadInst *LI = dyn_cast<LoadInst>(Val)) {
1840 auto *LoadOrigin = LI->getPointerOperand()->stripInBoundsOffsets();
1841 if (LoadOrigin->hasName()) {
1842 ParameterName += "_loaded_from_";
1843 ParameterName +=
1844 LI->getPointerOperand()->stripInBoundsOffsets()->getName();
1845 }
1846 }
1847 }
1848
1849 auto *Id = isl_id_alloc(getIslCtx(), ParameterName.c_str(),
1850 const_cast<void *>((const void *)Parameter));
1851 ParameterIds[Parameter] = Id;
1852}
1853
1854void Scop::addParams(const ParameterSetTy &NewParameters) {
1855 for (const SCEV *Parameter : NewParameters) {
1856 // Normalize the SCEV to get the representing element for an invariant load.
1857 Parameter = extractConstantFactor(Parameter, *SE).second;
1858 Parameter = getRepresentingInvariantLoadSCEV(Parameter);
1859
1860 if (Parameters.insert(Parameter))
1861 createParameterId(Parameter);
1862 }
1863}
1864
1865__isl_give isl_id *Scop::getIdForParam(const SCEV *Parameter) {
1866 // Normalize the SCEV to get the representing element for an invariant load.
1867 Parameter = getRepresentingInvariantLoadSCEV(Parameter);
1868 return isl_id_copy(ParameterIds.lookup(Parameter));
1869}
1870
1871__isl_give isl_set *Scop::addNonEmptyDomainConstraints(isl_set *C) const {
1872 isl_set *DomainContext = isl_union_set_params(getDomains());
1873 return isl_set_intersect_params(C, DomainContext);
1874}
1875
1876void Scop::addUserAssumptions(AssumptionCache &AC, DominatorTree &DT,
1877 LoopInfo &LI) {
1878 auto *R = &getRegion();
1879 auto &F = *R->getEntry()->getParent();
1880 for (auto &Assumption : AC.assumptions()) {
1881 auto *CI = dyn_cast_or_null<CallInst>(Assumption);
1882 if (!CI || CI->getNumArgOperands() != 1)
1883 continue;
1884
1885 bool InR = R->contains(CI);
1886 if (!InR && !DT.dominates(CI->getParent(), R->getEntry()))
1887 continue;
1888
1889 auto *L = LI.getLoopFor(CI->getParent());
1890 auto *Val = CI->getArgOperand(0);
1891 ParameterSetTy DetectedParams;
1892 if (!isAffineConstraint(Val, R, L, *SE, DetectedParams)) {
1893 emitOptimizationRemarkAnalysis(F.getContext(), DEBUG_TYPE, F,
1894 CI->getDebugLoc(),
1895 "Non-affine user assumption ignored.");
1896 continue;
1897 }
1898
1899 // Collect all newly introduced parameters.
1900 ParameterSetTy NewParams;
1901 for (auto *Param : DetectedParams) {
1902 Param = extractConstantFactor(Param, *SE).second;
1903 Param = getRepresentingInvariantLoadSCEV(Param);
1904 if (Parameters.count(Param))
1905 continue;
1906 NewParams.insert(Param);
1907 }
1908
1909 SmallVector<isl_set *, 2> ConditionSets;
1910 auto *TI = InR ? CI->getParent()->getTerminator() : nullptr;
1911 auto &Stmt = InR ? *getStmtFor(CI->getParent()) : *Stmts.begin();
1912 auto *Dom = InR ? getDomainConditions(&Stmt) : isl_set_copy(Context);
1913 bool Valid = buildConditionSets(Stmt, Val, TI, L, Dom, ConditionSets);
1914 isl_set_free(Dom);
1915
1916 if (!Valid)
1917 continue;
1918
1919 isl_set *AssumptionCtx = nullptr;
1920 if (InR) {
1921 AssumptionCtx = isl_set_complement(isl_set_params(ConditionSets[1]));
1922 isl_set_free(ConditionSets[0]);
1923 } else {
1924 AssumptionCtx = isl_set_complement(ConditionSets[1]);
1925 AssumptionCtx = isl_set_intersect(AssumptionCtx, ConditionSets[0]);
1926 }
1927
1928 // Project out newly introduced parameters as they are not otherwise useful.
1929 if (!NewParams.empty()) {
1930 for (unsigned u = 0; u < isl_set_n_param(AssumptionCtx); u++) {
1931 auto *Id = isl_set_get_dim_id(AssumptionCtx, isl_dim_param, u);
1932 auto *Param = static_cast<const SCEV *>(isl_id_get_user(Id));
1933 isl_id_free(Id);
1934
1935 if (!NewParams.count(Param))
1936 continue;
1937
1938 AssumptionCtx =
1939 isl_set_project_out(AssumptionCtx, isl_dim_param, u--, 1);
1940 }
1941 }
1942
1943 emitOptimizationRemarkAnalysis(
1944 F.getContext(), DEBUG_TYPE, F, CI->getDebugLoc(),
1945 "Use user assumption: " + stringFromIslObj(AssumptionCtx));
1946 Context = isl_set_intersect(Context, AssumptionCtx);
1947 }
1948}
1949
1950void Scop::addUserContext() {
1951 if (UserContextStr.empty())
1952 return;
1953
1954 isl_set *UserContext =
1955 isl_set_read_from_str(getIslCtx(), UserContextStr.c_str());
1956 isl_space *Space = getParamSpace();
1957 if (isl_space_dim(Space, isl_dim_param) !=
1958 isl_set_dim(UserContext, isl_dim_param)) {
1959 auto SpaceStr = isl_space_to_str(Space);
1960 errs() << "Error: the context provided in -polly-context has not the same "
1961 << "number of dimensions than the computed context. Due to this "
1962 << "mismatch, the -polly-context option is ignored. Please provide "
1963 << "the context in the parameter space: " << SpaceStr << ".\n";
1964 free(SpaceStr);
1965 isl_set_free(UserContext);
1966 isl_space_free(Space);
1967 return;
1968 }
1969
1970 for (unsigned i = 0; i < isl_space_dim(Space, isl_dim_param); i++) {
1971 auto *NameContext = isl_set_get_dim_name(Context, isl_dim_param, i);
1972 auto *NameUserContext = isl_set_get_dim_name(UserContext, isl_dim_param, i);
1973
1974 if (strcmp(NameContext, NameUserContext) != 0) {
1975 auto SpaceStr = isl_space_to_str(Space);
1976 errs() << "Error: the name of dimension " << i
1977 << " provided in -polly-context "
1978 << "is '" << NameUserContext << "', but the name in the computed "
1979 << "context is '" << NameContext
1980 << "'. Due to this name mismatch, "
1981 << "the -polly-context option is ignored. Please provide "
1982 << "the context in the parameter space: " << SpaceStr << ".\n";
1983 free(SpaceStr);
1984 isl_set_free(UserContext);
1985 isl_space_free(Space);
1986 return;
1987 }
1988
1989 UserContext =
1990 isl_set_set_dim_id(UserContext, isl_dim_param, i,
1991 isl_space_get_dim_id(Space, isl_dim_param, i));
1992 }
1993
1994 Context = isl_set_intersect(Context, UserContext);
1995 isl_space_free(Space);
1996}
1997
1998void Scop::buildInvariantEquivalenceClasses() {
1999 DenseMap<std::pair<const SCEV *, Type *>, LoadInst *> EquivClasses;
2000
2001 const InvariantLoadsSetTy &RIL = getRequiredInvariantLoads();
2002 for (LoadInst *LInst : RIL) {
2003 const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
2004
2005 Type *Ty = LInst->getType();
2006 LoadInst *&ClassRep = EquivClasses[std::make_pair(PointerSCEV, Ty)];
2007 if (ClassRep) {
2008 InvEquivClassVMap[LInst] = ClassRep;
2009 continue;
2010 }
2011
2012 ClassRep = LInst;
2013 InvariantEquivClasses.emplace_back(PointerSCEV, MemoryAccessList(), nullptr,
2014 Ty);
2015 }
2016}
2017
2018void Scop::buildContext() {
2019 isl_space *Space = isl_space_params_alloc(getIslCtx(), 0);
2020 Context = isl_set_universe(isl_space_copy(Space));
2021 InvalidContext = isl_set_empty(isl_space_copy(Space));
2022 AssumedContext = isl_set_universe(Space);
2023}
2024
2025void Scop::addParameterBounds() {
2026 unsigned PDim = 0;
2027 for (auto *Parameter : Parameters) {
2028 ConstantRange SRange = SE->getSignedRange(Parameter);
2029 Context = addRangeBoundsToSet(Context, SRange, PDim++, isl_dim_param);
2030 }
2031}
2032
2033void Scop::realignParams() {
2034 // Add all parameters into a common model.
2035 isl_space *Space = isl_space_params_alloc(getIslCtx(), ParameterIds.size());
2036
2037 unsigned PDim = 0;
2038 for (const auto *Parameter : Parameters) {
2039 isl_id *id = getIdForParam(Parameter);
2040 Space = isl_space_set_dim_id(Space, isl_dim_param, PDim++, id);
2041 }
2042
2043 // Align the parameters of all data structures to the model.
2044 Context = isl_set_align_params(Context, Space);
2045
2046 // As all parameters are known add bounds to them.
2047 addParameterBounds();
2048
2049 for (ScopStmt &Stmt : *this)
2050 Stmt.realignParams();
2051}
2052
2053static __isl_give isl_set *
2054simplifyAssumptionContext(__isl_take isl_set *AssumptionContext,
2055 const Scop &S) {
2056 // If we modeled all blocks in the SCoP that have side effects, we can
2057 // simplify the context with the constraints that are needed for anything to
2058 // be executed at all. However, if we have error blocks in the SCoP we have
2059 // already assumed some parameter combinations cannot occur and removed them
2060 // from the domains, thus we cannot use the remaining domain to simplify the
2061 // assumptions.
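  // As a small example (illustrative): gisting the assumption
  // { [] : n >= 0 } against the domain parameters { [] : n >= 1 } yields the
  // universe set, because the assumption is implied whenever any statement
  // instance is executed at all.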
2062 if (!S.hasErrorBlock()) {
2063 isl_set *DomainParameters = isl_union_set_params(S.getDomains());
2064 AssumptionContext =
2065 isl_set_gist_params(AssumptionContext, DomainParameters);
2066 }
2067
2068 AssumptionContext = isl_set_gist_params(AssumptionContext, S.getContext());
2069 return AssumptionContext;
2070}
2071
2072void Scop::simplifyContexts() {
2073 // The parameter constraints of the iteration domains give us a set of
2074 // constraints that need to hold for all cases where at least a single
2075 // statement iteration is executed in the whole scop. We now simplify the
2076 // assumed context under the assumption that such constraints hold and at
2077 // least a single statement iteration is executed. For cases where no
2078 // statement instances are executed, the assumptions we have taken about
2079 // the executed code do not matter and can be changed.
2080 //
2081 // WARNING: This only holds if the assumptions we have taken do not reduce
2082 // the set of statement instances that are executed. Otherwise we
2083 // may run into a case where the iteration domains suggest that
2084 // for a certain set of parameter constraints no code is executed,
2085 // but in the original program some computation would have been
2086 // performed. In such a case, modifying the run-time conditions and
2087 // possibly influencing the run-time check may cause certain scops
2088 // to not be executed.
2089 //
2090 // Example:
2091 //
2092 // When delinearizing the following code:
2093 //
2094 // for (long i = 0; i < 100; i++)
2095 // for (long j = 0; j < m; j++)
2096 // A[i+p][j] = 1.0;
2097 //
2098 // we assume that the condition m <= 0 or (m >= 1 and p >= 0) holds as
2099 // otherwise we would access out of bound data. Now, knowing that code is
2100 // only executed for the case m >= 0, it is sufficient to assume p >= 0.
2101 AssumedContext = simplifyAssumptionContext(AssumedContext, *this);
2102 InvalidContext = isl_set_align_params(InvalidContext, getParamSpace());
2103}
2104
2105/// @brief Add the minimal/maximal access in @p Set to @p User.
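///
/// As a sketch: for accesses A[2i] with 0 <= i < N (and N >= 1), the
/// enclosing pair is MinPMA = [N] -> { [] -> A[0] } and
/// MaxPMA = [N] -> { [] -> A[2N - 1] }, where the last output dimension of
/// the maximum is incremented by one (see the code below) so that the pair
/// brackets the accessed region.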
2106static isl_stat buildMinMaxAccess(__isl_take isl_set *Set, void *User) {
2107 Scop::MinMaxVectorTy *MinMaxAccesses = (Scop::MinMaxVectorTy *)User;
2108 isl_pw_multi_aff *MinPMA, *MaxPMA;
2109 isl_pw_aff *LastDimAff;
2110 isl_aff *OneAff;
2111 unsigned Pos;
2112
2113 Set = isl_set_remove_divs(Set);
2114
2115 if (isl_set_n_basic_set(Set) >= MaxDisjunctionsInDomain) {
2116 isl_set_free(Set);
2117 return isl_stat_error;
2118 }
2119
2120 // Restrict the number of parameters involved in the access as the lexmin/
2121 // lexmax computation will take too long if this number is high.
2122 //
2123 // Experiments with a simple test case using an i7 4800MQ:
2124 //
2125 // #Parameters involved | Time (in sec)
2126 // 6 | 0.01
2127 // 7 | 0.04
2128 // 8 | 0.12
2129 // 9 | 0.40
2130 // 10 | 1.54
2131 // 11 | 6.78
2132 // 12 | 30.38
2133 //
2134 if (isl_set_n_param(Set) > RunTimeChecksMaxParameters) {
2135 unsigned InvolvedParams = 0;
2136 for (unsigned u = 0, e = isl_set_n_param(Set); u < e; u++)
2137 if (isl_set_involves_dims(Set, isl_dim_param, u, 1))
2138 InvolvedParams++;
2139
2140 if (InvolvedParams > RunTimeChecksMaxParameters) {
2141 isl_set_free(Set);
2142 return isl_stat_error;
2143 }
2144 }
2145
2146 MinPMA = isl_set_lexmin_pw_multi_aff(isl_set_copy(Set));
2147 MaxPMA = isl_set_lexmax_pw_multi_aff(isl_set_copy(Set));
2148
2149 MinPMA = isl_pw_multi_aff_coalesce(MinPMA);
2150 MaxPMA = isl_pw_multi_aff_coalesce(MaxPMA);
2151
2152 // Adjust the last dimension of the maximal access by one as we want to
2153 // enclose the accessed memory region by MinPMA and MaxPMA. The pointer
2154 // we test during code generation might now point after the end of the
2155 // allocated array but we will never dereference it anyway.
2156 assert(isl_pw_multi_aff_dim(MaxPMA, isl_dim_out) &&
2157 "Assumed at least one output dimension");
2158 Pos = isl_pw_multi_aff_dim(MaxPMA, isl_dim_out) - 1;
2159 LastDimAff = isl_pw_multi_aff_get_pw_aff(MaxPMA, Pos);
2160 OneAff = isl_aff_zero_on_domain(
2161 isl_local_space_from_space(isl_pw_aff_get_domain_space(LastDimAff)));
2162 OneAff = isl_aff_add_constant_si(OneAff, 1);
2163 LastDimAff = isl_pw_aff_add(LastDimAff, isl_pw_aff_from_aff(OneAff));
2164 MaxPMA = isl_pw_multi_aff_set_pw_aff(MaxPMA, Pos, LastDimAff);
2165
2166 MinMaxAccesses->push_back(std::make_pair(MinPMA, MaxPMA));
2167
2168 isl_set_free(Set);
2169 return isl_stat_ok;
2170}
2171
2172static __isl_give isl_set *getAccessDomain(MemoryAccess *MA) {
2173 isl_set *Domain = MA->getStatement()->getDomain();
2174 Domain = isl_set_project_out(Domain, isl_dim_set, 0, isl_set_n_dim(Domain));
2175 return isl_set_reset_tuple_id(Domain);
2176}
2177
2178/// @brief Wrapper function to calculate minimal/maximal accesses to each array.
2179static bool calculateMinMaxAccess(__isl_take isl_union_map *Accesses,
2180 __isl_take isl_union_set *Domains,
2181 Scop::MinMaxVectorTy &MinMaxAccesses) {
2182
2183 Accesses = isl_union_map_intersect_domain(Accesses, Domains);
2184 isl_union_set *Locations = isl_union_map_range(Accesses);
2185 Locations = isl_union_set_coalesce(Locations);
2186 Locations = isl_union_set_detect_equalities(Locations);
2187 bool Valid = (0 == isl_union_set_foreach_set(Locations, buildMinMaxAccess,
2188 &MinMaxAccesses));
2189 isl_union_set_free(Locations);
2190 return Valid;
2191}
2192
2193/// @brief Helper to treat non-affine regions and basic blocks the same.
2194///
2195///{
2196
2197/// @brief Return the block that is the representing block for @p RN.
2198static inline BasicBlock *getRegionNodeBasicBlock(RegionNode *RN) {
2199 return RN->isSubRegion() ? RN->getNodeAs<Region>()->getEntry()
2200 : RN->getNodeAs<BasicBlock>();
2201}
2202
2203/// @brief Return the @p idx'th block that is executed after @p RN.
2204static inline BasicBlock *
2205getRegionNodeSuccessor(RegionNode *RN, TerminatorInst *TI, unsigned idx) {
2206 if (RN->isSubRegion()) {
2207 assert(idx == 0);
2208 return RN->getNodeAs<Region>()->getExit();
2209 }
2210 return TI->getSuccessor(idx);
2211}
2212
2213/// @brief Return the smallest loop surrounding @p RN.
2214static inline Loop *getRegionNodeLoop(RegionNode *RN, LoopInfo &LI) {
2215 if (!RN->isSubRegion())
2216 return LI.getLoopFor(RN->getNodeAs<BasicBlock>());
2217
2218 Region *NonAffineSubRegion = RN->getNodeAs<Region>();
2219 Loop *L = LI.getLoopFor(NonAffineSubRegion->getEntry());
2220 while (L && NonAffineSubRegion->contains(L))
2221 L = L->getParentLoop();
2222 return L;
2223}
2224
2225static inline unsigned getNumBlocksInRegionNode(RegionNode *RN) {
2226 if (!RN->isSubRegion())
2227 return 1;
2228
2229 Region *R = RN->getNodeAs<Region>();
2230 return std::distance(R->block_begin(), R->block_end());
2231}
2232
2233static bool containsErrorBlock(RegionNode *RN, const Region &R, LoopInfo &LI,
2234 const DominatorTree &DT) {
2235 if (!RN->isSubRegion())
2236 return isErrorBlock(*RN->getNodeAs<BasicBlock>(), R, LI, DT);
2237 for (BasicBlock *BB : RN->getNodeAs<Region>()->blocks())
2238 if (isErrorBlock(*BB, R, LI, DT))
2239 return true;
2240 return false;
2241}
2242
2243///}
2244
2245static inline __isl_give isl_set *addDomainDimId(__isl_take isl_set *Domain,
2246 unsigned Dim, Loop *L) {
2247 Domain = isl_set_lower_bound_si(Domain, isl_dim_set, Dim, -1);
2248 isl_id *DimId =
2249 isl_id_alloc(isl_set_get_ctx(Domain), nullptr, static_cast<void *>(L));
2250 return isl_set_set_dim_id(Domain, isl_dim_set, Dim, DimId);
2251}
2252
2253__isl_give isl_set *Scop::getDomainConditions(const ScopStmt *Stmt) const {
2254 return getDomainConditions(Stmt->getEntryBlock());
2255}
2256
2257__isl_give isl_set *Scop::getDomainConditions(BasicBlock *BB) const {
2258 auto DIt = DomainMap.find(BB);
2259 if (DIt != DomainMap.end())
2260 return isl_set_copy(DIt->getSecond());
2261
2262 auto &RI = *R.getRegionInfo();
2263 auto *BBR = RI.getRegionFor(BB);
2264 while (BBR->getEntry() == BB)
2265 BBR = BBR->getParent();
2266 return getDomainConditions(BBR->getEntry());
2267}
2268
2269bool Scop::buildDomains(Region *R, DominatorTree &DT, LoopInfo &LI) {
2270
2271 bool IsOnlyNonAffineRegion = isNonAffineSubRegion(R);
2272 auto *EntryBB = R->getEntry();
2273 auto *L = IsOnlyNonAffineRegion ? nullptr : LI.getLoopFor(EntryBB);
2274 int LD = getRelativeLoopDepth(L);
2275 auto *S = isl_set_universe(isl_space_set_alloc(getIslCtx(), 0, LD + 1));
2276
2277 while (LD-- >= 0) {
2278 S = addDomainDimId(S, LD + 1, L);
2279 L = L->getParentLoop();
2280 }
2281
2282 // Initialize the invalid domain.
2283 auto *EntryStmt = getStmtFor(EntryBB);
2284 EntryStmt->setInvalidDomain(isl_set_empty(isl_set_get_space(S)));
2285
2286 DomainMap[EntryBB] = S;
2287
2288 if (IsOnlyNonAffineRegion)
2289 return !containsErrorBlock(R->getNode(), *R, LI, DT);
2290
2291 if (!buildDomainsWithBranchConstraints(R, DT, LI))
2292 return false;
2293
2294 if (!propagateDomainConstraints(R, DT, LI))
2295 return false;
2296
2297 // Error blocks and blocks dominated by them have been assumed to never be
2298 // executed. Representing them in the Scop does not add any value. In fact,
2299 // it is likely to cause issues during construction of the ScopStmts. The
2300 // contents of error blocks have not been verified to be expressible and
2301 // will cause problems when building up a ScopStmt for them.
2302 // Furthermore, basic blocks dominated by error blocks may reference
2303 // instructions in the error block which, if the error block is not modeled,
2304 // can themselves not be constructed properly. To this end we will replace
2305 // the domains of error blocks and those only reachable via error blocks
2306 // with an empty set. Additionally, we will record for each block under which
2307 // parameter combination it would be reached via an error block in its
2308 // InvalidDomain. This information is needed during load hoisting.
2309 if (!propagateInvalidStmtDomains(R, DT, LI))
2310 return false;
2311
2312 return true;
2313}
2314
2315static Loop *getFirstNonBoxedLoopFor(BasicBlock *BB, LoopInfo &LI,
2316 const BoxedLoopsSetTy &BoxedLoops) {
2317 auto *L = LI.getLoopFor(BB);
2318 while (BoxedLoops.count(L))
2319 L = L->getParentLoop();
2320 return L;
2321}
2322
2323/// @brief Adjust the dimensions of @p Dom that was constructed for @p OldL
2324/// to be compatible to domains constructed for loop @p NewL.
2325///
2326/// This function assumes @p NewL and @p OldL are equal or there is a CFG
2327/// edge from @p OldL to @p NewL.
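///
/// For example (a sketch): on an edge from loop j (domain [i, j]) to a
/// sibling loop k at the same depth, dimension j is projected out and a new
/// dimension for k is added; on an edge that only leaves loops, the trailing
/// dimensions are projected out, e.g. { [i, j] : ... } becomes { [i] : ... }.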
2328static __isl_give isl_set *adjustDomainDimensions(Scop &S,
2329 __isl_take isl_set *Dom,
2330 Loop *OldL, Loop *NewL) {
2331
2332 // If the loops are the same there is nothing to do.
2333 if (NewL == OldL)
2334 return Dom;
2335
2336 int OldDepth = S.getRelativeLoopDepth(OldL);
2337 int NewDepth = S.getRelativeLoopDepth(NewL);
2338 // If both loops are non-affine loops there is nothing to do.
2339 if (OldDepth == -1 && NewDepth == -1)
2340 return Dom;
2341
2342 // Distinguish three cases:
2343 // 1) The depth is the same but the loops are not.
2344 // => One loop was left one was entered.
2345 // 2) The depth increased from OldL to NewL.
2346 // => One loop was entered, none was left.
2347 // 3) The depth decreased from OldL to NewL.
2348 //    => Loops were left; the difference of the depths defines how many.
2349 if (OldDepth == NewDepth) {
2350 assert(OldL->getParentLoop() == NewL->getParentLoop());
2351 Dom = isl_set_project_out(Dom, isl_dim_set, NewDepth, 1);
2352 Dom = isl_set_add_dims(Dom, isl_dim_set, 1);
2353 Dom = addDomainDimId(Dom, NewDepth, NewL);
2354 } else if (OldDepth < NewDepth) {
2355 assert(OldDepth + 1 == NewDepth);
2356 auto &R = S.getRegion();
2357 (void)R;
2358 assert(NewL->getParentLoop() == OldL ||
2359 ((!OldL || !R.contains(OldL)) && R.contains(NewL)));
2360 Dom = isl_set_add_dims(Dom, isl_dim_set, 1);
2361 Dom = addDomainDimId(Dom, NewDepth, NewL);
2362 } else {
2363 assert(OldDepth > NewDepth);
2364 int Diff = OldDepth - NewDepth;
2365 int NumDim = isl_set_n_dim(Dom);
2366 assert(NumDim >= Diff);
2367 Dom = isl_set_project_out(Dom, isl_dim_set, NumDim - Diff, Diff);
2368 }
2369
2370 return Dom;
2371}
2372
2373bool Scop::propagateInvalidStmtDomains(Region *R, DominatorTree &DT,
2374 LoopInfo &LI) {
2375 auto &BoxedLoops = getBoxedLoops();
2376
2377 ReversePostOrderTraversal<Region *> RTraversal(R);
2378 for (auto *RN : RTraversal) {
2379
2380 // Recurse for affine subregions but go on for basic blocks and non-affine
2381 // subregions.
2382 if (RN->isSubRegion()) {
2383 Region *SubRegion = RN->getNodeAs<Region>();
2384 if (!isNonAffineSubRegion(SubRegion)) {
2385 propagateInvalidStmtDomains(SubRegion, DT, LI);
2386 continue;
2387 }
2388 }
2389
2390 bool ContainsErrorBlock = containsErrorBlock(RN, getRegion(), LI, DT);
2391 BasicBlock *BB = getRegionNodeBasicBlock(RN);
2392 ScopStmt *Stmt = getStmtFor(BB);
2393 isl_set *&Domain = DomainMap[BB];
2394 assert(Domain && "Cannot propagate a nullptr");
2395
2396 auto *InvalidDomain = Stmt->getInvalidDomain();
2397 bool IsInvalidBlock =
2398 ContainsErrorBlock || isl_set_is_subset(Domain, InvalidDomain);
2399
2400 if (!IsInvalidBlock) {
2401 InvalidDomain = isl_set_intersect(InvalidDomain, isl_set_copy(Domain));
2402 } else {
2403 isl_set_free(InvalidDomain);
2404 InvalidDomain = Domain;
2405 isl_set *DomPar = isl_set_params(isl_set_copy(Domain));
2406 recordAssumption(ERRORBLOCK, DomPar, BB->getTerminator()->getDebugLoc(),
2407 AS_RESTRICTION);
2408 Domain = nullptr;
2409 }
2410
2411 if (isl_set_is_empty(InvalidDomain)) {
2412 Stmt->setInvalidDomain(InvalidDomain);
2413 continue;
2414 }
2415
2416 auto *BBLoop = getRegionNodeLoop(RN, LI);
2417 auto *TI = BB->getTerminator();
2418 unsigned NumSuccs = RN->isSubRegion() ? 1 : TI->getNumSuccessors();
2419 for (unsigned u = 0; u < NumSuccs; u++) {
2420 auto *SuccBB = getRegionNodeSuccessor(RN, TI, u);
2421 auto *SuccStmt = getStmtFor(SuccBB);
2422
2423 // Skip successors outside the SCoP.
2424 if (!SuccStmt)
2425 continue;
2426
2427 // Skip backedges.
2428 if (DT.dominates(SuccBB, BB))
2429 continue;
2430
2431 auto *SuccBBLoop = getFirstNonBoxedLoopFor(SuccBB, LI, BoxedLoops);
2432 auto *AdjustedInvalidDomain = adjustDomainDimensions(
2433 *this, isl_set_copy(InvalidDomain), BBLoop, SuccBBLoop);
2434 auto *SuccInvalidDomain = SuccStmt->getInvalidDomain();
2435 SuccInvalidDomain =
2436 isl_set_union(SuccInvalidDomain, AdjustedInvalidDomain);
2437 SuccInvalidDomain = isl_set_coalesce(SuccInvalidDomain);
2438 unsigned NumConjucts = isl_set_n_basic_set(SuccInvalidDomain);
2439 SuccStmt->setInvalidDomain(SuccInvalidDomain);
2440
2441 // Check if the maximal number of domain disjunctions was reached.
2442 // In case this happens we will bail.
2443 if (NumConjucts < MaxDisjunctionsInDomain)
2444 continue;
2445
2446 isl_set_free(InvalidDomain);
2447 invalidate(COMPLEXITY, TI->getDebugLoc());
2448 return false;
2449 }
2450
2451 Stmt->setInvalidDomain(InvalidDomain);
2452 }
2453
2454 return true;
2455}
2456
2457void Scop::propagateDomainConstraintsToRegionExit(
2458 BasicBlock *BB, Loop *BBLoop,
2459 SmallPtrSetImpl<BasicBlock *> &FinishedExitBlocks, LoopInfo &LI) {
2460
2461 // Check if the block @p BB is the entry of a region. If so we propagate its
2462 // domain to the exit block of the region. Otherwise we are done.
2463 auto *RI = R.getRegionInfo();
2464 auto *BBReg = RI ? RI->getRegionFor(BB) : nullptr;
2465 auto *ExitBB = BBReg ? BBReg->getExit() : nullptr;
2466 if (!BBReg || BBReg->getEntry() != BB || !R.contains(ExitBB))
2467 return;
2468
2469 auto &BoxedLoops = getBoxedLoops();
2470 // Do not propagate the domain if there is a loop backedge inside the region
2471 // that would prevent the exit block from being executed.
2472 auto *L = BBLoop;
2473 while (L && R.contains(L)) {
2474 SmallVector<BasicBlock *, 4> LatchBBs;
2475 L->getLoopLatches(LatchBBs);
2476 for (auto *LatchBB : LatchBBs)
2477 if (BB != LatchBB && BBReg->contains(LatchBB))
2478 return;
2479 L = L->getParentLoop();
2480 }
2481
2482 auto *Domain = DomainMap[BB];
2483 assert(Domain && "Cannot propagate a nullptr");
2484
2485 auto *ExitBBLoop = getFirstNonBoxedLoopFor(ExitBB, LI, BoxedLoops);
2486
2487 // Since the dimensions of @p BB and @p ExitBB might be different we have to
2488 // adjust the domain before we can propagate it.
2489 auto *AdjustedDomain =
2490 adjustDomainDimensions(*this, isl_set_copy(Domain), BBLoop, ExitBBLoop);
2491 auto *&ExitDomain = DomainMap[ExitBB];
2492
2493 // If the exit domain is not yet created we set it, otherwise we "add" the
2494 // current domain.
2495 ExitDomain =
2496 ExitDomain ? isl_set_union(AdjustedDomain, ExitDomain) : AdjustedDomain;
2497
2498 // Initialize the invalid domain.
2499 auto *ExitStmt = getStmtFor(ExitBB);
2500 ExitStmt->setInvalidDomain(isl_set_empty(isl_set_get_space(ExitDomain)));
2501
2502 FinishedExitBlocks.insert(ExitBB);
2503}
2504
2505bool Scop::buildDomainsWithBranchConstraints(Region *R, DominatorTree &DT,
2506 LoopInfo &LI) {
2507 // To create the domain for each block in R we iterate over all blocks and
2508 // subregions in R and propagate the conditions under which the current region
2509 // element is executed. To this end we iterate in reverse post order over R as
2510 // it ensures that we first visit all predecessors of a region node (either a
2511 // basic block or a subregion) before we visit the region node itself.
2512 // Initially, only the domain for the SCoP region entry block is set and from
2513 // there we propagate the current domain to all successors; however, we add the
2514 // condition that the successor is actually executed next.
2515 // As we are only interested in non-loop carried constraints here we can
2516 // simply skip loop back edges.
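  //
  // As an example (illustrative), for a diamond CFG
  //
  //        BB0
  //       /   \
  //     BB1   BB2
  //       \   /
  //        BB3
  //
  // with branch condition "c" in BB0, this sets Dom(BB1) = Dom(BB0) & {c}
  // and Dom(BB2) = Dom(BB0) & {!c}; BB3 then receives the union of the
  // domains of BB1 and BB2.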
2517
2518 SmallPtrSet<BasicBlock *, 8> FinishedExitBlocks;
2519 ReversePostOrderTraversal<Region *> RTraversal(R);
2520 for (auto *RN : RTraversal) {
2521
2522 // Recurse for affine subregions but go on for basic blocks and non-affine
2523 // subregions.
2524 if (RN->isSubRegion()) {
2525 Region *SubRegion = RN->getNodeAs<Region>();
2526 if (!isNonAffineSubRegion(SubRegion)) {
2527 if (!buildDomainsWithBranchConstraints(SubRegion, DT, LI))
2528 return false;
2529 continue;
2530 }
2531 }
2532
2533 if (containsErrorBlock(RN, getRegion(), LI, DT))
2534 HasErrorBlock = true;
2535
2536 BasicBlock *BB = getRegionNodeBasicBlock(RN);
2537 TerminatorInst *TI = BB->getTerminator();
2538
2539 if (isa<UnreachableInst>(TI))
2540 continue;
2541
2542 isl_set *Domain = DomainMap.lookup(BB);
2543 if (!Domain)
2544 continue;
2545 MaxLoopDepth = std::max(MaxLoopDepth, isl_set_n_dim(Domain));
2546
2547 auto *BBLoop = getRegionNodeLoop(RN, LI);
2548 // Propagate the domain from BB directly to blocks that have a superset
2549 // domain, at the moment only region exit nodes of regions that start in BB.
2550 propagateDomainConstraintsToRegionExit(BB, BBLoop, FinishedExitBlocks, LI);
2551
2552 // If all successors of BB have been set a domain through the propagation
2553 // above we do not need to build condition sets but can just skip this
2554 // block. However, it is important to note that this is a local property
2555 // with regards to the region @p R. To this end FinishedExitBlocks is a
2556 // local variable.
2557 auto IsFinishedRegionExit = [&FinishedExitBlocks](BasicBlock *SuccBB) {
2558 return FinishedExitBlocks.count(SuccBB);
2559 };
2560 if (std::all_of(succ_begin(BB), succ_end(BB), IsFinishedRegionExit))
2561 continue;
2562
2563 // Build the condition sets for the successor nodes of the current region
2564 // node. If it is a non-affine subregion we will always execute the single
2565 // exit node, hence the single entry node domain is the condition set. For
2566 // basic blocks we use the helper function buildConditionSets.
2567 SmallVector<isl_set *, 8> ConditionSets;
2568 if (RN->isSubRegion())
2569 ConditionSets.push_back(isl_set_copy(Domain));
2570 else if (!buildConditionSets(*getStmtFor(BB), TI, BBLoop, Domain,
2571 ConditionSets))
2572 return false;
2573
2574 // Now iterate over the successors and set their initial domain based on
2575 // their condition set. We skip back edges here and have to be careful when
2576 // we leave a loop not to keep constraints over a dimension that doesn't
2577 // exist anymore.
2578 assert(RN->isSubRegion() || TI->getNumSuccessors() == ConditionSets.size());
2579 for (unsigned u = 0, e = ConditionSets.size(); u < e; u++) {
2580 isl_set *CondSet = ConditionSets[u];
2581 BasicBlock *SuccBB = getRegionNodeSuccessor(RN, TI, u);
2582
2583 auto *SuccStmt = getStmtFor(SuccBB);
2584 // Skip blocks outside the region.
2585 if (!SuccStmt) {
2586 isl_set_free(CondSet);
2587 continue;
2588 }
2589
2590 // If we propagate the domain of some block to "SuccBB" we do not have to
2591 // adjust the domain.
2592 if (FinishedExitBlocks.count(SuccBB)) {
2593 isl_set_free(CondSet);
2594 continue;
2595 }
2596
2597 // Skip back edges.
2598 if (DT.dominates(SuccBB, BB)) {
2599 isl_set_free(CondSet);
2600 continue;
2601 }
2602
2603 auto &BoxedLoops = getBoxedLoops();
2604 auto *SuccBBLoop = getFirstNonBoxedLoopFor(SuccBB, LI, BoxedLoops);
2605 CondSet = adjustDomainDimensions(*this, CondSet, BBLoop, SuccBBLoop);
2606
2607 // Set the domain for the successor or merge it with an existing domain in
2608 // case there are multiple paths (without loop back edges) to the
2609 // successor block.
2610 isl_set *&SuccDomain = DomainMap[SuccBB];
2611
2612 if (SuccDomain) {
2613 SuccDomain = isl_set_coalesce(isl_set_union(SuccDomain, CondSet));
2614 } else {
2615 // Initialize the invalid domain.
2616 SuccStmt->setInvalidDomain(isl_set_empty(isl_set_get_space(CondSet)));
2617 SuccDomain = CondSet;
2618 }
2619
2620 // Check if the maximal number of domain disjunctions was reached.
2621 // In case this happens we will clean up and bail.
2622 if (isl_set_n_basic_set(SuccDomain) < MaxDisjunctionsInDomain)
2623 continue;
2624
2625 invalidate(COMPLEXITY, DebugLoc());
2626 while (++u < ConditionSets.size())
2627 isl_set_free(ConditionSets[u]);
2628 return false;
2629 }
2630 }
2631
2632 return true;
2633}
2634
2635__isl_give isl_set *Scop::getPredecessorDomainConstraints(BasicBlock *BB,
2636 isl_set *Domain,
2637 DominatorTree &DT,
2638 LoopInfo &LI) {
2639 // If @p BB is the ScopEntry we are done
2640 if (R.getEntry() == BB)
2641 return isl_set_universe(isl_set_get_space(Domain));
2642
2643 // The set of boxed loops (loops in non-affine subregions) for this SCoP.
2644 auto &BoxedLoops = getBoxedLoops();
2645
2646 // The region info of this function.
2647 auto &RI = *R.getRegionInfo();
2648
2649 auto *BBLoop = getFirstNonBoxedLoopFor(BB, LI, BoxedLoops);
2650
2651 // A domain to collect all predecessor domains, thus all conditions under
2652 // which the block is executed. To this end we start with the empty domain.
2653 isl_set *PredDom = isl_set_empty(isl_set_get_space(Domain));
2654
2655 // Set of regions of which the entry block domain has been propagated to BB.
2656 // All predecessors inside any of these regions can be skipped.
2657 SmallSet<Region *, 8> PropagatedRegions;
2658
2659 for (auto *PredBB : predecessors(BB)) {
2660 // Skip backedges.
2661 if (DT.dominates(BB, PredBB))
2662 continue;
2663
2664 // If the predecessor is in a region we used for propagation we can skip it.
2665 auto PredBBInRegion = [PredBB](Region *PR) { return PR->contains(PredBB); };
2666 if (std::any_of(PropagatedRegions.begin(), PropagatedRegions.end(),
2667 PredBBInRegion)) {
2668 continue;
2669 }
2670
2671 // Check if there is a valid region we can use for propagation, thus look
2672 // for a region that contains the predecessor and has @p BB as exit block.
2673 auto *PredR = RI.getRegionFor(PredBB);
2674 while (PredR->getExit() != BB && !PredR->contains(BB))
2675 PredR = PredR->getParent();
2676
2677 // If a valid region for propagation was found, use the entry of that region
2678 // for propagation, otherwise the PredBB directly.
2679 if (PredR->getExit() == BB) {
2680 PredBB = PredR->getEntry();
2681 PropagatedRegions.insert(PredR);
2682 }
2683
2684 auto *PredBBDom = getDomainConditions(PredBB);
2685 auto *PredBBLoop = getFirstNonBoxedLoopFor(PredBB, LI, BoxedLoops);
2686 PredBBDom = adjustDomainDimensions(*this, PredBBDom, PredBBLoop, BBLoop);
2687
2688 PredDom = isl_set_union(PredDom, PredBBDom);
2689 }
2690
2691 return PredDom;
2692}
2693
2694bool Scop::propagateDomainConstraints(Region *R, DominatorTree &DT,
2695 LoopInfo &LI) {
2696 // Iterate over the region R and propagate the domain constraints from the
2697 // predecessors to the current node. In contrast to the
2698 // buildDomainsWithBranchConstraints function, this one will pull the domain
2699 // information from the predecessors instead of pushing it to the successors.
2700 // Additionally, we assume the domains to be already present in the domain
2701 // map here. However, we iterate again in reverse post order so we know all
2702 // predecessors have been visited before a block or non-affine subregion is
2703 // visited.
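  //
  // As a sketch, if BB has the two predecessors P1 and P2, its final domain
  // becomes
  //
  //   Dom(BB) := Dom(BB) & (Dom(P1) u Dom(P2))
  //
  // i.e., the constraints collected so far intersected with the union of all
  // (dimension-adjusted) predecessor domains.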
2704
2705 ReversePostOrderTraversal<Region *> RTraversal(R);
2706 for (auto *RN : RTraversal) {
2707
2708 // Recurse for affine subregions but go on for basic blocks and non-affine
2709 // subregions.
2710 if (RN->isSubRegion()) {
2711 Region *SubRegion = RN->getNodeAs<Region>();
2712 if (!isNonAffineSubRegion(SubRegion)) {
2713 if (!propagateDomainConstraints(SubRegion, DT, LI))
2714 return false;
2715 continue;
2716 }
2717 }
2718
2719 BasicBlock *BB = getRegionNodeBasicBlock(RN);
2720 isl_set *&Domain = DomainMap[BB];
2721 assert(Domain);
2722
2723 // Under the union of all predecessor conditions we can reach this block.
2724 auto *PredDom = getPredecessorDomainConstraints(BB, Domain, DT, LI);
2725 Domain = isl_set_coalesce(isl_set_intersect(Domain, PredDom));
2726 Domain = isl_set_align_params(Domain, getParamSpace());
2727
2728 Loop *BBLoop = getRegionNodeLoop(RN, LI);
2729 if (BBLoop && BBLoop->getHeader() == BB && getRegion().contains(BBLoop))
2730 if (!addLoopBoundsToHeaderDomain(BBLoop, LI))
2731 return false;
2732 }
2733
2734 return true;
2735}
2736
2737 /// @brief Create a map from SetSpace -> SetSpace where dimension @p Dim
2738 /// is incremented by one and all other dimensions are kept equal, e.g.,
2739/// [i0, i1, i2, i3] -> [i0, i1, i2 + 1, i3]
2740/// if @p Dim is 2 and @p SetSpace has 4 dimensions.
2741static __isl_give isl_map *
2742createNextIterationMap(__isl_take isl_space *SetSpace, unsigned Dim) {
2743 auto *MapSpace = isl_space_map_from_set(SetSpace);
2744 auto *NextIterationMap = isl_map_universe(isl_space_copy(MapSpace));
2745 for (unsigned u = 0; u < isl_map_n_in(NextIterationMap); u++)
2746 if (u != Dim)
2747 NextIterationMap =
2748 isl_map_equate(NextIterationMap, isl_dim_in, u, isl_dim_out, u);
2749 auto *C = isl_constraint_alloc_equality(isl_local_space_from_space(MapSpace));
2750 C = isl_constraint_set_constant_si(C, 1);
2751 C = isl_constraint_set_coefficient_si(C, isl_dim_in, Dim, 1);
2752 C = isl_constraint_set_coefficient_si(C, isl_dim_out, Dim, -1);
2753 NextIterationMap = isl_map_add_constraint(NextIterationMap, C);
2754 return NextIterationMap;
2755}
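// Illustrative sketch (not part of ScopInfo.cpp): what the helper above
// yields for a made-up 3-dimensional set space and Dim = 1.
static void nextIterationMapExample(isl_ctx *Ctx) {
  isl_space *Space = isl_space_set_alloc(Ctx, /* nparam */ 0, /* dim */ 3);
  isl_map *Next = createNextIterationMap(Space, 1);
  isl_map_dump(Next); // { [i0, i1, i2] -> [i0, 1 + i1, i2] }
  isl_map_free(Next);
}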
2756
2757bool Scop::addLoopBoundsToHeaderDomain(Loop *L, LoopInfo &LI) {
2758 int LoopDepth = getRelativeLoopDepth(L);
2759 assert(LoopDepth >= 0 && "Loop in region should have at least depth one");
2760
2761 BasicBlock *HeaderBB = L->getHeader();
2762 assert(DomainMap.count(HeaderBB));
2763 isl_set *&HeaderBBDom = DomainMap[HeaderBB];
2764
2765 isl_map *NextIterationMap =
2766 createNextIterationMap(isl_set_get_space(HeaderBBDom), LoopDepth);
2767
2768 isl_set *UnionBackedgeCondition =
2769 isl_set_empty(isl_set_get_space(HeaderBBDom));
2770
2771 SmallVector<llvm::BasicBlock *, 4> LatchBlocks;
2772 L->getLoopLatches(LatchBlocks);
2773
2774 for (BasicBlock *LatchBB : LatchBlocks) {
2775
2776 // If the latch is only reachable via error statements we skip it.
2777 isl_set *LatchBBDom = DomainMap.lookup(LatchBB);
2778 if (!LatchBBDom)
2779 continue;
2780
2781 isl_set *BackedgeCondition = nullptr;
2782
2783 TerminatorInst *TI = LatchBB->getTerminator();
2784 BranchInst *BI = dyn_cast<BranchInst>(TI);
2785 if (BI && BI->isUnconditional())
2786 BackedgeCondition = isl_set_copy(LatchBBDom);
2787 else {
2788 SmallVector<isl_set *, 8> ConditionSets;
2789 int idx = BI->getSuccessor(0) != HeaderBB;
2790 if (!buildConditionSets(*getStmtFor(LatchBB), TI, L, LatchBBDom,
2791 ConditionSets))
2792 return false;
2793
2794 // Free the non-backedge condition set as we do not need it.
2795 isl_set_free(ConditionSets[1 - idx]);
2796
2797 BackedgeCondition = ConditionSets[idx];
2798 }
2799
2800 int LatchLoopDepth = getRelativeLoopDepth(LI.getLoopFor(LatchBB));
2801 assert(LatchLoopDepth >= LoopDepth);
2802 BackedgeCondition =
2803 isl_set_project_out(BackedgeCondition, isl_dim_set, LoopDepth + 1,
2804 LatchLoopDepth - LoopDepth);
2805 UnionBackedgeCondition =
2806 isl_set_union(UnionBackedgeCondition, BackedgeCondition);
2807 }
2808
2809 isl_map *ForwardMap = isl_map_lex_le(isl_set_get_space(HeaderBBDom));
2810 for (int i = 0; i < LoopDepth; i++)
2811 ForwardMap = isl_map_equate(ForwardMap, isl_dim_in, i, isl_dim_out, i);
2812
2813 isl_set *UnionBackedgeConditionComplement =
2814 isl_set_complement(UnionBackedgeCondition);
2815 UnionBackedgeConditionComplement = isl_set_lower_bound_si(
2816 UnionBackedgeConditionComplement, isl_dim_set, LoopDepth, 0);
2817 UnionBackedgeConditionComplement =
2818 isl_set_apply(UnionBackedgeConditionComplement, ForwardMap);
2819 HeaderBBDom = isl_set_subtract(HeaderBBDom, UnionBackedgeConditionComplement);
2820 HeaderBBDom = isl_set_apply(HeaderBBDom, NextIterationMap);
2821
2822 auto Parts = partitionSetParts(HeaderBBDom, LoopDepth);
2823 HeaderBBDom = Parts.second;
2824
2825 // Check if there is a <nsw> tagged AddRec for this loop and if so do not add
2826 // the bounded assumptions to the context as they are already implied by the
2827 // <nsw> tag.
2828 if (Affinator.hasNSWAddRecForLoop(L)) {
2829 isl_set_free(Parts.first);
2830 return true;
2831 }
2832
2833 isl_set *UnboundedCtx = isl_set_params(Parts.first);
2834 recordAssumption(INFINITELOOP, UnboundedCtx,
2835 HeaderBB->getTerminator()->getDebugLoc(), AS_RESTRICTION);
2836 return true;
2837}
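// Illustrative sketch (not part of ScopInfo.cpp): the subtraction above for
// a single loop `for (i = 0; i < N; ++i)` at relative depth 0. The
// NextIterationMap shift and the partitioning are omitted; all sets are
// made up.
static void headerBoundsExample(isl_ctx *Ctx) {
  isl_set *Header = isl_set_read_from_str(Ctx, "[N] -> { [i] : i >= 0 }");
  isl_set *Backedge =
      isl_set_read_from_str(Ctx, "[N] -> { [i] : 0 <= i < N }");
  // Iterations from which no backedge is taken ...
  isl_set *NoBackedge = isl_set_complement(Backedge);
  NoBackedge = isl_set_lower_bound_si(NoBackedge, isl_dim_set, 0, 0);
  // ... rule out all lexicographically later header iterations.
  isl_map *Forward = isl_map_lex_le(isl_set_get_space(Header));
  NoBackedge = isl_set_apply(NoBackedge, Forward);
  Header = isl_set_subtract(Header, NoBackedge);
  isl_set_dump(Header); // roughly [N] -> { [i] : 0 <= i < N }
  isl_set_free(Header);
}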
2838
2839MemoryAccess *Scop::lookupBasePtrAccess(MemoryAccess *MA) {
2840 auto *BaseAddr = SE->getSCEV(MA->getBaseAddr());
2841 auto *PointerBase = dyn_cast<SCEVUnknown>(SE->getPointerBase(BaseAddr));
2842 if (!PointerBase)
2843 return nullptr;
2844
2845 auto *PointerBaseInst = dyn_cast<Instruction>(PointerBase->getValue());
2846 if (!PointerBaseInst)
2847 return nullptr;
2848
2849 auto *BasePtrStmt = getStmtFor(PointerBaseInst);
2850 if (!BasePtrStmt)
2851 return nullptr;
2852
2853 return BasePtrStmt->getArrayAccessOrNULLFor(PointerBaseInst);
2854}
2855
2856bool Scop::hasNonHoistableBasePtrInScop(MemoryAccess *MA,
2857 __isl_keep isl_union_map *Writes) {
2858 if (auto *BasePtrMA = lookupBasePtrAccess(MA))
2859 return !isHoistableAccess(BasePtrMA, Writes);
2860
2861 auto *BaseAddr = SE->getSCEV(MA->getBaseAddr());
2862 auto *PointerBase = dyn_cast<SCEVUnknown>(SE->getPointerBase(BaseAddr));
2863 if (auto *BasePtrInst = dyn_cast<Instruction>(PointerBase->getValue()))
2864 if (!isa<LoadInst>(BasePtrInst))
2865 return R.contains(BasePtrInst);
2866
2867 return false;
2868}
2869
2870void Scop::buildAliasChecks(AliasAnalysis &AA) {
2871 if (!PollyUseRuntimeAliasChecks)
2872 return;
2873
2874 if (buildAliasGroups(AA))
2875 return;
2876
2877 // If a problem occurs while building the alias groups we need to delete
2878 // this SCoP and pretend it wasn't valid in the first place. To this end
2879 // we make the assumed context infeasible.
2880 invalidate(ALIASING, DebugLoc());
2881
2882 DEBUG(dbgs() << "\n\nNOTE: Run time checks for " << getNameStr()
2883 << " could not be created as the number of parameters involved "
2884 "is too high. The SCoP will be "
2885 "dismissed.\nUse:\n\t--polly-rtc-max-parameters=X\nto adjust "
2886 "the maximal number of parameters but be advised that the "
2887 "compile time might increase exponentially.\n\n");
2888}
2889
2890bool Scop::buildAliasGroups(AliasAnalysis &AA) {
2891 // To create sound alias checks we perform the following steps:
2892 // o) Use the alias analysis and an alias set tracker to build alias sets
2893 // for all memory accesses inside the SCoP.
2894 // o) For each alias set we then map the aliasing pointers back to the
2895 // memory accesses we know, thus obtain groups of memory accesses which
2896 // might alias.
2897 // o) We divide each group based on the domains of the minimal/maximal
2898 // accesses. That means two minimal/maximal accesses are only in a group
2899 // if their access domains intersect, otherwise they are in different
2900 // ones.
2901 // o) We partition each group into read only and non read only accesses.
2902 // o) For each group with more than one base pointer we then compute minimal
2903 // and maximal accesses to each array of a group in read only and non
2904 // read only partitions separately.
2905 using AliasGroupTy = SmallVector<MemoryAccess *, 4>;
2906
2907 AliasSetTracker AST(AA);
2908
2909 DenseMap<Value *, MemoryAccess *> PtrToAcc;
2910 DenseSet<Value *> HasWriteAccess;
2911 for (ScopStmt &Stmt : *this) {
2912
2913 // Skip statements with an empty domain as they will never be executed.
2914 isl_set *StmtDomain = Stmt.getDomain();
2915 bool StmtDomainEmpty = isl_set_is_empty(StmtDomain);
2916 isl_set_free(StmtDomain);
2917 if (StmtDomainEmpty)
2918 continue;
2919
2920 for (MemoryAccess *MA : Stmt) {
2921 if (MA->isScalarKind())
2922 continue;
2923 if (!MA->isRead())
2924 HasWriteAccess.insert(MA->getBaseAddr());
2925 MemAccInst Acc(MA->getAccessInstruction());
2926 if (MA->isRead() && isa<MemTransferInst>(Acc))
2927 PtrToAcc[cast<MemTransferInst>(Acc)->getSource()] = MA;
2928 else
2929 PtrToAcc[Acc.getPointerOperand()] = MA;
2930 AST.add(Acc);
2931 }
2932 }
2933
2934 SmallVector<AliasGroupTy, 4> AliasGroups;
2935 for (AliasSet &AS : AST) {
2936 if (AS.isMustAlias() || AS.isForwardingAliasSet())
2937 continue;
2938 AliasGroupTy AG;
2939 for (auto &PR : AS)
2940 AG.push_back(PtrToAcc[PR.getValue()]);
2941 if (AG.size() < 2)
2942 continue;
2943 AliasGroups.push_back(std::move(AG));
2944 }
2945
2946 // Split the alias groups based on their domain.
2947 for (unsigned u = 0; u < AliasGroups.size(); u++) {
2948 AliasGroupTy NewAG;
2949 AliasGroupTy &AG = AliasGroups[u];
2950 AliasGroupTy::iterator AGI = AG.begin();
2951 isl_set *AGDomain = getAccessDomain(*AGI);
2952 while (AGI != AG.end()) {
2953 MemoryAccess *MA = *AGI;
2954 isl_set *MADomain = getAccessDomain(MA);
2955 if (isl_set_is_disjoint(AGDomain, MADomain)) {
2956 NewAG.push_back(MA);
2957 AGI = AG.erase(AGI);
2958 isl_set_free(MADomain);
2959 } else {
2960 AGDomain = isl_set_union(AGDomain, MADomain);
2961 AGI++;
2962 }
2963 }
2964 if (NewAG.size() > 1)
2965 AliasGroups.push_back(std::move(NewAG));
2966 isl_set_free(AGDomain);
2967 }
2968
2969 auto &F = *getRegion().getEntry()->getParent();
2970 MapVector<const Value *, SmallPtrSet<MemoryAccess *, 8>> ReadOnlyPairs;
2971 SmallPtrSet<const Value *, 4> NonReadOnlyBaseValues;
2972 for (AliasGroupTy &AG : AliasGroups) {
2973 NonReadOnlyBaseValues.clear();
2974 ReadOnlyPairs.clear();
2975
2976 if (AG.size() < 2) {
2977 AG.clear();
2978 continue;
2979 }
2980
2981 for (auto II = AG.begin(); II != AG.end();) {
2982 emitOptimizationRemarkAnalysis(
2983 F.getContext(), DEBUG_TYPE, F,
2984 (*II)->getAccessInstruction()->getDebugLoc(),
2985 "Possibly aliasing pointer, use restrict keyword.");
2986
2987 Value *BaseAddr = (*II)->getBaseAddr();
2988 if (HasWriteAccess.count(BaseAddr)) {
2989 NonReadOnlyBaseValues.insert(BaseAddr);
2990 II++;
2991 } else {
2992 ReadOnlyPairs[BaseAddr].insert(*II);
2993 II = AG.erase(II);
2994 }
2995 }
2996
2997 // If we don't have read only pointers check if there are at least two
2998 // non read only pointers, otherwise clear the alias group.
2999 if (ReadOnlyPairs.empty() && NonReadOnlyBaseValues.size() <= 1) {
3000 AG.clear();
3001 continue;
3002 }
3003
3004 // If we don't have non read only pointers clear the alias group.
3005 if (NonReadOnlyBaseValues.empty()) {
3006 AG.clear();
3007 continue;
3008 }
3009
3010 // Check if we have non-affine accesses left, if so bail out as we cannot
3011 // generate a good access range yet.
3012 for (auto *MA : AG) {
3013 if (!MA->isAffine()) {
3014 invalidate(ALIASING, MA->getAccessInstruction()->getDebugLoc());
3015 return false;
3016 }
3017 if (auto *BasePtrMA = lookupBasePtrAccess(MA))
3018 addRequiredInvariantLoad(
3019 cast<LoadInst>(BasePtrMA->getAccessInstruction()));
3020 }
3021 for (auto &ReadOnlyPair : ReadOnlyPairs)
3022 for (auto *MA : ReadOnlyPair.second) {
3023 if (!MA->isAffine()) {
3024 invalidate(ALIASING, MA->getAccessInstruction()->getDebugLoc());
3025 return false;
3026 }
3027 if (auto *BasePtrMA = lookupBasePtrAccess(MA))
3028 addRequiredInvariantLoad(
3029 cast<LoadInst>(BasePtrMA->getAccessInstruction()));
3030 }
3031
3032 // Calculate minimal and maximal accesses for non read only accesses.
3033 MinMaxAliasGroups.emplace_back();
3034 MinMaxVectorPairTy &pair = MinMaxAliasGroups.back();
3035 MinMaxVectorTy &MinMaxAccessesNonReadOnly = pair.first;
3036 MinMaxVectorTy &MinMaxAccessesReadOnly = pair.second;
3037 MinMaxAccessesNonReadOnly.reserve(AG.size());
3038
3039 isl_union_map *Accesses = isl_union_map_empty(getParamSpace());
3040
3041 // AG contains only non read only accesses.
3042 for (MemoryAccess *MA : AG)
3043 Accesses = isl_union_map_add_map(Accesses, MA->getAccessRelation());
3044
3045 bool Valid = calculateMinMaxAccess(Accesses, getDomains(),
3046 MinMaxAccessesNonReadOnly);
3047
3048 // Bail out if the number of values we need to compare is too large.
3049 // This is important as the number of comparisons grows quadratically with
3050 // the number of values we need to compare.
3051 if (!Valid || (MinMaxAccessesNonReadOnly.size() + !ReadOnlyPairs.empty() >
3052 RunTimeChecksMaxArraysPerGroup))
3053 return false;
3054
3055 // Calculate minimal and maximal accesses for read only accesses.
3056 MinMaxAccessesReadOnly.reserve(ReadOnlyPairs.size());
3057 Accesses = isl_union_map_empty(getParamSpace());
3058
3059 for (const auto &ReadOnlyPair : ReadOnlyPairs)
3060 for (MemoryAccess *MA : ReadOnlyPair.second)
3061 Accesses = isl_union_map_add_map(Accesses, MA->getAccessRelation());
3062
3063 Valid =
3064 calculateMinMaxAccess(Accesses, getDomains(), MinMaxAccessesReadOnly);
3065
3066 if (!Valid)
3067 return false;
3068 }
3069
3070 return true;
3071}
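// Illustrative sketch (not part of ScopInfo.cpp): the domain-based split
// above. Accesses with disjoint domains can never execute together, so they
// end up in different alias groups and need no run-time check against each
// other. The sets are made up.
static void aliasGroupSplitExample(isl_ctx *Ctx) {
  isl_set *DomA = isl_set_read_from_str(Ctx, "[N] -> { S0[i] : 0 <= i < N }");
  isl_set *DomB =
      isl_set_read_from_str(Ctx, "[N] -> { S0[i] : N <= i < 2N }");
  if (isl_set_is_disjoint(DomA, DomB)) {
    // The corresponding accesses would be moved into a new alias group.
  }
  isl_set_free(DomA);
  isl_set_free(DomB);
}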
3072
3073/// @brief Get the smallest loop that contains @p R but is not in @p R.
3074static Loop *getLoopSurroundingRegion(Region &R, LoopInfo &LI) {
3075 // Start with the smallest loop containing the entry and expand that
3076 // loop until it contains all blocks in the region. If there is a loop
3077 // containing all blocks in the region check if it is itself contained
3078 // and if so take the parent loop as it will be the smallest containing
3079 // the region but not contained by it.
3080 Loop *L = LI.getLoopFor(R.getEntry());
3081 while (L) {
3082 bool AllContained = true;
3083 for (auto *BB : R.blocks())
3084 AllContained &= L->contains(BB);
3085 if (AllContained)
3086 break;
3087 L = L->getParentLoop();
3088 }
3089
3090 return L ? (R.contains(L) ? L->getParentLoop() : L) : nullptr;
3091}
3092
3093Scop::Scop(Region &R, ScalarEvolution &ScalarEvolution, LoopInfo &LI,
3094 ScopDetection::DetectionContext &DC)
3095 : SE(&ScalarEvolution), R(R), IsOptimized(false),
3096 HasSingleExitEdge(R.getExitingBlock()), HasErrorBlock(false),
3097 MaxLoopDepth(0), DC(DC), IslCtx(isl_ctx_alloc(), isl_ctx_free),
3098 Context(nullptr), Affinator(this, LI), AssumedContext(nullptr),
3099 InvalidContext(nullptr), Schedule(nullptr) {
3100 if (IslOnErrorAbort)
3101 isl_options_set_on_error(getIslCtx(), ISL_ON_ERROR_ABORT);
3102 buildContext();
3103}
3104
3105void Scop::init(AliasAnalysis &AA, AssumptionCache &AC, DominatorTree &DT,
3106 LoopInfo &LI) {
3107 buildInvariantEquivalenceClasses();
3108
3109 if (!buildDomains(&R, DT, LI))
3110 return;
3111
3112 addUserAssumptions(AC, DT, LI);
3113
3114 // Remove empty statements.
3115 // Exit early in case there are no executable statements left in this scop.
3116 simplifySCoP(false, DT, LI);
3117 if (Stmts.empty())
3118 return;
3119
3120 // The ScopStmts now have enough information to initialize themselves.
3121 for (ScopStmt &Stmt : Stmts)
3122 Stmt.init(LI);
3123
3124 // Check early for profitability. Afterwards it cannot change anymore,
3125 // only the runtime context could become infeasible.
3126 if (!isProfitable()) {
3127 invalidate(PROFITABLE, DebugLoc());
3128 return;
3129 }
3130
3131 buildSchedule(LI);
3132
3133 updateAccessDimensionality();
3134 realignParams();
3135 addUserContext();
3136
3137 // After the context has been fully constructed, i.e., all our knowledge
3138 // about the parameters is in there, we add all recorded assumptions to the
3139 // assumed/invalid context.
3140 addRecordedAssumptions();
3141
3142 simplifyContexts();
3143 buildAliasChecks(AA);
3144
3145 hoistInvariantLoads();
3146 verifyInvariantLoads();
3147 simplifySCoP(true, DT, LI);
3148
3149 // Check for a feasible runtime context now that all assumptions have been
3150 // added; profitability was already checked early and cannot have changed.
3151 if (!hasFeasibleRuntimeContext()) {
3152 invalidate(PROFITABLE, DebugLoc());
3153 return;
3154 }
3155}
3156
3157Scop::~Scop() {
3158 isl_set_free(Context);
3159 isl_set_free(AssumedContext);
3160 isl_set_free(InvalidContext);
3161 isl_schedule_free(Schedule);
3162
3163 for (auto &It : ParameterIds)
3164 isl_id_free(It.second);
3165
3166 for (auto It : DomainMap)
3167 isl_set_free(It.second);
3168
3169 for (auto &AS : RecordedAssumptions)
3170 isl_set_free(AS.Set);
3171
3172 // Free the alias groups
3173 for (MinMaxVectorPairTy &MinMaxAccessPair : MinMaxAliasGroups) {
3174 for (MinMaxAccessTy &MMA : MinMaxAccessPair.first) {
3175 isl_pw_multi_aff_free(MMA.first);
3176 isl_pw_multi_aff_free(MMA.second);
3177 }
3178 for (MinMaxAccessTy &MMA : MinMaxAccessPair.second) {
3179 isl_pw_multi_aff_free(MMA.first);
3180 isl_pw_multi_aff_free(MMA.second);
3181 }
3182 }
3183
3184 for (const auto &IAClass : InvariantEquivClasses)
3185 isl_set_free(std::get<2>(IAClass));
3186
3187 // Explicitly release all Scop objects and the underlying isl objects before
3188 // we release the isl context.
3189 Stmts.clear();
3190 ScopArrayInfoMap.clear();
3191 AccFuncMap.clear();
3192}
3193
3194void Scop::updateAccessDimensionality() {
3195 // Check all array accesses for each base pointer and find a (virtual) element
3196 // size for the base pointer that divides all access functions.
3197 for (auto &Stmt : *this)
3198 for (auto *Access : Stmt) {
3199 if (!Access->isArrayKind())
3200 continue;
3201 auto &SAI = ScopArrayInfoMap[std::make_pair(Access->getBaseAddr(),
3202 ScopArrayInfo::MK_Array)];
3203 if (SAI->getNumberOfDimensions() != 1)
3204 continue;
3205 unsigned DivisibleSize = SAI->getElemSizeInBytes();
3206 auto *Subscript = Access->getSubscript(0);
3207 while (!isDivisible(Subscript, DivisibleSize, *SE))
3208 DivisibleSize /= 2;
3209 auto *Ty = IntegerType::get(SE->getContext(), DivisibleSize * 8);
3210 SAI->updateElementType(Ty);
3211 }
3212
3213 for (auto &Stmt : *this)
3214 for (auto &Access : Stmt)
3215 Access->updateDimensionality();
3216}
3217
3218void Scop::simplifySCoP(bool AfterHoisting, DominatorTree &DT, LoopInfo &LI) {
3219 for (auto StmtIt = Stmts.begin(), StmtEnd = Stmts.end(); StmtIt != StmtEnd;) {
3220 ScopStmt &Stmt = *StmtIt;
3221
3222 bool RemoveStmt = Stmt.isEmpty();
3223 if (!RemoveStmt)
3224 RemoveStmt = !DomainMap[Stmt.getEntryBlock()];
3225
3226 // Remove read only statements only after invariant load hoisting.
3227 if (!RemoveStmt && AfterHoisting) {
3228 bool OnlyRead = true;
3229 for (MemoryAccess *MA : Stmt) {
3230 if (MA->isRead())
3231 continue;
3232
3233 OnlyRead = false;
3234 break;
3235 }
3236
3237 RemoveStmt = OnlyRead;
3238 }
3239
3240 if (!RemoveStmt) {
3241 StmtIt++;
3242 continue;
3243 }
3244
3245 // Remove the statement because it is unnecessary.
3246 if (Stmt.isRegionStmt())
3247 for (BasicBlock *BB : Stmt.getRegion()->blocks())
3248 StmtMap.erase(BB);
3249 else
3250 StmtMap.erase(Stmt.getBasicBlock());
3251
3252 StmtIt = Stmts.erase(StmtIt);
3253 }
3254}
3255
3256InvariantEquivClassTy *Scop::lookupInvariantEquivClass(Value *Val) {
3257 LoadInst *LInst = dyn_cast<LoadInst>(Val);
3258 if (!LInst)
3259 return nullptr;
3260
3261 if (Value *Rep = InvEquivClassVMap.lookup(LInst))
3262 LInst = cast<LoadInst>(Rep);
3263
3264 Type *Ty = LInst->getType();
3265 const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
3266 for (auto &IAClass : InvariantEquivClasses) {
3267 if (PointerSCEV != std::get<0>(IAClass) || Ty != std::get<3>(IAClass))
3268 continue;
3269
3270 auto &MAs = std::get<1>(IAClass);
3271 for (auto *MA : MAs)
3272 if (MA->getAccessInstruction() == Val)
3273 return &IAClass;
3274 }
3275
3276 return nullptr;
3277}
3278
3279/// @brief Check if @p MA can always be hoisted without execution context.
3280static bool canAlwaysBeHoisted(MemoryAccess *MA, bool StmtInvalidCtxIsEmpty,
3281 bool MAInvalidCtxIsEmpty) {
3282 LoadInst *LInst = cast<LoadInst>(MA->getAccessInstruction());
3283 const DataLayout &DL = LInst->getParent()->getModule()->getDataLayout();
3284 // TODO: We can provide more information for better but more expensive
3285 // results.
3286 if (!isDereferenceableAndAlignedPointer(LInst->getPointerOperand(),
3287 LInst->getAlignment(), DL))
3288 return false;
3289
3290 // If a dereferenceable load is in a statement that is modeled precisely we can
3291 // hoist it.
3292 if (StmtInvalidCtxIsEmpty && MAInvalidCtxIsEmpty)
3293 return true;
3294
3295 // Even if the statement is not modeled precisely we can hoist the load if it
3296 // does not involve any parameters that might have been specialized by the
3297 // statement domain.
3298 for (unsigned u = 0, e = MA->getNumSubscripts(); u < e; u++)
3299 if (!isa<SCEVConstant>(MA->getSubscript(u)))
3300 return false;
3301 return true;
3302}
3303
3304void Scop::addInvariantLoads(ScopStmt &Stmt, MemoryAccessList &InvMAs) {
3305
3306 if (InvMAs.empty())
3307 return;
3308
3309 auto *StmtInvalidCtx = Stmt.getInvalidContext();
3310 bool StmtInvalidCtxIsEmpty = isl_set_is_empty(StmtInvalidCtx);
3311
3312 // Get the context under which the statement is executed but remove the error
3313 // context under which this statement is reached.
3314 isl_set *DomainCtx = isl_set_params(Stmt.getDomain());
3315 DomainCtx = isl_set_subtract(DomainCtx, StmtInvalidCtx);
3316
3317 if (isl_set_n_basic_set(DomainCtx) >= MaxDisjunctionsInDomain) {
3318 auto *AccInst = InvMAs.front()->getAccessInstruction();
3319 invalidate(COMPLEXITY, AccInst->getDebugLoc());
3320 isl_set_free(DomainCtx);
3321 return;
3322 }
3323
3324 // Project out all parameters that relate to loads in the statement. Otherwise
3325 // we could have cyclic dependences on the constraints under which the
3326 // hoisted loads are executed and we could not determine an order in which to
3327 // pre-load them. This happens because not only lower bounds are part of the
3328 // domain but also upper bounds.
3329 for (MemoryAccess *MA : InvMAs) {
3330 Instruction *AccInst = MA->getAccessInstruction();
3331 if (SE->isSCEVable(AccInst->getType())) {
3332 SetVector<Value *> Values;
3333 for (const SCEV *Parameter : Parameters) {
3334 Values.clear();
3335 findValues(Parameter, *SE, Values);
3336 if (!Values.count(AccInst))
3337 continue;
3338
3339 if (isl_id *ParamId = getIdForParam(Parameter)) {
3340 int Dim = isl_set_find_dim_by_id(DomainCtx, isl_dim_param, ParamId);
3341 DomainCtx = isl_set_eliminate(DomainCtx, isl_dim_param, Dim, 1);
3342 isl_id_free(ParamId);
3343 }
3344 }
3345 }
3346 }
3347
3348 for (MemoryAccess *MA : InvMAs) {
3349 // Check for another invariant access that accesses the same location as
3350 // MA and if found consolidate them. Otherwise create a new equivalence
3351 // class at the end of InvariantEquivClasses.
3352 LoadInst *LInst = cast<LoadInst>(MA->getAccessInstruction());
3353 Type *Ty = LInst->getType();
3354 const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
3355
3356 auto *MAInvalidCtx = MA->getInvalidContext();
3357 bool MAInvalidCtxIsEmpty = isl_set_is_empty(MAInvalidCtx);
3358
3359 isl_set *MACtx;
3360 // Check if we know that this pointer can be speculatively accessed.
3361 if (canAlwaysBeHoisted(MA, StmtInvalidCtxIsEmpty, MAInvalidCtxIsEmpty)) {
3362 MACtx = isl_set_universe(isl_set_get_space(DomainCtx));
3363 isl_set_free(MAInvalidCtx);
3364 } else {
3365 MACtx = isl_set_copy(DomainCtx);
3366 MACtx = isl_set_subtract(MACtx, MAInvalidCtx);
3367 MACtx = isl_set_gist_params(MACtx, getContext());
3368 }
3369
3370 bool Consolidated = false;
3371 for (auto &IAClass : InvariantEquivClasses) {
3372 if (PointerSCEV != std::get<0>(IAClass) || Ty != std::get<3>(IAClass))
3373 continue;
3374
3375 // If the pointer and the type are equal, check if the access function
3376 // w.r.t. the domain is equal too. It can happen that the domain fixes
3377 // parameter values and these can be different for distinct parts of the
3378 // SCoP. If this happens we cannot consolidate the loads but need to
3379 // create a new invariant load equivalence class.
3380 auto &MAs = std::get<1>(IAClass);
3381 if (!MAs.empty()) {
3382 auto *LastMA = MAs.front();
3383
3384 auto *AR = isl_map_range(MA->getAccessRelation());
3385 auto *LastAR = isl_map_range(LastMA->getAccessRelation());
3386 bool SameAR = isl_set_is_equal(AR, LastAR);
3387 isl_set_free(AR);
3388 isl_set_free(LastAR);
3389
3390 if (!SameAR)
3391 continue;
3392 }
3393
3394 // Add MA to the list of accesses that are in this class.
3395 MAs.push_front(MA);
3396
3397 Consolidated = true;
3398
3399 // Unify the execution context of the class and this statement.
3400 isl_set *&IAClassDomainCtx = std::get<2>(IAClass);
3401 if (IAClassDomainCtx)
3402 IAClassDomainCtx =
3403 isl_set_coalesce(isl_set_union(IAClassDomainCtx, MACtx));
3404 else
3405 IAClassDomainCtx = MACtx;
3406 break;
3407 }
3408
3409 if (Consolidated)
3410 continue;
3411
3412 // If we did not consolidate MA, thus did not find an equivalence class
3413 // for it, we create a new one.
3414 InvariantEquivClasses.emplace_back(PointerSCEV, MemoryAccessList{MA}, MACtx,
3415 Ty);
3416 }
3417
3418 isl_set_free(DomainCtx);
3419}
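// Illustrative sketch (not part of ScopInfo.cpp): the parameter elimination
// above. Dropping all knowledge about a parameter `p` that stems from a
// hoisted load breaks potential cycles between the execution contexts of
// hoisted loads. The sets are made up.
static void eliminateParamExample(isl_ctx *Ctx) {
  isl_set *ExecCtx = isl_set_read_from_str(Ctx, "[p, q] -> { : 0 <= p < q }");
  ExecCtx = isl_set_eliminate(ExecCtx, isl_dim_param, 0, 1);
  isl_set_dump(ExecCtx); // [p, q] -> { : q >= 1 }
  isl_set_free(ExecCtx);
}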
3420
3421bool Scop::isHoistableAccess(MemoryAccess *Access,
3422 __isl_keep isl_union_map *Writes) {
3423 // TODO: Loads that are not loop carried, hence are in a statement with
3424 // zero iterators, are by construction invariant, though we
3425 // currently "hoist" them anyway. This is necessary because we allow
3426 // them to be treated as parameters (e.g., in conditions) and our code
3427 // generation would otherwise use the old value.
3428
3429 auto &Stmt = *Access->getStatement();
3430 BasicBlock *BB = Stmt.getEntryBlock();
3431
3432 if (Access->isScalarKind() || Access->isWrite() || !Access->isAffine())
3433 return false;
3434
3435 // Skip accesses that have an invariant base pointer which is defined but
3436 // not loaded inside the SCoP. This can happen, e.g., if a readnone call
3437 // returns a pointer that is used as a base address. However, as we want
3438 // to hoist indirect pointers, we allow the base pointer to be defined in
3439 // the region if it is also a memory access. Each ScopArrayInfo object
3440 // that has a base pointer origin has a base pointer that is loaded and
3441 // is invariant, thus it will be hoisted too. However, if there is
3442 // no base pointer origin we check that the base pointer is defined
3443 // outside the region.
3444 if (hasNonHoistableBasePtrInScop(Access, Writes))
3445 return false;
3446
3447 // Skip accesses in non-affine subregions as they might not be executed
3448 // under the same condition as the entry of the non-affine subregion.
3449 auto *LI = cast<LoadInst>(Access->getAccessInstruction());
3450 if (BB != LI->getParent())
3451 return false;
3452
3453 isl_map *AccessRelation = Access->getAccessRelation();
3454 assert(!isl_map_is_empty(AccessRelation));
3455
3456 if (isl_map_involves_dims(AccessRelation, isl_dim_in, 0,
3457 Stmt.getNumIterators())) {
3458 isl_map_free(AccessRelation);
3459 return false;
3460 }
3461
3462 AccessRelation = isl_map_intersect_domain(AccessRelation, Stmt.getDomain());
3463 isl_set *AccessRange = isl_map_range(AccessRelation);
3464
3465 isl_union_map *Written = isl_union_map_intersect_range(
3466 isl_union_map_copy(Writes), isl_union_set_from_set(AccessRange));
3467 bool IsWritten = !isl_union_map_is_empty(Written);
3468 isl_union_map_free(Written);
3469
3470 return !IsWritten;
3471}
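// Illustrative sketch (not part of ScopInfo.cpp): the final test above. A
// load is invariant if no write in the SCoP can touch its access range.
// The maps and sets are made up.
static void hoistableCheckExample(isl_ctx *Ctx) {
  isl_union_map *Writes = isl_union_map_read_from_str(
      Ctx, "[N] -> { S0[i] -> A[i] : 0 <= i < N }");
  isl_set *AccessRange = isl_set_read_from_str(Ctx, "{ B[0] }");
  isl_union_map *Written = isl_union_map_intersect_range(
      Writes, isl_union_set_from_set(AccessRange));
  // Written is empty: nothing writes B[0], so a load from B[0] is hoistable.
  isl_union_map_dump(Written);
  isl_union_map_free(Written);
}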
3472
3473void Scop::verifyInvariantLoads() {
3474 auto &RIL = getRequiredInvariantLoads();
3475 for (LoadInst *LI : RIL) {
3476 assert(LI && getRegion().contains(LI));
3477 ScopStmt *Stmt = getStmtFor(LI);
3478 if (Stmt && Stmt->getArrayAccessOrNULLFor(LI)) {
3479 invalidate(INVARIANTLOAD, LI->getDebugLoc());
3480 return;
3481 }
3482 }
3483}
3484
3485void Scop::hoistInvariantLoads() {
3486 if (!PollyInvariantLoadHoisting)
3487 return;
3488
3489 isl_union_map *Writes = getWrites();
3490 for (ScopStmt &Stmt : *this) {
3491 MemoryAccessList InvariantAccesses;
3492
3493 for (MemoryAccess *Access : Stmt)
3494 if (isHoistableAccess(Access, Writes))
3495 InvariantAccesses.push_front(Access);
3496
3497 // We always inserted invariant accesses at the front but need them to be
3498 // sorted in a "natural order". The statements are already sorted in
3499 // reverse post order and that suffices for the accesses too. The reason
3500 // we require an order in the first place is the dependences between
3501 // invariant loads that can be caused by indirect loads.
3502 InvariantAccesses.reverse();
3503
3504 // Transfer the memory access from the statement to the SCoP.
3505 Stmt.removeMemoryAccesses(InvariantAccesses);
3506 addInvariantLoads(Stmt, InvariantAccesses);
3507 }
3508 isl_union_map_free(Writes);
3509}
3510
3511const ScopArrayInfo *
3512Scop::getOrCreateScopArrayInfo(Value *BasePtr, Type *ElementType,
3513 ArrayRef<const SCEV *> Sizes,
3514 ScopArrayInfo::MemoryKind Kind) {
3515 auto &SAI = ScopArrayInfoMap[std::make_pair(BasePtr, Kind)];
3516 if (!SAI) {
3517 auto &DL = getRegion().getEntry()->getModule()->getDataLayout();
3518 SAI.reset(new ScopArrayInfo(BasePtr, ElementType, getIslCtx(), Sizes, Kind,
3519 DL, this));
3520 } else {
3521 SAI->updateElementType(ElementType);
3522 // In case of mismatching array sizes, we bail out by setting the run-time
3523 // context to false.
3524 if (!SAI->updateSizes(Sizes))
3525 invalidate(DELINEARIZATION, DebugLoc());
3526 }
3527 return SAI.get();
3528}
3529
3530const ScopArrayInfo *Scop::getScopArrayInfo(Value *BasePtr,
3531 ScopArrayInfo::MemoryKind Kind) {
3532 auto *SAI = ScopArrayInfoMap[std::make_pair(BasePtr, Kind)].get();
3533 assert(SAI && "No ScopArrayInfo available for this base pointer");
3534 return SAI;
3535}
3536
3537std::string Scop::getContextStr() const { return stringFromIslObj(Context); }
3538
3539std::string Scop::getAssumedContextStr() const {
3540 assert(AssumedContext && "Assumed context not yet built");
3541 return stringFromIslObj(AssumedContext);
3542}
3543
3544std::string Scop::getInvalidContextStr() const {
3545 return stringFromIslObj(InvalidContext);
3546}
3547
3548std::string Scop::getNameStr() const {
3549 std::string ExitName, EntryName;
3550 raw_string_ostream ExitStr(ExitName);
3551 raw_string_ostream EntryStr(EntryName);
3552
3553 R.getEntry()->printAsOperand(EntryStr, false);
3554 EntryStr.str();
3555
3556 if (R.getExit()) {
3557 R.getExit()->printAsOperand(ExitStr, false);
3558 ExitStr.str();
3559 } else
3560 ExitName = "FunctionExit";
3561
3562 return EntryName + "---" + ExitName;
3563}
3564
3565__isl_give isl_set *Scop::getContext() const { return isl_set_copy(Context); }
3566__isl_give isl_space *Scop::getParamSpace() const {
3567 return isl_set_get_space(Context);
3568}
3569
3570__isl_give isl_set *Scop::getAssumedContext() const {
3571 assert(AssumedContext && "Assumed context not yet built");
3572 return isl_set_copy(AssumedContext);
3573}
3574
3575bool Scop::isProfitable() const {
3576 if (PollyProcessUnprofitable)
3577 return true;
3578
3579 if (!hasFeasibleRuntimeContext())
3580 return false;
3581
3582 if (isEmpty())
3583 return false;
3584
3585 unsigned OptimizableStmtsOrLoops = 0;
3586 for (auto &Stmt : *this) {
3587 if (Stmt.getNumIterators() == 0)
3588 continue;
3589
3590 bool ContainsArrayAccs = false;
3591 bool ContainsScalarAccs = false;
3592 for (auto *MA : Stmt) {
3593 if (MA->isRead())
3594 continue;
3595 ContainsArrayAccs |= MA->isArrayKind();
3596 ContainsScalarAccs |= MA->isScalarKind();
3597 }
3598
3599 if (ContainsArrayAccs && !ContainsScalarAccs)
3600 OptimizableStmtsOrLoops += Stmt.getNumIterators();
3601 }
3602
3603 return OptimizableStmtsOrLoops > 1;
3604}
3605
3606bool Scop::hasFeasibleRuntimeContext() const {
3607 auto *PositiveContext = getAssumedContext();
3608 auto *NegativeContext = getInvalidContext();
3609 PositiveContext = addNonEmptyDomainConstraints(PositiveContext);
3610 bool IsFeasible = !(isl_set_is_empty(PositiveContext) ||
3611 isl_set_is_subset(PositiveContext, NegativeContext));
3612 isl_set_free(PositiveContext);
3613 if (!IsFeasible) {
3614 isl_set_free(NegativeContext);
3615 return false;
3616 }
3617
3618 auto *DomainContext = isl_union_set_params(getDomains());
3619 IsFeasible = !isl_set_is_subset(DomainContext, NegativeContext);
3620 IsFeasible &= !isl_set_is_subset(Context, NegativeContext);
3621 isl_set_free(NegativeContext);
3622 isl_set_free(DomainContext);
3623
3624 return IsFeasible;
3625}
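// Illustrative sketch (not part of ScopInfo.cpp): the containment test
// above. If every parameter valuation the assumed context relies on is also
// known to be invalid, no feasible runtime context remains. The sets are
// made up.
static void feasibilityExample(isl_ctx *Ctx) {
  isl_set *Assumed = isl_set_read_from_str(Ctx, "[N] -> { : N > 0 }");
  isl_set *Invalid = isl_set_read_from_str(Ctx, "[N] -> { : N >= 0 }");
  if (isl_set_is_subset(Assumed, Invalid)) {
    // Infeasible: the runtime checks of this SCoP can never succeed.
  }
  isl_set_free(Assumed);
  isl_set_free(Invalid);
}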
3626
3627static std::string toString(AssumptionKind Kind) {
3628 switch (Kind) {
3629 case ALIASING:
3630 return "No-aliasing";
3631 case INBOUNDS:
3632 return "Inbounds";
3633 case WRAPPING:
3634 return "No-overflows";
3635 case UNSIGNED:
3636 return "Signed-unsigned";
3637 case COMPLEXITY:
3638 return "Low complexity";
3639 case PROFITABLE:
3640 return "Profitable";
3641 case ERRORBLOCK:
3642 return "No-error";
3643 case INFINITELOOP:
3644 return "Finite loop";
3645 case INVARIANTLOAD:
3646 return "Invariant load";
3647 case DELINEARIZATION:
3648 return "Delinearization";
3649 }
3650 llvm_unreachable("Unknown AssumptionKind!");
3652
3653bool Scop::trackAssumption(AssumptionKind Kind, __isl_keep isl_set *Set,
3654 DebugLoc Loc, AssumptionSign Sign) {
3655 if (PollyRemarksMinimal) {
3656 if (Sign == AS_ASSUMPTION) {
3657 if (isl_set_is_subset(Context, Set))
3658 return false;
3659
3660 if (isl_set_is_subset(AssumedContext, Set))
3661 return false;
3662 } else {
3663 if (isl_set_is_disjoint(Set, Context))
3664 return false;
3665
3666 if (isl_set_is_subset(Set, InvalidContext))
3667 return false;
3668 }
3669 }
3670
3671 auto &F = *getRegion().getEntry()->getParent();
3672 auto Suffix = Sign == AS_ASSUMPTION ? " assumption:\t" : " restriction:\t";
3673 std::string Msg = toString(Kind) + Suffix + stringFromIslObj(Set);
3674 emitOptimizationRemarkAnalysis(F.getContext(), DEBUG_TYPE, F, Loc, Msg);
3675 return true;
3676}
3677
3678void Scop::addAssumption(AssumptionKind Kind, __isl_take isl_set *Set,
3679 DebugLoc Loc, AssumptionSign Sign) {
3680 // Simplify the assumptions/restrictions first.
3681 Set = isl_set_gist_params(Set, getContext());
3682
3683 if (!trackAssumption(Kind, Set, Loc, Sign)) {
3684 isl_set_free(Set);
3685 return;
3686 }
3687
3688 if (Sign == AS_ASSUMPTION) {
3689 AssumedContext = isl_set_intersect(AssumedContext, Set);
3690 AssumedContext = isl_set_coalesce(AssumedContext);
3691 } else {
3692 InvalidContext = isl_set_union(InvalidContext, Set);
3693 InvalidContext = isl_set_coalesce(InvalidContext);
3694 }
3695}
3696
3697void Scop::recordAssumption(AssumptionKind Kind, __isl_take isl_set *Set,
3698 DebugLoc Loc, AssumptionSign Sign, BasicBlock *BB) {
3699 RecordedAssumptions.push_back({Kind, Sign, Set, Loc, BB});
3700}
3701
3702void Scop::addRecordedAssumptions() {
3703 while (!RecordedAssumptions.empty()) {
3704 const Assumption &AS = RecordedAssumptions.pop_back_val();
3705
3706 if (!AS.BB) {
3707 addAssumption(AS.Kind, AS.Set, AS.Loc, AS.Sign);
3708 continue;
3709 }
3710
3711 // If the domain was deleted the assumptions are void.
3712 isl_set *Dom = getDomainConditions(AS.BB);
3713 if (!Dom) {
3714 isl_set_free(AS.Set);
3715 continue;
3716 }
3717
3718 // If a basic block was given use its domain to simplify the assumption.
3719 // In case of restrictions we know they only have to hold on the domain,
3720 // thus we can intersect them with the domain of the block. However, for
3721 // assumptions the domain has to imply them; with A = Dom and B = S:
3722 //
3723 //   Dom => S  <==>  !A v B  <==>  !(A - B)
3724 //
3725 // To avoid the complement we will register A - B as a restriction not an
3726 // assumption.
3727 isl_set *S = AS.Set;
3728 if (AS.Sign == AS_RESTRICTION)
3729 S = isl_set_params(isl_set_intersect(S, Dom));
3730 else /* (AS.Sign == AS_ASSUMPTION) */
3731 S = isl_set_params(isl_set_subtract(Dom, S));
3732
3733 addAssumption(AS.Kind, S, AS.Loc, AS_RESTRICTION);
3734 }
3735}
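// Illustrative sketch (not part of ScopInfo.cpp): the A - B trick above.
// The valuations on which `Dom => S` fails are exactly Dom - S; their
// parameter projection is registered as a restriction, which avoids a
// costly complement. The sets are made up.
static void assumptionToRestrictionExample(isl_ctx *Ctx) {
  isl_set *Dom = isl_set_read_from_str(Ctx, "[N] -> { S[i] : 0 <= i < N }");
  isl_set *S = isl_set_read_from_str(Ctx, "[N] -> { S[i] : i < 100 }");
  isl_set *Violated = isl_set_params(isl_set_subtract(Dom, S));
  isl_set_dump(Violated); // [N] -> { : N >= 101 }
  isl_set_free(Violated);
}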
3736
3737void Scop::invalidate(AssumptionKind Kind, DebugLoc Loc) {
3738 addAssumption(Kind, isl_set_empty(getParamSpace()), Loc, AS_ASSUMPTION);
3739}
3740
3741__isl_give isl_set *Scop::getInvalidContext() const {
3742 return isl_set_copy(InvalidContext);
3743}
3744
3745void Scop::printContext(raw_ostream &OS) const {
3746 OS << "Context:\n";
3747 OS.indent(4) << Context << "\n";
3748
3749 OS.indent(4) << "Assumed Context:\n";
3750 OS.indent(4) << AssumedContext << "\n";
3751
3752 OS.indent(4) << "Invalid Context:\n";
3753 OS.indent(4) << InvalidContext << "\n";
3754
3755 unsigned Dim = 0;
3756 for (const SCEV *Parameter : Parameters)
3757 OS.indent(4) << "p" << Dim++ << ": " << *Parameter << "\n";
3758}
3759
3760void Scop::printAliasAssumptions(raw_ostream &OS) const {
3761 int noOfGroups = 0;
3762 for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
3763 if (Pair.second.size() == 0)
3764 noOfGroups += 1;
3765 else
3766 noOfGroups += Pair.second.size();
3767 }
3768
3769 OS.indent(4) << "Alias Groups (" << noOfGroups << "):\n";
3770 if (MinMaxAliasGroups.empty()) {
3771 OS.indent(8) << "n/a\n";
3772 return;
3773 }
3774
3775 for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
3776
3777 // If the group has no read only accesses print the write accesses.
3778 if (Pair.second.empty()) {
3779 OS.indent(8) << "[[";
3780 for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
3781 OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
3782 << ">";
3783 }
3784 OS << " ]]\n";
3785 }
3786
3787 for (const MinMaxAccessTy &MMAReadOnly : Pair.second) {
3788 OS.indent(8) << "[[";
3789 OS << " <" << MMAReadOnly.first << ", " << MMAReadOnly.second << ">";
3790 for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
3791 OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
3792 << ">";
3793 }
3794 OS << " ]]\n";
3795 }
3796 }
3797}
3798
3799void Scop::printStatements(raw_ostream &OS) const {
3800 OS << "Statements {\n";
3801
3802 for (const ScopStmt &Stmt : *this)
3803 OS.indent(4) << Stmt;
3804
3805 OS.indent(4) << "}\n";
3806}
3807
3808void Scop::printArrayInfo(raw_ostream &OS) const {
3809 OS << "Arrays {\n";
3810
3811 for (auto &Array : arrays())
3812 Array.second->print(OS);
3813
3814 OS.indent(4) << "}\n";
3815
3816 OS.indent(4) << "Arrays (Bounds as pw_affs) {\n";
3817
3818 for (auto &Array : arrays())
3819 Array.second->print(OS, /* SizeAsPwAff */ true);
3820
3821 OS.indent(4) << "}\n";
3822}
3823
3824void Scop::print(raw_ostream &OS) const {
3825 OS.indent(4) << "Function: " << getRegion().getEntry()->getParent()->getName()
3826 << "\n";
3827 OS.indent(4) << "Region: " << getNameStr() << "\n";
3828 OS.indent(4) << "Max Loop Depth: " << getMaxLoopDepth() << "\n";
3829 OS.indent(4) << "Invariant Accesses: {\n";
3830 for (const auto &IAClass : InvariantEquivClasses) {
3831 const auto &MAs = std::get<1>(IAClass);
3832 if (MAs.empty()) {
3833 OS.indent(12) << "Class Pointer: " << *std::get<0>(IAClass) << "\n";
3834 } else {
3835 MAs.front()->print(OS);
3836 OS.indent(12) << "Execution Context: " << std::get<2>(IAClass) << "\n";
3837 }
3838 }
3839 OS.indent(4) << "}\n";
3840 printContext(OS.indent(4));
3841 printArrayInfo(OS.indent(4));
3842 printAliasAssumptions(OS);
3843 printStatements(OS.indent(4));
3844}
3845
3846void Scop::dump() const { print(dbgs()); }
3847
3848isl_ctx *Scop::getIslCtx() const { return IslCtx.get(); }
3849
3850__isl_give PWACtx Scop::getPwAff(const SCEV *E, BasicBlock *BB,
3851 bool NonNegative) {
3852 // First try to use the SCEVAffinator to generate a piecewise defined
3853 // affine function from @p E in the context of @p BB. If that task becomes
3854 // too complex the affinator might return a nullptr. In such a case we
3855 // invalidate the SCoP and return a dummy value. This way we do not need to
3856 // add error handling code to all users of this function.
3857 auto PWAC = Affinator.getPwAff(E, BB);
3858 if (PWAC.first) {
3859 // TODO: We could use a heuristic and either use:
3860 // SCEVAffinator::takeNonNegativeAssumption
3861 // or
3862 // SCEVAffinator::interpretAsUnsigned
3863 // to deal with unsigned or "NonNegative" SCEVs.
3864 if (NonNegative)
3865 Affinator.takeNonNegativeAssumption(PWAC);
3866 return PWAC;
3867 }
3868
3869 auto DL = BB ? BB->getTerminator()->getDebugLoc() : DebugLoc();
3870 invalidate(COMPLEXITY, DL);
3871 return Affinator.getPwAff(SE->getZero(E->getType()), BB);
3872}
3873
3874__isl_give isl_union_set *Scop::getDomains() const {
3875 isl_union_set *Domain = isl_union_set_empty(getParamSpace());
3876
3877 for (const ScopStmt &Stmt : *this)
3878 Domain = isl_union_set_add_set(Domain, Stmt.getDomain());
3879
3880 return Domain;
3881}
3882
3883__isl_give isl_pw_aff *Scop::getPwAffOnly(const SCEV *E, BasicBlock *BB) {
3884 PWACtx PWAC = getPwAff(E, BB);
3885 isl_set_free(PWAC.second);
3886 return PWAC.first;
3887}
3888
3889__isl_give isl_union_map *
3890Scop::getAccessesOfType(std::function<bool(MemoryAccess &)> Predicate) {
3891 isl_union_map *Accesses = isl_union_map_empty(getParamSpace());
3892
3893 for (ScopStmt &Stmt : *this) {
3894 for (MemoryAccess *MA : Stmt) {
3895 if (!Predicate(*MA))
3896 continue;
3897
3898 isl_set *Domain = Stmt.getDomain();
3899 isl_map *AccessDomain = MA->getAccessRelation();
3900 AccessDomain = isl_map_intersect_domain(AccessDomain, Domain);
3901 Accesses = isl_union_map_add_map(Accesses, AccessDomain);
3902 }
3903 }
3904 return isl_union_map_coalesce(Accesses);
3905}
3906
3907__isl_give isl_union_map *Scop::getMustWrites() {
3908 return getAccessesOfType([](MemoryAccess &MA) { return MA.isMustWrite(); });
3909}
3910
3911__isl_give isl_union_map *Scop::getMayWrites() {
3912 return getAccessesOfType([](MemoryAccess &MA) { return MA.isMayWrite(); });
3913}
3914
3915__isl_give isl_union_map *Scop::getWrites() {
3916 return getAccessesOfType([](MemoryAccess &MA) { return MA.isWrite(); });
3917}
3918
3919__isl_give isl_union_map *Scop::getReads() {
3920 return getAccessesOfType([](MemoryAccess &MA) { return MA.isRead(); });
3921}
3922
3923__isl_give isl_union_map *Scop::getAccesses() {
3924 return getAccessesOfType([](MemoryAccess &MA) { return true; });
3925}
3926
3927__isl_give isl_union_map *Scop::getSchedule() const {
3928 auto *Tree = getScheduleTree();
3929 auto *S = isl_schedule_get_map(Tree);
3930 isl_schedule_free(Tree);
3931 return S;
3932}
3933
3934__isl_give isl_schedule *Scop::getScheduleTree() const {
3935 return isl_schedule_intersect_domain(isl_schedule_copy(Schedule),
3936 getDomains());
3937}
3938
3939void Scop::setSchedule(__isl_take isl_union_map *NewSchedule) {
3940 auto *S = isl_schedule_from_domain(getDomains());
3941 S = isl_schedule_insert_partial_schedule(
3942 S, isl_multi_union_pw_aff_from_union_map(NewSchedule));
3943 isl_schedule_free(Schedule);
3944 Schedule = S;
3945}
3946
3947void Scop::setScheduleTree(__isl_take isl_schedule *NewSchedule) {
3948 isl_schedule_free(Schedule);
3949 Schedule = NewSchedule;
3950}
3951
3952bool Scop::restrictDomains(__isl_take isl_union_set *Domain) {
3953 bool Changed = false;
3954 for (ScopStmt &Stmt : *this) {
3955 isl_union_set *StmtDomain = isl_union_set_from_set(Stmt.getDomain());
3956 isl_union_set *NewStmtDomain = isl_union_set_intersect(
3957 isl_union_set_copy(StmtDomain), isl_union_set_copy(Domain));
3958
3959 if (isl_union_set_is_subset(StmtDomain, NewStmtDomain)) {
3960 isl_union_set_free(StmtDomain);
3961 isl_union_set_free(NewStmtDomain);
3962 continue;
3963 }
3964
3965 Changed = true;
3966
3967 isl_union_set_free(StmtDomain);
3968 NewStmtDomain = isl_union_set_coalesce(NewStmtDomain);
3969
3970 if (isl_union_set_is_empty(NewStmtDomain)) {
3971 Stmt.restrictDomain(isl_set_empty(Stmt.getDomainSpace()));
3972 isl_union_set_free(NewStmtDomain);
3973 } else
3974 Stmt.restrictDomain(isl_set_from_union_set(NewStmtDomain));
3975 }
3976 isl_union_set_free(Domain);
3977 return Changed;
3978}
3979
3980ScalarEvolution *Scop::getSE() const { return SE; }
3981
3982struct MapToDimensionDataTy {
3983 int N;
3984 isl_union_pw_multi_aff *Res;
3985};
3986
3987// @brief Create a function that maps the elements of 'Set' to its N-th
3988// dimension and add it to User->Res.
3989//
3990// @param Set The input set.
3991// @param User->N The dimension to map to.
3992// @param User->Res The isl_union_pw_multi_aff to which to add the result.
3993//
3994 // @returns isl_stat_ok if no error occurred, otherwise isl_stat_error.
3995static isl_stat mapToDimension_AddSet(__isl_take isl_set *Set, void *User) {
3996 struct MapToDimensionDataTy *Data = (struct MapToDimensionDataTy *)User;
3997 int Dim;
3998 isl_space *Space;
3999 isl_pw_multi_aff *PMA;
4000
4001 Dim = isl_set_dim(Set, isl_dim_set);
4002 Space = isl_set_get_space(Set);
4003 PMA = isl_pw_multi_aff_project_out_map(Space, isl_dim_set, Data->N,
4004 Dim - Data->N);
4005 if (Data->N > 1)
4006 PMA = isl_pw_multi_aff_drop_dims(PMA, isl_dim_out, 0, Data->N - 1);
4007 Data->Res = isl_union_pw_multi_aff_add_pw_multi_aff(Data->Res, PMA);
4008
4009 isl_set_free(Set);
4010
4011 return isl_stat_ok;
4012}
4013
4014 // @brief Create an isl_multi_union_pw_aff that defines an identity mapping
4015// from the elements of USet to their N-th dimension.
4016//
4017// # Example:
4018//
4019// Domain: { A[i,j]; B[i,j,k] }
4020 // N: 2
4021 //
4022 // Resulting Mapping: { A[i,j] -> [(j)]; B[i,j,k] -> [(j)] }
4023//
4024// @param USet A union set describing the elements for which to generate a
4025// mapping.
4026// @param N The dimension to map to.
4027// @returns A mapping from USet to its N-th dimension.
4028static __isl_give isl_multi_union_pw_aff *
4029mapToDimension(__isl_take isl_union_set *USet, int N) {
4030 assert(N >= 0);
4031 assert(USet);
4032 assert(!isl_union_set_is_empty(USet));
4033
4034 struct MapToDimensionDataTy Data;
4035
4036 auto *Space = isl_union_set_get_space(USet);
4037 auto *PwAff = isl_union_pw_multi_aff_empty(Space);
4038
4039 Data = {N, PwAff};
4040
4041 auto Res = isl_union_set_foreach_set(USet, &mapToDimension_AddSet, &Data);
4042 (void)Res;
4043
4044 assert(Res == isl_stat_ok);
4045
4046 isl_union_set_free(USet);
4047 return isl_multi_union_pw_aff_from_union_pw_multi_aff(Data.Res);
4048}
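// Illustrative sketch (not part of ScopInfo.cpp): exercising mapToDimension
// with the example from the comment above; the driver function is made up.
static void mapToDimensionExample(isl_ctx *Ctx) {
  isl_union_set *USet =
      isl_union_set_read_from_str(Ctx, "{ A[i,j]; B[i,j,k] }");
  isl_multi_union_pw_aff *MUPA = mapToDimension(USet, 2);
  // MUPA is { A[i,j] -> [(j)]; B[i,j,k] -> [(j)] }
  isl_multi_union_pw_aff_free(MUPA);
}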
4049
4050void Scop::addScopStmt(BasicBlock *BB, Region *R) {
4051 if (BB) {
4052 Stmts.emplace_back(*this, *BB);
4053 auto *Stmt = &Stmts.back();
4054 StmtMap[BB] = Stmt;
4055 } else {
4056 assert(R && "Either basic block or a region expected.")((R && "Either basic block or a region expected.") ? static_cast
<void> (0) : __assert_fail ("R && \"Either basic block or a region expected.\""
, "/tmp/buildd/llvm-toolchain-snapshot-3.9~svn270412/tools/polly/lib/Analysis/ScopInfo.cpp"
, 4056, __PRETTY_FUNCTION__))
;
4057 Stmts.emplace_back(*this, *R);
4058 auto *Stmt = &Stmts.back();
4059 for (BasicBlock *BB : R->blocks())
4060 StmtMap[BB] = Stmt;
4061 }
4062}
4063
4064void Scop::buildSchedule(LoopInfo &LI) {
4065 Loop *L = getLoopSurroundingRegion(getRegion(), LI);
4066 LoopStackTy LoopStack({LoopStackElementTy(L, nullptr, 0)});
4067 buildSchedule(getRegion().getNode(), LoopStack, LI);
4068 assert(LoopStack.size() == 1 && LoopStack.back().L == L);
4069 Schedule = LoopStack[0].Schedule;
4070}
4071
4072/// To generate a schedule for the elements in a Region we traverse the Region
4073/// in reverse-post-order and add the contained RegionNodes in traversal order
4074/// to the schedule of the loop that is currently at the top of the LoopStack.
4075/// For loop-free codes, this results in a correct sequential ordering.
4076///
4077/// Example:
4078/// bb1(0)
4079/// / \.
4080/// bb2(1) bb3(2)
4081/// \ / \.
4082/// bb4(3) bb5(4)
4083/// \ /
4084/// bb6(5)
4085///
4086/// Including loops requires additional processing. Whenever a loop header is
4087/// encountered, the corresponding loop is added to the @p LoopStack. Starting
4088/// from an empty schedule, we first process all RegionNodes that are within
4089/// this loop and complete the sequential schedule at this loop-level before
4090 /// processing any other nodes. To implement this
4091/// loop-nodes-first-processing, the reverse post-order traversal is
4092/// insufficient. Hence, we additionally check if the traversal yields
4093/// sub-regions or blocks that are outside the last loop on the @p LoopStack.
4094 /// These region-nodes are then queued and only traversed after all nodes
4095/// within the current loop have been processed.
4096void Scop::buildSchedule(Region *R, LoopStackTy &LoopStack, LoopInfo &LI) {
4097 Loop *OuterScopLoop = getLoopSurroundingRegion(getRegion(), LI);
4098
4099 ReversePostOrderTraversal<Region *> RTraversal(R);
4100 std::deque<RegionNode *> WorkList(RTraversal.begin(), RTraversal.end());
4101 std::deque<RegionNode *> DelayList;
4102 bool LastRNWaiting = false;
4103
4104 // Iterate over the region @p R in reverse post-order but queue
4105 // sub-regions/blocks iff they are not part of the last encountered but not
4106 // completely traversed loop. The variable LastRNWaiting is a flag to indicate
4107 // that we queued the last sub-region/block from the reverse post-order
4108 // iterator. If it is set we have to explore the next sub-region/block from
4109 // the iterator (if any) to guarantee progress. If it is not set we first try
4110 // the next queued sub-region/blocks.
4111 while (!WorkList.empty() || !DelayList.empty()) {
4112 RegionNode *RN;
4113
4114 if ((LastRNWaiting && !WorkList.empty()) || DelayList.size() == 0) {
4115 RN = WorkList.front();
4116 WorkList.pop_front();
4117 LastRNWaiting = false;
4118 } else {
4119 RN = DelayList.front();
4120 DelayList.pop_front();
4121 }
4122
4123 Loop *L = getRegionNodeLoop(RN, LI);
4124 if (!getRegion().contains(L))
4125 L = OuterScopLoop;
4126
4127 Loop *LastLoop = LoopStack.back().L;
4128 if (LastLoop != L) {
4129 if (LastLoop && !LastLoop->contains(L)) {
4130 LastRNWaiting = true;
4131 DelayList.push_back(RN);
4132 continue;
4133 }
4134 LoopStack.push_back({L, nullptr, 0});
4135 }
4136 buildSchedule(RN, LoopStack, LI);
4137 }
4138
4139 return;
4140}
4141
4142void Scop::buildSchedule(RegionNode *RN, LoopStackTy &LoopStack, LoopInfo &LI) {
4143
4144 if (RN->isSubRegion()) {
4145 auto *LocalRegion = RN->getNodeAs<Region>();
4146 if (!isNonAffineSubRegion(LocalRegion)) {
4147 buildSchedule(LocalRegion, LoopStack, LI);
4148 return;
4149 }
4150 }
4151
4152 auto &LoopData = LoopStack.back();
4153 LoopData.NumBlocksProcessed += getNumBlocksInRegionNode(RN);
4154
4155 if (auto *Stmt = getStmtFor(RN)) {
4156 auto *UDomain = isl_union_set_from_set(Stmt->getDomain());
4157 auto *StmtSchedule = isl_schedule_from_domain(UDomain);
4158 LoopData.Schedule = combineInSequence(LoopData.Schedule, StmtSchedule);
4159 }
4160
4161 // Check if we just processed the last node in this loop. If we did, finalize
4162 // the loop by:
4163 //
4164 // - adding new schedule dimensions
4165 // - folding the resulting schedule into the parent loop schedule
4166 // - dropping the loop schedule from the LoopStack.
4167 //
4168 // Then continue to check surrounding loops, which might also have been
4169 // completed by this node.
4170 while (LoopData.L &&
4171 LoopData.NumBlocksProcessed == LoopData.L->getNumBlocks()) {
4172 auto *Schedule = LoopData.Schedule;
4173 auto NumBlocksProcessed = LoopData.NumBlocksProcessed;
4174
4175 LoopStack.pop_back();
4176 auto &NextLoopData = LoopStack.back();
4177
4178 if (Schedule) {
4179 auto *Domain = isl_schedule_get_domain(Schedule);
4180 auto *MUPA = mapToDimension(Domain, LoopStack.size());
4181 Schedule = isl_schedule_insert_partial_schedule(Schedule, MUPA);
4182 NextLoopData.Schedule =
4183 combineInSequence(NextLoopData.Schedule, Schedule);
4184 }
4185
4186 NextLoopData.NumBlocksProcessed += NumBlocksProcessed;
4187 LoopData = NextLoopData;
4188 }
4189}
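
The finalization loop above can be hard to follow in isolation. Below is a simplified, runnable sketch under invented types: a std::string stands in for the isl_schedule, and appending brackets stands in for isl_schedule_insert_partial_schedule plus combineInSequence. It models only the pop-and-fold mechanics, not the polyhedral content:

#include <cstdio>
#include <string>
#include <vector>

struct LoopStackEntry {
  std::string Schedule;      // stand-in for this loop level's isl_schedule
  unsigned NumBlocksProcessed;
  unsigned NumBlocksInLoop;  // blocks the loop contains in total
};

// Fold every fully processed inner loop into its parent stack entry.
void finalizeCompletedLoops(std::vector<LoopStackEntry> &LoopStack) {
  while (LoopStack.size() > 1 &&
         LoopStack.back().NumBlocksProcessed ==
             LoopStack.back().NumBlocksInLoop) {
    LoopStackEntry Inner = LoopStack.back();
    LoopStack.pop_back();
    LoopStackEntry &Parent = LoopStack.back();

    // Stand-in for inserting a partial schedule dimension and combining
    // it in sequence with the parent's schedule.
    Parent.Schedule += "[loop:" + Inner.Schedule + "]";
    Parent.NumBlocksProcessed += Inner.NumBlocksProcessed;
  }
}

int main() {
  std::vector<LoopStackEntry> Stack = {
      {"", 0, 0},        // outermost (non-loop) level
      {"S1;S2", 4, 4}};  // inner loop with all 4 blocks processed
  finalizeCompletedLoops(Stack);
  std::printf("outer schedule: %s\n", Stack.back().Schedule.c_str());
}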
4190
4191ScopStmt *Scop::getStmtFor(BasicBlock *BB) const {
4192 auto StmtMapIt = StmtMap.find(BB);
4193 if (StmtMapIt == StmtMap.end())
4194 return nullptr;
4195 return StmtMapIt->second;
4196}
4197
4198ScopStmt *Scop::getStmtFor(RegionNode *RN) const {
4199 if (RN->isSubRegion())
4200 return getStmtFor(RN->getNodeAs<Region>());
4201 return getStmtFor(RN->getNodeAs<BasicBlock>());
4202}
4203
4204ScopStmt *Scop::getStmtFor(Region *R) const {
4205 ScopStmt *Stmt = getStmtFor(R->getEntry());
4206 assert(!Stmt || Stmt->getRegion() == R);
4207 return Stmt;
4208}
4209
4210int Scop::getRelativeLoopDepth(const Loop *L) const {
4211 Loop *OuterLoop =
4212 L ? R.outermostLoopInRegion(const_cast<Loop *>(L)) : nullptr;
4213 if (!OuterLoop)
4214 return -1;
4215 return L->getLoopDepth() - OuterLoop->getLoopDepth();
4216}
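
As a worked example of the computation above: if the outermost loop contained in the region has depth 2, a loop of depth 3 inside the region has relative depth 3 - 2 = 1, the outermost in-region loop itself has relative depth 0, and a loop not contained in the region (or a null loop) yields -1.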
4217
4218void ScopInfo::buildPHIAccesses(PHINode *PHI, Region &R,
4219 Region *NonAffineSubRegion, bool IsExitBlock) {
4220
4221 // PHI nodes in the exit block of the region (i.e., when IsExitBlock is
4222 // true) are not modeled as ordinary PHI nodes, as they are not part of the
4223 // region. However, we model their operands in the predecessor blocks that
4224 // are part of the region as regular scalar accesses.
4225
4226 // If we can synthesize a PHI we can skip it, however only if it is in
4227 // the region. If it is not it can only be in the exit block of the region.
4228 // In this case we model the operands but not the PHI itself.
4229 auto *Scope = LI->getLoopFor(PHI->getParent());
4230 if (!IsExitBlock && canSynthesize(PHI, LI, SE, &R, Scope))
4231 return;
4232
4233 // PHI nodes are modeled as if they had been demoted prior to the SCoP
4234 // detection. Hence, the PHI is a load of a new memory location in which the
4235 // incoming value was written at the end of the incoming basic block.
4236 bool OnlyNonAffineSubRegionOperands = true;
4237 for (unsigned u = 0; u < PHI->getNumIncomingValues(); u++) {
4238 Value *Op = PHI->getIncomingValue(u);
4239 BasicBlock *OpBB = PHI->getIncomingBlock(u);
4240
4241 // Do not build scalar dependences inside a non-affine subregion.
4242 if (NonAffineSubRegion && NonAffineSubRegion->contains(OpBB))
4243 continue;
4244
4245 OnlyNonAffineSubRegionOperands = false;
4246 ensurePHIWrite(PHI, OpBB, Op, IsExitBlock);
4247 }
4248
4249 if (!OnlyNonAffineSubRegionOperands && !IsExitBlock) {
4250 addPHIReadAccess(PHI);
4251 }
4252}
4253
4254void ScopInfo::buildScalarDependences(Instruction *Inst) {
4255 assert(!isa<PHINode>(Inst));
4256
4257 // Pull-in required operands.
4258 for (Use &Op : Inst->operands())
4259 ensureValueRead(Op.get(), Inst->getParent());
4260}
4261
4262void ScopInfo::buildEscapingDependences(Instruction *Inst) {
4263 Region *R = &scop->getRegion();
4264
4265 // Check for uses of this instruction outside the scop. Because we do not
4266 // iterate over instructions outside the scop and therefore never "ensured"
4267 // the existence of a write for them, we must detect such uses here.
4268 for (Use &U : Inst->uses()) {
4269 Instruction *UI = dyn_cast<Instruction>(U.getUser());
4270 if (!UI)
4271 continue;
4272
4273 BasicBlock *UseParent = getUseBlock(U);
4274 BasicBlock *UserParent = UI->getParent();
4275
4276 // An escaping value is either used by an instruction not within the scop,
4277 // or (when the scop region's exit needs to be simplified) by a PHI in the
4278 // scop's exit block. This is because region simplification before code
4279 // generation inserts new basic blocks before the PHI such that its incoming
4280 // blocks are not in the scop anymore.
4281 if (!R->contains(UseParent) ||
4282 (isa<PHINode>(UI) && UserParent == R->getExit() &&
4283 R->getExitingBlock())) {
4284 // At least one escaping use found.
4285 ensureValueWrite(Inst);
4286 break;
4287 }
4288 }
4289}
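
To make "escaping" concrete, consider this source-level sketch (illustrative only; function and variable names are invented):

// S is defined inside the loop that forms the scop but used afterwards,
// so the use of S in the 'return' lies outside the scop region. The loop
// over Inst->uses() above detects this and calls ensureValueWrite for S's
// defining instruction, so the final value is written to a scalar location.
double sumAndScale(const double *A, long N) {
  double S = 0.0;
  for (long I = 0; I < N; ++I)  // <- the scop
    S += A[I];
  return S * 2.0;               // <- escaping use outside the scop
}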
4290
4291bool ScopInfo::buildAccessMultiDimFixed(MemAccInst Inst, Loop *L, Region *R) {
4292 Value *Val = Inst.getValueOperand();
4293 Type *ElementType = Val->getType();
4294 Value *Address = Inst.getPointerOperand();
4295 const SCEV *AccessFunction = SE->getSCEVAtScope(Address, L);
4296 const SCEVUnknown *BasePointer =
4297 dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));
4298 enum MemoryAccess::AccessType AccType =
4299 isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;
4300
4301 if (auto *BitCast = dyn_cast<BitCastInst>(Address)) {
4302 auto *Src = BitCast->getOperand(0);
4303 auto *SrcTy = Src->getType();
4304 auto *DstTy = BitCast->getType();
4305 // Do not try to delinearize non-sized (opaque) pointers.
4306 if ((SrcTy->isPointerTy() && !SrcTy->getPointerElementType()->isSized()) ||
4307 (DstTy->isPointerTy() && !DstTy->getPointerElementType()->isSized())) {
4308 return false;
4309 }
4310 if (SrcTy->isPointerTy() && DstTy->isPointerTy() &&
4311 DL->getTypeAllocSize(SrcTy->getPointerElementType()) ==
4312 DL->getTypeAllocSize(DstTy->getPointerElementType()))
4313 Address = Src;
4314 }
4315
4316 auto *GEP = dyn_cast<GetElementPtrInst>(Address);
4317 if (!GEP)
4318 return false;
4319
4320 std::vector<const SCEV *> Subscripts;
4321 std::vector<int> Sizes;
4322 std::tie(Subscripts, Sizes) = getIndexExpressionsFromGEP(GEP, *SE);
4323 auto *BasePtr = GEP->getOperand(0);
4324
4325 if (auto *BasePtrCast = dyn_cast<BitCastInst>(BasePtr))
4326 BasePtr = BasePtrCast->getOperand(0);
4327
4328 // Check for identical base pointers to ensure that we do not miss index
4329 // offsets that have been added before this GEP is applied.
4330 if (BasePtr != BasePointer->getValue())
4331 return false;
4332
4333 std::vector<const SCEV *> SizesSCEV;
4334
4335 const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();
4336 for (auto *Subscript : Subscripts) {
4337 InvariantLoadsSetTy AccessILS;
4338 if (!isAffineExpr(R, L, Subscript, *SE, &AccessILS))
4339 return false;
4340
4341 for (LoadInst *LInst : AccessILS)
4342 if (!ScopRIL.count(LInst))
4343 return false;
4344 }
4345
4346 if (Sizes.empty())
4347 return false;
4348
4349 for (auto V : Sizes)
4350 SizesSCEV.push_back(SE->getSCEV(
4351 ConstantInt::get(IntegerType::getInt64Ty(BasePtr->getContext()), V)));
4352
4353 addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, true,
4354 Subscripts, SizesSCEV, Val);
4355 return true;
4356}
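
A source-level illustration of the fixed-size case handled above (illustrative code; the recovered values are described approximately):

// For F[I][J] on a statically sized array, the GEP encodes both subscripts
// directly; getIndexExpressionsFromGEP recovers Subscripts = {I, J} and the
// inner dimension size (64 here), which buildAccessMultiDimFixed turns into
// SCEV constants for the Sizes vector.
void writeCell(float F[128][64], long I, long J, float V) {
  F[I][J] = V; // one affine MUST_WRITE with two subscript dimensions
}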
4357
4358bool ScopInfo::buildAccessMultiDimParam(MemAccInst Inst, Loop *L, Region *R) {
4359 if (!PollyDelinearize)
4360 return false;
4361
4362 Value *Address = Inst.getPointerOperand();
4363 Value *Val = Inst.getValueOperand();
4364 Type *ElementType = Val->getType();
4365 unsigned ElementSize = DL->getTypeAllocSize(ElementType);
4366 enum MemoryAccess::AccessType AccType =
4367 isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;
4368
4369 const SCEV *AccessFunction = SE->getSCEVAtScope(Address, L);
4370 const SCEVUnknown *BasePointer =
4371 dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));
4372
4373 assert(BasePointer && "Could not find base pointer");
4374 AccessFunction = SE->getMinusSCEV(AccessFunction, BasePointer);
4375
4376 auto &InsnToMemAcc = scop->getInsnToMemAccMap();
4377 auto AccItr = InsnToMemAcc.find(Inst);
4378 if (AccItr == InsnToMemAcc.end())
4379 return false;
4380
4381 std::vector<const SCEV *> Sizes(
4382 AccItr->second.Shape->DelinearizedSizes.begin(),
4383 AccItr->second.Shape->DelinearizedSizes.end());
4384 // Remove the element size. This information is already provided by the
4385 // ElementSize parameter. In case the element size of this access and the
4386 // element size used for delinearization differs the delinearization is
4387 // incorrect. Hence, we invalidate the scop.
4388 //
4389 // TODO: Handle delinearization with differing element sizes.
4390 auto DelinearizedSize =
4391 cast<SCEVConstant>(Sizes.back())->getAPInt().getSExtValue();
4392 Sizes.pop_back();
4393 if (ElementSize != DelinearizedSize)
4394 scop->invalidate(DELINEARIZATION, Inst->getDebugLoc());
4395
4396 addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, true,
4397 AccItr->second.DelinearizedSubscripts, Sizes, Val);
4398 return true;
4399}
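
A source-level illustration of the parametric case (illustrative only; whether delinearization succeeds also depends on ScopDetection having recorded the access in InsnToMemAcc):

// With a parametric row length M there is no fixed-size GEP structure;
// delinearization (enabled by -polly-delinearize) tries to recover
// Subscripts = {I, J} and the parametric size M from the access function
// I * M + J. (Approximate description of the recovered values.)
void scaleMatrix(float *A, long N, long M, float V) {
  for (long I = 0; I < N; ++I)
    for (long J = 0; J < M; ++J)
      A[I * M + J] *= V;
}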
4400
4401bool ScopInfo::buildAccessMemIntrinsic(MemAccInst Inst, Loop *L, Region *R) {
4402 auto *MemIntr = dyn_cast_or_null<MemIntrinsic>(Inst);
4403
4404 if (MemIntr == nullptr)
4405 return false;
4406
4407 auto *LengthVal = SE->getSCEVAtScope(MemIntr->getLength(), L);
4408 assert(LengthVal);
4409
4410 // Check if the length val is actually affine or if we overapproximate it
4411 InvariantLoadsSetTy AccessILS;
4412 const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();
4413 bool LengthIsAffine = isAffineExpr(R, L, LengthVal, *SE, &AccessILS);
4414 for (LoadInst *LInst : AccessILS)
4415 if (!ScopRIL.count(LInst))
4416 LengthIsAffine = false;
4417 if (!LengthIsAffine)
4418 LengthVal = nullptr;
4419
4420 auto *DestPtrVal = MemIntr->getDest();
4421 assert(DestPtrVal);
4422
4423 auto *DestAccFunc = SE->getSCEVAtScope(DestPtrVal, L);
4424 assert(DestAccFunc);
4425 // Ignore accesses to "NULL".
4426 // TODO: We could use this to optimize the region further, e.g., intersect
4427 // the context with
4428 // isl_set_complement(isl_set_params(getDomain()))
4429 // as we know it would be undefined to execute this instruction anyway.
4430 if (DestAccFunc->isZero())
4431 return true;
4432
4433 auto *DestPtrSCEV = dyn_cast<SCEVUnknown>(SE->getPointerBase(DestAccFunc));
4434 assert(DestPtrSCEV);
4435 DestAccFunc = SE->getMinusSCEV(DestAccFunc, DestPtrSCEV);
4436 addArrayAccess(Inst, MemoryAccess::MUST_WRITE, DestPtrSCEV->getValue(),
4437 IntegerType::getInt8Ty(DestPtrVal->getContext()), false,
4438 {DestAccFunc, LengthVal}, {}, Inst.getValueOperand());
4439
4440 auto *MemTrans = dyn_cast<MemTransferInst>(MemIntr);
4441 if (!MemTrans)
4442 return true;
4443
4444 auto *SrcPtrVal = MemTrans->getSource();
4445 assert(SrcPtrVal);
4446
4447 auto *SrcAccFunc = SE->getSCEVAtScope(SrcPtrVal, L);
4448 assert(SrcAccFunc);
4449 // Ignore accesses to "NULL".
4450 // TODO: See above TODO
4451 if (SrcAccFunc->isZero())
4452 return true;
4453
4454 auto *SrcPtrSCEV = dyn_cast<SCEVUnknown>(SE->getPointerBase(SrcAccFunc));
4455 assert(SrcPtrSCEV);
4456 SrcAccFunc = SE->getMinusSCEV(SrcAccFunc, SrcPtrSCEV);
4457 addArrayAccess(Inst, MemoryAccess::READ, SrcPtrSCEV->getValue(),
4458 IntegerType::getInt8Ty(SrcPtrVal->getContext()), false,
4459 {SrcAccFunc, LengthVal}, {}, Inst.getValueOperand());
4460
4461 return true;
4462}
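
For intuition, a memcpy is modeled exactly as the two byte-wise accesses created above (illustrative source; the i8 element type and the {offset, length} subscripts follow the addArrayAccess calls in this function):

#include <cstring>

// Modeled as a MUST_WRITE of Len bytes at Dst and a READ of Len bytes at
// Src, both with i8 element type. If Len were not affine in the
// surrounding loops, LengthVal would be dropped and the access
// over-approximated.
void copyRow(char *Dst, const char *Src, long Len) {
  std::memcpy(Dst, Src, (size_t)Len);
}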
4463
4464bool ScopInfo::buildAccessCallInst(MemAccInst Inst, Loop *L, Region *R) {
4465 auto *CI = dyn_cast_or_null<CallInst>(Inst);
4466
4467 if (CI == nullptr)
4468 return false;
4469
4470 if (CI->doesNotAccessMemory() || isIgnoredIntrinsic(CI))
4471 return true;
4472
4473 bool ReadOnly = false;
4474 auto *AF = SE->getConstant(IntegerType::getInt64Ty(CI->getContext()), 0);
4475 auto *CalledFunction = CI->getCalledFunction();
4476 switch (AA->getModRefBehavior(CalledFunction)) {
4477 case llvm::FMRB_UnknownModRefBehavior:
4478 llvm_unreachable("Unknown mod ref behaviour cannot be represented.");
4479 case llvm::FMRB_DoesNotAccessMemory:
4480 return true;
4481 case llvm::FMRB_OnlyReadsMemory:
4482 GlobalReads.push_back(CI);
4483 return true;
4484 case llvm::FMRB_OnlyReadsArgumentPointees:
4485 ReadOnly = true;
4486 // Fall through
4487 case llvm::FMRB_OnlyAccessesArgumentPointees:
4488 auto AccType = ReadOnly ? MemoryAccess::READ : MemoryAccess::MAY_WRITE;
4489 for (const auto &Arg : CI->arg_operands()) {
4490 if (!Arg->getType()->isPointerTy())
4491 continue;
4492
4493 auto *ArgSCEV = SE->getSCEVAtScope(Arg, L);
4494 if (ArgSCEV->isZero())
4495 continue;
4496
4497 auto *ArgBasePtr = cast<SCEVUnknown>(SE->getPointerBase(ArgSCEV));
4498 addArrayAccess(Inst, AccType, ArgBasePtr->getValue(),
4499 ArgBasePtr->getType(), false, {AF}, {}, CI);
4500 }
4501 return true;
4502 }
4503
4504 return true;
4505}
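
An illustrative sketch for the switch above (assumption: a GCC/Clang 'pure' attribute typically lowers to a readonly call, which maps to FMRB_OnlyReadsMemory; the exact attribute-to-mod/ref mapping depends on the frontend):

// 'checksum' only reads memory, so the call is collected in GlobalReads
// here and later expanded (in buildScop) into READ accesses to every known
// array base pointer, rather than being modeled per-argument.
extern double checksum(const double *X, long N) __attribute__((pure));

double useChecksum(const double *X, long N) {
  return checksum(X, N);
}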
4506
4507void ScopInfo::buildAccessSingleDim(MemAccInst Inst, Loop *L, Region *R) {
4508 Value *Address = Inst.getPointerOperand();
4509 Value *Val = Inst.getValueOperand();
4510 Type *ElementType = Val->getType();
4511 enum MemoryAccess::AccessType AccType =
4512 isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;
4513
4514 const SCEV *AccessFunction = SE->getSCEVAtScope(Address, L);
4515 const SCEVUnknown *BasePointer =
4516 dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));
4517
4518 assert(BasePointer && "Could not find base pointer");
4519 AccessFunction = SE->getMinusSCEV(AccessFunction, BasePointer);
4520
4521 // Check if the access depends on a loop contained in a non-affine subregion.
4522 bool isVariantInNonAffineLoop = false;
4523 SetVector<const Loop *> Loops;
4524 auto &BoxedLoops = scop->getBoxedLoops();
4525 findLoops(AccessFunction, Loops);
4526 for (const Loop *L : Loops)
4527 if (BoxedLoops.count(L))
4528 isVariantInNonAffineLoop = true;
4529
4530 InvariantLoadsSetTy AccessILS;
4531 bool IsAffine = !isVariantInNonAffineLoop &&
4532 isAffineExpr(R, L, AccessFunction, *SE, &AccessILS);
4533
4534 const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();
4535 for (LoadInst *LInst : AccessILS)
4536 if (!ScopRIL.count(LInst))
4537 IsAffine = false;
4538
4539 if (!IsAffine && AccType == MemoryAccess::MUST_WRITE)
4540 AccType = MemoryAccess::MAY_WRITE;
4541
4542 addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, IsAffine,
4543 {AccessFunction}, {}, Val);
4544}
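
A source-level illustration of the single-dimensional fallback (illustrative only; the access-function description is approximate):

// The store below has the affine access function 16 * I (in bytes)
// relative to the base pointer A, so it is kept as a MUST_WRITE with a
// single subscript. A non-affine subscript, or one depending on a boxed
// loop, would be downgraded to MAY_WRITE as in the code above.
void initEverySecond(double *A, long N) {
  for (long I = 0; I < N; ++I)
    A[2 * I] = 0.0;
}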
4545
4546void ScopInfo::buildMemoryAccess(MemAccInst Inst, Loop *L, Region *R) {
4547
4548 if (buildAccessMemIntrinsic(Inst, L, R))
4549 return;
4550
4551 if (buildAccessCallInst(Inst, L, R))
4552 return;
4553
4554 if (buildAccessMultiDimFixed(Inst, L, R))
4555 return;
4556
4557 if (buildAccessMultiDimParam(Inst, L, R))
4558 return;
4559
4560 buildAccessSingleDim(Inst, L, R);
4561}
4562
4563void ScopInfo::buildAccessFunctions(Region &R, Region &SR) {
4564
4565 if (scop->isNonAffineSubRegion(&SR)) {
4566 for (BasicBlock *BB : SR.blocks())
4567 buildAccessFunctions(R, *BB, &SR);
4568 return;
4569 }
4570
4571 for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
4572 if (I->isSubRegion())
4573 buildAccessFunctions(R, *I->getNodeAs<Region>());
4574 else
4575 buildAccessFunctions(R, *I->getNodeAs<BasicBlock>());
4576}
4577
4578void ScopInfo::buildStmts(Region &R, Region &SR) {
4579
4580 if (scop->isNonAffineSubRegion(&SR)) {
4581 scop->addScopStmt(nullptr, &SR);
4582 return;
4583 }
4584
4585 for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
4586 if (I->isSubRegion())
4587 buildStmts(R, *I->getNodeAs<Region>());
4588 else
4589 scop->addScopStmt(I->getNodeAs<BasicBlock>(), nullptr);
4590}
4591
4592void ScopInfo::buildAccessFunctions(Region &R, BasicBlock &BB,
4593 Region *NonAffineSubRegion,
4594 bool IsExitBlock) {
4595 // We do not build access functions for error blocks, as they may contain
4596 // instructions we can not model.
4597 if (isErrorBlock(BB, R, *LI, *DT) && !IsExitBlock)
4598 return;
4599
4600 Loop *L = LI->getLoopFor(&BB);
4601
4602 for (Instruction &Inst : BB) {
4603 PHINode *PHI = dyn_cast<PHINode>(&Inst);
4604 if (PHI)
4605 buildPHIAccesses(PHI, R, NonAffineSubRegion, IsExitBlock);
4606
4607 // For the exit block we stop modeling after the last PHI node.
4608 if (!PHI && IsExitBlock)
4609 break;
4610
4611 if (auto MemInst = MemAccInst::dyn_cast(Inst))
4612 buildMemoryAccess(MemInst, L, &R);
4613
4614 if (isIgnoredIntrinsic(&Inst))
4615 continue;
4616
4617 // PHI nodes have already been modeled above and TerminatorInsts that are
4618 // not part of a non-affine subregion are fully modeled and regenerated
4619 // from the polyhedral domains. Hence, they do not need to be modeled as
4620 // explicit data dependences.
4621 if (!PHI && (!isa<TerminatorInst>(&Inst) || NonAffineSubRegion))
4622 buildScalarDependences(&Inst);
4623
4624 if (!IsExitBlock)
4625 buildEscapingDependences(&Inst);
4626 }
4627}
4628
4629MemoryAccess *ScopInfo::addMemoryAccess(BasicBlock *BB, Instruction *Inst,
4630 MemoryAccess::AccessType AccType,
4631 Value *BaseAddress, Type *ElementType,
4632 bool Affine, Value *AccessValue,
4633 ArrayRef<const SCEV *> Subscripts,
4634 ArrayRef<const SCEV *> Sizes,
4635 ScopArrayInfo::MemoryKind Kind) {
4636 ScopStmt *Stmt = scop->getStmtFor(BB);
4637
4638 // Do not create a memory access for anything not in the SCoP. It would be
4639 // ignored anyway.
4640 if (!Stmt)
4641 return nullptr;
4642
4643 AccFuncSetType &AccList = scop->getOrCreateAccessFunctions(BB);
4644 Value *BaseAddr = BaseAddress;
4645 std::string BaseName = getIslCompatibleName("MemRef_", BaseAddr, "");
4646
4647 bool isKnownMustAccess = false;
4648
4649 // Accesses in single-basic-block statements are always executed.
4650 if (Stmt->isBlockStmt())
4651 isKnownMustAccess = true;
4652
4653 if (Stmt->isRegionStmt()) {
4654 // Accesses that dominate the exit block of a non-affine region are always
4655 // executed. In non-affine regions there may exist accesses that do not
4656 // dominate the exit; MK_Value writes always do, and MK_PHI writes do only
4657 // if there is at most one PHI_WRITE in the non-affine region.
4658 if (DT->dominates(BB, Stmt->getRegion()->getExit()))
4659 isKnownMustAccess = true;
4660 }
4661
4662 // Non-affine PHI writes do not "happen" at a particular instruction, but
4663 // after exiting the statement. Therefore they are guaranteed to execute
4664 // and overwrite the old value.
4665 if (Kind == ScopArrayInfo::MK_PHI || Kind == ScopArrayInfo::MK_ExitPHI)
4666 isKnownMustAccess = true;
4667
4668 if (!isKnownMustAccess && AccType == MemoryAccess::MUST_WRITE)
4669 AccType = MemoryAccess::MAY_WRITE;
4670
4671 AccList.emplace_back(Stmt, Inst, AccType, BaseAddress, ElementType, Affine,
4672 Subscripts, Sizes, AccessValue, Kind, BaseName);
4673 Stmt->addAccess(&AccList.back());
4674 return &AccList.back();
4675}
4676
4677void ScopInfo::addArrayAccess(MemAccInst MemAccInst,
4678 MemoryAccess::AccessType AccType,
4679 Value *BaseAddress, Type *ElementType,
4680 bool IsAffine, ArrayRef<const SCEV *> Subscripts,
4681 ArrayRef<const SCEV *> Sizes,
4682 Value *AccessValue) {
4683 ArrayBasePointers.insert(BaseAddress);
4684 addMemoryAccess(MemAccInst->getParent(), MemAccInst, AccType, BaseAddress,
4685 ElementType, IsAffine, AccessValue, Subscripts, Sizes,
4686 ScopArrayInfo::MK_Array);
4687}
4688
4689void ScopInfo::ensureValueWrite(Instruction *Inst) {
4690 ScopStmt *Stmt = scop->getStmtFor(Inst);
4691
4692 // Inst not defined within this SCoP.
4693 if (!Stmt)
4694 return;
4695
4696 // Do not process further if the instruction is already written.
4697 if (Stmt->lookupValueWriteOf(Inst))
4698 return;
4699
4700 addMemoryAccess(Inst->getParent(), Inst, MemoryAccess::MUST_WRITE, Inst,
4701 Inst->getType(), true, Inst, ArrayRef<const SCEV *>(),
4702 ArrayRef<const SCEV *>(), ScopArrayInfo::MK_Value);
4703}
4704
4705void ScopInfo::ensureValueRead(Value *V, BasicBlock *UserBB) {
4706
4707 // There cannot be an "access" for literal constants. BasicBlock references
4708 // (jump destinations) also never change.
4709 if ((isa<Constant>(V) && !isa<GlobalVariable>(V)) || isa<BasicBlock>(V))
4710 return;
4711
4712 // If the instruction can be synthesized and the user is in the region, we
4713 // do not need to add a value dependence.
4714 Region &ScopRegion = scop->getRegion();
4715 auto *Scope = LI->getLoopFor(UserBB);
4716 if (canSynthesize(V, LI, SE, &ScopRegion, Scope))
4717 return;
4718
4719 // Do not build scalar dependences for required invariant loads as we will
4720 // hoist them later on anyway or drop the SCoP if we cannot.
4721 auto &ScopRIL = scop->getRequiredInvariantLoads();
4722 if (ScopRIL.count(dyn_cast<LoadInst>(V)))
4723 return;
4724
4725 // Determine the ScopStmt containing the value's definition and use. There is
4726 // no defining ScopStmt if the value is a function argument, a global value,
4727 // or defined outside the SCoP.
4728 Instruction *ValueInst = dyn_cast<Instruction>(V);
4729 ScopStmt *ValueStmt = ValueInst ? scop->getStmtFor(ValueInst) : nullptr;
4730
4731 ScopStmt *UserStmt = scop->getStmtFor(UserBB);
4732
4733 // We do not model uses outside the scop.
4734 if (!UserStmt)
4735 return;
4736
4737 // Add MemoryAccess for invariant values only if requested.
4738 if (!ModelReadOnlyScalars && !ValueStmt)
4739 return;
4740
4741 // Ignore use-def chains within the same ScopStmt.
4742 if (ValueStmt == UserStmt)
4743 return;
4744
4745 // Do not create another MemoryAccess for reloading the value if one already
4746 // exists.
4747 if (UserStmt->lookupValueReadOf(V))
4748 return;
4749
4750 // For exit PHIs use the MK_ExitPHI MemoryKind, not MK_Value.
4751 ScopArrayInfo::MemoryKind Kind = ScopArrayInfo::MK_Value;
4752 if (!ValueStmt && isa<PHINode>(V))
4753 Kind = ScopArrayInfo::MK_ExitPHI;
4754
4755 addMemoryAccess(UserBB, nullptr, MemoryAccess::READ, V, V->getType(), true, V,
4756 ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(), Kind);
4757 if (ValueInst)
4758 ensureValueWrite(ValueInst);
4759}
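
To see when this function actually adds a read, consider this sketch (illustrative source; it assumes the load of T is not hoisted as a required invariant load, so T cannot be re-synthesized in the using statement):

// T is defined by a load in the statement before the loop and used inside
// the loop statement. Since a load cannot be re-synthesized at its use,
// Polly adds a MK_Value write where T is defined (via ensureValueWrite)
// and a MK_Value read in the using statement (this function).
void broadcast(double *A, const double *B, long N) {
  double T = B[0];            // Stmt_entry: READ of B[0], value write of T
  for (long I = 0; I < N; ++I)
    A[I] = T;                 // Stmt_body: value read of T, WRITE of A[I]
}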
4760
4761void ScopInfo::ensurePHIWrite(PHINode *PHI, BasicBlock *IncomingBlock,
4762 Value *IncomingValue, bool IsExitBlock) {
4763 // As the incoming block might turn out to be an error statement, ensure we
4764 // create an exit PHI SAI object. It is needed during code generation and
4765 // would be created later anyway.
4766 if (IsExitBlock)
4767 scop->getOrCreateScopArrayInfo(PHI, PHI->getType(), {},
4768 ScopArrayInfo::MK_ExitPHI);
4769
4770 ScopStmt *IncomingStmt = scop->getStmtFor(IncomingBlock);
4771 if (!IncomingStmt)
4772 return;
4773
4774 // Ensure the incoming value is available in the incoming block. This must
4775 // be done before the check for multiple PHI writes because multiple exiting
4776 // edges from a subregion can each carry the effective written value of the
4777 // subregion. As such, all of them must be made available in the subregion
4778 // statement.
4779 ensureValueRead(IncomingValue, IncomingBlock);
4780
4781 // Do not add more than one MemoryAccess per PHINode and ScopStmt.
4782 if (MemoryAccess *Acc = IncomingStmt->lookupPHIWriteOf(PHI)) {
4783 assert(Acc->getAccessInstruction() == PHI);
4784 Acc->addIncoming(IncomingBlock, IncomingValue);
4785 return;
4786 }
4787
4788 MemoryAccess *Acc = addMemoryAccess(
4789 IncomingStmt->getEntryBlock(), PHI, MemoryAccess::MUST_WRITE, PHI,
4790 PHI->getType(), true, PHI, ArrayRef<const SCEV *>(),
4791 ArrayRef<const SCEV *>(),
4792 IsExitBlock ? ScopArrayInfo::MK_ExitPHI : ScopArrayInfo::MK_PHI);
4793 assert(Acc);
4794 Acc->addIncoming(IncomingBlock, IncomingValue);
4795}
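
A source-level sketch of the PHI modeling described above (illustrative only; 'S' becomes a loop-header PHI after SSA construction):

// Each incoming edge of the header PHI (preheader and backedge) gets a
// MK_PHI MUST_WRITE of its incoming value via ensurePHIWrite, and the PHI
// itself becomes a MK_PHI READ at the header entry via addPHIReadAccess.
double accumulate(const double *A, long N) {
  double S = 0.0;             // PHI write on the preheader edge
  for (long I = 0; I < N; ++I)
    S += A[I];                // PHI read in the header; write on backedge
  return S;                   // the use after the loop is an escaping use
}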
4796
4797void ScopInfo::addPHIReadAccess(PHINode *PHI) {
4798 addMemoryAccess(PHI->getParent(), PHI, MemoryAccess::READ, PHI,
4799 PHI->getType(), true, PHI, ArrayRef<const SCEV *>(),
4800 ArrayRef<const SCEV *>(), ScopArrayInfo::MK_PHI);
4801}
4802
4803void ScopInfo::buildScop(Region &R, AssumptionCache &AC) {
4804 scop.reset(new Scop(R, *SE, *LI, *SD->getDetectionContext(&R)));
4805
4806 buildStmts(R, R);
4807 buildAccessFunctions(R, R);
4808
4809 // In case the region does not have an exiting block we will later (during
4810 // code generation) split the exit block. This will move potential PHI nodes
4811 // from the current exit block into the new region exiting block. Hence, PHI
4812 // nodes that are not yet part of the region will become part of it.
4813 // To handle these PHI nodes later we will now model their operands as scalar
4814 // accesses. Note that we do not model anything in the exit block if we have
4815 // an exiting block in the region, as there will not be any splitting later.
4816 if (!R.getExitingBlock())
4817 buildAccessFunctions(R, *R.getExit(), nullptr,
4818 /* IsExitBlock */ true);
4819
4820 // Create memory accesses for global reads since all arrays are now known.
4821 auto *AF = SE->getConstant(IntegerType::getInt64Ty(SE->getContext()), 0);
4822 for (auto *GlobalRead : GlobalReads)
4823 for (auto *BP : ArrayBasePointers)
4824 addArrayAccess(MemAccInst(GlobalRead), MemoryAccess::READ, BP,
4825 BP->getType(), false, {AF}, {}, GlobalRead);
4826
4827 scop->init(*AA, AC, *DT, *LI);
4828}
4829
4830void ScopInfo::print(raw_ostream &OS, const Module *) const {
4831 if (!scop) {
4832 OS << "Invalid Scop!\n";
4833 return;
4834 }
4835
4836 scop->print(OS);
4837}
4838
4839void ScopInfo::clear() { scop.reset(); }
4840
4841//===----------------------------------------------------------------------===//
4842ScopInfo::ScopInfo() : RegionPass(ID) {}
4843
4844ScopInfo::~ScopInfo() { clear(); }
4845
4846void ScopInfo::getAnalysisUsage(AnalysisUsage &AU) const {
4847 AU.addRequired<LoopInfoWrapperPass>();
4848 AU.addRequired<RegionInfoPass>();
4849 AU.addRequired<DominatorTreeWrapperPass>();
4850 AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
4851 AU.addRequiredTransitive<ScopDetection>();
4852 AU.addRequired<AAResultsWrapperPass>();
4853 AU.addRequired<AssumptionCacheTracker>();
4854 AU.setPreservesAll();
4855}
4856
4857bool ScopInfo::runOnRegion(Region *R, RGPassManager &RGM) {
4858 SD = &getAnalysis<ScopDetection>();
4859
4860 if (!SD->isMaxRegionInScop(*R))
4861 return false;
4862
4863 Function *F = R->getEntry()->getParent();
4864 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
4865 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
4866 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
4867 DL = &F->getParent()->getDataLayout();
4868 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
4869 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F);
4870
4871 DebugLoc Beg, End;
4872 getDebugLocations(getBBPairForRegion(R), Beg, End);
4873 std::string Msg = "SCoP begins here.";
4874 emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE"polly-scops", *F, Beg, Msg);
4875
4876 buildScop(*R, AC);
4877
4878 DEBUG(scop->print(dbgs()));
4879
4880 if (!scop->hasFeasibleRuntimeContext()) {
4881 Msg = "SCoP ends here but was dismissed.";
4882 scop.reset();
4883 } else {
4884 Msg = "SCoP ends here.";
4885 ++ScopFound;
4886 if (scop->getMaxLoopDepth() > 0)
4887 ++RichScopFound;
4888 }
4889
4890 emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE"polly-scops", *F, End, Msg);
4891
4892 return false;
4893}
4894
4895char ScopInfo::ID = 0;
4896
4897Pass *polly::createScopInfoPass() { return new ScopInfo(); }
4898
4899INITIALIZE_PASS_BEGIN(ScopInfo, "polly-scops",
4900 "Polly - Create polyhedral description of Scops", false,
4901 false);
4902INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
4903INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
4904INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
4905INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
4906INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
4907INITIALIZE_PASS_DEPENDENCY(ScopDetection);
4908INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
4909INITIALIZE_PASS_END(ScopInfo, "polly-scops",
4910 "Polly - Create polyhedral description of Scops", false,
4911 false)