1 //===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This transformation implements the well known scalar replacement of
10 /// aggregates transformation. It tries to identify promotable elements of an
11 /// aggregate alloca, and promote them to registers. It will also try to
12 /// convert uses of an element (or set of elements) of an alloca into a vector
13 /// or bitfield-style integer scalar if appropriate.
14 ///
15 /// It works to do this with minimal slicing of the alloca so that regions
16 /// which are merely transferred in and out of external memory remain unchanged
17 /// and are not decomposed to scalar code.
18 ///
19 /// Because this also performs alloca promotion, it can be thought of as also
20 /// serving the purpose of SSA formation. The algorithm iterates on the
21 /// function until all opportunities for promotion have been realized.
22 ///
23 //===----------------------------------------------------------------------===//
24 
25 #include "llvm/Transforms/Scalar/SROA.h"
26 #include "llvm/ADT/APInt.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/PointerIntPair.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/SetVector.h"
33 #include "llvm/ADT/SmallPtrSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringRef.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/ADT/iterator.h"
39 #include "llvm/ADT/iterator_range.h"
42 #include "llvm/Analysis/Loads.h"
43 #include "llvm/Analysis/PtrUseVisitor.h"
45 #include "llvm/Config/llvm-config.h"
46 #include "llvm/IR/BasicBlock.h"
47 #include "llvm/IR/Constant.h"
48 #include "llvm/IR/ConstantFolder.h"
49 #include "llvm/IR/Constants.h"
50 #include "llvm/IR/DIBuilder.h"
51 #include "llvm/IR/DataLayout.h"
53 #include "llvm/IR/DerivedTypes.h"
54 #include "llvm/IR/Dominators.h"
55 #include "llvm/IR/Function.h"
56 #include "llvm/IR/GetElementPtrTypeIterator.h"
57 #include "llvm/IR/GlobalAlias.h"
58 #include "llvm/IR/IRBuilder.h"
59 #include "llvm/IR/InstVisitor.h"
60 #include "llvm/IR/InstrTypes.h"
61 #include "llvm/IR/Instruction.h"
62 #include "llvm/IR/Instructions.h"
63 #include "llvm/IR/IntrinsicInst.h"
64 #include "llvm/IR/Intrinsics.h"
65 #include "llvm/IR/LLVMContext.h"
66 #include "llvm/IR/Metadata.h"
67 #include "llvm/IR/Module.h"
68 #include "llvm/IR/Operator.h"
69 #include "llvm/IR/PassManager.h"
70 #include "llvm/IR/Type.h"
71 #include "llvm/IR/Use.h"
72 #include "llvm/IR/User.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/Pass.h"
75 #include "llvm/Support/Casting.h"
76 #include "llvm/Support/CommandLine.h"
77 #include "llvm/Support/Compiler.h"
78 #include "llvm/Support/Debug.h"
81 #include "llvm/Support/raw_ostream.h"
82 #include "llvm/Transforms/Scalar.h"
84 #include <algorithm>
85 #include <cassert>
86 #include <chrono>
87 #include <cstddef>
88 #include <cstdint>
89 #include <cstring>
90 #include <iterator>
91 #include <string>
92 #include <tuple>
93 #include <utility>
94 #include <vector>
95 
96 #ifndef NDEBUG
97 // We only use this for a debug check.
98 #include <random>
99 #endif
100 
101 using namespace llvm;
102 using namespace llvm::sroa;
103 
104 #define DEBUG_TYPE "sroa"
105 
106 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
107 STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
108 STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
109 STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
110 STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
111 STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
112 STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
113 STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
114 STATISTIC(NumDeleted, "Number of instructions deleted");
115 STATISTIC(NumVectorized, "Number of vectorized aggregates");
116 
117 /// Hidden option to enable randomly shuffling the slices to help uncover
118 /// instability in their order.
119 static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
120  cl::init(false), cl::Hidden);
121 
122 /// Hidden option to experiment with completely strict handling of inbounds
123 /// GEPs.
124 static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false),
125  cl::Hidden);
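// As a sketch, both hidden flags can be toggled from the command line when the
// pass is run standalone through opt (the input file name here is hypothetical):
//
//   opt -sroa -sroa-random-shuffle-slices -sroa-strict-inbounds -S input.ll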
126 
127 namespace {
128 
129 /// A custom IRBuilder inserter which prefixes all names, but only in
130 /// Assert builds.
131 class IRBuilderPrefixedInserter : public IRBuilderDefaultInserter {
132  std::string Prefix;
133 
134  const Twine getNameWithPrefix(const Twine &Name) const {
135  return Name.isTriviallyEmpty() ? Name : Prefix + Name;
136  }
137 
138 public:
139  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }
140 
141 protected:
142  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
143  BasicBlock::iterator InsertPt) const {
144  IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB,
145  InsertPt);
146  }
147 };
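// Sketch of the effect in an asserts build (names here are hypothetical): with
// the prefix set to "foo.0.", a call such as
//
//   Value *Sum = IRB.CreateAdd(A, B, "sum");
//
// yields an instruction named "foo.0.sum", while values created without a name
// stay unnamed because getNameWithPrefix() leaves trivially empty names alone.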
148 
149 /// Provide a type for IRBuilder that drops names in release builds.
150 using IRBuilderTy = llvm::IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;
151 
152 /// A used slice of an alloca.
153 ///
154 /// This structure represents a slice of an alloca used by some instruction. It
155 /// stores both the begin and end offsets of this use, a pointer to the use
156 /// itself, and a flag indicating whether we can classify the use as splittable
157 /// or not when forming partitions of the alloca.
158 class Slice {
159  /// The beginning offset of the range.
160  uint64_t BeginOffset = 0;
161 
162  /// The ending offset, not included in the range.
163  uint64_t EndOffset = 0;
164 
165  /// Storage for both the use of this slice and whether it can be
166  /// split.
167  PointerIntPair<Use *, 1, bool> UseAndIsSplittable;
168 
169 public:
170  Slice() = default;
171 
172  Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
173  : BeginOffset(BeginOffset), EndOffset(EndOffset),
174  UseAndIsSplittable(U, IsSplittable) {}
175 
176  uint64_t beginOffset() const { return BeginOffset; }
177  uint64_t endOffset() const { return EndOffset; }
178 
179  bool isSplittable() const { return UseAndIsSplittable.getInt(); }
180  void makeUnsplittable() { UseAndIsSplittable.setInt(false); }
181 
182  Use *getUse() const { return UseAndIsSplittable.getPointer(); }
183 
184  bool isDead() const { return getUse() == nullptr; }
185  void kill() { UseAndIsSplittable.setPointer(nullptr); }
186 
187  /// Support for ordering ranges.
188  ///
189  /// This provides an ordering over ranges such that start offsets are
190  /// always increasing, and within equal start offsets, the end offsets are
191  /// decreasing. Thus the spanning range comes first in a cluster with the
192  /// same start position.
193  bool operator<(const Slice &RHS) const {
194  if (beginOffset() < RHS.beginOffset())
195  return true;
196  if (beginOffset() > RHS.beginOffset())
197  return false;
198  if (isSplittable() != RHS.isSplittable())
199  return !isSplittable();
200  if (endOffset() > RHS.endOffset())
201  return true;
202  return false;
203  }
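  // For example, slices of one alloca sort as
  //   [0,16) unsplittable < [0,16) splittable < [0,8) splittable < [4,12)
  // i.e. increasing begin offset, unsplittable before splittable at the same
  // start, and the spanning (longest) slice first within each cluster.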
204 
205  /// Support comparison with a single offset to allow binary searches.
206  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
207  uint64_t RHSOffset) {
208  return LHS.beginOffset() < RHSOffset;
209  }
210  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
211  const Slice &RHS) {
212  return LHSOffset < RHS.beginOffset();
213  }
214 
215  bool operator==(const Slice &RHS) const {
216  return isSplittable() == RHS.isSplittable() &&
217  beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
218  }
219  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
220 };
221 
222 } // end anonymous namespace
223 
224 /// Representation of the alloca slices.
225 ///
226 /// This class represents the slices of an alloca which are formed by its
227 /// various uses. If a pointer escapes, we can't fully build a representation
228 /// for the slices used and we reflect that in this structure. The uses are
229 /// stored, sorted by increasing beginning offset and with unsplittable slices
230 /// starting at a particular offset before splittable slices.
231 class llvm::sroa::AllocaSlices {
232 public:
233  /// Construct the slices of a particular alloca.
234  AllocaSlices(const DataLayout &DL, AllocaInst &AI);
235 
236  /// Test whether a pointer to the allocation escapes our analysis.
237  ///
238  /// If this is true, the slices are never fully built and should be
239  /// ignored.
240  bool isEscaped() const { return PointerEscapingInstr; }
241 
242  /// Support for iterating over the slices.
243  /// @{
244  using iterator = SmallVectorImpl<Slice>::iterator;
245  using range = iterator_range<iterator>;
246 
247  iterator begin() { return Slices.begin(); }
248  iterator end() { return Slices.end(); }
249 
250  using const_iterator = SmallVectorImpl<Slice>::const_iterator;
251  using const_range = iterator_range<const_iterator>;
252 
253  const_iterator begin() const { return Slices.begin(); }
254  const_iterator end() const { return Slices.end(); }
255  /// @}
256 
257  /// Erase a range of slices.
258  void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); }
259 
260  /// Insert new slices for this alloca.
261  ///
262  /// This moves the slices into the alloca's slices collection, and re-sorts
263  /// everything so that the usual ordering properties of the alloca's slices
264  /// hold.
265  void insert(ArrayRef<Slice> NewSlices) {
266  int OldSize = Slices.size();
267  Slices.append(NewSlices.begin(), NewSlices.end());
268  auto SliceI = Slices.begin() + OldSize;
269  llvm::sort(SliceI, Slices.end());
270  std::inplace_merge(Slices.begin(), SliceI, Slices.end());
271  }
272 
273  // Forward declare the iterator and range accessor for walking the
274  // partitions.
275  class partition_iterator;
276  iterator_range<partition_iterator> partitions();
277 
278  /// Access the dead users for this alloca.
279  ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }
280 
281  /// Access the dead operands referring to this alloca.
282  ///
283  /// These are operands which cannot actually be used to refer to the
284  /// alloca as they are outside its range and the user doesn't correct for
285  /// that. These mostly consist of PHI node inputs and the like which we just
286  /// need to replace with undef.
287  ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }
288 
289 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
290  void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const;
291  void printSlice(raw_ostream &OS, const_iterator I,
292  StringRef Indent = " ") const;
293  void printUse(raw_ostream &OS, const_iterator I,
294  StringRef Indent = " ") const;
295  void print(raw_ostream &OS) const;
296  void dump(const_iterator I) const;
297  void dump() const;
298 #endif
299 
300 private:
301  template <typename DerivedT, typename RetT = void> class BuilderBase;
302  class SliceBuilder;
303 
304  friend class AllocaSlices::SliceBuilder;
305 
306 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
307  /// Handle to alloca instruction to simplify method interfaces.
308  AllocaInst &AI;
309 #endif
310 
311  /// The instruction responsible for this alloca not having a known set
312  /// of slices.
313  ///
314  /// When an instruction (potentially) escapes the pointer to the alloca, we
315  /// store a pointer to that here and abort trying to form slices of the
316  /// alloca. This will be null if the alloca slices are analyzed successfully.
317  Instruction *PointerEscapingInstr;
318 
319  /// The slices of the alloca.
320  ///
321  /// We store a vector of the slices formed by uses of the alloca here. This
322  /// vector is sorted by increasing begin offset, and then the unsplittable
323  /// slices before the splittable ones. See the Slice inner class for more
324  /// details.
325  SmallVector<Slice, 8> Slices;
326 
327  /// Instructions which will become dead if we rewrite the alloca.
328  ///
329  /// Note that these are not separated by slice. This is because we expect an
330  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
331  /// all these instructions can simply be removed and replaced with undef as
332  /// they come from outside of the allocated space.
333  SmallVector<Instruction *, 8> DeadUsers;
334 
335  /// Operands which will become dead if we rewrite the alloca.
336  ///
337  /// These are operands that in their particular use can be replaced with
338  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
339  /// to PHI nodes and the like. They aren't entirely dead (there might be
340  /// a GEP back into the bounds using it elsewhere), and neither is the PHI, but we
341  /// want to swap this particular input for undef to simplify the use lists of
342  /// the alloca.
343  SmallVector<Use *, 8> DeadOperands;
344 };
345 
346 /// A partition of the slices.
347 ///
348 /// An ephemeral representation for a range of slices which can be viewed as
349 /// a partition of the alloca. This range represents a span of the alloca's
350 /// memory which cannot be split, and provides access to all of the slices
351 /// overlapping some part of the partition.
352 ///
353 /// Objects of this type are produced by traversing the alloca's slices, but
354 /// are only ephemeral and not persistent.
355 class llvm::sroa::Partition {
356 private:
357  friend class AllocaSlices;
358  friend class AllocaSlices::partition_iterator;
359 
360  using iterator = AllocaSlices::iterator;
361 
362  /// The beginning and ending offsets of the alloca for this
363  /// partition.
364  uint64_t BeginOffset, EndOffset;
365 
366  /// The start and end iterators of this partition.
367  iterator SI, SJ;
368 
369  /// A collection of split slice tails overlapping the partition.
370  SmallVector<Slice *, 4> SplitTails;
371 
372  /// Raw constructor builds an empty partition starting and ending at
373  /// the given iterator.
374  Partition(iterator SI) : SI(SI), SJ(SI) {}
375 
376 public:
377  /// The start offset of this partition.
378  ///
379  /// All of the contained slices start at or after this offset.
380  uint64_t beginOffset() const { return BeginOffset; }
381 
382  /// The end offset of this partition.
383  ///
384  /// All of the contained slices end at or before this offset.
385  uint64_t endOffset() const { return EndOffset; }
386 
387  /// The size of the partition.
388  ///
389  /// Note that this can never be zero.
390  uint64_t size() const {
391  assert(BeginOffset < EndOffset && "Partitions must span some bytes!");
392  return EndOffset - BeginOffset;
393  }
394 
395  /// Test whether this partition contains no slices, and merely spans
396  /// a region occupied by split slices.
397  bool empty() const { return SI == SJ; }
398 
399  /// \name Iterate slices that start within the partition.
400  /// These may be splittable or unsplittable. They have a begin offset >= the
401  /// partition begin offset.
402  /// @{
403  // FIXME: We should probably define a "concat_iterator" helper and use that
404  // to stitch together pointee_iterators over the split tails and the
405  // contiguous iterators of the partition. That would give a much nicer
406  // interface here. We could then additionally expose filtered iterators for
407  // split, unsplit, and unsplittable slices based on the usage patterns.
408  iterator begin() const { return SI; }
409  iterator end() const { return SJ; }
410  /// @}
411 
412  /// Get the sequence of split slice tails.
413  ///
414  /// These tails are of slices which start before this partition but are
415  /// split and overlap into the partition. We accumulate these while forming
416  /// partitions.
417  ArrayRef<Slice *> splitSliceTails() const { return SplitTails; }
418 };
419 
420 /// An iterator over partitions of the alloca's slices.
421 ///
422 /// This iterator implements the core algorithm for partitioning the alloca's
423 /// slices. It is a forward iterator as we don't support backtracking for
424 /// efficiency reasons, and re-use a single storage area to maintain the
425 /// current set of split slices.
426 ///
427 /// It is templated on the slice iterator type to use so that it can operate
428 /// with either const or non-const slice iterators.
429 class AllocaSlices::partition_iterator
430     : public iterator_facade_base<partition_iterator, std::forward_iterator_tag,
431  Partition> {
432  friend class AllocaSlices;
433 
434  /// Most of the state for walking the partitions is held in a class
435  /// with a nice interface for examining them.
436  Partition P;
437 
438  /// We need to keep the end of the slices to know when to stop.
439  AllocaSlices::iterator SE;
440 
441  /// We also need to keep track of the maximum split end offset seen.
442  /// FIXME: Do we really?
443  uint64_t MaxSplitSliceEndOffset = 0;
444 
445  /// Sets the partition to be empty at given iterator, and sets the
446  /// end iterator.
447  partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
448      : P(SI), SE(SE) {
449  // If not already at the end, advance our state to form the initial
450  // partition.
451  if (SI != SE)
452  advance();
453  }
454 
455  /// Advance the iterator to the next partition.
456  ///
457  /// Requires that the iterator not be at the end of the slices.
458  void advance() {
459  assert((P.SI != SE || !P.SplitTails.empty()) &&
460  "Cannot advance past the end of the slices!");
461 
462  // Clear out any split uses which have ended.
463  if (!P.SplitTails.empty()) {
464  if (P.EndOffset >= MaxSplitSliceEndOffset) {
465  // If we've finished all splits, this is easy.
466  P.SplitTails.clear();
467  MaxSplitSliceEndOffset = 0;
468  } else {
469  // Remove the uses which have ended in the prior partition. This
470  // cannot change the max split slice end because we just checked that
471  // the prior partition ended prior to that max.
472  P.SplitTails.erase(llvm::remove_if(P.SplitTails,
473  [&](Slice *S) {
474  return S->endOffset() <=
475  P.EndOffset;
476  }),
477  P.SplitTails.end());
478  assert(llvm::any_of(P.SplitTails,
479  [&](Slice *S) {
480  return S->endOffset() == MaxSplitSliceEndOffset;
481  }) &&
482  "Could not find the current max split slice offset!");
483  assert(llvm::all_of(P.SplitTails,
484  [&](Slice *S) {
485  return S->endOffset() <= MaxSplitSliceEndOffset;
486  }) &&
487  "Max split slice end offset is not actually the max!");
488  }
489  }
490 
491  // If P.SI is already at the end, then we've cleared the split tail and
492  // now have an end iterator.
493  if (P.SI == SE) {
494  assert(P.SplitTails.empty() && "Failed to clear the split slices!");
495  return;
496  }
497 
498  // If we had a non-empty partition previously, set up the state for
499  // subsequent partitions.
500  if (P.SI != P.SJ) {
501  // Accumulate all the splittable slices which started in the old
502  // partition into the split list.
503  for (Slice &S : P)
504  if (S.isSplittable() && S.endOffset() > P.EndOffset) {
505  P.SplitTails.push_back(&S);
506  MaxSplitSliceEndOffset =
507  std::max(S.endOffset(), MaxSplitSliceEndOffset);
508  }
509 
510  // Start from the end of the previous partition.
511  P.SI = P.SJ;
512 
513  // If P.SI is now at the end, we at most have a tail of split slices.
514  if (P.SI == SE) {
515  P.BeginOffset = P.EndOffset;
516  P.EndOffset = MaxSplitSliceEndOffset;
517  return;
518  }
519 
520  // If we have split slices and the next slice is after a gap and is
521  // not splittable, immediately form an empty partition for the split
522  // slices up until the next slice begins.
523  if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset &&
524  !P.SI->isSplittable()) {
525  P.BeginOffset = P.EndOffset;
526  P.EndOffset = P.SI->beginOffset();
527  return;
528  }
529  }
530 
531  // OK, we need to consume new slices. Set the end offset based on the
532  // current slice, and step SJ past it. The beginning offset of the
533  // partition is the beginning offset of the next slice unless we have
534  // pre-existing split slices that are continuing, in which case we begin
535  // at the prior end offset.
536  P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
537  P.EndOffset = P.SI->endOffset();
538  ++P.SJ;
539 
540  // There are two strategies to form a partition based on whether the
541  // partition starts with an unsplittable slice or a splittable slice.
542  if (!P.SI->isSplittable()) {
543  // When we're forming an unsplittable region, it must always start at
544  // the first slice and will extend through its end.
545  assert(P.BeginOffset == P.SI->beginOffset());
546 
547  // Form a partition including all of the overlapping slices with this
548  // unsplittable slice.
549  while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
550  if (!P.SJ->isSplittable())
551  P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
552  ++P.SJ;
553  }
554 
555  // We have a partition across a set of overlapping unsplittable
556  // slices.
557  return;
558  }
559 
560  // If we're starting with a splittable slice, then we need to form
561  // a synthetic partition spanning it and any other overlapping splittable
562  // slices.
563  assert(P.SI->isSplittable() && "Forming a splittable partition!");
564 
565  // Collect all of the overlapping splittable slices.
566  while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
567  P.SJ->isSplittable()) {
568  P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
569  ++P.SJ;
570  }
571 
572  // Back up P.EndOffset if we ended the span early when encountering an
573  // unsplittable slice. This synthesizes the early end offset of
574  // a partition spanning only splittable slices.
575  if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
576  assert(!P.SJ->isSplittable());
577  P.EndOffset = P.SJ->beginOffset();
578  }
579  }
580 
581 public:
582  bool operator==(const partition_iterator &RHS) const {
583  assert(SE == RHS.SE &&
584  "End iterators don't match between compared partition iterators!");
585 
586  // The observed positions of partitions are marked by the P.SI iterator and
587  // the emptiness of the split slices. The latter is only relevant when
588  // P.SI == SE, as the end iterator will additionally have an empty split
589  // slices list, but the prior may have the same P.SI and a tail of split
590  // slices.
591  if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
592  assert(P.SJ == RHS.P.SJ &&
593  "Same set of slices formed two different sized partitions!");
594  assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
595  "Same slice position with differently sized non-empty split "
596  "slice tails!");
597  return true;
598  }
599  return false;
600  }
601 
602  partition_iterator &operator++() {
603     advance();
604  return *this;
605  }
606 
607  Partition &operator*() { return P; }
608 };
609 
610 /// A forward range over the partitions of the alloca's slices.
611 ///
612 /// This accesses an iterator range over the partitions of the alloca's
613 /// slices. It computes these partitions on the fly based on the overlapping
614 /// offsets of the slices and the ability to split them. It will visit "empty"
615 /// partitions to cover regions of the alloca only accessed via split
616 /// slices.
617 iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
618  return make_range(partition_iterator(begin(), end()),
619  partition_iterator(end(), end()));
620 }
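// Usage sketch, assuming an AllocaSlices `AS` that was built successfully
// (i.e. !AS.isEscaped()): walk the disjoint partitions and the slices each one
// covers.
//
//   for (Partition &P : AS.partitions()) {
//     // Slices starting within [P.beginOffset(), P.endOffset()).
//     for (Slice &S : P)
//       (void)S.getUse();
//     // Tails of earlier split slices that overlap this partition.
//     for (Slice *S : P.splitSliceTails())
//       (void)S->endOffset();
//   }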
621 
622 static Value *foldSelectInst(SelectInst &SI) {
623  // If the condition being selected on is a constant or the same value is
624  // being selected between, fold the select. Yes this does (rarely) happen
625  // early on.
626  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
627  return SI.getOperand(1 + CI->isZero());
628  if (SI.getOperand(1) == SI.getOperand(2))
629  return SI.getOperand(1);
630 
631  return nullptr;
632 }
633 
634 /// A helper that folds a PHI node or a select.
635 static Value *foldPHINodeOrSelectInst(Instruction &I) {
636  if (PHINode *PN = dyn_cast<PHINode>(&I)) {
637  // If PN merges together the same value, return that value.
638  return PN->hasConstantValue();
639  }
640  return foldSelectInst(cast<SelectInst>(I));
641 }
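// For example, a PHI such as "phi i32* [ %p, %bb1 ], [ %p, %bb2 ]" folds to %p
// via hasConstantValue(), and "select i1 true, i32* %p, i32* %q" folds to %p
// because the condition is a ConstantInt; anything else returns nullptr.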
642 
643 /// Builder for the alloca slices.
644 ///
645 /// This class builds a set of alloca slices by recursively visiting the uses
646 /// of an alloca and making a slice for each load and store at each offset.
647 class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
648  friend class PtrUseVisitor<SliceBuilder>;
649  friend class InstVisitor<SliceBuilder>;
650 
651  using Base = PtrUseVisitor<SliceBuilder>;
652 
653  const uint64_t AllocSize;
654  AllocaSlices &AS;
655 
656  SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
657  SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;
658 
659  /// Set to de-duplicate dead instructions found in the use walk.
660  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
661 
662 public:
663  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
664      : PtrUseVisitor<SliceBuilder>(DL),
665        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {}
666 
667 private:
668  void markAsDead(Instruction &I) {
669  if (VisitedDeadInsts.insert(&I).second)
670  AS.DeadUsers.push_back(&I);
671  }
672 
673  void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
674  bool IsSplittable = false) {
675  // Completely skip uses which have a zero size or start either before or
676  // past the end of the allocation.
677  if (Size == 0 || Offset.uge(AllocSize)) {
678  LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @"
679  << Offset
680  << " which has zero size or starts outside of the "
681  << AllocSize << " byte alloca:\n"
682  << " alloca: " << AS.AI << "\n"
683  << " use: " << I << "\n");
684  return markAsDead(I);
685  }
686 
687  uint64_t BeginOffset = Offset.getZExtValue();
688  uint64_t EndOffset = BeginOffset + Size;
689 
690  // Clamp the end offset to the end of the allocation. Note that this is
691  // formulated to handle even the case where "BeginOffset + Size" overflows.
692  // This may appear superficially to be something we could ignore entirely,
693  // but that is not so! There may be widened loads or PHI-node uses where
694  // some instructions are dead but not others. We can't completely ignore
695  // them, and so have to record at least the information here.
696  assert(AllocSize >= BeginOffset); // Established above.
697  if (Size > AllocSize - BeginOffset) {
698  LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @"
699  << Offset << " to remain within the " << AllocSize
700  << " byte alloca:\n"
701  << " alloca: " << AS.AI << "\n"
702  << " use: " << I << "\n");
703  EndOffset = AllocSize;
704  }
705 
706  AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
707  }
708 
709  void visitBitCastInst(BitCastInst &BC) {
710  if (BC.use_empty())
711  return markAsDead(BC);
712 
713  return Base::visitBitCastInst(BC);
714  }
715 
716  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
717  if (ASC.use_empty())
718  return markAsDead(ASC);
719 
720  return Base::visitAddrSpaceCastInst(ASC);
721  }
722 
723  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
724  if (GEPI.use_empty())
725  return markAsDead(GEPI);
726 
727  if (SROAStrictInbounds && GEPI.isInBounds()) {
728  // FIXME: This is a manually un-factored variant of the basic code inside
729  // of GEPs with checking of the inbounds invariant specified in the
730  // langref in a very strict sense. If we ever want to enable
731  // SROAStrictInbounds, this code should be factored cleanly into
732  // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
733  // by writing out the code here where we have the underlying allocation
734  // size readily available.
735  APInt GEPOffset = Offset;
736  const DataLayout &DL = GEPI.getModule()->getDataLayout();
737  for (gep_type_iterator GTI = gep_type_begin(GEPI),
738  GTE = gep_type_end(GEPI);
739  GTI != GTE; ++GTI) {
740  ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
741  if (!OpC)
742  break;
743 
744  // Handle a struct index, which adds its field offset to the pointer.
745  if (StructType *STy = GTI.getStructTypeOrNull()) {
746  unsigned ElementIdx = OpC->getZExtValue();
747  const StructLayout *SL = DL.getStructLayout(STy);
748  GEPOffset +=
749  APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
750  } else {
751  // For array or vector indices, scale the index by the size of the
752  // type.
753  APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
754  GEPOffset += Index * APInt(Offset.getBitWidth(),
755  DL.getTypeAllocSize(GTI.getIndexedType()));
756  }
757 
758  // If this index has computed an intermediate pointer which is not
759  // inbounds, then the result of the GEP is a poison value and we can
760  // delete it and all uses.
761  if (GEPOffset.ugt(AllocSize))
762  return markAsDead(GEPI);
763  }
764  }
765 
766  return Base::visitGetElementPtrInst(GEPI);
767  }
768 
769  void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
770  uint64_t Size, bool IsVolatile) {
771  // We allow splitting of non-volatile loads and stores where the type is an
772  // integer type. These may be used to implement 'memcpy' or other "transfer
773  // of bits" patterns.
774  bool IsSplittable = Ty->isIntegerTy() && !IsVolatile;
775 
776  insertUse(I, Offset, Size, IsSplittable);
777  }
778 
779  void visitLoadInst(LoadInst &LI) {
780  assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
781  "All simple FCA loads should have been pre-split");
782 
783  if (!IsOffsetKnown)
784  return PI.setAborted(&LI);
785 
786  if (LI.isVolatile() &&
787  LI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
788  return PI.setAborted(&LI);
789 
790  uint64_t Size = DL.getTypeStoreSize(LI.getType());
791  return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
792  }
793 
794  void visitStoreInst(StoreInst &SI) {
795  Value *ValOp = SI.getValueOperand();
796  if (ValOp == *U)
797  return PI.setEscapedAndAborted(&SI);
798  if (!IsOffsetKnown)
799  return PI.setAborted(&SI);
800 
801  if (SI.isVolatile() &&
802  SI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
803  return PI.setAborted(&SI);
804 
805  uint64_t Size = DL.getTypeStoreSize(ValOp->getType());
806 
807  // If this memory access can be shown to *statically* extend outside the
808  // bounds of the allocation, its behavior is undefined, so simply
809  // ignore it. Note that this is more strict than the generic clamping
810  // behavior of insertUse. We also try to handle cases which might run the
811  // risk of overflow.
812  // FIXME: We should instead consider the pointer to have escaped if this
813  // function is being instrumented for addressing bugs or race conditions.
814  if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
815  LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @"
816  << Offset << " which extends past the end of the "
817  << AllocSize << " byte alloca:\n"
818  << " alloca: " << AS.AI << "\n"
819  << " use: " << SI << "\n");
820  return markAsDead(SI);
821  }
822 
823  assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
824  "All simple FCA stores should have been pre-split");
825  handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
826  }
827 
828  void visitMemSetInst(MemSetInst &II) {
829  assert(II.getRawDest() == *U && "Pointer use is not the destination?");
830  ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
831  if ((Length && Length->getValue() == 0) ||
832  (IsOffsetKnown && Offset.uge(AllocSize)))
833  // Zero-length mem transfer intrinsics can be ignored entirely.
834  return markAsDead(II);
835 
836  if (!IsOffsetKnown)
837  return PI.setAborted(&II);
838 
839  // Don't replace this with a store with a different address space. TODO:
840  // Use a store with the casted new alloca?
841  if (II.isVolatile() && II.getDestAddressSpace() != DL.getAllocaAddrSpace())
842  return PI.setAborted(&II);
843 
844  insertUse(II, Offset, Length ? Length->getLimitedValue()
845  : AllocSize - Offset.getLimitedValue(),
846  (bool)Length);
847  }
848 
849  void visitMemTransferInst(MemTransferInst &II) {
850  ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
851  if (Length && Length->getValue() == 0)
852  // Zero-length mem transfer intrinsics can be ignored entirely.
853  return markAsDead(II);
854 
855  // Because we can visit these intrinsics twice, also check to see if the
856  // first time marked this instruction as dead. If so, skip it.
857  if (VisitedDeadInsts.count(&II))
858  return;
859 
860  if (!IsOffsetKnown)
861  return PI.setAborted(&II);
862 
863  // Don't replace this with a load/store with a different address space.
864  // TODO: Use a store with the casted new alloca?
865  if (II.isVolatile() &&
866  (II.getDestAddressSpace() != DL.getAllocaAddrSpace() ||
867  II.getSourceAddressSpace() != DL.getAllocaAddrSpace()))
868  return PI.setAborted(&II);
869 
870  // This side of the transfer is completely out-of-bounds, and so we can
871  // nuke the entire transfer. However, we also need to nuke the other side
872  // if already added to our partitions.
873  // FIXME: Yet another place we really should bypass this when
874  // instrumenting for ASan.
875  if (Offset.uge(AllocSize)) {
876  SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
877      MemTransferSliceMap.find(&II);
878  if (MTPI != MemTransferSliceMap.end())
879  AS.Slices[MTPI->second].kill();
880  return markAsDead(II);
881  }
882 
883  uint64_t RawOffset = Offset.getLimitedValue();
884  uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset;
885 
886  // Check for the special case where the same exact value is used for both
887  // source and dest.
888  if (*U == II.getRawDest() && *U == II.getRawSource()) {
889  // For non-volatile transfers this is a no-op.
890  if (!II.isVolatile())
891  return markAsDead(II);
892 
893  return insertUse(II, Offset, Size, /*IsSplittable=*/false);
894  }
895 
896  // If we have seen both source and destination for a mem transfer, then
897  // they both point to the same alloca.
898  bool Inserted;
899  SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
900  std::tie(MTPI, Inserted) =
901  MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size()));
902  unsigned PrevIdx = MTPI->second;
903  if (!Inserted) {
904  Slice &PrevP = AS.Slices[PrevIdx];
905 
906  // Check if the begin offsets match and this is a non-volatile transfer.
907  // In that case, we can completely elide the transfer.
908  if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
909  PrevP.kill();
910  return markAsDead(II);
911  }
912 
913  // Otherwise we have an offset transfer within the same alloca. We can't
914  // split those.
915  PrevP.makeUnsplittable();
916  }
917 
918  // Insert the use now that we've fixed up the splittable nature.
919  insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);
920 
921  // Check that we ended up with a valid index in the map.
922  assert(AS.Slices[PrevIdx].getUse()->getUser() == &II &&
923  "Map index doesn't point back to a slice with this user.");
924  }
925 
926  // Disable SRoA for any intrinsics except for lifetime invariants.
927  // FIXME: What about debug intrinsics? This matches old behavior, but
928  // doesn't make sense.
929  void visitIntrinsicInst(IntrinsicInst &II) {
930  if (!IsOffsetKnown)
931  return PI.setAborted(&II);
932 
933  if (II.isLifetimeStartOrEnd()) {
934  ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
935  uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
936  Length->getLimitedValue());
937  insertUse(II, Offset, Size, true);
938  return;
939  }
940 
941  Base::visitIntrinsicInst(II);
942  }
943 
944  Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
945  // We consider any PHI or select that results in a direct load or store of
946  // the same offset to be a viable use for slicing purposes. These uses
947  // are considered unsplittable and the size is the maximum loaded or stored
948  // size.
949  SmallPtrSet<Instruction *, 4> Visited;
950  SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
951  Visited.insert(Root);
952  Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
953  const DataLayout &DL = Root->getModule()->getDataLayout();
954  // If there are no loads or stores, the access is dead. We mark that as
955  // a size zero access.
956  Size = 0;
957  do {
958  Instruction *I, *UsedI;
959  std::tie(UsedI, I) = Uses.pop_back_val();
960 
961  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
962  Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
963  continue;
964  }
965  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
966  Value *Op = SI->getOperand(0);
967  if (Op == UsedI)
968  return SI;
969  Size = std::max(Size, DL.getTypeStoreSize(Op->getType()));
970  continue;
971  }
972 
973  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
974  if (!GEP->hasAllZeroIndices())
975  return GEP;
976  } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
977  !isa<SelectInst>(I) && !isa<AddrSpaceCastInst>(I)) {
978  return I;
979  }
980 
981  for (User *U : I->users())
982  if (Visited.insert(cast<Instruction>(U)).second)
983  Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
984  } while (!Uses.empty());
985 
986  return nullptr;
987  }
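  // Illustration: a select whose only user is "load i32, i32* %sel" is a safe,
  // unsplittable use and Size ends up as 4 bytes, whereas a user such as
  // "store i32* %sel, i32** %slot" is returned as the unsafe instruction since
  // the pointer itself is being stored rather than loaded through.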
988 
989  void visitPHINodeOrSelectInst(Instruction &I) {
990  assert(isa<PHINode>(I) || isa<SelectInst>(I));
991  if (I.use_empty())
992  return markAsDead(I);
993 
994  // TODO: We could use SimplifyInstruction here to fold PHINodes and
995  // SelectInsts. However, doing so requires changing the current
996  // dead-operand-tracking mechanism. For instance, suppose neither loading
997  // from %U nor %other traps. Then "load (select undef, %U, %other)" does not
998  // trap either. However, if we simply replace %U with undef using the
999  // current dead-operand-tracking mechanism, "load (select undef, undef,
1000  // %other)" may trap because the select may return the first operand
1001  // "undef".
1002  if (Value *Result = foldPHINodeOrSelectInst(I)) {
1003  if (Result == *U)
1004  // If the result of the constant fold will be the pointer, recurse
1005  // through the PHI/select as if we had RAUW'ed it.
1006  enqueueUsers(I);
1007  else
1008  // Otherwise the operand to the PHI/select is dead, and we can replace
1009  // it with undef.
1010  AS.DeadOperands.push_back(U);
1011 
1012  return;
1013  }
1014 
1015  if (!IsOffsetKnown)
1016  return PI.setAborted(&I);
1017 
1018  // See if we already have computed info on this node.
1019  uint64_t &Size = PHIOrSelectSizes[&I];
1020  if (!Size) {
1021  // This is a new PHI/Select, check for an unsafe use of it.
1022  if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
1023  return PI.setAborted(UnsafeI);
1024  }
1025 
1026  // For PHI and select operands outside the alloca, we can't nuke the entire
1027  // phi or select -- the other side might still be relevant, so we special
1028  // case them here and use a separate structure to track the operands
1029  // themselves which should be replaced with undef.
1030  // FIXME: This should instead be escaped in the event we're instrumenting
1031  // for address sanitization.
1032  if (Offset.uge(AllocSize)) {
1033  AS.DeadOperands.push_back(U);
1034  return;
1035  }
1036 
1037  insertUse(I, Offset, Size);
1038  }
1039 
1040  void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); }
1041 
1042  void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); }
1043 
1044  /// Disable SROA entirely if there are unhandled users of the alloca.
1045  void visitInstruction(Instruction &I) { PI.setAborted(&I); }
1046 };
1047 
1048 AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
1049     :
1050 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1051  AI(AI),
1052 #endif
1053  PointerEscapingInstr(nullptr) {
1054  SliceBuilder PB(DL, AI, *this);
1055  SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
1056  if (PtrI.isEscaped() || PtrI.isAborted()) {
1057  // FIXME: We should sink the escape vs. abort info into the caller nicely,
1058  // possibly by just storing the PtrInfo in the AllocaSlices.
1059  PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
1060  : PtrI.getAbortingInst();
1061  assert(PointerEscapingInstr && "Did not track a bad instruction");
1062  return;
1063  }
1064 
1065  Slices.erase(
1066  llvm::remove_if(Slices, [](const Slice &S) { return S.isDead(); }),
1067  Slices.end());
1068 
1069 #ifndef NDEBUG
1070  if (SROARandomShuffleSlices) {
1071  std::mt19937 MT(static_cast<unsigned>(
1072  std::chrono::system_clock::now().time_since_epoch().count()));
1073  std::shuffle(Slices.begin(), Slices.end(), MT);
1074  }
1075 #endif
1076 
1077  // Sort the uses. This arranges for the offsets to be in ascending order,
1078  // and the sizes to be in descending order.
1079  llvm::sort(Slices);
1080 }
1081 
1082 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1083 
1084 void AllocaSlices::print(raw_ostream &OS, const_iterator I,
1085  StringRef Indent) const {
1086  printSlice(OS, I, Indent);
1087  OS << "\n";
1088  printUse(OS, I, Indent);
1089 }
1090 
1091 void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
1092  StringRef Indent) const {
1093  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
1094  << " slice #" << (I - begin())
1095  << (I->isSplittable() ? " (splittable)" : "");
1096 }
1097 
1098 void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
1099  StringRef Indent) const {
1100  OS << Indent << " used by: " << *I->getUse()->getUser() << "\n";
1101 }
1102 
1103 void AllocaSlices::print(raw_ostream &OS) const {
1104  if (PointerEscapingInstr) {
1105  OS << "Can't analyze slices for alloca: " << AI << "\n"
1106  << " A pointer to this alloca escaped by:\n"
1107  << " " << *PointerEscapingInstr << "\n";
1108  return;
1109  }
1110 
1111  OS << "Slices of alloca: " << AI << "\n";
1112  for (const_iterator I = begin(), E = end(); I != E; ++I)
1113  print(OS, I);
1114 }
1115 
1116 LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
1117  print(dbgs(), I);
1118 }
1119 LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }
1120 
1121 #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1122 
1123 /// Walk the range of a partitioning looking for a common type to cover this
1124 /// sequence of slices.
1125 static Type *findCommonType(AllocaSlices::const_iterator B,
1126                             AllocaSlices::const_iterator E,
1127                             uint64_t EndOffset) {
1128  Type *Ty = nullptr;
1129  bool TyIsCommon = true;
1130  IntegerType *ITy = nullptr;
1131 
1132  // Note that we need to look at *every* alloca slice's Use to ensure we
1133  // always get consistent results regardless of the order of slices.
1134  for (AllocaSlices::const_iterator I = B; I != E; ++I) {
1135  Use *U = I->getUse();
1136  if (isa<IntrinsicInst>(*U->getUser()))
1137  continue;
1138  if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
1139  continue;
1140 
1141  Type *UserTy = nullptr;
1142  if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1143  UserTy = LI->getType();
1144  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1145  UserTy = SI->getValueOperand()->getType();
1146  }
1147 
1148  if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
1149  // If the type is larger than the partition, skip it. We only encounter
1150  // this for split integer operations where we want to use the type of the
1151  // entity causing the split. Also skip if the type is not a byte width
1152  // multiple.
1153  if (UserITy->getBitWidth() % 8 != 0 ||
1154  UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
1155  continue;
1156 
1157  // Track the largest bitwidth integer type used in this way in case there
1158  // is no common type.
1159  if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
1160  ITy = UserITy;
1161  }
1162 
1163  // To avoid depending on the order of slices, Ty and TyIsCommon must not
1164  // depend on types skipped above.
1165  if (!UserTy || (Ty && Ty != UserTy))
1166  TyIsCommon = false; // Give up on anything but an iN type.
1167  else
1168  Ty = UserTy;
1169  }
1170 
1171  return TyIsCommon ? Ty : ITy;
1172 }
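// Worked example: if every slice spanning the full partition is a "load float"
// or a "store float", the common type is float. If one of those accesses is an
// i32 load instead, no single type is common and the fallback is the widest
// integer type with a whole-byte width seen (i32 here); slices that do not
// cover the whole [B->beginOffset(), EndOffset) range are skipped entirely.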
1173 
1174 /// PHI instructions that use an alloca and are subsequently loaded can be
1175 /// rewritten to load both input pointers in the pred blocks and then PHI the
1176 /// results, allowing the load of the alloca to be promoted.
1177 /// From this:
1178 /// %P2 = phi [i32* %Alloca, i32* %Other]
1179 /// %V = load i32* %P2
1180 /// to:
1181 /// %V1 = load i32* %Alloca -> will be mem2reg'd
1182 /// ...
1183 /// %V2 = load i32* %Other
1184 /// ...
1185 /// %V = phi [i32 %V1, i32 %V2]
1186 ///
1187 /// We can do this to a select if its only uses are loads and if the operands
1188 /// to the select can be loaded unconditionally.
1189 ///
1190 /// FIXME: This should be hoisted into a generic utility, likely in
1191 /// Transforms/Utils/Local.h
1192 static bool isSafePHIToSpeculate(PHINode &PN) {
1193  const DataLayout &DL = PN.getModule()->getDataLayout();
1194 
1195  // For now, we can only do this promotion if the load is in the same block
1196  // as the PHI, and if there are no stores between the phi and load.
1197  // TODO: Allow recursive phi users.
1198  // TODO: Allow stores.
1199  BasicBlock *BB = PN.getParent();
1200  unsigned MaxAlign = 0;
1201  uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType());
1202  APInt MaxSize(APWidth, 0);
1203  bool HaveLoad = false;
1204  for (User *U : PN.users()) {
1205  LoadInst *LI = dyn_cast<LoadInst>(U);
1206  if (!LI || !LI->isSimple())
1207  return false;
1208 
1209  // For now we only allow loads in the same block as the PHI. This is
1210  // a common case that happens when instcombine merges two loads through
1211  // a PHI.
1212  if (LI->getParent() != BB)
1213  return false;
1214 
1215  // Ensure that there are no instructions between the PHI and the load that
1216  // could store.
1217  for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
1218  if (BBI->mayWriteToMemory())
1219  return false;
1220 
1221  uint64_t Size = DL.getTypeStoreSize(LI->getType());
1222  MaxAlign = std::max(MaxAlign, LI->getAlignment());
1223  MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize;
1224  HaveLoad = true;
1225  }
1226 
1227  if (!HaveLoad)
1228  return false;
1229 
1230  // We can only transform this if it is safe to push the loads into the
1231  // predecessor blocks. The only thing to watch out for is that we can't put
1232  // a possibly trapping load in the predecessor if it is a critical edge.
1233  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1234  Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator();
1235  Value *InVal = PN.getIncomingValue(Idx);
1236 
1237  // If the value is produced by the terminator of the predecessor (an
1238  // invoke) or it has side-effects, there is no valid place to put a load
1239  // in the predecessor.
1240  if (TI == InVal || TI->mayHaveSideEffects())
1241  return false;
1242 
1243  // If the predecessor has a single successor, then the edge isn't
1244  // critical.
1245  if (TI->getNumSuccessors() == 1)
1246  continue;
1247 
1248  // If this pointer is always safe to load, or if we can prove that there
1249  // is already a load in the block, then we can move the load to the pred
1250  // block.
1251  if (isSafeToLoadUnconditionally(InVal, MaxAlign, MaxSize, DL, TI))
1252  continue;
1253 
1254  return false;
1255  }
1256 
1257  return true;
1258 }
1259 
1260 static void speculatePHINodeLoads(PHINode &PN) {
1261  LLVM_DEBUG(dbgs() << " original: " << PN << "\n");
1262 
1263  LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
1264  Type *LoadTy = SomeLoad->getType();
1265  IRBuilderTy PHIBuilder(&PN);
1266  PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
1267  PN.getName() + ".sroa.speculated");
1268 
1269  // Get the AA tags and alignment to use from one of the loads. It doesn't
1270  // matter which one we get and if any differ.
1271  AAMDNodes AATags;
1272  SomeLoad->getAAMetadata(AATags);
1273  unsigned Align = SomeLoad->getAlignment();
1274 
1275  // Rewrite all loads of the PN to use the new PHI.
1276  while (!PN.use_empty()) {
1277  LoadInst *LI = cast<LoadInst>(PN.user_back());
1278  LI->replaceAllUsesWith(NewPN);
1279  LI->eraseFromParent();
1280  }
1281 
1282  // Inject loads into all of the pred blocks.
1283  DenseMap<BasicBlock*, Value*> InjectedLoads;
1284  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1285  BasicBlock *Pred = PN.getIncomingBlock(Idx);
1286  Value *InVal = PN.getIncomingValue(Idx);
1287 
1288  // A PHI node is allowed to have multiple (duplicated) entries for the same
1289  // basic block, as long as the value is the same. So if we already injected
1290  // a load in the predecessor, then we should reuse the same load for all
1291  // duplicated entries.
1292  if (Value* V = InjectedLoads.lookup(Pred)) {
1293  NewPN->addIncoming(V, Pred);
1294  continue;
1295  }
1296 
1297  Instruction *TI = Pred->getTerminator();
1298  IRBuilderTy PredBuilder(TI);
1299 
1300  LoadInst *Load = PredBuilder.CreateLoad(
1301  LoadTy, InVal,
1302  (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
1303  ++NumLoadsSpeculated;
1304  Load->setAlignment(Align);
1305  if (AATags)
1306  Load->setAAMetadata(AATags);
1307  NewPN->addIncoming(Load, Pred);
1308  InjectedLoads[Pred] = Load;
1309  }
1310 
1311  LLVM_DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
1312  PN.eraseFromParent();
1313 }
1314 
1315 /// Select instructions that use an alloca and are subsequently loaded can be
1316 /// rewritten to load both input pointers and then select between the result,
1317 /// allowing the load of the alloca to be promoted.
1318 /// From this:
1319 /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
1320 /// %V = load i32* %P2
1321 /// to:
1322 /// %V1 = load i32* %Alloca -> will be mem2reg'd
1323 /// %V2 = load i32* %Other
1324 /// %V = select i1 %cond, i32 %V1, i32 %V2
1325 ///
1326 /// We can do this to a select if its only uses are loads and if the operand
1327 /// to the select can be loaded unconditionally.
1328 static bool isSafeSelectToSpeculate(SelectInst &SI) {
1329  Value *TValue = SI.getTrueValue();
1330  Value *FValue = SI.getFalseValue();
1331  const DataLayout &DL = SI.getModule()->getDataLayout();
1332 
1333  for (User *U : SI.users()) {
1334  LoadInst *LI = dyn_cast<LoadInst>(U);
1335  if (!LI || !LI->isSimple())
1336  return false;
1337 
1338  // Both operands to the select need to be dereferenceable, either
1339  // absolutely (e.g. allocas) or at this point because we can see other
1340  // accesses to it.
1341  if (!isSafeToLoadUnconditionally(TValue, LI->getType(), LI->getAlignment(),
1342  DL, LI))
1343  return false;
1344  if (!isSafeToLoadUnconditionally(FValue, LI->getType(), LI->getAlignment(),
1345  DL, LI))
1346  return false;
1347  }
1348 
1349  return true;
1350 }
1351 
1352 static void speculateSelectInstLoads(SelectInst &SI) {
1353  LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");
1354 
1355  IRBuilderTy IRB(&SI);
1356  Value *TV = SI.getTrueValue();
1357  Value *FV = SI.getFalseValue();
1358  // Replace the loads of the select with a select of two loads.
1359  while (!SI.use_empty()) {
1360  LoadInst *LI = cast<LoadInst>(SI.user_back());
1361  assert(LI->isSimple() && "We only speculate simple loads");
1362 
1363  IRB.SetInsertPoint(LI);
1364  LoadInst *TL = IRB.CreateLoad(LI->getType(), TV,
1365  LI->getName() + ".sroa.speculate.load.true");
1366  LoadInst *FL = IRB.CreateLoad(LI->getType(), FV,
1367  LI->getName() + ".sroa.speculate.load.false");
1368  NumLoadsSpeculated += 2;
1369 
1370  // Transfer alignment and AA info if present.
1371  TL->setAlignment(LI->getAlignment());
1372  FL->setAlignment(LI->getAlignment());
1373 
1374  AAMDNodes Tags;
1375  LI->getAAMetadata(Tags);
1376  if (Tags) {
1377  TL->setAAMetadata(Tags);
1378  FL->setAAMetadata(Tags);
1379  }
1380 
1381  Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
1382  LI->getName() + ".sroa.speculated");
1383 
1384  LLVM_DEBUG(dbgs() << " speculated to: " << *V << "\n");
1385  LI->replaceAllUsesWith(V);
1386  LI->eraseFromParent();
1387  }
1388  SI.eraseFromParent();
1389 }
1390 
1391 /// Build a GEP out of a base pointer and indices.
1392 ///
1393 /// This will return the BasePtr if that is valid, or build a new GEP
1394 /// instruction using the IRBuilder if GEP-ing is needed.
1395 static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
1396  SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
1397  if (Indices.empty())
1398  return BasePtr;
1399 
1400  // A single zero index is a no-op, so check for this and avoid building a GEP
1401  // in that case.
1402  if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
1403  return BasePtr;
1404 
1405  return IRB.CreateInBoundsGEP(BasePtr->getType()->getPointerElementType(),
1406  BasePtr, Indices, NamePrefix + "sroa_idx");
1407 }
1408 
1409 /// Get a natural GEP off of the BasePtr walking through Ty toward
1410 /// TargetTy without changing the offset of the pointer.
1411 ///
1412 /// This routine assumes we've already established a properly offset GEP with
1413 /// Indices, and arrived at the Ty type. The goal is to continue to GEP with
1414 /// zero-indices down through type layers until we find one the same as
1415 /// TargetTy. If we can't find one with the same type, we at least try to use
1416 /// one with the same size. If none of that works, we just produce the GEP as
1417 /// indicated by Indices to have the correct offset.
1418 static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
1419  Value *BasePtr, Type *Ty, Type *TargetTy,
1420  SmallVectorImpl<Value *> &Indices,
1421  Twine NamePrefix) {
1422  if (Ty == TargetTy)
1423  return buildGEP(IRB, BasePtr, Indices, NamePrefix);
1424 
1425  // Offset size to use for the indices.
1426  unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType());
1427 
1428  // See if we can descend into a struct and locate a field with the correct
1429  // type.
1430  unsigned NumLayers = 0;
1431  Type *ElementTy = Ty;
1432  do {
1433  if (ElementTy->isPointerTy())
1434  break;
1435 
1436  if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
1437  ElementTy = ArrayTy->getElementType();
1438  Indices.push_back(IRB.getIntN(OffsetSize, 0));
1439  } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
1440  ElementTy = VectorTy->getElementType();
1441  Indices.push_back(IRB.getInt32(0));
1442  } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
1443  if (STy->element_begin() == STy->element_end())
1444  break; // Nothing left to descend into.
1445  ElementTy = *STy->element_begin();
1446  Indices.push_back(IRB.getInt32(0));
1447  } else {
1448  break;
1449  }
1450  ++NumLayers;
1451  } while (ElementTy != TargetTy);
1452  if (ElementTy != TargetTy)
1453  Indices.erase(Indices.end() - NumLayers, Indices.end());
1454 
1455  return buildGEP(IRB, BasePtr, Indices, NamePrefix);
1456 }
1457 
1458 /// Recursively compute indices for a natural GEP.
1459 ///
1460 /// This is the recursive step for getNaturalGEPWithOffset that walks down the
1461 /// element types adding appropriate indices for the GEP.
1462 static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
1463  Value *Ptr, Type *Ty, APInt &Offset,
1464  Type *TargetTy,
1465  SmallVectorImpl<Value *> &Indices,
1466  Twine NamePrefix) {
1467  if (Offset == 0)
1468  return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices,
1469  NamePrefix);
1470 
1471  // We can't recurse through pointer types.
1472  if (Ty->isPointerTy())
1473  return nullptr;
1474 
1475  // We try to analyze GEPs over vectors here, but note that these GEPs are
1476  // extremely poorly defined currently. The long-term goal is to remove GEPing
1477  // over a vector from the IR completely.
1478  if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
1479  unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
1480  if (ElementSizeInBits % 8 != 0) {
1481  // GEPs over non-multiple of 8 size vector elements are invalid.
1482  return nullptr;
1483  }
1484  APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
1485  APInt NumSkippedElements = Offset.sdiv(ElementSize);
1486  if (NumSkippedElements.ugt(VecTy->getNumElements()))
1487  return nullptr;
1488  Offset -= NumSkippedElements * ElementSize;
1489  Indices.push_back(IRB.getInt(NumSkippedElements));
1490  return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
1491  Offset, TargetTy, Indices, NamePrefix);
1492  }
1493 
1494  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
1495  Type *ElementTy = ArrTy->getElementType();
1496  APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
1497  APInt NumSkippedElements = Offset.sdiv(ElementSize);
1498  if (NumSkippedElements.ugt(ArrTy->getNumElements()))
1499  return nullptr;
1500 
1501  Offset -= NumSkippedElements * ElementSize;
1502  Indices.push_back(IRB.getInt(NumSkippedElements));
1503  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1504  Indices, NamePrefix);
1505  }
1506 
1507  StructType *STy = dyn_cast<StructType>(Ty);
1508  if (!STy)
1509  return nullptr;
1510 
1511  const StructLayout *SL = DL.getStructLayout(STy);
1512  uint64_t StructOffset = Offset.getZExtValue();
1513  if (StructOffset >= SL->getSizeInBytes())
1514  return nullptr;
1515  unsigned Index = SL->getElementContainingOffset(StructOffset);
1516  Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
1517  Type *ElementTy = STy->getElementType(Index);
1518  if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
1519  return nullptr; // The offset points into alignment padding.
1520 
1521  Indices.push_back(IRB.getInt32(Index));
1522  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1523  Indices, NamePrefix);
1524 }
1525 
1526 /// Get a natural GEP from a base pointer to a particular offset and
1527 /// resulting in a particular type.
1528 ///
1529 /// The goal is to produce a "natural" looking GEP that works with the existing
1530 /// composite types to arrive at the appropriate offset and element type for
1531 /// a pointer. TargetTy is the element type the returned GEP should point-to if
1532 /// possible. We recurse by decreasing Offset, adding the appropriate index to
1533 /// Indices, and setting Ty to the result subtype.
1534 ///
1535 /// If no natural GEP can be constructed, this function returns null.
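/// For a concrete illustration (hypothetical values, not from any particular
/// caller): given a base pointer of type { i64, [8 x i16] }*, Offset == 12, and
/// TargetTy == i16, the walk skips zero whole objects, descends into struct
/// element 1 (byte offset 8) and then array element 2, so Indices becomes
/// {0, 1, 2} and the resulting GEP addresses byte offset 8 + 2 * 2 == 12 with
/// element type i16.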
1536 static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
1537  Value *Ptr, APInt Offset, Type *TargetTy,
1538  SmallVectorImpl<Value *> &Indices,
1539  Twine NamePrefix) {
1540  PointerType *Ty = cast<PointerType>(Ptr->getType());
1541 
1542  // Don't consider any GEPs through an i8* as natural unless the TargetTy is
1543  // an i8.
1544  if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
1545  return nullptr;
1546 
1547  Type *ElementTy = Ty->getElementType();
1548  if (!ElementTy->isSized())
1549  return nullptr; // We can't GEP through an unsized element.
1550  APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
1551  if (ElementSize == 0)
1552  return nullptr; // Zero-length arrays can't help us build a natural GEP.
1553  APInt NumSkippedElements = Offset.sdiv(ElementSize);
1554 
1555  Offset -= NumSkippedElements * ElementSize;
1556  Indices.push_back(IRB.getInt(NumSkippedElements));
1557  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
1558  Indices, NamePrefix);
1559 }
1560 
1561 /// Compute an adjusted pointer from Ptr by Offset bytes where the
1562 /// resulting pointer has PointerTy.
1563 ///
1564 /// This tries very hard to compute a "natural" GEP which arrives at the offset
1565 /// and produces the pointer type desired. Where it cannot, it will try to use
1566 /// the natural GEP to arrive at the offset and bitcast to the type. Where that
1567 /// fails, it will try to use an existing i8* and GEP to the byte offset and
1568 /// bitcast to the type.
1569 ///
1570 /// The strategy for finding the more natural GEPs is to peel off layers of the
1571 /// pointer, walking back through bit casts and GEPs, searching for a base
1572 /// pointer from which we can compute a natural GEP with the desired
1573 /// properties. The algorithm tries to fold as many constant indices into
1574 /// a single GEP as possible, thus making each GEP more independent of the
1575 /// surrounding code.
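/// As a hypothetical example: if Ptr is "%b = bitcast { i32, i32 }* %a to i8*"
/// and we need Offset == 4 with an i32* result, the first iteration can only
/// build an i8-typed GEP from %b (kept as a fallback), but after peeling the
/// bitcast back to %a the natural GEP with indices (0, 1) already has the
/// desired i32* type, so the fallback GEP is erased and the natural one wins.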
1576 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
1577  APInt Offset, Type *PointerTy, Twine NamePrefix) {
1578  // Even though we don't look through PHI nodes, we could be called on an
1579  // instruction in an unreachable block, which may be on a cycle.
1580  SmallPtrSet<Value *, 4> Visited;
1581  Visited.insert(Ptr);
1582  SmallVector<Value *, 4> Indices;
1583 
1584  // We may end up computing an offset pointer that has the wrong type. If we
1585  // never are able to compute one directly that has the correct type, we'll
1586  // fall back to it, so keep it and the base it was computed from around here.
1587  Value *OffsetPtr = nullptr;
1588  Value *OffsetBasePtr;
1589 
1590  // Remember any i8 pointer we come across to re-use if we need to do a raw
1591  // byte offset.
1592  Value *Int8Ptr = nullptr;
1593  APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1594 
1595  PointerType *TargetPtrTy = cast<PointerType>(PointerTy);
1596  Type *TargetTy = TargetPtrTy->getElementType();
1597 
1598  // `Ptr` (the storage pointer) may have a different address space from the expected
1599  // `PointerTy` (the pointer to be used), and an `addrspacecast` is not necessarily
1600  // valid here. Adjust the pointer type based on the original storage pointer.
1601  auto AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
1602  PointerTy = TargetTy->getPointerTo(AS);
1603 
1604  do {
1605  // First fold any existing GEPs into the offset.
1606  while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1607  APInt GEPOffset(Offset.getBitWidth(), 0);
1608  if (!GEP->accumulateConstantOffset(DL, GEPOffset))
1609  break;
1610  Offset += GEPOffset;
1611  Ptr = GEP->getPointerOperand();
1612  if (!Visited.insert(Ptr).second)
1613  break;
1614  }
1615 
1616  // See if we can perform a natural GEP here.
1617  Indices.clear();
1618  if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
1619  Indices, NamePrefix)) {
1620  // If we have a new natural pointer at the offset, clear out any old
1621  // offset pointer we computed. Unless it is the base pointer or
1622  // a non-instruction, we built a GEP we don't need. Zap it.
1623  if (OffsetPtr && OffsetPtr != OffsetBasePtr)
1624  if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
1625  assert(I->use_empty() && "Built a GEP with uses somehow!");
1626  I->eraseFromParent();
1627  }
1628  OffsetPtr = P;
1629  OffsetBasePtr = Ptr;
1630  // If we also found a pointer of the right type, we're done.
1631  if (P->getType() == PointerTy)
1632  break;
1633  }
1634 
1635  // Stash this pointer if we've found an i8*.
1636  if (Ptr->getType()->isIntegerTy(8)) {
1637  Int8Ptr = Ptr;
1638  Int8PtrOffset = Offset;
1639  }
1640 
1641  // Peel off a layer of the pointer and update the offset appropriately.
1642  if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
1643  Ptr = cast<Operator>(Ptr)->getOperand(0);
1644  } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
1645  if (GA->isInterposable())
1646  break;
1647  Ptr = GA->getAliasee();
1648  } else {
1649  break;
1650  }
1651  assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
1652  } while (Visited.insert(Ptr).second);
1653 
1654  if (!OffsetPtr) {
1655  if (!Int8Ptr) {
1656  Int8Ptr = IRB.CreateBitCast(
1657  Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
1658  NamePrefix + "sroa_raw_cast");
1659  Int8PtrOffset = Offset;
1660  }
1661 
1662  OffsetPtr = Int8PtrOffset == 0
1663  ? Int8Ptr
1664  : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr,
1665  IRB.getInt(Int8PtrOffset),
1666  NamePrefix + "sroa_raw_idx");
1667  }
1668  Ptr = OffsetPtr;
1669 
1670  // On the off chance we were targeting i8*, guard the bitcast here.
1671  if (cast<PointerType>(Ptr->getType()) != TargetPtrTy) {
1672  Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr,
1673  TargetPtrTy,
1674  NamePrefix + "sroa_cast");
1675  }
1676 
1677  return Ptr;
1678 }
1679 
1680 /// Compute the adjusted alignment for a load or store from an offset.
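/// For example (illustrative numbers): a load with explicit alignment 8 that
/// sits 4 bytes into its slice yields MinAlign(8, 4) == 4, while a load with no
/// explicit alignment first falls back to the ABI alignment of its type.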
1681 static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset,
1682  const DataLayout &DL) {
1683  unsigned Alignment;
1684  Type *Ty;
1685  if (auto *LI = dyn_cast<LoadInst>(I)) {
1686  Alignment = LI->getAlignment();
1687  Ty = LI->getType();
1688  } else if (auto *SI = dyn_cast<StoreInst>(I)) {
1689  Alignment = SI->getAlignment();
1690  Ty = SI->getValueOperand()->getType();
1691  } else {
1692  llvm_unreachable("Only loads and stores are allowed!");
1693  }
1694 
1695  if (!Alignment)
1696  Alignment = DL.getABITypeAlignment(Ty);
1697 
1698  return MinAlign(Alignment, Offset);
1699 }
1700 
1701 /// Test whether we can convert a value from the old to the new type.
1702 ///
1703 /// This predicate should be used to guard calls to convertValue in order to
1704 /// ensure that we only try to convert viable values. The strategy is that we
1705 /// will peel off single element struct and array wrappings to get to an
1706 /// underlying value, and convert that value.
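/// A few illustrative cases (assuming a typical 64-bit DataLayout): i64 <->
/// double and <2 x i32> <-> i64 are convertible (equal bit sizes, single value
/// types); i8* <-> i64 is convertible when the pointer type is integral;
/// i32 -> i64 is not (mismatched integer widths); and i8* -> i8 addrspace(1)*
/// is not (different address spaces).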
1707 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
1708  if (OldTy == NewTy)
1709  return true;
1710 
1711  // For integer types, we can't handle any bit-width differences. This would
1712  // break both vector conversions with extension and introduce endianness
1713  // issues when in conjunction with loads and stores.
1714  if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) {
1715  assert(cast<IntegerType>(OldTy)->getBitWidth() !=
1716  cast<IntegerType>(NewTy)->getBitWidth() &&
1717  "We can't have the same bitwidth for different int types");
1718  return false;
1719  }
1720 
1721  if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
1722  return false;
1723  if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
1724  return false;
1725 
1726  // We can convert pointers to integers and vice-versa. Same for vectors
1727  // of pointers and integers.
1728  OldTy = OldTy->getScalarType();
1729  NewTy = NewTy->getScalarType();
1730  if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
1731  if (NewTy->isPointerTy() && OldTy->isPointerTy()) {
1732  return cast<PointerType>(NewTy)->getPointerAddressSpace() ==
1733  cast<PointerType>(OldTy)->getPointerAddressSpace();
1734  }
1735 
1736  // We can convert integers to integral pointers, but not to non-integral
1737  // pointers.
1738  if (OldTy->isIntegerTy())
1739  return !DL.isNonIntegralPointerType(NewTy);
1740 
1741  // We can convert integral pointers to integers, but non-integral pointers
1742  // need to remain pointers.
1743  if (!DL.isNonIntegralPointerType(OldTy))
1744  return NewTy->isIntegerTy();
1745 
1746  return false;
1747  }
1748 
1749  return true;
1750 }
1751 
1752 /// Generic routine to convert an SSA value to a value of a different
1753 /// type.
1754 ///
1755 /// This will try various different casting techniques, such as bitcasts,
1756 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
1757 /// two types for viability with this routine.
1758 static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
1759  Type *NewTy) {
1760  Type *OldTy = V->getType();
1761  assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");
1762 
1763  if (OldTy == NewTy)
1764  return V;
1765 
1766  assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
1767  "Integer types must be the exact same to convert.");
1768 
1769  // See if we need inttoptr for this type pair. A cast involving both scalars
1770  // and vectors requires an additional bitcast.
1771  if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
1772  // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
1773  if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1774  return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1775  NewTy);
1776 
1777  // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
1778  if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1779  return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1780  NewTy);
1781 
1782  return IRB.CreateIntToPtr(V, NewTy);
1783  }
1784 
1785  // See if we need ptrtoint for this type pair. A cast involving both scalars
1786  // and vectors requires an additional bitcast.
1787  if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
1788  // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
1789  if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1790  return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1791  NewTy);
1792 
1793  // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
1794  if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1795  return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1796  NewTy);
1797 
1798  return IRB.CreatePtrToInt(V, NewTy);
1799  }
1800 
1801  return IRB.CreateBitCast(V, NewTy);
1802 }
1803 
1804 /// Test whether the given slice use can be promoted to a vector.
1805 ///
1806 /// This function is called to test each entry in a partition which is slated
1807 /// for a single slice.
1808 static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
1809  VectorType *Ty,
1810  uint64_t ElementSize,
1811  const DataLayout &DL) {
1812  // First validate the slice offsets.
1813  uint64_t BeginOffset =
1814  std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
1815  uint64_t BeginIndex = BeginOffset / ElementSize;
1816  if (BeginIndex * ElementSize != BeginOffset ||
1817  BeginIndex >= Ty->getNumElements())
1818  return false;
1819  uint64_t EndOffset =
1820  std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
1821  uint64_t EndIndex = EndOffset / ElementSize;
1822  if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
1823  return false;
1824 
1825  assert(EndIndex > BeginIndex && "Empty vector!");
1826  uint64_t NumElements = EndIndex - BeginIndex;
1827  Type *SliceTy = (NumElements == 1)
1828  ? Ty->getElementType()
1829  : VectorType::get(Ty->getElementType(), NumElements);
1830 
1831  Type *SplitIntTy =
1832  Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);
1833 
1834  Use *U = S.getUse();
1835 
1836  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
1837  if (MI->isVolatile())
1838  return false;
1839  if (!S.isSplittable())
1840  return false; // Skip any unsplittable intrinsics.
1841  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
1842  if (!II->isLifetimeStartOrEnd())
1843  return false;
1844  } else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
1845  // Disable vector promotion when there are loads or stores of an FCA.
1846  return false;
1847  } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
1848  if (LI->isVolatile())
1849  return false;
1850  Type *LTy = LI->getType();
1851  if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
1852  assert(LTy->isIntegerTy());
1853  LTy = SplitIntTy;
1854  }
1855  if (!canConvertValue(DL, SliceTy, LTy))
1856  return false;
1857  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
1858  if (SI->isVolatile())
1859  return false;
1860  Type *STy = SI->getValueOperand()->getType();
1861  if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) {
1862  assert(STy->isIntegerTy());
1863  STy = SplitIntTy;
1864  }
1865  if (!canConvertValue(DL, STy, SliceTy))
1866  return false;
1867  } else {
1868  return false;
1869  }
1870 
1871  return true;
1872 }
1873 
1874 /// Test whether the given alloca partitioning and range of slices can be
1875 /// promoted to a vector.
1876 ///
1877 /// This is a quick test to check whether we can rewrite a particular alloca
1878 /// partition (and its newly formed alloca) into a vector alloca with only
1879 /// whole-vector loads and stores such that it could be promoted to a vector
1880 /// SSA value. We only can ensure this for a limited set of operations, and we
1881 /// don't want to do the rewrites unless we are confident that the result will
1882 /// be promotable, so we have an early test here.
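/// As an illustrative scenario: if the partition's full-size accesses are a
/// <4 x float> store and a <4 x i32> load, the element types disagree, so the
/// non-integer candidate is dropped and only <4 x i32> is checked; it is
/// returned only if every slice maps onto the whole vector or a single element.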
1883 static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
1884  // Collect the candidate types for vector-based promotion. Also track whether
1885  // we have different element types.
1886  SmallVector<VectorType *, 4> CandidateTys;
1887  Type *CommonEltTy = nullptr;
1888  bool HaveCommonEltTy = true;
1889  auto CheckCandidateType = [&](Type *Ty) {
1890  if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1891  CandidateTys.push_back(VTy);
1892  if (!CommonEltTy)
1893  CommonEltTy = VTy->getElementType();
1894  else if (CommonEltTy != VTy->getElementType())
1895  HaveCommonEltTy = false;
1896  }
1897  };
1898  // Consider any loads or stores that are the exact size of the slice.
1899  for (const Slice &S : P)
1900  if (S.beginOffset() == P.beginOffset() &&
1901  S.endOffset() == P.endOffset()) {
1902  if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser()))
1903  CheckCandidateType(LI->getType());
1904  else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
1905  CheckCandidateType(SI->getValueOperand()->getType());
1906  }
1907 
1908  // If we didn't find a vector type, nothing to do here.
1909  if (CandidateTys.empty())
1910  return nullptr;
1911 
1912  // Remove non-integer vector types if we had multiple common element types.
1913  // FIXME: It'd be nice to replace them with integer vector types, but we can't
1914  // do that until all the backends are known to produce good code for all
1915  // integer vector types.
1916  if (!HaveCommonEltTy) {
1917  CandidateTys.erase(
1918  llvm::remove_if(CandidateTys,
1919  [](VectorType *VTy) {
1920  return !VTy->getElementType()->isIntegerTy();
1921  }),
1922  CandidateTys.end());
1923 
1924  // If there were no integer vector types, give up.
1925  if (CandidateTys.empty())
1926  return nullptr;
1927 
1928  // Rank the remaining candidate vector types. This is easy because we know
1929  // they're all integer vectors. We sort by ascending number of elements.
1930  auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) {
1931  (void)DL;
1932  assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) &&
1933  "Cannot have vector types of different sizes!");
1934  assert(RHSTy->getElementType()->isIntegerTy() &&
1935  "All non-integer types eliminated!");
1936  assert(LHSTy->getElementType()->isIntegerTy() &&
1937  "All non-integer types eliminated!");
1938  return RHSTy->getNumElements() < LHSTy->getNumElements();
1939  };
1940  llvm::sort(CandidateTys, RankVectorTypes);
1941  CandidateTys.erase(
1942  std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
1943  CandidateTys.end());
1944  } else {
1945 // The only way to have the same element type in every vector type is to
1946 // have the same vector type. Check that and remove all but one.
1947 #ifndef NDEBUG
1948  for (VectorType *VTy : CandidateTys) {
1949  assert(VTy->getElementType() == CommonEltTy &&
1950  "Unaccounted for element type!");
1951  assert(VTy == CandidateTys[0] &&
1952  "Different vector types with the same element type!");
1953  }
1954 #endif
1955  CandidateTys.resize(1);
1956  }
1957 
1958  // Try each vector type, and return the one which works.
1959  auto CheckVectorTypeForPromotion = [&](VectorType *VTy) {
1960  uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType());
1961 
1962  // While the definition of LLVM vectors is bitpacked, we don't support sizes
1963  // that aren't byte sized.
1964  if (ElementSize % 8)
1965  return false;
1966  assert((DL.getTypeSizeInBits(VTy) % 8) == 0 &&
1967  "vector size not a multiple of element size?");
1968  ElementSize /= 8;
1969 
1970  for (const Slice &S : P)
1971  if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL))
1972  return false;
1973 
1974  for (const Slice *S : P.splitSliceTails())
1975  if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL))
1976  return false;
1977 
1978  return true;
1979  };
1980  for (VectorType *VTy : CandidateTys)
1981  if (CheckVectorTypeForPromotion(VTy))
1982  return VTy;
1983 
1984  return nullptr;
1985 }
1986 
1987 /// Test whether a slice of an alloca is valid for integer widening.
1988 ///
1989 /// This implements the necessary checking for the \c isIntegerWideningViable
1990 /// test below on a single slice of the alloca.
1991 static bool isIntegerWideningViableForSlice(const Slice &S,
1992  uint64_t AllocBeginOffset,
1993  Type *AllocaTy,
1994  const DataLayout &DL,
1995  bool &WholeAllocaOp) {
1996  uint64_t Size = DL.getTypeStoreSize(AllocaTy);
1997 
1998  uint64_t RelBegin = S.beginOffset() - AllocBeginOffset;
1999  uint64_t RelEnd = S.endOffset() - AllocBeginOffset;
2000 
2001  // We can't reasonably handle cases where the load or store extends past
2002  // the end of the alloca's type and into its padding.
2003  if (RelEnd > Size)
2004  return false;
2005 
2006  Use *U = S.getUse();
2007 
2008  if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
2009  if (LI->isVolatile())
2010  return false;
2011  // We can't handle loads that extend past the allocated memory.
2012  if (DL.getTypeStoreSize(LI->getType()) > Size)
2013  return false;
2014  // So far, AllocaSliceRewriter does not support widening split slice tails
2015  // in rewriteIntegerLoad.
2016  if (S.beginOffset() < AllocBeginOffset)
2017  return false;
2018  // Note that we don't count vector loads or stores as whole-alloca
2019  // operations which enable integer widening because we would prefer to use
2020  // vector widening instead.
2021  if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size)
2022  WholeAllocaOp = true;
2023  if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
2024  if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
2025  return false;
2026  } else if (RelBegin != 0 || RelEnd != Size ||
2027  !canConvertValue(DL, AllocaTy, LI->getType())) {
2028  // Non-integer loads need to be convertible from the alloca type so that
2029  // they are promotable.
2030  return false;
2031  }
2032  } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
2033  Type *ValueTy = SI->getValueOperand()->getType();
2034  if (SI->isVolatile())
2035  return false;
2036  // We can't handle stores that extend past the allocated memory.
2037  if (DL.getTypeStoreSize(ValueTy) > Size)
2038  return false;
2039  // So far, AllocaSliceRewriter does not support widening split slice tails
2040  // in rewriteIntegerStore.
2041  if (S.beginOffset() < AllocBeginOffset)
2042  return false;
2043  // Note that we don't count vector loads or stores as whole-alloca
2044  // operations which enable integer widening because we would prefer to use
2045  // vector widening instead.
2046  if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size)
2047  WholeAllocaOp = true;
2048  if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
2049  if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
2050  return false;
2051  } else if (RelBegin != 0 || RelEnd != Size ||
2052  !canConvertValue(DL, ValueTy, AllocaTy)) {
2053  // Non-integer stores need to be convertible to the alloca type so that
2054  // they are promotable.
2055  return false;
2056  }
2057  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
2058  if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
2059  return false;
2060  if (!S.isSplittable())
2061  return false; // Skip any unsplittable intrinsics.
2062  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
2063  if (!II->isLifetimeStartOrEnd())
2064  return false;
2065  } else {
2066  return false;
2067  }
2068 
2069  return true;
2070 }
2071 
2072 /// Test whether the given alloca partition's integer operations can be
2073 /// widened to promotable ones.
2074 ///
2075 /// This is a quick test to check whether we can rewrite the integer loads and
2076 /// stores to a particular alloca into wider loads and stores and be able to
2077 /// promote the resulting alloca.
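/// For instance (hypothetical): a 32-bit alloca of type float with an i32 load
/// covering the whole alloca and an i16 store at offset 0 is viable, because
/// float and i32 are mutually convertible and the covering load provides the
/// required whole-alloca operation; the narrow store can then be rewritten as
/// an i32 load/mask/store sequence.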
2078 static bool isIntegerWideningViable(Partition &P, Type *AllocaTy,
2079  const DataLayout &DL) {
2080  uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
2081  // Don't create integer types larger than the maximum bitwidth.
2082  if (SizeInBits > IntegerType::MAX_INT_BITS)
2083  return false;
2084 
2085  // Don't try to handle allocas with bit-padding.
2086  if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy))
2087  return false;
2088 
2089  // We need to ensure that an integer type with the appropriate bitwidth can
2090  // be converted to the alloca type, whatever that is. We don't want to force
2091  // the alloca itself to have an integer type if there is a more suitable one.
2092  Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
2093  if (!canConvertValue(DL, AllocaTy, IntTy) ||
2094  !canConvertValue(DL, IntTy, AllocaTy))
2095  return false;
2096 
2097  // While examining uses, we ensure that the alloca has a covering load or
2098  // store. We don't want to widen the integer operations only to fail to
2099  // promote due to some other unsplittable entry (which we may make splittable
2100  // later). However, if there are only splittable uses, go ahead and assume
2101  // that we cover the alloca.
2102  // FIXME: We shouldn't consider split slices that happen to start in the
2103  // partition here...
2104  bool WholeAllocaOp =
2105  P.begin() != P.end() ? false : DL.isLegalInteger(SizeInBits);
2106 
2107  for (const Slice &S : P)
2108  if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL,
2109  WholeAllocaOp))
2110  return false;
2111 
2112  for (const Slice *S : P.splitSliceTails())
2113  if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL,
2114  WholeAllocaOp))
2115  return false;
2116 
2117  return WholeAllocaOp;
2118 }
2119 
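// Worked example for extractInteger below (illustrative): pulling an i16 out of
// an i64 at byte Offset 2 uses ShAmt == 16 on a little-endian target (lshr then
// trunc), whereas a big-endian target uses ShAmt == 8 * (8 - 2 - 2) == 32.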
2120 static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
2121  IntegerType *Ty, uint64_t Offset,
2122  const Twine &Name) {
2123  LLVM_DEBUG(dbgs() << " start: " << *V << "\n");
2124  IntegerType *IntTy = cast<IntegerType>(V->getType());
2125  assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
2126  "Element extends past full value");
2127  uint64_t ShAmt = 8 * Offset;
2128  if (DL.isBigEndian())
2129  ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
2130  if (ShAmt) {
2131  V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
2132  LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n");
2133  }
2134  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
2135  "Cannot extract to a larger integer!");
2136  if (Ty != IntTy) {
2137  V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
2138  LLVM_DEBUG(dbgs() << " trunced: " << *V << "\n");
2139  }
2140  return V;
2141 }
2142 
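// Worked example for insertInteger below (illustrative): inserting an i8 into an
// i32 at byte Offset 1 on a little-endian target uses ShAmt == 8 and
// Mask == ~(0xFF << 8) == 0xFFFF00FF, so the result is
// (Old & 0xFFFF00FF) | (zext(V) << 8).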
2143 static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
2144  Value *V, uint64_t Offset, const Twine &Name) {
2145  IntegerType *IntTy = cast<IntegerType>(Old->getType());
2146  IntegerType *Ty = cast<IntegerType>(V->getType());
2147  assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
2148  "Cannot insert a larger integer!");
2149  LLVM_DEBUG(dbgs() << " start: " << *V << "\n");
2150  if (Ty != IntTy) {
2151  V = IRB.CreateZExt(V, IntTy, Name + ".ext");
2152  LLVM_DEBUG(dbgs() << " extended: " << *V << "\n");
2153  }
2154  assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
2155  "Element store outside of alloca store");
2156  uint64_t ShAmt = 8 * Offset;
2157  if (DL.isBigEndian())
2158  ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
2159  if (ShAmt) {
2160  V = IRB.CreateShl(V, ShAmt, Name + ".shift");
2161  LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n");
2162  }
2163 
2164  if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
2165  APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
2166  Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
2167  LLVM_DEBUG(dbgs() << " masked: " << *Old << "\n");
2168  V = IRB.CreateOr(Old, V, Name + ".insert");
2169  LLVM_DEBUG(dbgs() << " inserted: " << *V << "\n");
2170  }
2171  return V;
2172 }
2173 
2174 static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
2175  unsigned EndIndex, const Twine &Name) {
2176  VectorType *VecTy = cast<VectorType>(V->getType());
2177  unsigned NumElements = EndIndex - BeginIndex;
2178  assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2179 
2180  if (NumElements == VecTy->getNumElements())
2181  return V;
2182 
2183  if (NumElements == 1) {
2184  V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
2185  Name + ".extract");
2186  LLVM_DEBUG(dbgs() << " extract: " << *V << "\n");
2187  return V;
2188  }
2189 
2190  SmallVector<Constant *, 8> Mask;
2191  Mask.reserve(NumElements);
2192  for (unsigned i = BeginIndex; i != EndIndex; ++i)
2193  Mask.push_back(IRB.getInt32(i));
2194  V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
2195  ConstantVector::get(Mask), Name + ".extract");
2196  LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
2197  return V;
2198 }
2199 
2200 static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
2201  unsigned BeginIndex, const Twine &Name) {
2202  VectorType *VecTy = cast<VectorType>(Old->getType());
2203  assert(VecTy && "Can only insert a vector into a vector");
2204 
2205  VectorType *Ty = dyn_cast<VectorType>(V->getType());
2206  if (!Ty) {
2207  // Single element to insert.
2208  V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
2209  Name + ".insert");
2210  LLVM_DEBUG(dbgs() << " insert: " << *V << "\n");
2211  return V;
2212  }
2213 
2214  assert(Ty->getNumElements() <= VecTy->getNumElements() &&
2215  "Too many elements!");
2216  if (Ty->getNumElements() == VecTy->getNumElements()) {
2217  assert(V->getType() == VecTy && "Vector type mismatch");
2218  return V;
2219  }
2220  unsigned EndIndex = BeginIndex + Ty->getNumElements();
2221 
2222  // When inserting a smaller vector into the larger to store, we first
2223  // use a shuffle vector to widen it with undef elements, and then
2224  // a select instruction to choose between the loaded vector and the
2225  // incoming vector.
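  // For example (illustrative): inserting a <2 x i32> at BeginIndex == 1 into a
  // <4 x i32> first widens with shuffle mask <undef, 0, 1, undef> and then
  // selects with mask <0, 1, 1, 0>, taking lanes 1 and 2 from the new value and
  // lanes 0 and 3 from Old.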
2226  SmallVector<Constant *, 8> Mask;
2227  Mask.reserve(VecTy->getNumElements());
2228  for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
2229  if (i >= BeginIndex && i < EndIndex)
2230  Mask.push_back(IRB.getInt32(i - BeginIndex));
2231  else
2232  Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
2233  V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
2234  ConstantVector::get(Mask), Name + ".expand");
2235  LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
2236 
2237  Mask.clear();
2238  for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
2239  Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
2240 
2241  V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend");
2242 
2243  LLVM_DEBUG(dbgs() << " blend: " << *V << "\n");
2244  return V;
2245 }
2246 
2247 /// Visitor to rewrite instructions using a particular slice of an alloca
2248 /// to use a new alloca.
2249 ///
2250 /// Also implements the rewriting to vector-based accesses when the partition
2251 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic
2252 /// lives here.
2253 class llvm::sroa::AllocaSliceRewriter
2254  : public InstVisitor<AllocaSliceRewriter, bool> {
2255  // Befriend the base class so it can delegate to private visit methods.
2257  friend class InstVisitor<AllocaSliceRewriter, bool>;
2259  using Base = InstVisitor<AllocaSliceRewriter, bool>;
2260  const DataLayout &DL;
2261  AllocaSlices &AS;
2262  SROA &Pass;
2263  AllocaInst &OldAI, &NewAI;
2264  const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
2265  Type *NewAllocaTy;
2266 
2267  // This is a convenience and flag variable that will be null unless the new
2268  // alloca's integer operations should be widened to this integer type due to
2269  // passing isIntegerWideningViable above. If it is non-null, the desired
2270  // integer type will be stored here for easy access during rewriting.
2271  IntegerType *IntTy;
2272 
2273  // If we are rewriting an alloca partition which can be written as pure
2274  // vector operations, we stash extra information here. When VecTy is
2275  // non-null, we have some strict guarantees about the rewritten alloca:
2276  // - The new alloca is exactly the size of the vector type here.
2277  // - The accesses all either map to the entire vector or to a single
2278  // element.
2279  // - The set of accessing instructions is only one of those handled above
2280  // in isVectorPromotionViable. Generally these are the same access kinds
2281  // which are promotable via mem2reg.
2282  VectorType *VecTy;
2283  Type *ElementTy;
2284  uint64_t ElementSize;
2285 
2286  // The original offset of the slice currently being rewritten relative to
2287  // the original alloca.
2288  uint64_t BeginOffset = 0;
2289  uint64_t EndOffset = 0;
2290 
2291  // The new offsets of the slice currently being rewritten relative to the
2292  // original alloca.
2293  uint64_t NewBeginOffset, NewEndOffset;
2294 
2295  uint64_t SliceSize;
2296  bool IsSplittable = false;
2297  bool IsSplit = false;
2298  Use *OldUse = nullptr;
2299  Instruction *OldPtr = nullptr;
2300 
2301  // Track post-rewrite users which are PHI nodes and Selects.
2302  SmallSetVector<PHINode *, 8> &PHIUsers;
2303  SmallSetVector<SelectInst *, 8> &SelectUsers;
2304 
2305  // Utility IR builder, whose name prefix is setup for each visited use, and
2306  // the insertion point is set to point to the user.
2307  IRBuilderTy IRB;
2308 
2309 public:
2310  AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass,
2311  AllocaInst &OldAI, AllocaInst &NewAI,
2312  uint64_t NewAllocaBeginOffset,
2313  uint64_t NewAllocaEndOffset, bool IsIntegerPromotable,
2314  VectorType *PromotableVecTy,
2315  SmallSetVector<PHINode *, 8> &PHIUsers,
2316  SmallSetVector<SelectInst *, 8> &SelectUsers)
2317  : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
2318  NewAllocaBeginOffset(NewAllocaBeginOffset),
2319  NewAllocaEndOffset(NewAllocaEndOffset),
2320  NewAllocaTy(NewAI.getAllocatedType()),
2321  IntTy(IsIntegerPromotable
2322  ? Type::getIntNTy(
2323  NewAI.getContext(),
2324  DL.getTypeSizeInBits(NewAI.getAllocatedType()))
2325  : nullptr),
2326  VecTy(PromotableVecTy),
2327  ElementTy(VecTy ? VecTy->getElementType() : nullptr),
2328  ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
2329  PHIUsers(PHIUsers), SelectUsers(SelectUsers),
2330  IRB(NewAI.getContext(), ConstantFolder()) {
2331  if (VecTy) {
2332  assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
2333  "Only multiple-of-8 sized vector elements are viable");
2334  ++NumVectorized;
2335  }
2336  assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy));
2337  }
2338 
2339  bool visit(AllocaSlices::const_iterator I) {
2340  bool CanSROA = true;
2341  BeginOffset = I->beginOffset();
2342  EndOffset = I->endOffset();
2343  IsSplittable = I->isSplittable();
2344  IsSplit =
2345  BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
2346  LLVM_DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : ""));
2347  LLVM_DEBUG(AS.printSlice(dbgs(), I, ""));
2348  LLVM_DEBUG(dbgs() << "\n");
2349 
2350  // Compute the intersecting offset range.
2351  assert(BeginOffset < NewAllocaEndOffset);
2352  assert(EndOffset > NewAllocaBeginOffset);
2353  NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
2354  NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
2355 
2356  SliceSize = NewEndOffset - NewBeginOffset;
2357 
2358  OldUse = I->getUse();
2359  OldPtr = cast<Instruction>(OldUse->get());
2360 
2361  Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
2362  IRB.SetInsertPoint(OldUserI);
2363  IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
2364  IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");
2365 
2366  CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
2367  if (VecTy || IntTy)
2368  assert(CanSROA);
2369  return CanSROA;
2370  }
2371 
2372 private:
2373  // Make sure the other visit overloads are visible.
2374  using Base::visit;
2375 
2376  // Every instruction which can end up as a user must have a rewrite rule.
2377  bool visitInstruction(Instruction &I) {
2378  LLVM_DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
2379  llvm_unreachable("No rewrite rule for this instruction!");
2380  }
2381 
2382  Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
2383  // Note that the offset computation can use BeginOffset or NewBeginOffset
2384  // interchangeably for unsplit slices.
2385  assert(IsSplit || BeginOffset == NewBeginOffset);
2386  uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2387 
2388 #ifndef NDEBUG
2389  StringRef OldName = OldPtr->getName();
2390  // Skip through the last '.sroa.' component of the name.
2391  size_t LastSROAPrefix = OldName.rfind(".sroa.");
2392  if (LastSROAPrefix != StringRef::npos) {
2393  OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
2394  // Look for an SROA slice index.
2395  size_t IndexEnd = OldName.find_first_not_of("0123456789");
2396  if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
2397  // Strip the index and look for the offset.
2398  OldName = OldName.substr(IndexEnd + 1);
2399  size_t OffsetEnd = OldName.find_first_not_of("0123456789");
2400  if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
2401  // Strip the offset.
2402  OldName = OldName.substr(OffsetEnd + 1);
2403  }
2404  }
2405  // Strip any SROA suffixes as well.
2406  OldName = OldName.substr(0, OldName.find(".sroa_"));
2407 #endif
2408 
2409  return getAdjustedPtr(IRB, DL, &NewAI,
2410  APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset),
2411  PointerTy,
2412 #ifndef NDEBUG
2413  Twine(OldName) + "."
2414 #else
2415  Twine()
2416 #endif
2417  );
2418  }
2419 
2420  /// Compute suitable alignment to access this slice of the *new*
2421  /// alloca.
2422  ///
2423  /// You can optionally pass a type to this routine and if that type's ABI
2424  /// alignment is itself suitable, this will return zero.
2425  unsigned getSliceAlign(Type *Ty = nullptr) {
2426  unsigned NewAIAlign = NewAI.getAlignment();
2427  if (!NewAIAlign)
2428  NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
2429  unsigned Align =
2430  MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
2431  return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align;
2432  }
2433 
2434  unsigned getIndex(uint64_t Offset) {
2435  assert(VecTy && "Can only call getIndex when rewriting a vector");
2436  uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2437  assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
2438  uint32_t Index = RelOffset / ElementSize;
2439  assert(Index * ElementSize == RelOffset);
2440  return Index;
2441  }
2442 
2443  void deleteIfTriviallyDead(Value *V) {
2444  Instruction *I = cast<Instruction>(V);
2445  if (isInstructionTriviallyDead(I))
2446  Pass.DeadInsts.insert(I);
2447  }
2448 
2449  Value *rewriteVectorizedLoadInst() {
2450  unsigned BeginIndex = getIndex(NewBeginOffset);
2451  unsigned EndIndex = getIndex(NewEndOffset);
2452  assert(EndIndex > BeginIndex && "Empty vector!");
2453 
2454  Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2455  NewAI.getAlignment(), "load");
2456  return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
2457  }
2458 
2459  Value *rewriteIntegerLoad(LoadInst &LI) {
2460  assert(IntTy && "We cannot insert an integer to the alloca");
2461  assert(!LI.isVolatile());
2462  Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2463  NewAI.getAlignment(), "load");
2464  V = convertValue(DL, IRB, V, IntTy);
2465  assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2466  uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2467  if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) {
2468  IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8);
2469  V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract");
2470  }
2471  // It is possible that the extracted type is not the load type. This
2472  // happens if there is a load past the end of the alloca, and as
2473  // a consequence the slice is narrower but still a candidate for integer
2474  // lowering. To handle this case, we just zero extend the extracted
2475  // integer.
2476  assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 &&
2477  "Can only handle an extract for an overly wide load");
2478  if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8)
2479  V = IRB.CreateZExt(V, LI.getType());
2480  return V;
2481  }
2482 
2483  bool visitLoadInst(LoadInst &LI) {
2484  LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
2485  Value *OldOp = LI.getOperand(0);
2486  assert(OldOp == OldPtr);
2487 
2488  AAMDNodes AATags;
2489  LI.getAAMetadata(AATags);
2490 
2491  unsigned AS = LI.getPointerAddressSpace();
2492 
2493  Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
2494  : LI.getType();
2495  const bool IsLoadPastEnd = DL.getTypeStoreSize(TargetTy) > SliceSize;
2496  bool IsPtrAdjusted = false;
2497  Value *V;
2498  if (VecTy) {
2499  V = rewriteVectorizedLoadInst();
2500  } else if (IntTy && LI.getType()->isIntegerTy()) {
2501  V = rewriteIntegerLoad(LI);
2502  } else if (NewBeginOffset == NewAllocaBeginOffset &&
2503  NewEndOffset == NewAllocaEndOffset &&
2504  (canConvertValue(DL, NewAllocaTy, TargetTy) ||
2505  (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
2506  TargetTy->isIntegerTy()))) {
2507  LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2508  NewAI.getAlignment(),
2509  LI.isVolatile(), LI.getName());
2510  if (AATags)
2511  NewLI->setAAMetadata(AATags);
2512  if (LI.isVolatile())
2513  NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2514 
2515  // Any !nonnull metadata or !range metadata on the old load is also valid
2516  // on the new load. This is true in some cases even when the loads
2517  // are different types, for example by mapping !nonnull metadata to
2518  // !range metadata by modeling the null pointer constant converted to the
2519  // integer type.
2520  // FIXME: Add support for range metadata here. Currently the utilities
2521  // for this don't propagate range metadata in trivial cases from one
2522  // integer load to another, don't handle non-addrspace-0 null pointers
2523  // correctly, and don't have any support for mapping ranges as the
2524  // integer type becomes wider or narrower.
2525  if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
2526  copyNonnullMetadata(LI, N, *NewLI);
2527 
2528  // Try to preserve nonnull metadata
2529  V = NewLI;
2530 
2531  // If this is an integer load past the end of the slice (which means the
2532  // bytes outside the slice are undef or this load is dead) just forcibly
2533  // fix the integer size with correct handling of endianness.
2534  if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2535  if (auto *TITy = dyn_cast<IntegerType>(TargetTy))
2536  if (AITy->getBitWidth() < TITy->getBitWidth()) {
2537  V = IRB.CreateZExt(V, TITy, "load.ext");
2538  if (DL.isBigEndian())
2539  V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(),
2540  "endian_shift");
2541  }
2542  } else {
2543  Type *LTy = TargetTy->getPointerTo(AS);
2544  LoadInst *NewLI = IRB.CreateAlignedLoad(
2545  TargetTy, getNewAllocaSlicePtr(IRB, LTy), getSliceAlign(TargetTy),
2546  LI.isVolatile(), LI.getName());
2547  if (AATags)
2548  NewLI->setAAMetadata(AATags);
2549  if (LI.isVolatile())
2550  NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2551 
2552  V = NewLI;
2553  IsPtrAdjusted = true;
2554  }
2555  V = convertValue(DL, IRB, V, TargetTy);
2556 
2557  if (IsSplit) {
2558  assert(!LI.isVolatile());
2559  assert(LI.getType()->isIntegerTy() &&
2560  "Only integer type loads and stores are split");
2561  assert(SliceSize < DL.getTypeStoreSize(LI.getType()) &&
2562  "Split load isn't smaller than original load");
2564  "Non-byte-multiple bit width");
2565  // Move the insertion point just past the load so that we can refer to it.
2566  IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI)));
2567  // Create a placeholder value with the same type as LI to use as the
2568  // basis for the new value. This allows us to replace the uses of LI with
2569  // the computed value, and then replace the placeholder with LI, leaving
2570  // LI only used for this computation.
2571  Value *Placeholder = new LoadInst(
2572  LI.getType(), UndefValue::get(LI.getType()->getPointerTo(AS)));
2573  V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
2574  "insert");
2575  LI.replaceAllUsesWith(V);
2576  Placeholder->replaceAllUsesWith(&LI);
2577  Placeholder->deleteValue();
2578  } else {
2579  LI.replaceAllUsesWith(V);
2580  }
2581 
2582  Pass.DeadInsts.insert(&LI);
2583  deleteIfTriviallyDead(OldOp);
2584  LLVM_DEBUG(dbgs() << " to: " << *V << "\n");
2585  return !LI.isVolatile() && !IsPtrAdjusted;
2586  }
2587 
2588  bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
2589  AAMDNodes AATags) {
2590  if (V->getType() != VecTy) {
2591  unsigned BeginIndex = getIndex(NewBeginOffset);
2592  unsigned EndIndex = getIndex(NewEndOffset);
2593  assert(EndIndex > BeginIndex && "Empty vector!");
2594  unsigned NumElements = EndIndex - BeginIndex;
2595  assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2596  Type *SliceTy = (NumElements == 1)
2597  ? ElementTy
2598  : VectorType::get(ElementTy, NumElements);
2599  if (V->getType() != SliceTy)
2600  V = convertValue(DL, IRB, V, SliceTy);
2601 
2602  // Mix in the existing elements.
2603  Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2604  NewAI.getAlignment(), "load");
2605  V = insertVector(IRB, Old, V, BeginIndex, "vec");
2606  }
2607  StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
2608  if (AATags)
2609  Store->setAAMetadata(AATags);
2610  Pass.DeadInsts.insert(&SI);
2611 
2612  LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
2613  return true;
2614  }
2615 
2616  bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) {
2617  assert(IntTy && "We cannot extract an integer from the alloca");
2618  assert(!SI.isVolatile());
2619  if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
2620  Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2621  NewAI.getAlignment(), "oldload");
2622  Old = convertValue(DL, IRB, Old, IntTy);
2623  assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
2624  uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
2625  V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert");
2626  }
2627  V = convertValue(DL, IRB, V, NewAllocaTy);
2628  StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
2629  Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
2630  LLVMContext::MD_access_group});
2631  if (AATags)
2632  Store->setAAMetadata(AATags);
2633  Pass.DeadInsts.insert(&SI);
2634  LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
2635  return true;
2636  }
2637 
2638  bool visitStoreInst(StoreInst &SI) {
2639  LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
2640  Value *OldOp = SI.getOperand(1);
2641  assert(OldOp == OldPtr);
2642 
2643  AAMDNodes AATags;
2644  SI.getAAMetadata(AATags);
2645 
2646  Value *V = SI.getValueOperand();
2647 
2648  // Strip all inbounds GEPs and pointer casts to try to dig out any root
2649  // alloca that should be re-examined after promoting this alloca.
2650  if (V->getType()->isPointerTy())
2651  if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
2652  Pass.PostPromotionWorklist.insert(AI);
2653 
2654  if (SliceSize < DL.getTypeStoreSize(V->getType())) {
2655  assert(!SI.isVolatile());
2656  assert(V->getType()->isIntegerTy() &&
2657  "Only integer type loads and stores are split");
2659  "Non-byte-multiple bit width");
2660  IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
2661  V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset,
2662  "extract");
2663  }
2664 
2665  if (VecTy)
2666  return rewriteVectorizedStoreInst(V, SI, OldOp, AATags);
2667  if (IntTy && V->getType()->isIntegerTy())
2668  return rewriteIntegerStore(V, SI, AATags);
2669 
2670  const bool IsStorePastEnd = DL.getTypeStoreSize(V->getType()) > SliceSize;
2671  StoreInst *NewSI;
2672  if (NewBeginOffset == NewAllocaBeginOffset &&
2673  NewEndOffset == NewAllocaEndOffset &&
2674  (canConvertValue(DL, V->getType(), NewAllocaTy) ||
2675  (IsStorePastEnd && NewAllocaTy->isIntegerTy() &&
2676  V->getType()->isIntegerTy()))) {
2677  // If this is an integer store past the end of the slice (and thus the bytes
2678  // past that point are irrelevant or this is unreachable), truncate the
2679  // value prior to storing.
2680  if (auto *VITy = dyn_cast<IntegerType>(V->getType()))
2681  if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy))
2682  if (VITy->getBitWidth() > AITy->getBitWidth()) {
2683  if (DL.isBigEndian())
2684  V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(),
2685  "endian_shift");
2686  V = IRB.CreateTrunc(V, AITy, "load.trunc");
2687  }
2688 
2689  V = convertValue(DL, IRB, V, NewAllocaTy);
2690  NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2691  SI.isVolatile());
2692  } else {
2693  unsigned AS = SI.getPointerAddressSpace();
2694  Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS));
2695  NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
2696  SI.isVolatile());
2697  }
2698  NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
2699  LLVMContext::MD_access_group});
2700  if (AATags)
2701  NewSI->setAAMetadata(AATags);
2702  if (SI.isVolatile())
2703  NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
2704  Pass.DeadInsts.insert(&SI);
2705  deleteIfTriviallyDead(OldOp);
2706 
2707  LLVM_DEBUG(dbgs() << " to: " << *NewSI << "\n");
2708  return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
2709  }
2710 
2711  /// Compute an integer value from splatting an i8 across the given
2712  /// number of bytes.
2713  ///
2714  /// Note that this routine assumes an i8 is a byte. If that isn't true, don't
2715  /// call this routine.
2716  /// FIXME: Heed the advice above.
2717  ///
2718  /// \param V The i8 value to splat.
2719  /// \param Size The number of bytes in the output (assuming i8 is one byte)
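  /// For example (illustrative): splatting the byte 0xAB across Size == 4 uses
  /// the multiplier 0xFFFFFFFF udiv 0xFF == 0x01010101, producing
  /// 0xAB * 0x01010101 == 0xABABABAB.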
2720  Value *getIntegerSplat(Value *V, unsigned Size) {
2721  assert(Size > 0 && "Expected a positive number of bytes.");
2722  IntegerType *VTy = cast<IntegerType>(V->getType());
2723  assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
2724  if (Size == 1)
2725  return V;
2726 
2727  Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8);
2728  V = IRB.CreateMul(
2729  IRB.CreateZExt(V, SplatIntTy, "zext"),
2730  ConstantExpr::getUDiv(
2731  Constant::getAllOnesValue(SplatIntTy),
2732  ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()),
2733  SplatIntTy)),
2734  "isplat");
2735  return V;
2736  }
2737 
2738  /// Compute a vector splat for a given element value.
2739  Value *getVectorSplat(Value *V, unsigned NumElements) {
2740  V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
2741  LLVM_DEBUG(dbgs() << " splat: " << *V << "\n");
2742  return V;
2743  }
2744 
2745  bool visitMemSetInst(MemSetInst &II) {
2746  LLVM_DEBUG(dbgs() << " original: " << II << "\n");
2747  assert(II.getRawDest() == OldPtr);
2748 
2749  AAMDNodes AATags;
2750  II.getAAMetadata(AATags);
2751 
2752  // If the memset has a variable size, it cannot be split, just adjust the
2753  // pointer to the new alloca.
2754  if (!isa<Constant>(II.getLength())) {
2755  assert(!IsSplit);
2756  assert(NewBeginOffset == BeginOffset);
2757  II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
2758  II.setDestAlignment(getSliceAlign());
2759 
2760  deleteIfTriviallyDead(OldPtr);
2761  return false;
2762  }
2763 
2764  // Record this instruction for deletion.
2765  Pass.DeadInsts.insert(&II);
2766 
2767  Type *AllocaTy = NewAI.getAllocatedType();
2768  Type *ScalarTy = AllocaTy->getScalarType();
2769 
2770  const bool CanContinue = [&]() {
2771  if (VecTy || IntTy)
2772  return true;
2773  if (BeginOffset > NewAllocaBeginOffset ||
2774  EndOffset < NewAllocaEndOffset)
2775  return false;
2776  auto *C = cast<ConstantInt>(II.getLength());
2777  if (C->getBitWidth() > 64)
2778  return false;
2779  const auto Len = C->getZExtValue();
2780  auto *Int8Ty = IntegerType::getInt8Ty(NewAI.getContext());
2781  auto *SrcTy = VectorType::get(Int8Ty, Len);
2782  return canConvertValue(DL, SrcTy, AllocaTy) &&
2783  DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy));
2784  }();
2785 
2786  // If this doesn't map cleanly onto the alloca type, and that type isn't
2787  // a single value type, just emit a memset.
2788  if (!CanContinue) {
2789  Type *SizeTy = II.getLength()->getType();
2790  Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2791  CallInst *New = IRB.CreateMemSet(
2792  getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
2793  getSliceAlign(), II.isVolatile());
2794  if (AATags)
2795  New->setAAMetadata(AATags);
2796  LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
2797  return false;
2798  }
2799 
2800  // If we can represent this as a simple value, we have to build the actual
2801  // value to store, which requires expanding the byte present in memset to
2802  // a sensible representation for the alloca type. This is essentially
2803  // splatting the byte to a sufficiently wide integer, splatting it across
2804  // any desired vector width, and bitcasting to the final type.
2805  Value *V;
2806 
2807  if (VecTy) {
2808  // If this is a memset of a vectorized alloca, insert it.
2809  assert(ElementTy == ScalarTy);
2810 
2811  unsigned BeginIndex = getIndex(NewBeginOffset);
2812  unsigned EndIndex = getIndex(NewEndOffset);
2813  assert(EndIndex > BeginIndex && "Empty vector!");
2814  unsigned NumElements = EndIndex - BeginIndex;
2815  assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2816 
2817  Value *Splat =
2818  getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
2819  Splat = convertValue(DL, IRB, Splat, ElementTy);
2820  if (NumElements > 1)
2821  Splat = getVectorSplat(Splat, NumElements);
2822 
2823  Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2824  NewAI.getAlignment(), "oldload");
2825  V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
2826  } else if (IntTy) {
2827  // If this is a memset on an alloca where we can widen stores, insert the
2828  // set integer.
2829  assert(!II.isVolatile());
2830 
2831  uint64_t Size = NewEndOffset - NewBeginOffset;
2832  V = getIntegerSplat(II.getValue(), Size);
2833 
2834  if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
2835  EndOffset != NewAllocaBeginOffset)) {
2836  Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2837  NewAI.getAlignment(), "oldload");
2838  Old = convertValue(DL, IRB, Old, IntTy);
2839  uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2840  V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2841  } else {
2842  assert(V->getType() == IntTy &&
2843  "Wrong type for an alloca wide integer!");
2844  }
2845  V = convertValue(DL, IRB, V, AllocaTy);
2846  } else {
2847  // Established these invariants above.
2848  assert(NewBeginOffset == NewAllocaBeginOffset);
2849  assert(NewEndOffset == NewAllocaEndOffset);
2850 
2851  V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
2852  if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
2853  V = getVectorSplat(V, AllocaVecTy->getNumElements());
2854 
2855  V = convertValue(DL, IRB, V, AllocaTy);
2856  }
2857 
2858  StoreInst *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2859  II.isVolatile());
2860  if (AATags)
2861  New->setAAMetadata(AATags);
2862  LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
2863  return !II.isVolatile();
2864  }
2865 
2866  bool visitMemTransferInst(MemTransferInst &II) {
2867  // Rewriting of memory transfer instructions can be a bit tricky. We break
2868  // them into two categories: split intrinsics and unsplit intrinsics.
2869 
2870  LLVM_DEBUG(dbgs() << " original: " << II << "\n");
2871 
2872  AAMDNodes AATags;
2873  II.getAAMetadata(AATags);
2874 
2875  bool IsDest = &II.getRawDestUse() == OldUse;
2876  assert((IsDest && II.getRawDest() == OldPtr) ||
2877  (!IsDest && II.getRawSource() == OldPtr));
2878 
2879  unsigned SliceAlign = getSliceAlign();
2880 
2881  // For unsplit intrinsics, we simply modify the source and destination
2882  // pointers in place. This isn't just an optimization, it is a matter of
2883  // correctness. With unsplit intrinsics we may be dealing with transfers
2884  // within a single alloca before SROA ran, or with transfers that have
2885  // a variable length. We may also be dealing with memmove instead of
2886  // memcpy, and so simply updating the pointers is all that is necessary to
2887  // update both source and dest of a single call.
2888  if (!IsSplittable) {
2889  Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2890  if (IsDest) {
2891  II.setDest(AdjustedPtr);
2892  II.setDestAlignment(SliceAlign);
2893  }
2894  else {
2895  II.setSource(AdjustedPtr);
2896  II.setSourceAlignment(SliceAlign);
2897  }
2898 
2899  LLVM_DEBUG(dbgs() << " to: " << II << "\n");
2900  deleteIfTriviallyDead(OldPtr);
2901  return false;
2902  }
2903  // For split transfer intrinsics we have an incredibly useful assurance:
2904  // the source and destination do not reside within the same alloca, and at
2905  // least one of them does not escape. This means that we can replace
2906  // memmove with memcpy, and we don't need to worry about all manner of
2907  // downsides to splitting and transforming the operations.
2908 
2909  // If this doesn't map cleanly onto the alloca type, and that type isn't
2910  // a single value type, just emit a memcpy.
2911  bool EmitMemCpy =
2912  !VecTy && !IntTy &&
2913  (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
2914  SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) ||
2915  !NewAI.getAllocatedType()->isSingleValueType());
2916 
2917  // If we're just going to emit a memcpy, the alloca hasn't changed, and the
2918  // size hasn't been shrunk based on analysis of the viable range, this is
2919  // a no-op.
2920  if (EmitMemCpy && &OldAI == &NewAI) {
2921  // Ensure the start lines up.
2922  assert(NewBeginOffset == BeginOffset);
2923 
2924  // Rewrite the size as needed.
2925  if (NewEndOffset != EndOffset)
2926  II.setLength(ConstantInt::get(II.getLength()->getType(),
2927  NewEndOffset - NewBeginOffset));
2928  return false;
2929  }
2930  // Record this instruction for deletion.
2931  Pass.DeadInsts.insert(&II);
2932 
2933  // Strip all inbounds GEPs and pointer casts to try to dig out any root
2934  // alloca that should be re-examined after rewriting this instruction.
2935  Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
2936  if (AllocaInst *AI =
2937  dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
2938  assert(AI != &OldAI && AI != &NewAI &&
2939  "Splittable transfers cannot reach the same alloca on both ends.");
2940  Pass.Worklist.insert(AI);
2941  }
2942 
2943  Type *OtherPtrTy = OtherPtr->getType();
2944  unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();
2945 
2946  // Compute the relative offset for the other pointer within the transfer.
2947  unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS);
2948  APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset);
2949  unsigned OtherAlign =
2950  IsDest ? II.getSourceAlignment() : II.getDestAlignment();
2951  OtherAlign = MinAlign(OtherAlign ? OtherAlign : 1,
2952  OtherOffset.zextOrTrunc(64).getZExtValue());
2953 
2954  if (EmitMemCpy) {
2955  // Compute the other pointer, folding as much as possible to produce
2956  // a single, simple GEP in most cases.
2957  OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
2958  OtherPtr->getName() + ".");
2959 
2960  Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2961  Type *SizeTy = II.getLength()->getType();
2962  Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2963 
2964  Value *DestPtr, *SrcPtr;
2965  unsigned DestAlign, SrcAlign;
2966  // Note: IsDest is true iff we're copying into the new alloca slice
2967  if (IsDest) {
2968  DestPtr = OurPtr;
2969  DestAlign = SliceAlign;
2970  SrcPtr = OtherPtr;
2971  SrcAlign = OtherAlign;
2972  } else {
2973  DestPtr = OtherPtr;
2974  DestAlign = OtherAlign;
2975  SrcPtr = OurPtr;
2976  SrcAlign = SliceAlign;
2977  }
2978  CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign,
2979  Size, II.isVolatile());
2980  if (AATags)
2981  New->setAAMetadata(AATags);
2982  LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
2983  return false;
2984  }
2985 
2986  bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
2987  NewEndOffset == NewAllocaEndOffset;
2988  uint64_t Size = NewEndOffset - NewBeginOffset;
2989  unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0;
2990  unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
2991  unsigned NumElements = EndIndex - BeginIndex;
2992  IntegerType *SubIntTy =
2993  IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr;
2994 
2995  // Reset the other pointer type to match the register type we're going to
2996  // use, but using the address space of the original other pointer.
2997  Type *OtherTy;
2998  if (VecTy && !IsWholeAlloca) {
2999  if (NumElements == 1)
3000  OtherTy = VecTy->getElementType();
3001  else
3002  OtherTy = VectorType::get(VecTy->getElementType(), NumElements);
3003  } else if (IntTy && !IsWholeAlloca) {
3004  OtherTy = SubIntTy;
3005  } else {
3006  OtherTy = NewAllocaTy;
3007  }
3008  OtherPtrTy = OtherTy->getPointerTo(OtherAS);
3009 
3010  Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
3011  OtherPtr->getName() + ".");
3012  unsigned SrcAlign = OtherAlign;
3013  Value *DstPtr = &NewAI;
3014  unsigned DstAlign = SliceAlign;
3015  if (!IsDest) {
3016  std::swap(SrcPtr, DstPtr);
3017  std::swap(SrcAlign, DstAlign);
3018  }
3019 
3020  Value *Src;
3021  if (VecTy && !IsWholeAlloca && !IsDest) {
3022  Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3023  NewAI.getAlignment(), "load");
3024  Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
3025  } else if (IntTy && !IsWholeAlloca && !IsDest) {
3026  Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3027  NewAI.getAlignment(), "load");
3028  Src = convertValue(DL, IRB, Src, IntTy);
3029  uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
3030  Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
3031  } else {
3032  LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign,
3033  II.isVolatile(), "copyload");
3034  if (AATags)
3035  Load->setAAMetadata(AATags);
3036  Src = Load;
3037  }
3038 
3039  if (VecTy && !IsWholeAlloca && IsDest) {
3040  Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3041  NewAI.getAlignment(), "oldload");
3042  Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
3043  } else if (IntTy && !IsWholeAlloca && IsDest) {
3044  Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
3045  NewAI.getAlignment(), "oldload");
3046  Old = convertValue(DL, IRB, Old, IntTy);
3047  uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
3048  Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
3049  Src = convertValue(DL, IRB, Src, NewAllocaTy);
3050  }
3051 
3052  StoreInst *Store = cast<StoreInst>(
3053  IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
3054  if (AATags)
3055  Store->setAAMetadata(AATags);
3056  LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
3057  return !II.isVolatile();
3058  }
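 // Editorial note (not part of the original source): a sketch of the split
 // transfer path above. When the copy exactly covers the rewritten slice and
 // the other pointer is a separate object, the intrinsic is replaced by a plain
 // load/store pair over the slice's value type (names are illustrative):
 //
 //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst.i8, i8* %src.i8, i64 8, i1 false)
 //
 // becomes, roughly:
 //
 //   %copyload = load i64, i64* %src, align 8
 //   store i64 %copyload, i64* %dst, align 8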
3059 
3060  bool visitIntrinsicInst(IntrinsicInst &II) {
3061  assert(II.isLifetimeStartOrEnd());
3062  LLVM_DEBUG(dbgs() << " original: " << II << "\n");
3063  assert(II.getArgOperand(1) == OldPtr);
3064 
3065  // Record this instruction for deletion.
3066  Pass.DeadInsts.insert(&II);
3067 
3068  // Lifetime intrinsics are only promotable if they cover the whole alloca.
3069  // Therefore, we drop lifetime intrinsics which don't cover the whole
3070  // alloca.
3071  // (In theory, intrinsics which partially cover an alloca could be
3072  // promoted, but PromoteMemToReg doesn't handle that case.)
3073  // FIXME: Check whether the alloca is promotable before dropping the
3074  // lifetime intrinsics?
3075  if (NewBeginOffset != NewAllocaBeginOffset ||
3076  NewEndOffset != NewAllocaEndOffset)
3077  return true;
3078 
3079  ConstantInt *Size =
3080  ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
3081  NewEndOffset - NewBeginOffset);
3082  // Lifetime intrinsics always expect an i8* so directly get such a pointer
3083  // for the new alloca slice.
3084  Type *PointerTy = IRB.getInt8PtrTy(OldPtr->getType()->getPointerAddressSpace());
3085  Value *Ptr = getNewAllocaSlicePtr(IRB, PointerTy);
3086  Value *New;
3087  if (II.getIntrinsicID() == Intrinsic::lifetime_start)
3088  New = IRB.CreateLifetimeStart(Ptr, Size);
3089  else
3090  New = IRB.CreateLifetimeEnd(Ptr, Size);
3091 
3092  (void)New;
3093  LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
3094 
3095  return true;
3096  }
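 // Editorial note (not part of the original source): a sketch of the lifetime
 // rewriting above. A marker that covers the whole new slice is re-emitted
 // against the slice pointer with the slice's size; markers that only partially
 // cover it are simply dropped. Names are illustrative:
 //
 //   call void @llvm.lifetime.start.p0i8(i64 16, i8* %old.i8)
 //
 // becomes, for an 8-byte slice that it fully covers:
 //
 //   call void @llvm.lifetime.start.p0i8(i64 8, i8* %new.slice.i8)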
3097 
3098  void fixLoadStoreAlign(Instruction &Root) {
3099  // This algorithm implements the same visitor loop as
3100  // hasUnsafePHIOrSelectUse, and fixes the alignment of each load
3101  // or store found.
3102  SmallPtrSet<Instruction *, 4> Visited;
3103  SmallVector<Instruction *, 4> Uses;
3104  Visited.insert(&Root);
3105  Uses.push_back(&Root);
3106  do {
3107  Instruction *I = Uses.pop_back_val();
3108 
3109  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
3110  unsigned LoadAlign = LI->getAlignment();
3111  if (!LoadAlign)
3112  LoadAlign = DL.getABITypeAlignment(LI->getType());
3113  LI->setAlignment(std::min(LoadAlign, getSliceAlign()));
3114  continue;
3115  }
3116  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
3117  unsigned StoreAlign = SI->getAlignment();
3118  if (!StoreAlign) {
3119  Value *Op = SI->getOperand(0);
3120  StoreAlign = DL.getABITypeAlignment(Op->getType());
3121  }
3122  SI->setAlignment(std::min(StoreAlign, getSliceAlign()));
3123  continue;
3124  }
3125 
3126  assert(isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I) ||
3127  isa<PHINode>(I) || isa<SelectInst>(I) ||
3128  isa<GetElementPtrInst>(I));
3129  for (User *U : I->users())
3130  if (Visited.insert(cast<Instruction>(U)).second)
3131  Uses.push_back(cast<Instruction>(U));
3132  } while (!Uses.empty());
3133  }
3134 
3135  bool visitPHINode(PHINode &PN) {
3136  LLVM_DEBUG(dbgs() << " original: " << PN << "\n");
3137  assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
3138  assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");
3139 
3140  // We would like to compute a new pointer in only one place, but have it be
3141  // as local as possible to the PHI. To do that, we re-use the location of
3142  // the old pointer, which necessarily must be in the right position to
3143  // dominate the PHI.
3144  IRBuilderTy PtrBuilder(IRB);
3145  if (isa<PHINode>(OldPtr))
3146  PtrBuilder.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt());
3147  else
3148  PtrBuilder.SetInsertPoint(OldPtr);
3149  PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc());
3150 
3151  Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType());
3152  // Replace the operands which were using the old pointer.
3153  std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
3154 
3155  LLVM_DEBUG(dbgs() << " to: " << PN << "\n");
3156  deleteIfTriviallyDead(OldPtr);
3157 
3158  // Fix the alignment of any loads or stores using this PHI node.
3159  fixLoadStoreAlign(PN);
3160 
3161  // PHIs can't be promoted on their own, but often can be speculated. We
3162  // check the speculation outside of the rewriter so that we see the
3163  // fully-rewritten alloca.
3164  PHIUsers.insert(&PN);
3165  return true;
3166  }
3167 
3168  bool visitSelectInst(SelectInst &SI) {
3169  LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
3170  assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
3171  "Pointer isn't an operand!");
3172  assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
3173  assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
3174 
3175  Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
3176  // Replace the operands which were using the old pointer.
3177  if (SI.getOperand(1) == OldPtr)
3178  SI.setOperand(1, NewPtr);
3179  if (SI.getOperand(2) == OldPtr)
3180  SI.setOperand(2, NewPtr);
3181 
3182  LLVM_DEBUG(dbgs() << " to: " << SI << "\n");
3183  deleteIfTriviallyDead(OldPtr);
3184 
3185  // Fix the alignment of any loads or stores using this select.
3186  fixLoadStoreAlign(SI);
3187 
3188  // Selects can't be promoted on their own, but often can be speculated. We
3189  // check the speculation outside of the rewriter so that we see the
3190  // fully-rewritten alloca.
3191  SelectUsers.insert(&SI);
3192  return true;
3193  }
3194 };
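 // Editorial note (not part of the original source): a sketch of the PHI and
 // select handling above. Only the pointer operand is redirected here; whether
 // loads through the PHI or select can be speculated is decided later, outside
 // the rewriter. Names are illustrative:
 //
 //   %phi = phi i8* [ %old.slice.ptr, %bb0 ], [ %other, %bb1 ]
 //
 // becomes:
 //
 //   %phi = phi i8* [ %new.slice.ptr, %bb0 ], [ %other, %bb1 ]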
3195 
3196 namespace {
3197 
3198 /// Visitor to rewrite aggregate loads and stores as scalar.
3199 ///
3200 /// This pass aggressively rewrites all aggregate loads and stores on
3201 /// a particular pointer (or any pointer derived from it which we can identify)
3202 /// with scalar loads and stores.
3203 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
3204  // Befriend the base class so it can delegate to private visit methods.
3205  friend class InstVisitor<AggLoadStoreRewriter, bool>;
3206 
3207  /// Queue of pointer uses to analyze and potentially rewrite.
3208  SmallVector<Use *, 8> Queue;
3209 
3210  /// Set to prevent us from cycling with phi nodes and loops.
3211  SmallPtrSet<User *, 8> Visited;
3212 
3213  /// The current pointer use being rewritten. This is used to dig up the used
3214  /// value (as opposed to the user).
3215  Use *U;
3216 
3217  /// Used to calculate offsets, and hence alignment, of subobjects.
3218  const DataLayout &DL;
3219 
3220 public:
3221  AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}
3222 
3223  /// Rewrite loads and stores through a pointer and all pointers derived from
3224  /// it.
3225  bool rewrite(Instruction &I) {
3226  LLVM_DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
3227  enqueueUsers(I);
3228  bool Changed = false;
3229  while (!Queue.empty()) {
3230  U = Queue.pop_back_val();
3231  Changed |= visit(cast<Instruction>(U->getUser()));
3232  }
3233  return Changed;
3234  }
3235 
3236 private:
3237  /// Enqueue all the users of the given instruction for further processing.
3238  /// This uses a set to de-duplicate users.
3239  void enqueueUsers(Instruction &I) {
3240  for (Use &U : I.uses())
3241  if (Visited.insert(U.getUser()).second)
3242  Queue.push_back(&U);
3243  }
3244 
3245  // Conservative default is to not rewrite anything.
3246  bool visitInstruction(Instruction &I) { return false; }
3247 
3248  /// Generic recursive split emission class.
3249  template <typename Derived> class OpSplitter {
3250  protected:
3251  /// The builder used to form new instructions.
3252  IRBuilderTy IRB;
3253 
3254  /// The indices to be used with insert- or extractvalue to select the
3255  /// appropriate value within the aggregate.
3256  SmallVector<unsigned, 4> Indices;
3257 
3258  /// The indices to a GEP instruction which will move Ptr to the correct slot
3259  /// within the aggregate.
3260  SmallVector<Value *, 4> GEPIndices;
3261 
3262  /// The base pointer of the original op, used as a base for GEPing the
3263  /// split operations.
3264  Value *Ptr;
3265 
3266  /// The base pointee type being GEPed into.
3267  Type *BaseTy;
3268 
3269  /// Known alignment of the base pointer.
3270  unsigned BaseAlign;
3271 
3272  /// To calculate offset of each component so we can correctly deduce
3273  /// alignments.
3274  const DataLayout &DL;
3275 
3276  /// Initialize the splitter with an insertion point, Ptr and start with a
3277  /// single zero GEP index.
3278  OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
3279  unsigned BaseAlign, const DataLayout &DL)
3280  : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr),
3281  BaseTy(BaseTy), BaseAlign(BaseAlign), DL(DL) {}
3282 
3283  public:
3284  /// Generic recursive split emission routine.
3285  ///
3286  /// This method recursively splits an aggregate op (load or store) into
3287  /// scalar or vector ops. It splits recursively until it hits a single value
3288  /// and emits that single value operation via the template argument.
3289  ///
3290  /// The logic of this routine relies on GEPs and insertvalue and
3291  /// extractvalue all operating with the same fundamental index list, merely
3292  /// formatted differently (GEPs need actual values).
3293  ///
3294  /// \param Ty The type being split recursively into smaller ops.
3295  /// \param Agg The aggregate value being built up or stored, depending on
3296  /// whether this is splitting a load or a store respectively.
3297  void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
3298  if (Ty->isSingleValueType()) {
3299  unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices);
3300  return static_cast<Derived *>(this)->emitFunc(
3301  Ty, Agg, MinAlign(BaseAlign, Offset), Name);
3302  }
3303 
3304  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
3305  unsigned OldSize = Indices.size();
3306  (void)OldSize;
3307  for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
3308  ++Idx) {
3309  assert(Indices.size() == OldSize && "Did not return to the old size");
3310  Indices.push_back(Idx);
3311  GEPIndices.push_back(IRB.getInt32(Idx));
3312  emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
3313  GEPIndices.pop_back();
3314  Indices.pop_back();
3315  }
3316  return;
3317  }
3318 
3319  if (StructType *STy = dyn_cast<StructType>(Ty)) {
3320  unsigned OldSize = Indices.size();
3321  (void)OldSize;
3322  for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
3323  ++Idx) {
3324  assert(Indices.size() == OldSize && "Did not return to the old size");
3325  Indices.push_back(Idx);
3326  GEPIndices.push_back(IRB.getInt32(Idx));
3327  emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
3328  GEPIndices.pop_back();
3329  Indices.pop_back();
3330  }
3331  return;
3332  }
3333 
3334  llvm_unreachable("Only arrays and structs are aggregate loadable types");
3335  }
3336  };
3337 
3338  struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
3339  AAMDNodes AATags;
3340 
3341  LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
3342  AAMDNodes AATags, unsigned BaseAlign, const DataLayout &DL)
3343  : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
3344  DL), AATags(AATags) {}
3345 
3346  /// Emit a leaf load of a single value. This is called at the leaves of the
3347  /// recursive emission to actually load values.
3348  void emitFunc(Type *Ty, Value *&Agg, unsigned Align, const Twine &Name) {
3349  assert(Ty->isSingleValueType());
3350  // Load the single value and insert it using the indices.
3351  Value *GEP =
3352  IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
3353  LoadInst *Load = IRB.CreateAlignedLoad(Ty, GEP, Align, Name + ".load");
3354  if (AATags)
3355  Load->setAAMetadata(AATags);
3356  Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
3357  LLVM_DEBUG(dbgs() << " to: " << *Load << "\n");
3358  }
3359  };
3360 
3361  bool visitLoadInst(LoadInst &LI) {
3362  assert(LI.getPointerOperand() == *U);
3363  if (!LI.isSimple() || LI.getType()->isSingleValueType())
3364  return false;
3365 
3366  // We have an aggregate being loaded, split it apart.
3367  LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
3368  AAMDNodes AATags;
3369  LI.getAAMetadata(AATags);
3370  LoadOpSplitter Splitter(&LI, *U, LI.getType(), AATags,
3371  getAdjustedAlignment(&LI, 0, DL), DL);
3372  Value *V = UndefValue::get(LI.getType());
3373  Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
3374  LI.replaceAllUsesWith(V);
3375  LI.eraseFromParent();
3376  return true;
3377  }
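 // Editorial note (not part of the original source): a sketch of what the
 // splitter above produces for an aggregate ("FCA") load. A load of
 // { i32, float } is decomposed into per-field GEPs, scalar loads, and
 // insertvalues; the ".fca" names follow the convention used here but the
 // exact IR is illustrative:
 //
 //   %v = load { i32, float }, { i32, float }* %p
 //
 // becomes, roughly:
 //
 //   %v.fca.0.gep = getelementptr inbounds { i32, float }, { i32, float }* %p, i32 0, i32 0
 //   %v.fca.0.load = load i32, i32* %v.fca.0.gep
 //   %v.fca.0.insert = insertvalue { i32, float } undef, i32 %v.fca.0.load, 0
 //   %v.fca.1.gep = getelementptr inbounds { i32, float }, { i32, float }* %p, i32 0, i32 1
 //   %v.fca.1.load = load float, float* %v.fca.1.gep
 //   %v.fca.1.insert = insertvalue { i32, float } %v.fca.0.insert, float %v.fca.1.load, 1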
3378 
3379  struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
3380  StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy,
3381  AAMDNodes AATags, unsigned BaseAlign, const DataLayout &DL)
3382  : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign,
3383  DL),
3384  AATags(AATags) {}
3385  AAMDNodes AATags;
3386  /// Emit a leaf store of a single value. This is called at the leaves of the
3387  /// recursive emission to actually produce stores.
3388  void emitFunc(Type *Ty, Value *&Agg, unsigned Align, const Twine &Name) {
3389  assert(Ty->isSingleValueType());
3390  // Extract the single value and store it using the indices.
3391  //
3392  // The gep and extractvalue values are factored out of the CreateStore
3393  // call to make the output independent of the argument evaluation order.
3394  Value *ExtractValue =
3395  IRB.CreateExtractValue(Agg, Indices, Name + ".extract");
3396  Value *InBoundsGEP =
3397  IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
3398  StoreInst *Store =
3399  IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Align);
3400  if (AATags)
3401  Store->setAAMetadata(AATags);
3402  LLVM_DEBUG(dbgs() << " to: " << *Store << "\n");
3403  }
3404  };
3405 
3406  bool visitStoreInst(StoreInst &SI) {
3407  if (!SI.isSimple() || SI.getPointerOperand() != *U)
3408  return false;
3409  Value *V = SI.getValueOperand();
3410  if (V->getType()->isSingleValueType())
3411  return false;
3412 
3413  // We have an aggregate being stored, split it apart.
3414  LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
3415  AAMDNodes AATags;
3416  SI.getAAMetadata(AATags);
3417  StoreOpSplitter Splitter(&SI, *U, V->getType(), AATags,
3418  getAdjustedAlignment(&SI, 0, DL), DL);
3419  Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
3420  SI.eraseFromParent();
3421  return true;
3422  }
3423 
3424  bool visitBitCastInst(BitCastInst &BC) {
3425  enqueueUsers(BC);
3426  return false;
3427  }
3428 
3429  bool visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
3430  enqueueUsers(ASC);
3431  return false;
3432  }
3433 
3434  bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
3435  enqueueUsers(GEPI);
3436  return false;
3437  }
3438 
3439  bool visitPHINode(PHINode &PN) {
3440  enqueueUsers(PN);
3441  return false;
3442  }
3443 
3444  bool visitSelectInst(SelectInst &SI) {
3445  enqueueUsers(SI);
3446  return false;
3447  }
3448 };
3449 
3450 } // end anonymous namespace
3451 
3452 /// Strip aggregate type wrapping.
3453 ///
3454 /// This removes no-op aggregate types wrapping an underlying type. It will
3455 /// strip as many layers of types as it can without changing either the type
3456 /// size or the allocated size.
3457 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
3458  if (Ty->isSingleValueType())
3459  return Ty;
3460 
3461  uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3462  uint64_t TypeSize = DL.getTypeSizeInBits(Ty);
3463 
3464  Type *InnerTy;
3465  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
3466  InnerTy = ArrTy->getElementType();
3467  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
3468  const StructLayout *SL = DL.getStructLayout(STy);
3469  unsigned Index = SL->getElementContainingOffset(0);
3470  InnerTy = STy->getElementType(Index);
3471  } else {
3472  return Ty;
3473  }
3474 
3475  if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
3476  TypeSize > DL.getTypeSizeInBits(InnerTy))
3477  return Ty;
3478 
3479  return stripAggregateTypeWrapping(DL, InnerTy);
3480 }
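// Editorial note (not part of the original source): a few hypothetical cases
// for the stripping above, assuming a typical 64-bit data layout:
//
//   { [1 x i64] }  -->  i64        (every layer has the same size and alloc size)
//   { i64, i32 }   -->  unchanged  (stripping to the first field would shrink it)
//   [2 x i32]      -->  unchanged  (the element is smaller than the array)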
3481 
3482 /// Try to find a partition of the aggregate type passed in for a given
3483 /// offset and size.
3484 ///
3485 /// This recurses through the aggregate type and tries to compute a subtype
3486 /// based on the offset and size. When the offset and size span a sub-section
3487 /// of an array, it will even compute a new array type for that sub-section,
3488 /// and the same for structs.
3489 ///
3490 /// Note that this routine is very strict and tries to find a partition of the
3491 /// type which produces the *exact* right offset and size. It is not forgiving
3492 /// when the size or offset causes either end of the type-based partition to be off.
3493 /// Also, this is a best-effort routine. It is reasonable to give up and not
3494 /// return a type if necessary.
3495 static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
3496  uint64_t Size) {
3497  if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
3498  return stripAggregateTypeWrapping(DL, Ty);
3499  if (Offset > DL.getTypeAllocSize(Ty) ||
3500  (DL.getTypeAllocSize(Ty) - Offset) < Size)
3501  return nullptr;
3502 
3503  if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
3504  Type *ElementTy = SeqTy->getElementType();
3505  uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
3506  uint64_t NumSkippedElements = Offset / ElementSize;
3507  if (NumSkippedElements >= SeqTy->getNumElements())
3508  return nullptr;
3509  Offset -= NumSkippedElements * ElementSize;
3510 
3511  // First check if we need to recurse.
3512  if (Offset > 0 || Size < ElementSize) {
3513  // Bail if the partition ends in a different array element.
3514  if ((Offset + Size) > ElementSize)
3515  return nullptr;
3516  // Recurse through the element type trying to peel off offset bytes.
3517  return getTypePartition(DL, ElementTy, Offset, Size);
3518  }
3519  assert(Offset == 0);
3520 
3521  if (Size == ElementSize)
3522  return stripAggregateTypeWrapping(DL, ElementTy);
3523  assert(Size > ElementSize);
3524  uint64_t NumElements = Size / ElementSize;
3525  if (NumElements * ElementSize != Size)
3526  return nullptr;
3527  return ArrayType::get(ElementTy, NumElements);
3528  }
3529 
3530  StructType *STy = dyn_cast<StructType>(Ty);
3531  if (!STy)
3532  return nullptr;
3533 
3534  const StructLayout *SL = DL.getStructLayout(STy);
3535  if (Offset >= SL->getSizeInBytes())
3536  return nullptr;
3537  uint64_t EndOffset = Offset + Size;
3538  if (EndOffset > SL->getSizeInBytes())
3539  return nullptr;
3540 
3541  unsigned Index = SL->getElementContainingOffset(Offset);
3542  Offset -= SL->getElementOffset(Index);
3543 
3544  Type *ElementTy = STy->getElementType(Index);
3545  uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
3546  if (Offset >= ElementSize)
3547  return nullptr; // The offset points into alignment padding.
3548 
3549  // See if any partition must be contained by the element.
3550  if (Offset > 0 || Size < ElementSize) {
3551  if ((Offset + Size) > ElementSize)
3552  return nullptr;
3553  return getTypePartition(DL, ElementTy, Offset, Size);
3554  }
3555  assert(Offset == 0);
3556 
3557  if (Size == ElementSize)
3558  return stripAggregateTypeWrapping(DL, ElementTy);
3559 
3560  StructType::element_iterator EI = STy->element_begin() + Index,
3561  EE = STy->element_end();
3562  if (EndOffset < SL->getSizeInBytes()) {
3563  unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
3564  if (Index == EndIndex)
3565  return nullptr; // Within a single element and its padding.
3566 
3567  // Don't try to form "natural" types if the elements don't line up with the
3568  // expected size.
3569  // FIXME: We could potentially recurse down through the last element in the
3570  // sub-struct to find a natural end point.
3571  if (SL->getElementOffset(EndIndex) != EndOffset)
3572  return nullptr;
3573 
3574  assert(Index < EndIndex);
3575  EE = STy->element_begin() + EndIndex;
3576  }
3577 
3578  // Try to build up a sub-structure.
3579  StructType *SubTy =
3580  StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked());
3581  const StructLayout *SubSL = DL.getStructLayout(SubTy);
3582  if (Size != SubSL->getSizeInBytes())
3583  return nullptr; // The sub-struct doesn't have quite the size needed.
3584 
3585  return SubTy;
3586 }
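// Editorial note (not part of the original source): hypothetical queries
// against the routine above for Ty = { i32, [4 x i16], i64 } under a typical
// 64-bit data layout (the i32 at byte 0, the array at byte 4, the i64 at byte 16):
//
//   Offset 4, Size 8  -->  [4 x i16]   (exactly the second element)
//   Offset 4, Size 4  -->  [2 x i16]   (a new sub-array type is synthesized)
//   Offset 2, Size 4  -->  nullptr     (straddles the i32/array boundary)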
3587 
3588 /// Pre-split loads and stores to simplify rewriting.
3589 ///
3590 /// We want to break up the splittable load+store pairs as much as
3591 /// possible. This is important to do as a preprocessing step, as once we
3592 /// start rewriting the accesses to partitions of the alloca we lose the
3593 /// necessary information to correctly split apart paired loads and stores
3594 /// which both point into this alloca. The case to consider is something like
3595 /// the following:
3596 ///
3597 /// %a = alloca [12 x i8]
3598 /// %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
3599 /// %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
3600 /// %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
3601 /// %iptr1 = bitcast i8* %gep1 to i64*
3602 /// %iptr2 = bitcast i8* %gep2 to i64*
3603 /// %fptr1 = bitcast i8* %gep1 to float*
3604 /// %fptr2 = bitcast i8* %gep2 to float*
3605 /// %fptr3 = bitcast i8* %gep3 to float*
3606 /// store float 0.0, float* %fptr1
3607 /// store float 1.0, float* %fptr2
3608 /// %v = load i64* %iptr1
3609 /// store i64 %v, i64* %iptr2
3610 /// %f1 = load float* %fptr2
3611 /// %f2 = load float* %fptr3
3612 ///
3613 /// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
3614 /// promote everything so we recover the 2 SSA values that should have been
3615 /// there all along.
3616 ///
3617 /// \returns true if any changes are made.
3618 bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
3619  LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n");
3620 
3621  // Track the loads and stores which are candidates for pre-splitting here, in
3622  // the order they first appear during the partition scan. These give stable
3623  // iteration order and a basis for tracking which loads and stores we
3624  // actually split.
3625  SmallVector<LoadInst *, 4> Loads;
3626  SmallVector<StoreInst *, 4> Stores;
3627 
3628  // We need to accumulate the splits required of each load or store where we
3629  // can find them via a direct lookup. This is important to cross-check loads
3630  // and stores against each other. We also track the slice so that we can kill
3631  // all the slices that end up split.
3632  struct SplitOffsets {
3633  Slice *S;
3634  std::vector<uint64_t> Splits;
3635  };
3636  SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap;
3637 
3638  // Track loads out of this alloca which cannot, for any reason, be pre-split.
3639  // This is important as we also cannot pre-split stores of those loads!
3640  // FIXME: This is all pretty gross. It means that we can be more aggressive
3641  // in pre-splitting when the load feeding the store happens to come from
3642  // a separate alloca. Put another way, the effectiveness of SROA would be
3643  // decreased by a frontend which just concatenated all of its local allocas
3644  // into one big flat alloca. But defeating such patterns is exactly the job
3645  // SROA is tasked with! Sadly, to not have this discrepancy we would have
3646  // to change store pre-splitting to actually force pre-splitting of the load
3647  // that feeds it *and all stores*. That makes pre-splitting much harder, but
3648  // maybe it would make it more principled?
3649  SmallPtrSet<LoadInst *, 8> UnsplittableLoads;
3650 
3651  LLVM_DEBUG(dbgs() << " Searching for candidate loads and stores\n");
3652  for (auto &P : AS.partitions()) {
3653  for (Slice &S : P) {
3654  Instruction *I = cast<Instruction>(S.getUse()->getUser());
3655  if (!S.isSplittable() || S.endOffset() <= P.endOffset()) {
3656  // If this is a load we have to track that it can't participate in any
3657  // pre-splitting. If this is a store of a load we have to track that
3658  // that load also can't participate in any pre-splitting.
3659  if (auto *LI = dyn_cast<LoadInst>(I))
3660  UnsplittableLoads.insert(LI);
3661  else if (auto *SI = dyn_cast<StoreInst>(I))
3662  if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand()))
3663  UnsplittableLoads.insert(LI);
3664  continue;
3665  }
3666  assert(P.endOffset() > S.beginOffset() &&
3667  "Empty or backwards partition!");
3668 
3669  // Determine if this is a pre-splittable slice.
3670  if (auto *LI = dyn_cast<LoadInst>(I)) {
3671  assert(!LI->isVolatile() && "Cannot split volatile loads!");
3672 
3673  // The load must be used exclusively to store into other pointers for
3674  // us to be able to arbitrarily pre-split it. The stores must also be
3675  // simple to avoid changing semantics.
3676  auto IsLoadSimplyStored = [](LoadInst *LI) {
3677  for (User *LU : LI->users()) {
3678  auto *SI = dyn_cast<StoreInst>(LU);
3679  if (!SI || !SI->isSimple())
3680  return false;
3681  }
3682  return true;
3683  };
3684  if (!IsLoadSimplyStored(LI)) {
3685  UnsplittableLoads.insert(LI);
3686  continue;
3687  }
3688 
3689  Loads.push_back(LI);
3690  } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3691  if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex()))
3692  // Skip stores *of* pointers. FIXME: This shouldn't even be possible!
3693  continue;
3694  auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand());
3695  if (!StoredLoad || !StoredLoad->isSimple())
3696  continue;
3697  assert(!SI->isVolatile() && "Cannot split volatile stores!");
3698 
3699  Stores.push_back(SI);
3700  } else {
3701  // Other uses cannot be pre-split.
3702  continue;
3703  }
3704 
3705  // Record the initial split.
3706  LLVM_DEBUG(dbgs() << " Candidate: " << *I << "\n");
3707  auto &Offsets = SplitOffsetsMap[I];
3708  assert(Offsets.Splits.empty() &&
3709  "Should not have splits the first time we see an instruction!");
3710  Offsets.S = &S;
3711  Offsets.Splits.push_back(P.endOffset() - S.beginOffset());
3712  }
3713 
3714  // Now scan the already split slices, and add a split for any of them which
3715  // we're going to pre-split.
3716  for (Slice *S : P.splitSliceTails()) {
3717  auto SplitOffsetsMapI =
3718  SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser()));
3719  if (SplitOffsetsMapI == SplitOffsetsMap.end())
3720  continue;
3721  auto &Offsets = SplitOffsetsMapI->second;
3722 
3723  assert(Offsets.S == S && "Found a mismatched slice!");
3724  assert(!Offsets.Splits.empty() &&
3725  "Cannot have an empty set of splits on the second partition!");
3726  assert(Offsets.Splits.back() ==
3727  P.beginOffset() - Offsets.S->beginOffset() &&
3728  "Previous split does not end where this one begins!");
3729 
3730  // Record each split. The last partition's end isn't needed as the size
3731  // of the slice dictates that.
3732  if (S->endOffset() > P.endOffset())
3733  Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset());
3734  }
3735  }
3736 
3737  // We may have split loads where some of their stores are split stores. For
3738  // such loads and stores, we can only pre-split them if their splits exactly
3739  // match relative to their starting offset. We have to verify this prior to
3740  // any rewriting.
3741  Stores.erase(
3742  llvm::remove_if(Stores,
3743  [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) {
3744  // Lookup the load we are storing in our map of split
3745  // offsets.
3746  auto *LI = cast<LoadInst>(SI->getValueOperand());
3747  // If it was completely unsplittable, then we're done,
3748  // and this store can't be pre-split.
3749  if (UnsplittableLoads.count(LI))
3750  return true;
3751 
3752  auto LoadOffsetsI = SplitOffsetsMap.find(LI);
3753  if (LoadOffsetsI == SplitOffsetsMap.end())
3754  return false; // Unrelated loads are definitely safe.
3755  auto &LoadOffsets = LoadOffsetsI->second;
3756 
3757  // Now lookup the store's offsets.
3758  auto &StoreOffsets = SplitOffsetsMap[SI];
3759 
3760  // If the relative offsets of each split in the load and
3761  // store match exactly, then we can split them and we
3762  // don't need to remove them here.
3763  if (LoadOffsets.Splits == StoreOffsets.Splits)
3764  return false;
3765 
3766  LLVM_DEBUG(
3767  dbgs()
3768  << " Mismatched splits for load and store:\n"
3769  << " " << *LI << "\n"
3770  << " " << *SI << "\n");
3771 
3772  // We've found a store and load that we need to split
3773  // with mismatched relative splits. Just give up on them
3774  // and remove both instructions from our list of
3775  // candidates.
3776  UnsplittableLoads.insert(LI);
3777  return true;
3778  }),
3779  Stores.end());
3780  // Now we have to go *back* through all the stores, because a later store may
3781  // have caused an earlier store's load to become unsplittable and if it is
3782  // unsplittable for the later store, then we can't rely on it being split in
3783  // the earlier store either.
3784  Stores.erase(llvm::remove_if(Stores,
3785  [&UnsplittableLoads](StoreInst *SI) {
3786  auto *LI =
3787  cast<LoadInst>(SI->getValueOperand());
3788  return UnsplittableLoads.count(LI);
3789  }),
3790  Stores.end());
3791  // Once we've established all the loads that can't be split for some reason,
3792  // filter any that made it into our list out.
3793  Loads.erase(llvm::remove_if(Loads,
3794  [&UnsplittableLoads](LoadInst *LI) {
3795  return UnsplittableLoads.count(LI);
3796  }),
3797  Loads.end());
3798 
3799  // If no loads or stores are left, there is no pre-splitting to be done for
3800  // this alloca.
3801  if (Loads.empty() && Stores.empty())
3802  return false;
3803 
3804  // From here on, we can't fail and will be building new accesses, so rig up
3805  // an IR builder.
3806  IRBuilderTy IRB(&AI);
3807 
3808  // Collect the new slices which we will merge into the alloca slices.
3809  SmallVector<Slice, 4> NewSlices;
3810 
3811  // Track any allocas we end up splitting loads and stores for so we iterate
3812  // on them.
3813  SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;
3814 
3815  // At this point, we have collected all of the loads and stores we can
3816  // pre-split, and the specific splits needed for them. We actually do the
3817  // splitting in a specific order so as to handle the case when one of the
3818  // loads is the value operand of one of the stores.
3819  //
3820  // First, we rewrite all of the split loads, and just accumulate each split
3821  // load in a parallel structure. We also build the slices for them and append
3822  // them to the alloca slices.
3823  SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap;
3824  std::vector<LoadInst *> SplitLoads;
3825  const DataLayout &DL = AI.getModule()->getDataLayout();
3826  for (LoadInst *LI : Loads) {
3827  SplitLoads.clear();
3828 
3829  IntegerType *Ty = cast<IntegerType>(LI->getType());
3830  uint64_t LoadSize = Ty->getBitWidth() / 8;
3831  assert(LoadSize > 0 && "Cannot have a zero-sized integer load!");
3832 
3833  auto &Offsets = SplitOffsetsMap[LI];
3834  assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
3835  "Slice size should always match load size exactly!");
3836  uint64_t BaseOffset = Offsets.S->beginOffset();
3837  assert(BaseOffset + LoadSize > BaseOffset &&
3838  "Cannot represent alloca access size using 64-bit integers!");
3839 
3840  Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand());
3841  IRB.SetInsertPoint(LI);
3842 
3843  LLVM_DEBUG(dbgs() << " Splitting load: " << *LI << "\n");
3844 
3845  uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
3846  int Idx = 0, Size = Offsets.Splits.size();
3847  for (;;) {
3848  auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
3849  auto AS = LI->getPointerAddressSpace();
3850  auto *PartPtrTy = PartTy->getPointerTo(AS);
3851  LoadInst *PLoad = IRB.CreateAlignedLoad(
3852  PartTy,
3853  getAdjustedPtr(IRB, DL, BasePtr,
3854  APInt(DL.getIndexSizeInBits(AS), PartOffset),
3855  PartPtrTy, BasePtr->getName() + "."),
3856  getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
3857  LI->getName());
3858  PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
3859  LLVMContext::MD_access_group});
3860 
3861  // Append this load onto the list of split loads so we can find it later
3862  // to rewrite the stores.
3863  SplitLoads.push_back(PLoad);
3864 
3865  // Now build a new slice for the alloca.
3866  NewSlices.push_back(
3867  Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
3868  &PLoad->getOperandUse(PLoad->getPointerOperandIndex()),
3869  /*IsSplittable*/ false));
3870  LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset()
3871  << ", " << NewSlices.back().endOffset()
3872  << "): " << *PLoad << "\n");
3873 
3874  // See if we've handled all the splits.
3875  if (Idx >= Size)
3876  break;
3877 
3878  // Setup the next partition.
3879  PartOffset = Offsets.Splits[Idx];
3880  ++Idx;
3881  PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset;
3882  }
3883 
3884  // Now that we have the split loads, do the slow walk over all uses of the
3885  // load and rewrite them as split stores, or save the split loads to use
3886  // below if the store is going to be split there anyways.
3887  bool DeferredStores = false;
3888  for (User *LU : LI->users()) {
3889  StoreInst *SI = cast<StoreInst>(LU);
3890  if (!Stores.empty() && SplitOffsetsMap.count(SI)) {
3891  DeferredStores = true;
3892  LLVM_DEBUG(dbgs() << " Deferred splitting of store: " << *SI
3893  << "\n");
3894  continue;
3895  }
3896 
3897  Value *StoreBasePtr = SI->getPointerOperand();
3898  IRB.SetInsertPoint(SI);
3899 
3900  LLVM_DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n");
3901 
3902  for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) {
3903  LoadInst *PLoad = SplitLoads[Idx];
3904  uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1];
3905  auto *PartPtrTy =
3906  PLoad->getType()->getPointerTo(SI->getPointerAddressSpace());
3907 
3908  auto AS = SI->getPointerAddressSpace();
3909  StoreInst *PStore = IRB.CreateAlignedStore(
3910  PLoad,
3911  getAdjustedPtr(IRB, DL, StoreBasePtr,
3912  APInt(DL.getIndexSizeInBits(AS), PartOffset),
3913  PartPtrTy, StoreBasePtr->getName() + "."),
3914  getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
3915  PStore->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
3916  LLVMContext::MD_access_group});
3917  LLVM_DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n");
3918  }
3919 
3920  // We want to immediately iterate on any allocas impacted by splitting
3921  // this store, and we have to track any promotable alloca (indicated by
3922  // a direct store) as needing to be resplit because it is no longer
3923  // promotable.
3924  if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) {
3925  ResplitPromotableAllocas.insert(OtherAI);
3926  Worklist.insert(OtherAI);
3927  } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
3928  StoreBasePtr->stripInBoundsOffsets())) {
3929  Worklist.insert(OtherAI);
3930  }
3931 
3932  // Mark the original store as dead.
3933  DeadInsts.insert(SI);
3934  }
3935 
3936  // Save the split loads if there are deferred stores among the users.
3937  if (DeferredStores)
3938  SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads)));
3939 
3940  // Mark the original load as dead and kill the original slice.
3941  DeadInsts.insert(LI);
3942  Offsets.S->kill();
3943  }
3944 
3945  // Second, we rewrite all of the split stores. At this point, we know that
3946  // all loads from this alloca have been split already. For stores of such
3947  // loads, we can simply look up the pre-existing split loads. For stores of
3948  // other loads, we split those loads first and then write split stores of
3949  // them.
3950  for (StoreInst *SI : Stores) {
3951  auto *LI = cast<LoadInst>(SI->getValueOperand());
3952  IntegerType *Ty = cast<IntegerType>(LI->getType());
3953  uint64_t StoreSize = Ty->getBitWidth() / 8;
3954  assert(StoreSize > 0 && "Cannot have a zero-sized integer store!");
3955 
3956  auto &Offsets = SplitOffsetsMap[SI];
3957  assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() &&
3958  "Slice size should always match load size exactly!");
3959  uint64_t BaseOffset = Offsets.S->beginOffset();
3960  assert(BaseOffset + StoreSize > BaseOffset &&
3961  "Cannot represent alloca access size using 64-bit integers!");
3962 
3963  Value *LoadBasePtr = LI->getPointerOperand();
3964  Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand());
3965 
3966  LLVM_DEBUG(dbgs() << " Splitting store: " << *SI << "\n");
3967 
3968  // Check whether we have an already split load.
3969  auto SplitLoadsMapI = SplitLoadsMap.find(LI);
3970  std::vector<LoadInst *> *SplitLoads = nullptr;
3971  if (SplitLoadsMapI != SplitLoadsMap.end()) {
3972  SplitLoads = &SplitLoadsMapI->second;
3973  assert(SplitLoads->size() == Offsets.Splits.size() + 1 &&
3974  "Too few split loads for the number of splits in the store!");
3975  } else {
3976  LLVM_DEBUG(dbgs() << " of load: " << *LI << "\n");
3977  }
3978 
3979  uint64_t PartOffset = 0, PartSize = Offsets.Splits.front();
3980  int Idx = 0, Size = Offsets.Splits.size();
3981  for (;;) {
3982  auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
3983  auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace());
3984  auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());
3985 
3986  // Either lookup a split load or create one.
3987  LoadInst *PLoad;
3988  if (SplitLoads) {
3989  PLoad = (*SplitLoads)[Idx];
3990  } else {
3991  IRB.SetInsertPoint(LI);
3992  auto AS = LI->getPointerAddressSpace();
3993  PLoad = IRB.CreateAlignedLoad(
3994  PartTy,
3995  getAdjustedPtr(IRB, DL, LoadBasePtr,
3996  APInt(DL.getIndexSizeInBits(AS), PartOffset),
3997  LoadPartPtrTy, LoadBasePtr->getName() + "."),
3998  getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
3999  LI->getName());
4000  }
4001 
4002  // And store this partition.
4003  IRB.SetInsertPoint(SI);
4004  auto AS = SI->getPointerAddressSpace();
4005  StoreInst *PStore = IRB.CreateAlignedStore(
4006  PLoad,
4007  getAdjustedPtr(IRB, DL, StoreBasePtr,
4008  APInt(DL.getIndexSizeInBits(AS), PartOffset),
4009  StorePartPtrTy, StoreBasePtr->getName() + "."),
4010  getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);
4011 
4012  // Now build a new slice for the alloca.
4013  NewSlices.push_back(
4014  Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize,
4015  &PStore->getOperandUse(PStore->getPointerOperandIndex()),
4016  /*IsSplittable*/ false));
4017  LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset()
4018  << ", " << NewSlices.back().endOffset()
4019  << "): " << *PStore << "\n");
4020  if (!SplitLoads) {
4021  LLVM_DEBUG(dbgs() << " of split load: " << *PLoad << "\n");
4022  }
4023 
4024  // See if we've finished all the splits.
4025  if (Idx >= Size)
4026  break;
4027 
4028  // Setup the next partition.
4029  PartOffset = Offsets.Splits[Idx];
4030  ++Idx;
4031  PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset;
4032  }
4033 
4034  // We want to immediately iterate on any allocas impacted by splitting
4035  // this load, which is only relevant if it isn't a load of this alloca and
4036  // thus we didn't already split the loads above. We also have to keep track
4037  // of any promotable allocas we split loads on as they can no longer be
4038  // promoted.
4039  if (!SplitLoads) {
4040  if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) {
4041  assert(OtherAI != &AI && "We can't re-split our own alloca!");
4042  ResplitPromotableAllocas.insert(OtherAI);
4043  Worklist.insert(OtherAI);
4044  } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(
4045  LoadBasePtr->stripInBoundsOffsets())) {
4046  assert(OtherAI != &AI && "We can't re-split our own alloca!");
4047  Worklist.insert(OtherAI);
4048  }
4049  }
4050 
4051  // Mark the original store as dead now that we've split it up and kill its
4052  // slice. Note that we leave the original load in place unless this store
4053  // was its only use. It may in turn be split up if it is an alloca load
4054  // for some other alloca, but it may be a normal load. This may introduce
4055  // redundant loads, but where those can be merged the rest of the optimizer
4056  // should handle the merging, and this uncovers SSA splits which is more
4057  // important. In practice, the original loads will almost always be fully
4058  // split and removed eventually, and the splits will be merged by any
4059  // trivial CSE, including instcombine.
4060  if (LI->hasOneUse()) {
4061  assert(*LI->user_begin() == SI && "Single use isn't this store!");
4062  DeadInsts.insert(LI);
4063  }
4064  DeadInsts.insert(SI);
4065  Offsets.S->kill();
4066  }
4067 
4068  // Remove the killed slices that have been pre-split.
4069  AS.erase(llvm::remove_if(AS, [](const Slice &S) { return S.isDead(); }),
4070  AS.end());
4071 
4072  // Insert our new slices. This will sort and merge them into the sorted
4073  // sequence.
4074  AS.insert(NewSlices);
4075 
4076  LLVM_DEBUG(dbgs() << " Pre-split slices:\n");
4077 #ifndef NDEBUG
4078  for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
4079  LLVM_DEBUG(AS.print(dbgs(), I, " "));
4080 #endif
4081 
4082  // Finally, don't try to promote any allocas that now require re-splitting.
4083  // They have already been added to the worklist above.
4084  PromotableAllocas.erase(
4085  llvm::remove_if(
4086  PromotableAllocas,
4087  [&](AllocaInst *AI) { return ResplitPromotableAllocas.count(AI); }),
4088  PromotableAllocas.end());
4089 
4090  return true;
4091 }
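// Editorial note (not part of the original source): continuing the [12 x i8]
// example from the function comment above, the i64 load/store pair that
// straddles the 4-byte partitions is pre-split into two i32 halves, so each
// partition only sees accesses it fully contains (names are illustrative):
//
//   %v = load i64, i64* %iptr1
//   store i64 %v, i64* %iptr2
//
// becomes, roughly:
//
//   %v.lo = load i32, i32* %iptr1.lo        ; bytes [0,4) of %a
//   %v.hi = load i32, i32* %iptr1.hi        ; bytes [4,8) of %a
//   store i32 %v.lo, i32* %iptr2.lo         ; bytes [4,8) of %a
//   store i32 %v.hi, i32* %iptr2.hi         ; bytes [8,12) of %a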
4092 
4093 /// Rewrite an alloca partition's users.
4094 ///
4095 /// This routine drives both of the rewriting goals of the SROA pass. It tries
4096 /// to rewrite uses of an alloca partition to be conducive for SSA value
4097 /// promotion. If the partition needs a new, more refined alloca, this will
4098 /// build that new alloca, preserving as much type information as possible, and
4099 /// rewrite the uses of the old alloca to point at the new one and have the
4100 /// appropriate new offsets. It also evaluates how successful the rewrite was
4101 /// at enabling promotion and if it was successful queues the alloca to be
4102 /// promoted.
4103 AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
4104  Partition &P) {
4105  // Try to compute a friendly type for this partition of the alloca. This
4106  // won't always succeed, in which case we fall back to a legal integer type
4107  // or an i8 array of an appropriate size.
4108  Type *SliceTy = nullptr;
4109  const DataLayout &DL = AI.getModule()->getDataLayout();
4110  if (Type *CommonUseTy = findCommonType(P.begin(), P.end(), P.endOffset()))
4111  if (DL.getTypeAllocSize(CommonUseTy) >= P.size())
4112  SliceTy = CommonUseTy;
4113  if (!SliceTy)
4114  if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
4115  P.beginOffset(), P.size()))
4116  SliceTy = TypePartitionTy;
4117  if ((!SliceTy || (SliceTy->isArrayTy() &&
4118  SliceTy->getArrayElementType()->isIntegerTy())) &&
4119  DL.isLegalInteger(P.size() * 8))
4120  SliceTy = Type::getIntNTy(*C, P.size() * 8);
4121  if (!SliceTy)
4122  SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
4123  assert(DL.getTypeAllocSize(SliceTy) >= P.size());
4124 
4125  bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);
4126 
4127  VectorType *VecTy =
4128  IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
4129  if (VecTy)
4130  SliceTy = VecTy;
4131 
4132  // Check for the case where we're going to rewrite to a new alloca of the
4133  // exact same type as the original, and with the same access offsets. In that
4134  // case, re-use the existing alloca, but still run through the rewriter to
4135  // perform phi and select speculation.
4136  // P.beginOffset() can be non-zero even with the same type in a case with
4137  // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll).
4138  AllocaInst *NewAI;
4139  if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) {
4140  NewAI = &AI;
4141  // FIXME: We should be able to bail at this point with "nothing changed".
4142  // FIXME: We might want to defer PHI speculation until after here.
4143  // FIXME: return nullptr;
4144  } else {
4145  unsigned Alignment = AI.getAlignment();
4146  if (!Alignment) {
4147  // The minimum alignment which users can rely on when the explicit
4148  // alignment is omitted or zero is that required by the ABI for this
4149  // type.
4150  Alignment = DL.getABITypeAlignment(AI.getAllocatedType());
4151  }
4152  Alignment = MinAlign(Alignment, P.beginOffset());
4153  // If we will get at least this much alignment from the type alone, leave
4154  // the alloca's alignment unconstrained.
4155  if (Alignment <= DL.getABITypeAlignment(SliceTy))
4156  Alignment = 0;
4157  NewAI = new AllocaInst(
4158  SliceTy, AI.getType()->getAddressSpace(), nullptr, Alignment,
4159  AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI);
4160  // Copy the old AI debug location over to the new one.
4161  NewAI->setDebugLoc(AI.getDebugLoc());
4162  ++NumNewAllocas;
4163  }
4164 
4165  LLVM_DEBUG(dbgs() << "Rewriting alloca partition "
4166  << "[" << P.beginOffset() << "," << P.endOffset()
4167  << ") to: " << *NewAI << "\n");
4168 
4169  // Track the high watermark on the worklist as it is only relevant for
4170  // promoted allocas. We will reset it to this point if the alloca is not in
4171  // fact scheduled for promotion.
4172  unsigned PPWOldSize = PostPromotionWorklist.size();
4173  unsigned NumUses = 0;
4174  SmallSetVector<PHINode *, 8> PHIUsers;
4175  SmallSetVector<SelectInst *, 8> SelectUsers;
4176 
4177  AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
4178  P.endOffset(), IsIntegerPromotable, VecTy,
4179  PHIUsers, SelectUsers);
4180  bool Promotable = true;
4181  for (Slice *S : P.splitSliceTails()) {
4182  Promotable &= Rewriter.visit(S);
4183  ++NumUses;
4184  }
4185  for (Slice &S : P) {
4186  Promotable &= Rewriter.visit(&S);
4187  ++NumUses;
4188  }
4189 
4190  NumAllocaPartitionUses += NumUses;
4191  MaxUsesPerAllocaPartition.updateMax(NumUses);
4192 
4193  // Now that we've processed all the slices in the new partition, check if any
4194  // PHIs or Selects would block promotion.
4195  for (PHINode *PHI : PHIUsers)
4196  if (!isSafePHIToSpeculate(*PHI)) {
4197  Promotable = false;
4198  PHIUsers.clear();
4199  SelectUsers.clear();
4200  break;
4201  }
4202 
4203  for (SelectInst *Sel : SelectUsers)
4204  if (!isSafeSelectToSpeculate(*Sel)) {
4205  Promotable = false;
4206  PHIUsers.clear();
4207  SelectUsers.clear();
4208  break;
4209  }
4210 
4211  if (Promotable) {
4212  if (PHIUsers.empty() && SelectUsers.empty()) {
4213  // Promote the alloca.
4214  PromotableAllocas.push_back(NewAI);
4215  } else {
4216  // If we have either PHIs or Selects to speculate, add them to those
4217  // worklists and re-queue the new alloca so that we promote it on the
4218  // next iteration.
4219  for (PHINode *PHIUser : PHIUsers)
4220  SpeculatablePHIs.insert(PHIUser);
4221  for (SelectInst *SelectUser : SelectUsers)
4222  SpeculatableSelects.insert(SelectUser);
4223  Worklist.insert(NewAI);
4224  }
4225  } else {
4226  // Drop any post-promotion work items if promotion didn't happen.
4227  while (PostPromotionWorklist.size() > PPWOldSize)
4228  PostPromotionWorklist.pop_back();
4229 
4230  // We couldn't promote and we didn't create a new partition, nothing
4231  // happened.
4232  if (NewAI == &AI)
4233  return nullptr;
4234 
4235  // If we can't promote the alloca, iterate on it to check for new
4236  // refinements exposed by splitting the current alloca. Don't iterate on an
4237  // alloca which didn't actually change and didn't get promoted.
4238  Worklist.insert(NewAI);
4239  }
4240 
4241  return NewAI;
4242 }
4243 
4244 /// Walks the slices of an alloca and forms partitions based on them,
4245 /// rewriting each of their uses.
4246 bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
4247  if (AS.begin() == AS.end())
4248  return false;
4249 
4250  unsigned NumPartitions = 0;
4251  bool Changed = false;
4252  const DataLayout &DL = AI.getModule()->getDataLayout();
4253 
4254  // First try to pre-split loads and stores.
4255  Changed |= presplitLoadsAndStores(AI, AS);
4256 
4257  // Now that we have identified any pre-splitting opportunities,
4258  // mark loads and stores unsplittable except for the following case.
4259  // We leave a slice splittable if all other slices are disjoint or fully
4260  // included in the slice, such as whole-alloca loads and stores.
4261  // If we fail to split these during pre-splitting, we want to force them
4262  // to be rewritten into a partition.
4263  bool IsSorted = true;
4264 
4265  uint64_t AllocaSize = DL.getTypeAllocSize(AI.getAllocatedType());
4266  const uint64_t MaxBitVectorSize = 1024;
4267  if (AllocaSize <= MaxBitVectorSize) {
4268  // If a byte boundary is included in any load or store, a slice starting or
4269  // ending at the boundary is not splittable.
4270  SmallBitVector SplittableOffset(AllocaSize + 1, true);
4271  for (Slice &S : AS)
4272  for (unsigned O = S.beginOffset() + 1;
4273  O < S.endOffset() && O < AllocaSize; O++)
4274  SplittableOffset.reset(O);
4275 
4276  for (Slice &S : AS) {
4277  if (!S.isSplittable())
4278  continue;
4279 
4280  if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
4281  (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
4282  continue;
4283 
4284  if (isa<LoadInst>(S.getUse()->getUser()) ||
4285  isa<StoreInst>(S.getUse()->getUser())) {
4286  S.makeUnsplittable();
4287  IsSorted = false;
4288  }
4289  }
4290  }
4291  else {
4292  // We only allow whole-alloca splittable loads and stores
4293  // for a large alloca to avoid creating too large BitVector.
4294  for (Slice &S : AS) {
4295  if (!S.isSplittable())
4296  continue;
4297 
4298  if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
4299  continue;
4300 
4301  if (isa<LoadInst>(S.getUse()->getUser()) ||
4302  isa<StoreInst>(S.getUse()->getUser())) {
4303  S.makeUnsplittable();
4304  IsSorted = false;
4305  }
4306  }
4307  }
4308 
4309  if (!IsSorted)
4310  llvm::sort(AS);
4311 
4312  /// Describes the allocas introduced by rewritePartition in order to migrate
4313  /// the debug info.
4314  struct Fragment {
4315  AllocaInst *Alloca;
4316  uint64_t Offset;
4317  uint64_t Size;
4318  Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
4319  : Alloca(AI), Offset(O), Size(S) {}
4320  };
4321  SmallVector<Fragment, 4> Fragments;
4322 
4323  // Rewrite each partition.
4324  for (auto &P : AS.partitions()) {
4325  if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) {
4326  Changed = true;
4327  if (NewAI != &AI) {
4328  uint64_t SizeOfByte = 8;
4329  uint64_t AllocaSize = DL.getTypeSizeInBits(NewAI->getAllocatedType());
4330  // Don't include any padding.
4331  uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte);
4332  Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size));
4333  }
4334  }
4335  ++NumPartitions;
4336  }
4337 
4338  NumAllocaPartitions += NumPartitions;
4339  MaxPartitionsPerAlloca.updateMax(NumPartitions);
4340 
4341  // Migrate debug information from the old alloca to the new alloca(s)
4342  // and the individual partitions.
4343  TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI);
4344  if (!DbgDeclares.empty()) {
4345  auto *Var = DbgDeclares.front()->getVariable();
4346  auto *Expr = DbgDeclares.front()->getExpression();
4347  auto VarSize = Var->getSizeInBits();
4348  DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false);
4349  uint64_t AllocaSize = DL.getTypeSizeInBits(AI.getAllocatedType());
4350  for (auto Fragment : Fragments) {
4351  // Create a fragment expression describing the new partition or reuse AI's
4352  // expression if there is only one partition.
4353  auto *FragmentExpr = Expr;
4354  if (Fragment.Size < AllocaSize || Expr->isFragment()) {
4355  // If this alloca is already a scalar replacement of a larger aggregate,
4356  // Fragment.Offset describes the offset inside the scalar.
4357  auto ExprFragment = Expr->getFragmentInfo();
4358  uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0;
4359  uint64_t Start = Offset + Fragment.Offset;
4360  uint64_t Size = Fragment.Size;
4361  if (ExprFragment) {
4362  uint64_t AbsEnd =
4363  ExprFragment->OffsetInBits + ExprFragment->SizeInBits;
4364  if (Start >= AbsEnd)
4365  // No need to describe a SROAed padding.
4366  continue;
4367  Size = std::min(Size, AbsEnd - Start);
4368  }
4369  // The new, smaller fragment is stenciled out from the old fragment.
4370  if (auto OrigFragment = FragmentExpr->getFragmentInfo()) {
4371  assert(Start >= OrigFragment->OffsetInBits &&
4372  "new fragment is outside of original fragment");
4373  Start -= OrigFragment->OffsetInBits;
4374  }
4375 
4376  // The alloca may be larger than the variable.
4377  if (VarSize) {
4378  if (Size > *VarSize)
4379  Size = *VarSize;
4380  if (Size == 0 || Start + Size > *VarSize)
4381  continue;
4382  }
4383 
4384  // Avoid creating a fragment expression that covers the entire variable.
4385  if (!VarSize || *VarSize != Size) {
4386  if (auto E =
4387  DIExpression::createFragmentExpression(Expr, Start, Size))
4388  FragmentExpr = *E;
4389  else
4390  continue;
4391  }
4392  }
4393 
4394  // Remove any existing intrinsics describing the same alloca.
4395  for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca))
4396  OldDII->eraseFromParent();
4397 
4398  DIB.insertDeclare(Fragment.Alloca, Var, FragmentExpr,
4399  DbgDeclares.front()->getDebugLoc(), &AI);
4400  }
4401  }
4402  return Changed;
4403 }
4404 
4405 /// Clobber a use with undef, deleting the used value if it becomes dead.
4406 void SROA::clobberUse(Use &U) {
4407  Value *OldV = U;
4408  // Replace the use with an undef value.
4409  U = UndefValue::get(OldV->getType());
4410 
4411  // Check for this making an instruction dead. We have to garbage collect
4412  // all the dead instructions to ensure the uses of any alloca end up being
4413  // minimal.
4414  if (Instruction *OldI = dyn_cast<Instruction>(OldV))
4415  if (isInstructionTriviallyDead(OldI)) {
4416  DeadInsts.insert(OldI);
4417  }
4418 }
4419 
4420 /// Analyze an alloca for SROA.
4421 ///
4422 /// This analyzes the alloca to ensure we can reason about it, builds
4423 /// the slices of the alloca, and then hands it off to be split and
4424 /// rewritten as needed.
4425 bool SROA::runOnAlloca(AllocaInst &AI) {
4426  LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
4427  ++NumAllocasAnalyzed;
4428 
4429  // Special case dead allocas, as they're trivial.
4430  if (AI.use_empty()) {
4431  AI.eraseFromParent();
4432  return true;
4433  }
4434  const DataLayout &DL = AI.getModule()->getDataLayout();
4435 
4436  // Skip alloca forms that this analysis can't handle.
4437  if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
4438  DL.getTypeAllocSize(AI.getAllocatedType()) == 0)
4439  return false;
4440 
4441  bool Changed = false;
4442 
4443  // First, split any FCA loads and stores touching this alloca to promote
4444  // better splitting and promotion opportunities.
4445  AggLoadStoreRewriter AggRewriter(DL);
4446  Changed |= AggRewriter.rewrite(AI);
4447 
4448  // Build the slices using a recursive instruction-visiting builder.
4449  AllocaSlices AS(DL, AI);
4450  LLVM_DEBUG(AS.print(dbgs()));
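  // If a pointer into this alloca escapes our analysis, rewriting its uses is
  // not safe; report whether the aggregate rewriting above changed anything.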
4451  if (AS.isEscaped())
4452  return Changed;
4453 
4454  // Delete all the dead users of this alloca before splitting and rewriting it.
4455  for (Instruction *DeadUser : AS.getDeadUsers()) {
4456  // Free up everything used by this instruction.
4457  for (Use &DeadOp : DeadUser->operands())
4458  clobberUse(DeadOp);
4459 
4460  // Now replace the uses of this instruction.
4461  DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType()));
4462 
4463  // And mark it for deletion.
4464  DeadInsts.insert(DeadUser);
4465  Changed = true;
4466  }
4467  for (Use *DeadOp : AS.getDeadOperands()) {
4468  clobberUse(*DeadOp);
4469  Changed = true;
4470  }
4471 
4472  // No slices to split. Leave the dead alloca for a later pass to clean up.
4473  if (AS.begin() == AS.end())
4474  return Changed;
4475 
4476  Changed |= splitAlloca(AI, AS);
4477 
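  // Any PHIs and selects whose loads were found safe to speculate during
  // rewriting are handled now; speculation can make the rewritten allocas
  // promotable on a later iteration.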
4478  LLVM_DEBUG(dbgs() << " Speculating PHIs\n");
4479  while (!SpeculatablePHIs.empty())
4480  speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());
4481 
4482  LLVM_DEBUG(dbgs() << " Speculating Selects\n");
4483  while (!SpeculatableSelects.empty())
4484  speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());
4485 
4486  return Changed;
4487 }
4488 
4489 /// Delete the dead instructions accumulated in this run.
4490 ///
4491 /// Recursively deletes the dead instructions we've accumulated. This is done
4492 /// at the very end to maximize locality of the recursive delete and to
4493 /// minimize the problems of invalidated instruction pointers as such pointers
4494 /// are used heavily in the intermediate stages of the algorithm.
4495 ///
4496 /// We also record the alloca instructions deleted here so that they aren't
4497 /// subsequently handed to mem2reg to promote.
4498 bool SROA::deleteDeadInstructions(
4499  SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {
4500  bool Changed = false;
4501  while (!DeadInsts.empty()) {
4502  Instruction *I = DeadInsts.pop_back_val();
4503  LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
4504 
4505  // If the instruction is an alloca, find the possible dbg.declare connected
4506  // to it, and remove it too. We must do this before calling RAUW or we will
4507  // not be able to find it.
4508  if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
4509  DeletedAllocas.insert(AI);
4510  for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(AI))
4511  OldDII->eraseFromParent();
4512  }
4513 
4514  I->replaceAllUsesWith(UndefValue::get(I->getType()));
4515 
4516  for (Use &Operand : I->operands())
4517  if (Instruction *U = dyn_cast<Instruction>(Operand)) {
4518  // Zero out the operand and see if it becomes trivially dead.
4519  Operand = nullptr;
4520  if (isInstructionTriviallyDead(U))
4521  DeadInsts.insert(U);
4522  }
4523 
4524  ++NumDeleted;
4525  I->eraseFromParent();
4526  Changed = true;
4527  }
4528  return Changed;
4529 }
4530 
4531 /// Promote the allocas, using the best available technique.
4532 ///
4533 /// This attempts to promote whatever allocas have been identified as viable in
4534 /// the PromotableAllocas list. If that list is empty, there is nothing to do.
4535 /// This function returns whether any promotion occurred.
4536 bool SROA::promoteAllocas(Function &F) {
4537  if (PromotableAllocas.empty())
4538  return false;
4539 
4540  NumPromoted += PromotableAllocas.size();
4541 
4542  LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
4543  PromoteMemToReg(PromotableAllocas, *DT, AC);
4544  PromotableAllocas.clear();
4545  return true;
4546 }
4547 
4548 PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT,
4549  AssumptionCache &RunAC) {
4550  LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
4551  C = &F.getContext();
4552  DT = &RunDT;
4553  AC = &RunAC;
4554 
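  // Seed the worklist with every alloca in the entry block; this pass only
  // considers entry-block allocas.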
4555  BasicBlock &EntryBB = F.getEntryBlock();
4556  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
4557  I != E; ++I) {
4558  if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
4559  Worklist.insert(AI);
4560  }
4561 
4562  bool Changed = false;
4563  // A set of deleted alloca instruction pointers which should be removed from
4564  // the list of promotable allocas.
4565  SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
4566 
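  // Iterate to a fixed point: after promoting what we can, any allocas queued
  // on the post-promotion worklist become the next round's worklist.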
4567  do {
4568  while (!Worklist.empty()) {
4569  Changed |= runOnAlloca(*Worklist.pop_back_val());
4570  Changed |= deleteDeadInstructions(DeletedAllocas);
4571 
4572  // Remove the deleted allocas from various lists so that we don't try to
4573  // continue processing them.
4574  if (!DeletedAllocas.empty()) {
4575  auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
4576  Worklist.remove_if(IsInSet);
4577  PostPromotionWorklist.remove_if(IsInSet);
4578  PromotableAllocas.erase(llvm::remove_if(PromotableAllocas, IsInSet),
4579  PromotableAllocas.end());
4580  DeletedAllocas.clear();
4581  }
4582  }
4583 
4584  Changed |= promoteAllocas(F);
4585 
4586  Worklist = PostPromotionWorklist;
4587  PostPromotionWorklist.clear();
4588  } while (!Worklist.empty());
4589 
4590  if (!Changed)
4591  return PreservedAnalyses::all();
4592 
4593  PreservedAnalyses PA;
4594  PA.preserveSet<CFGAnalyses>();
4595  PA.preserve<GlobalsAA>();
4596  return PA;
4597 }
4598 
4599 PreservedAnalyses SROA::run(Function &F, FunctionAnalysisManager &AM) {
4600  return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F),
4601  AM.getResult<AssumptionAnalysis>(F));
4602 }
4603 
4604 /// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
4605 ///
4606 /// This is in the llvm namespace purely to allow it to be a friend of the \c
4607 /// SROA pass.
4608 class llvm::sroa::SROALegacyPass : public FunctionPass {
4609  /// The SROA implementation.
4610  SROA Impl;
4611 
4612 public:
4613  static char ID;
4614 
4615  SROALegacyPass() : FunctionPass(ID) {
4616  initializeSROALegacyPassPass(*PassRegistry::getPassRegistry());
4617  }
4618 
4619  bool runOnFunction(Function &F) override {
4620  if (skipFunction(F))
4621  return false;
4622 
4623  auto PA = Impl.runImpl(
4624  F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
4625  getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
4626  return !PA.areAllPreserved();
4627  }
4628 
4629  void getAnalysisUsage(AnalysisUsage &AU) const override {
4630  AU.addRequired<AssumptionCacheTracker>();
4631  AU.addRequired<DominatorTreeWrapperPass>();
4632  AU.addPreserved<GlobalsAAWrapperPass>();
4633  AU.setPreservesCFG();
4634  }
4635 
4636  StringRef getPassName() const override { return "SROA"; }
4637 };
4638 
4639 char SROALegacyPass::ID = 0;
4640 
4641 FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); }
4642 
4643 INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
4644  "Scalar Replacement Of Aggregates", false, false)
4645 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
4646 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
4647 INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
4648  false, false)