Go to the documentation of this file.
14 #ifndef LLVM_ADT_SMALLVECTOR_H
15 #define LLVM_ADT_SMALLVECTOR_H
25 #include <initializer_list>
30 #include <type_traits>
35 template <
typename IteratorT>
class iterator_range;
62 void *
mallocForGrow(
size_t MinSize,
size_t TSize,
size_t &NewCapacity);
67 void grow_pod(
void *FirstEl,
size_t MinSize,
size_t TSize);
88 typename std::conditional<
sizeof(
T) < 4 &&
sizeof(
void *) >= 8,
uint64_t,
101 template <
typename T,
typename =
void>
109 void *getFirstEl()
const {
110 return const_cast<void *
>(
reinterpret_cast<const void *
>(
111 reinterpret_cast<const char *
>(
this) +
129 this->
BeginX = getFirstEl();
136 std::less<> LessThan;
137 return !LessThan(V,
First) && LessThan(V, Last);
149 std::less<> LessThan;
151 !LessThan(this->
end(), Last);
162 if (NewSize <= this->
size())
163 return Elt < this->
begin() + NewSize;
172 "Attempting to reference an element of the vector in an operation "
173 "that invalidates it");
191 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>,
T *>::value,
204 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>,
T *>::value,
213 size_t NewSize =
This->size() +
N;
217 bool ReferencesStorage =
false;
219 if (!U::TakesParamByValue) {
221 ReferencesStorage =
true;
226 return ReferencesStorage ?
This->begin() +
Index : &Elt;
308 template <
typename T,
bool = (is_trivially_copy_constructible<T>::value) &&
309 (is_trivially_move_constructible<T>::value) &&
310 std::is_trivially_destructible<T>::value>
329 template<
typename It1,
typename It2>
331 std::uninitialized_copy(std::make_move_iterator(
I),
332 std::make_move_iterator(
E), Dest);
337 template<
typename It1,
typename It2>
339 std::uninitialized_copy(
I,
E, Dest);
345 void grow(
size_t MinSize = 0);
350 return static_cast<T *
>(
352 MinSize,
sizeof(
T), NewCapacity));
371 return const_cast<T *
>(
382 std::uninitialized_fill_n(NewElts, NumElts, Elt);
392 ::new ((
void *)(NewElts + this->
size()))
T(std::forward<ArgTypes>(
Args)...);
419 template <
typename T,
bool TriviallyCopyable>
422 T *NewElts = mallocForGrow(MinSize, NewCapacity);
423 moveElementsForGrow(NewElts);
424 takeAllocationForGrow(NewElts, NewCapacity);
428 template <
typename T,
bool TriviallyCopyable>
432 this->uninitialized_move(this->
begin(), this->
end(), NewElts);
435 destroy_range(this->
begin(), this->
end());
439 template <
typename T,
bool TriviallyCopyable>
441 T *NewElts,
size_t NewCapacity) {
443 if (!this->isSmall())
446 this->BeginX = NewElts;
447 this->Capacity = NewCapacity;
454 template <
typename T>
475 template<
typename It1,
typename It2>
483 template<
typename It1,
typename It2>
486 std::uninitialized_copy(
I,
E, Dest);
491 template <
typename T1,
typename T2>
495 T2>::value> * =
nullptr) {
501 memcpy(
reinterpret_cast<void *
>(Dest),
I, (
E -
I) *
sizeof(
T));
517 return const_cast<T *
>(
529 std::uninitialized_fill_n(this->
begin(), NumElts, Elt);
544 memcpy(
reinterpret_cast<void *
>(this->
end()), EltPtr,
sizeof(
T));
553 template <
typename T>
554 class SmallVectorImpl :
public SmallVectorTemplateBase<T> {
555 using SuperClass = SmallVectorTemplateBase<T>;
572 this->destroy_range(this->
begin(), this->
end());
573 if (!this->isSmall())
575 this->BeginX =
RHS.BeginX;
576 this->Size =
RHS.Size;
577 this->Capacity =
RHS.Capacity;
587 if (!this->isSmall())
592 this->destroy_range(this->
begin(), this->
end());
600 template <
bool ForOverwrite>
void resizeImpl(
size_type N) {
601 if (
N == this->
size())
604 if (N < this->
size()) {
610 for (
auto I = this->
end(),
E = this->
begin() + N;
I !=
E; ++
I)
626 assert(this->
size() >= N &&
"Cannot increase size with truncate");
627 this->destroy_range(this->
begin() + N, this->
end());
632 if (
N == this->
size())
635 if (N < this->
size()) {
645 if (this->capacity() <
N)
663 template <
typename in_iter,
664 typename = std::enable_if_t<std::is_convertible<
665 typename std::iterator_traits<in_iter>::iterator_category,
666 std::input_iterator_tag>::value>>
667 void append(in_iter in_start, in_iter in_end) {
668 this->assertSafeToAddRange(in_start, in_end);
669 size_type NumInputs = std::distance(in_start, in_end);
671 this->uninitialized_copy(in_start, in_end, this->
end());
672 this->set_size(this->
size() + NumInputs);
677 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs);
678 std::uninitialized_fill_n(this->
end(), NumInputs, *EltPtr);
679 this->set_size(this->
size() + NumInputs);
682 void append(std::initializer_list<T> IL) {
683 append(IL.begin(), IL.end());
690 if (NumElts > this->capacity()) {
691 this->growAndAssign(NumElts, Elt);
697 if (NumElts > this->
size())
698 std::uninitialized_fill_n(this->
end(), NumElts - this->
size(), Elt);
699 else if (NumElts < this->
size())
700 this->destroy_range(this->
begin() + NumElts, this->
end());
701 this->set_size(NumElts);
707 template <
typename in_iter,
708 typename = std::enable_if_t<std::is_convertible<
709 typename std::iterator_traits<in_iter>::iterator_category,
710 std::input_iterator_tag>::value>>
711 void assign(in_iter in_start, in_iter in_end) {
712 this->assertSafeToReferenceAfterClear(in_start, in_end);
717 void assign(std::initializer_list<T> IL) {
728 assert(this->isReferenceToStorage(CI) &&
"Iterator to erase is out of bounds.");
743 assert(this->isRangeInStorage(
S,
E) &&
"Range to erase is out of bounds.");
749 this->destroy_range(
I, this->
end());
750 this->set_size(
I - this->
begin());
758 std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>,
760 "ArgType must be derived from T!");
762 if (
I == this->
end()) {
763 this->push_back(::std::forward<ArgType>(Elt));
764 return this->
end()-1;
767 assert(this->isReferenceToStorage(
I) &&
"Insertion iterator is out of bounds.");
770 size_t Index =
I - this->
begin();
771 std::remove_reference_t<ArgType> *EltPtr =
772 this->reserveForParamAndGetAddress(Elt);
773 I = this->
begin() + Index;
782 static_assert(!TakesParamByValue ||
std::is_same<ArgType, T>::value,
783 "ArgType must
be 'T' when taking by value!");
784 if (!TakesParamByValue &&
this->isReferenceToRange(EltPtr,
I,
this->
end()))
787 *
I = ::
std::forward<ArgType>(*EltPtr);
793 return insert_one_impl(
I, this->forward_value_param(
std::move(Elt)));
797 return insert_one_impl(
I, this->forward_value_param(Elt));
802 size_t InsertElt =
I - this->
begin();
804 if (I == this->
end()) {
806 return this->
begin()+InsertElt;
809 assert(this->isReferenceToStorage(
I) &&
"Insertion iterator is out of bounds.");
813 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert);
816 I = this->
begin()+InsertElt;
822 if (
size_t(this->
end()-I) >= NumToInsert) {
823 T *OldEnd = this->
end();
824 append(std::move_iterator<iterator>(this->
end() - NumToInsert),
825 std::move_iterator<iterator>(this->
end()));
828 std::move_backward(
I, OldEnd-NumToInsert, OldEnd);
832 if (!TakesParamByValue &&
I <= EltPtr && EltPtr < this->
end())
833 EltPtr += NumToInsert;
835 std::fill_n(
I, NumToInsert, *EltPtr);
843 T *OldEnd = this->
end();
844 this->set_size(this->
size() + NumToInsert);
845 size_t NumOverwritten = OldEnd-
I;
846 this->uninitialized_move(
I, OldEnd, this->
end()-NumOverwritten);
850 if (!TakesParamByValue &&
I <= EltPtr && EltPtr < this->
end())
851 EltPtr += NumToInsert;
854 std::fill_n(
I, NumOverwritten, *EltPtr);
857 std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr);
861 template <
typename ItTy,
862 typename = std::enable_if_t<std::is_convertible<
863 typename std::iterator_traits<ItTy>::iterator_category,
864 std::input_iterator_tag>::value>>
867 size_t InsertElt =
I - this->
begin();
869 if (I == this->
end()) {
871 return this->
begin()+InsertElt;
874 assert(this->isReferenceToStorage(
I) &&
"Insertion iterator is out of bounds.");
877 this->assertSafeToAddRange(
From, To);
879 size_t NumToInsert = std::distance(
From, To);
885 I = this->
begin()+InsertElt;
891 if (
size_t(this->
end()-I) >= NumToInsert) {
892 T *OldEnd = this->
end();
893 append(std::move_iterator<iterator>(this->
end() - NumToInsert),
894 std::move_iterator<iterator>(this->
end()));
897 std::move_backward(
I, OldEnd-NumToInsert, OldEnd);
907 T *OldEnd = this->
end();
908 this->set_size(this->
size() + NumToInsert);
909 size_t NumOverwritten = OldEnd-
I;
910 this->uninitialized_move(
I, OldEnd, this->
end()-NumOverwritten);
913 for (
T *J =
I; NumOverwritten > 0; --NumOverwritten) {
919 this->uninitialized_copy(
From, To, OldEnd);
924 insert(
I, IL.begin(), IL.end());
929 return this->growAndEmplaceBack(std::forward<ArgTypes>(
Args)...);
931 ::new ((
void *)this->
end())
T(std::forward<ArgTypes>(
Args)...);
932 this->set_size(this->
size() + 1);
941 if (this->
size() != RHS.size())
return false;
945 return !(*
this ==
RHS);
949 return std::lexicographical_compare(this->
begin(), this->
end(),
950 RHS.begin(),
RHS.end());
957 template <
typename T>
959 if (
this == &
RHS)
return;
962 if (!this->isSmall() && !
RHS.isSmall()) {
972 size_t NumShared = this->
size();
973 if (NumShared >
RHS.size()) NumShared =
RHS.size();
978 if (this->
size() > RHS.size()) {
979 size_t EltDiff = this->
size() - RHS.size();
980 this->uninitialized_copy(this->
begin()+NumShared, this->
end(), RHS.end());
981 RHS.set_size(
RHS.size() + EltDiff);
982 this->destroy_range(this->
begin()+NumShared, this->
end());
983 this->set_size(NumShared);
984 }
else if (
RHS.size() >
this->size()) {
985 size_t EltDiff =
RHS.size() - this->
size();
986 this->uninitialized_copy(
RHS.begin()+NumShared,
RHS.end(),
this->end());
987 this->set_size(this->
size() + EltDiff);
988 this->destroy_range(
RHS.begin()+NumShared,
RHS.end());
989 RHS.set_size(NumShared);
993 template <
typename T>
997 if (
this == &
RHS)
return *
this;
1001 size_t RHSSize =
RHS.size();
1002 size_t CurSize = this->
size();
1003 if (CurSize >= RHSSize) {
1009 NewEnd = this->
begin();
1012 this->destroy_range(NewEnd, this->
end());
1015 this->set_size(RHSSize);
1022 if (this->capacity() < RHSSize) {
1026 this->grow(RHSSize);
1027 }
else if (CurSize) {
1033 this->uninitialized_copy(
RHS.begin()+CurSize,
RHS.end(),
1034 this->begin()+CurSize);
1037 this->set_size(RHSSize);
1041 template <
typename T>
1044 if (
this == &
RHS)
return *
this;
1047 if (!
RHS.isSmall()) {
1054 size_t RHSSize =
RHS.size();
1055 size_t CurSize = this->
size();
1056 if (CurSize >= RHSSize) {
1063 this->destroy_range(NewEnd, this->
end());
1064 this->set_size(RHSSize);
1076 if (this->capacity() < RHSSize) {
1080 this->grow(RHSSize);
1081 }
else if (CurSize) {
1087 this->uninitialized_move(
RHS.begin()+CurSize,
RHS.end(),
1088 this->begin()+CurSize);
1091 this->set_size(RHSSize);
1099 template <
typename T,
unsigned N>
1101 alignas(
T)
char InlineElts[
N *
sizeof(T)];
1127 static constexpr
size_t kPreferredSmallVectorSizeof = 64;
1153 "You are trying to use a default number of inlined elements for "
1154 "`SmallVector<T>` but `sizeof(T)` is really big! Please use an "
1155 "explicit number of inlined elements with `SmallVector<T, N>` to make "
1156 "sure you really want that much inline storage.");
1160 static constexpr
size_t PreferredInlineBytes =
1162 static constexpr
size_t NumElementsThatFit = PreferredInlineBytes /
sizeof(
T);
1163 static constexpr
size_t value =
1164 NumElementsThatFit == 0 ? 1 : NumElementsThatFit;
1183 template <
typename T,
1192 this->destroy_range(this->
begin(), this->
end());
1200 template <
typename ItTy,
1201 typename = std::enable_if_t<std::is_convertible<
1202 typename std::iterator_traits<ItTy>::iterator_category,
1203 std::input_iterator_tag>::value>>
1208 template <
typename RangeTy>
1248 this->destroy_range(this->
begin(), this->
end());
1267 template <
typename T,
unsigned N>
1269 return X.capacity_in_bytes();
1272 template <
typename RangeType>
1274 typename std::remove_const<
typename std::remove_reference<
1280 template <
unsigned Size,
typename R>
1284 template <
typename R>
1285 SmallVector<ValueTypeFromRangeType<R>,
1286 CalculateSmallVectorDefaultInlinedElements<
1287 ValueTypeFromRangeType<R>>::value>
1297 template<
typename T>
1304 template<
typename T,
unsigned N>
1312 #endif // LLVM_ADT_SMALLVECTOR_H
const_reference operator[](size_type idx) const
bool isReferenceToStorage(const void *V) const
Return true if V is an internal reference to this vector.
static T && forward_value_param(T &&V)
bool isReferenceToRange(const void *V, const void *First, const void *Last) const
Return true if V is an internal reference to the given range.
SmallVectorImpl(unsigned N)
bool isRangeInStorage(const void *First, const void *Last) const
Return true if First and Last form a valid (possibly empty) range in this vector's storage.
This is an optimization pass for GlobalISel generic memory operations.
iterator erase(const_iterator CI)
bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize)
Return true unless Elt will be invalidated by resizing the vector to NewSize.
SmallVector & operator=(SmallVectorImpl< T > &&RHS)
SmallVector(SmallVectorImpl< T > &&RHS)
SmallVector(std::initializer_list< T > IL)
void append(const SmallVectorImpl &RHS)
void append(size_type NumInputs, ValueParamT Elt)
Append NumInputs copies of Elt to the end.
void append(std::initializer_list< T > IL)
void assign(std::initializer_list< T > IL)
const T * reserveForParamAndGetAddress(const T &Elt, size_t N=1)
Reserve enough space to add one element, and return the updated element pointer in case it was a reference to the storage.
const_iterator begin() const
static const T * reserveForParamAndGetAddressImpl(U *This, const T &Elt, size_t N)
Reserve enough space to add one element, and return the updated element pointer in case it was a reference to the storage.
#define offsetof(TYPE, MEMBER)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
static ValueParamT forward_value_param(ValueParamT V)
Copy V or return a reference, depending on ValueParamT.
void grow(size_t MinSize=0)
Double the size of the allocated memory, guaranteeing space for at least one more element or MinSize more elements if specified.
void grow_pod(void *FirstEl, size_t MinSize, size_t TSize)
This is an implementation of the grow() method which only works on POD-like data types and is out of line to reduce code duplication.
iterator insert(iterator I, ItTy From, ItTy To)
#define LLVM_GSL_OWNER
LLVM_GSL_OWNER - Apply this to owning classes like SmallVector to enable lifetime warnings.
static void destroy_range(T *S, T *E)
void assign(const SmallVectorImpl &RHS)
static constexpr bool TakesParamByValue
BitVector::size_type capacity_in_bytes(const BitVector &X)
void moveElementsForGrow(T *NewElts)
Move existing elements over to the new allocation NewElts, the middle section of grow().
const_iterator end(StringRef path)
Get end iterator over path.
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
typename std::remove_const< typename std::remove_reference< decltype(*std::begin(std::declval< RangeType & >()))>::type >::type ValueTypeFromRangeType
Storage for the SmallVector elements.
const T & const_reference
void resetToSmall()
Put this vector in a state of being small.
size_type max_size() const
iterator erase(const_iterator CS, const_iterator CE)
DiagnosticInfoOptimizationBase::Argument NV
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
LLVM_NODISCARD T pop_back_val()
bool isSmall() const
Return true if this is a smallvector which has not had dynamic memory allocated for it.
std::reverse_iterator< iterator > reverse_iterator
LLVM_NODISCARD bool empty() const
bool operator>(const SmallVectorImpl &RHS) const
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM ID Predecessors according to mbb< bb27, 0x8b0a7c0 > Note ADDri is not a two address instruction its result reg1037 is an operand of the PHI node in bb76 and its operand reg1039 is the result of the PHI node We should treat it as a two address code and make sure the ADDri is scheduled after any node that reads reg1039 Use info(i.e. register scavenger) to assign it a free register to allow reuse the collector could move the objects and invalidate the derived pointer This is bad enough in the first but safe points can crop up unpredictably **array_addr i32 n y store obj * new
reverse_iterator rbegin()
void growAndAssign(size_t NumElts, const T &Elt)
static const T & forward_value_param(const T &V)
void truncate(size_type N)
Like resize, but requires that N is less than size().
static constexpr size_t SizeTypeMax()
The maximum value of the Size_T used.
const_pointer data() const
Return a pointer to the vector's buffer, even if empty().
SmallVector(const SmallVector &RHS)
static void uninitialized_move(It1 I, It1 E, It2 Dest)
Move the range [I, E) into the uninitialized memory starting with "Dest", constructing elements as needed.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt)
T * mallocForGrow(size_t MinSize, size_t &NewCapacity)
Create a new allocation big enough for MinSize and pass back its size in NewCapacity.
void grow(size_t MinSize=0)
Grow the allocated memory (without initializing new elements), doubling the size of the allocated memory. Guarantees space for at least one more element, or MinSize more elements if specified.
void assign(in_iter in_start, in_iter in_end)
SmallVector(const iterator_range< RangeTy > &R)
into llvm powi allowing the code generator to produce balanced multiplication trees First
SmallVectorBase(void *FirstEl, size_t TotalCapacity)
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can be
void push_back(ValueParamT Elt)
const_reverse_iterator rbegin() const
SmallVector(ItTy S, ItTy E)
T * reserveForParamAndGetAddress(T &Elt, size_t N=1)
Reserve enough space to add one element, and return the updated element pointer in case it was a reference to the storage.
SmallVector & operator=(SmallVector &&RHS)
static void uninitialized_move(It1 I, It1 E, It2 Dest)
Move the range [I, E) onto the uninitialized memory starting with "Dest", constructing elements into it.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
typename std::conditional< sizeof(T)< 4 &&sizeof(void *) >=8, uint64_t, uint32_t >::type SmallVectorSizeType
const_iterator end() const
void resize_for_overwrite(size_type N)
Like resize, but T is POD, the new values won't be initialized.
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vector. This is useful, for example, when you want to iterate a range and then sort the results.
This is the part of SmallVectorTemplateBase which does not depend on whether the type T is a POD.
SmallVectorImpl & operator=(const SmallVectorImpl &RHS)
bool operator==(const SmallVectorImpl &RHS) const
void set_size(size_t N)
Set the array size to N, which the current array must have enough capacity for.
void push_back(const T &Elt)
AMD64 Optimization Manual has some nice information about optimizing integer multiplication by a constant How much of it applies to Intel s X86 implementation There are definite trade offs to xmm0 cvttss2siq rdx jb L3 subss xmm0 rax cvttss2siq rdx xorq rdx rax ret instead of xmm1 cvttss2siq rcx movaps xmm2 subss xmm2 cvttss2siq rax rdx xorq rax ucomiss xmm0 cmovb rax ret Seems like the jb branch has high likelihood of being taken It would have saved a few instructions It s not possible to reference and DH registers in an instruction requiring REX prefix divb and mulb both produce results in AH If isel emits a CopyFromReg which gets turned into a movb and that can be allocated a r8b r15b To get around isel emits a CopyFromReg from AX and then right shift it down by and truncate it It s not pretty but it works We need some register allocation magic to make the hack go which would often require a callee saved register Callees usually need to keep this value live for most of their body so it doesn t add a significant burden on them We currently implement this in however this is suboptimal because it means that it would be quite awkward to implement the optimization for callers A better implementation would be to relax the LLVM IR rules for sret arguments to allow a function with an sret argument to have a non void return type
static void uninitialized_copy(T1 *I, T1 *E, T2 *Dest, std::enable_if_t< std::is_same< typename std::remove_const< T1 >::type, T2 >::value > *=nullptr)
Copy the range [I, E) onto the uninitialized memory starting with "Dest", constructing elements into it.
reference operator[](size_type idx)
void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize)
Check whether Elt will be invalidated by resizing the vector to NewSize.
compiles ldr LCPI1_0 ldr ldr mov lsr tst moveq r1 ldr LCPI1_1 and r0 bx lr It would be better to do something like to fold the shift into the conditional move
void grow_pod(size_t MinSize, size_t TSize)
T & growAndEmplaceBack(ArgTypes &&... Args)
typename SuperClass::const_iterator const_iterator
void assignRemote(SmallVectorImpl &&RHS)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
<%struct.s * > cast struct s *S to sbyte *< sbyte * > sbyte uint cast struct s *agg result to sbyte *< sbyte * > sbyte uint cast struct s *memtmp to sbyte *< sbyte * > sbyte uint ret void llc ends up issuing two memcpy or custom lower memcpy(of small size) to be ldmia/stmia. I think option 2 is better but the current register allocator cannot allocate a chunk of registers at a time. A feasible temporary solution is to use specific physical registers at the lowering time for small(<
void resize(size_type N, ValueParamT NV)
const_reverse_iterator rend() const
void takeAllocationForGrow(T *NewElts, size_t NewCapacity)
Transfer ownership of the allocation, finishing up grow().
pointer data()
Return a pointer to the vector's buffer, even if empty().
T & growAndEmplaceBack(ArgTypes &&... Args)
size_type size_in_bytes() const
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
SmallVector(SmallVector &&RHS)
void assertSafeToReferenceAfterClear(ItTy, ItTy)
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Analysis the ScalarEvolution expression for r is this
const_reference front() const
if(llvm_vc STREQUAL "") set(fake_version_inc "$
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
bool operator!=(const SmallVectorImpl &RHS) const
void assertSafeToAdd(const void *Elt, size_t N=1)
Check whether Elt will be invalidated by increasing the size of the vector by N.
size_t capacity_in_bytes() const
void growAndAssign(size_t NumElts, T Elt)
std::reverse_iterator< const_iterator > const_reverse_iterator
void swap(SmallVectorImpl &RHS)
void assign(size_type NumElts, ValueParamT Elt)
static void uninitialized_copy(It1 I, It1 E, It2 Dest)
Copy the range [I, E) onto the uninitialized memory starting with "Dest", constructing elements as needed.
Figure out the offset of the first element.
void assertSafeToReferenceAfterClear(const T *From, const T *To)
Check whether any part of the range will be invalidated by clearing.
static void destroy_range(T *, T *)
#define LLVM_NODISCARD
LLVM_NODISCARD - Warn if a type or return value is discarded.
iterator insert(iterator I, const T &Elt)
typename SuperClass::ValueParamT ValueParamT
bool operator<=(const SmallVectorImpl &RHS) const
void assertSafeToAddRange(const T *From, const T *To)
Check whether any part of the range will be invalidated by growing.
the resulting code requires compare and branches when and if the revised code is with conditional branches instead of More there is a byte word extend before each where there should be only and the condition codes are not remembered when the same two values are compared twice More LSR enhancements i8 and i32 load store addressing modes are identical This
Helper class for calculating the default number of inline elements for SmallVector<T>.
void pop_back_n(size_type NumItems)
SmallVectorTemplateCommon(size_t Size)
T * reserveForParamAndGetAddress(T &Elt, size_t N=1)
Reserve enough space to add one element, and return the updated element pointer in case it was a reference to the storage.
SmallVector & operator=(std::initializer_list< T > IL)
typename SuperClass::iterator iterator
A range adaptor for a pair of iterators.
#define LLVM_LIKELY(EXPR)
const T * reserveForParamAndGetAddress(const T &Elt, size_t N=1)
Reserve enough space to add one element, and return the updated element pointer in case it was a reference to the storage.
typename SuperClass::size_type size_type
SmallVectorTemplateBase(size_t Size)
SmallVector(size_t Size, const T &Value=T())
bool operator>=(const SmallVectorImpl &RHS) const
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void insert(iterator I, std::initializer_list< T > IL)
SmallVectorTemplateBase(size_t Size)
typename std::conditional< TakesParamByValue, T, const T & >::type ValueParamT
Either const T& or T, depending on whether it's cheap enough to take parameters by value.
BlockVerifier::State From
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
#define LLVM_UNLIKELY(EXPR)
char Base[sizeof(SmallVectorBase< SmallVectorSizeType< T >>)]
void reserve(size_type N)
This is all the stuff common to all SmallVectors.
void * mallocForGrow(size_t MinSize, size_t TSize, size_t &NewCapacity)
This is a helper for grow() that's out of line to reduce code duplication.
we should consider alternate ways to model stack dependencies Lots of things could be done in WebAssemblyTargetTransformInfo cpp there are numerous optimization related hooks that can be overridden in WebAssemblyTargetLowering Instead of the OptimizeReturned which should consider preserving the returned attribute through to MachineInstrs and extending the MemIntrinsicResults pass to do this optimization on calls too That would also let the WebAssemblyPeephole pass clean up dead defs for such as it does for stores Consider implementing and or getMachineCombinerPatterns Find a clean way to fix the problem which leads to the Shrink Wrapping pass being run after the WebAssembly PEI pass When setting multiple variables to the same we currently get code like const It could be done with a smaller encoding like local tee $pop5 local copy
SmallVector & operator=(const SmallVector &RHS)
bool operator<(const SmallVectorImpl &RHS) const
LLVM Value Representation.
typename SuperClass::reference reference
static void uninitialized_copy(It1 I, It1 E, It2 Dest)
Copy the range [I, E) onto the uninitialized memory starting with "Dest", constructing elements into ...
SmallVectorTemplateBase&lt;TriviallyCopyable = false&gt; - This is where we put method implementations that are designed to work with non-trivial T's.
void assertSafeToAddRange(ItTy, ItTy)
reference emplace_back(ArgTypes &&... Args)
iterator insert(iterator I, T &&Elt)
const_reference back() const