Go to the documentation of this file.
21 #ifndef LLVM_ADT_SPARSEMULTISET_H
22 #define LLVM_ADT_SPARSEMULTISET_H
84 typename KeyFunctorT = identity<unsigned>,
85 typename SparseT = uint8_t>
87 static_assert(std::numeric_limits<SparseT>::is_integer &&
88 !std::numeric_limits<SparseT>::is_signed,
89 "SparseT must be an unsigned integer type");
98 static constexpr
unsigned INVALID = ~0U;
107 bool isTail()
const {
108 return Next == INVALID;
112 bool isTombstone()
const {
113 return Prev == INVALID;
118 bool isValid()
const {
return Prev != INVALID; }
121 using KeyT =
typename KeyFunctorT::argument_type;
124 SparseT *Sparse =
nullptr;
125 unsigned Universe = 0;
126 KeyFunctorT KeyIndexOf;
132 unsigned FreelistIdx = SMSNode::INVALID;
133 unsigned NumFree = 0;
135 unsigned sparseIndex(
const ValueT &Val)
const {
136 assert(ValIndexOf(Val) < Universe &&
137 "Invalid key in set. Did object mutate?");
138 return ValIndexOf(Val);
140 unsigned sparseIndex(
const SMSNode &
N)
const {
return sparseIndex(
N.Data); }
145 bool isHead(
const SMSNode &
D)
const {
146 assert(
D.isValid() &&
"Invalid node for head");
147 return Dense[
D.Prev].isTail();
152 bool isSingleton(
const SMSNode &
N)
const {
153 assert(
N.isValid() &&
"Invalid node for singleton");
160 unsigned addValue(
const ValueT& V,
unsigned Prev,
unsigned Next) {
162 Dense.push_back(SMSNode(V, Prev, Next));
163 return Dense.size() - 1;
167 unsigned Idx = FreelistIdx;
168 unsigned NextFree =
Dense[Idx].Next;
169 assert(
Dense[Idx].isTombstone() &&
"Non-tombstone free?");
171 Dense[Idx] = SMSNode(V, Prev, Next);
172 FreelistIdx = NextFree;
178 void makeTombstone(
unsigned Idx) {
179 Dense[Idx].Prev = SMSNode::INVALID;
180 Dense[Idx].Next = FreelistIdx;
206 assert(
empty() &&
"Can only resize universe on an empty map");
208 if (U >= Universe/4 && U <= Universe)
214 Sparse =
static_cast<SparseT*
>(
safe_calloc(U,
sizeof(SparseT)));
236 : SMS(
P), Idx(
I), SparseIdx(
SI) {}
240 if (Idx == SMSNode::INVALID)
243 assert(Idx < SMS->
Dense.size() &&
"Out of range, non-INVALID Idx?");
248 bool isKeyed()
const {
return SparseIdx < SMS->Universe; }
250 unsigned Prev()
const {
return SMS->Dense[Idx].Prev; }
251 unsigned Next()
const {
return SMS->Dense[Idx].Next; }
253 void setPrev(
unsigned P) { SMS->Dense[Idx].Prev =
P; }
254 void setNext(
unsigned N) { SMS->Dense[Idx].Next =
N; }
258 assert(isKeyed() && SMS->sparseIndex(SMS->Dense[Idx].Data) == SparseIdx &&
259 "Dereferencing iterator of invalid key or index");
261 return SMS->Dense[Idx].Data;
268 if (SMS ==
RHS.SMS && Idx ==
RHS.Idx) {
269 assert((isEnd() || SparseIdx ==
RHS.SparseIdx) &&
270 "Same dense entry, but different keys?");
283 assert(isKeyed() &&
"Decrementing an invalid iterator");
284 assert((isEnd() || !SMS->isHead(SMS->Dense[Idx])) &&
285 "Decrementing head of list");
289 Idx = SMS->findIndex(SparseIdx).Prev();
296 assert(!isEnd() && isKeyed() &&
"Incrementing an invalid/end iterator");
337 assert(NumFree <=
Dense.size() &&
"Out-of-bounds free entries");
338 return Dense.size() - NumFree;
347 FreelistIdx = SMSNode::INVALID;
356 assert(Idx < Universe &&
"Key out of range");
358 for (
unsigned i = Sparse[Idx],
e =
Dense.size();
i <
e;
i += Stride) {
359 const unsigned FoundIdx = sparseIndex(
Dense[
i]);
415 return std::make_pair(
B,
E);
421 unsigned Idx = sparseIndex(Val);
424 unsigned NodeIdx = addValue(Val, SMSNode::INVALID, SMSNode::INVALID);
428 Sparse[Idx] = NodeIdx;
429 Dense[NodeIdx].Prev = NodeIdx;
430 return iterator(
this, NodeIdx, Idx);
434 unsigned HeadIdx =
I.Idx;
435 unsigned TailIdx =
I.Prev();
436 Dense[TailIdx].Next = NodeIdx;
437 Dense[HeadIdx].Prev = NodeIdx;
438 Dense[NodeIdx].Prev = TailIdx;
440 return iterator(
this, NodeIdx, Idx);
468 assert(
I.isKeyed() && !
I.isEnd() && !
Dense[
I.Idx].isTombstone() &&
469 "erasing invalid/end/tombstone iterator");
476 makeTombstone(
I.Idx);
491 if (isSingleton(
N)) {
493 assert(
N.Next == SMSNode::INVALID &&
"Singleton has next?");
494 return iterator(
this, SMSNode::INVALID, ValIndexOf(
N.Data));
499 Sparse[sparseIndex(
N)] =
N.Next;
500 Dense[
N.Next].Prev =
N.Prev;
501 return iterator(
this,
N.Next, ValIndexOf(
N.Data));
517 return iterator(
this,
N.Next, ValIndexOf(
N.Data));
523 #endif // LLVM_ADT_SPARSEMULTISET_H
void setUniverse(unsigned U)
Set the universe size which determines the largest key the set can hold.
This is an optimization pass for GlobalISel generic memory operations.
iterator_base & operator++()
iterator_base operator--(int)
iterator find(const KeyT &Key)
Find an element by its key.
This currently compiles to scalar stack traffic (esp/xmm0 movsd and esp/eax moves ending in ret). We should instead handle this in the dag combiner. This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper node, which DAGCombine can't really do. The code for turning a 4 x load into a single vector load is target independent and should be moved to the dag combiner. The code for turning a 4 x load into a vector load can only handle a direct load from a global or a direct load from the stack; it should be generalized to handle any load from P.
reference operator*() const
pointer operator->() const
void clear()
Clears the set.
LLVM_ATTRIBUTE_RETURNS_NONNULL void * safe_calloc(size_t Count, size_t Sz)
void eraseAll(const KeyT &K)
Erase all elements with the given key.
iterator findIndex(unsigned Idx)
Find an element by its index.
std::ptrdiff_t difference_type
Our iterators are iterators over the collection of objects that share a key.
std::pair< iterator, iterator > RangePair
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
SparseSetValFunctor - Helper class for selecting SparseSetValTraits.
bool empty() const
Returns true if the set is empty.
std::bidirectional_iterator_tag iterator_category
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
An individual mapping from virtual register number to SUnit.
iterator_base & operator--()
Increment and decrement operators.
iterator end()
Returns an iterator pointing past the end of this container.
iterator insert(const ValueT &Val)
Insert a new element at the tail of the subset list.
Fast multiset implementation for objects that can be identified by small unsigned keys.
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
size_type count(const KeyT &Key) const
Returns the number of elements identified by Key.
const_iterator end() const
iterator_base< SparseMultiSet * > iterator
SparseMultiSet & operator=(const SparseMultiSet &)=delete
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
StandardInstrumentations SI(Debug, VerifyEach)
iterator_base operator++(int)
bool operator==(const iterator_base &RHS) const
Comparison operators.
RangePair equal_range(const KeyT &K)
The bounds of the range of items sharing Key K.
bool contains(const KeyT &Key) const
Returns true if this set contains an element identified by Key.
iterator erase(iterator I)
Erases an existing element identified by a valid iterator.
size_type size() const
Returns the number of elements in the set.
const_iterator find(const KeyT &Key) const
iterator getHead(const KeyT &Key)
Returns the head (getHead) or tail (getTail) of the subset list for the given key, or end() if the key has no elements.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
iterator getTail(const KeyT &Key)
Align max(MaybeAlign Lhs, Align Rhs)
iterator_base< const SparseMultiSet * > const_iterator
bool operator!=(const iterator_base &RHS) const