LLVM 20.0.0git
DenseMap.h
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/DenseMapInfo.h"
18#include "llvm/ADT/EpochTracker.h"
19#include "llvm/Support/AlignOf.h"
20#include "llvm/Support/Compiler.h"
21#include "llvm/Support/MathExtras.h"
22#include "llvm/Support/MemAlloc.h"
23#include "llvm/Support/ReverseIteration.h"
24#include "llvm/Support/type_traits.h"
25#include <algorithm>
26#include <cassert>
27#include <cstddef>
28#include <cstring>
29#include <initializer_list>
30#include <iterator>
31#include <new>
32#include <type_traits>
33#include <utility>
34
35namespace llvm {
36
37namespace detail {
38
39// We extend a pair to allow users to override the bucket type with their own
40// implementation without requiring two members.
41template <typename KeyT, typename ValueT>
42struct DenseMapPair : public std::pair<KeyT, ValueT> {
43 using std::pair<KeyT, ValueT>::pair;
44
45 KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
46 const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
47 ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
48 const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
49};
50
51} // end namespace detail
52
53template <typename KeyT, typename ValueT,
54 typename KeyInfoT = DenseMapInfo<KeyT>,
55 typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
56 bool IsConst = false>
57class DenseMapIterator;
58
59template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
60 typename BucketT>
61class DenseMapBase : public DebugEpochBase {
62 template <typename T>
63 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
64
65public:
66 using size_type = unsigned;
67 using key_type = KeyT;
68 using mapped_type = ValueT;
69 using value_type = BucketT;
70
71 using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
72 using const_iterator =
73 DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
74
75 inline iterator begin() {
76 // When the map is empty, avoid the overhead of advancing/retreating past
77 // empty buckets.
78 if (empty())
79 return end();
80 if (shouldReverseIterate<KeyT>())
81 return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
82 return makeIterator(getBuckets(), getBucketsEnd(), *this);
83 }
84 inline iterator end() {
85 return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
86 }
87 inline const_iterator begin() const {
88 if (empty())
89 return end();
90 if (shouldReverseIterate<KeyT>())
91 return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
92 return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
93 }
94 inline const_iterator end() const {
95 return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
96 }
97
98 [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
99 unsigned size() const { return getNumEntries(); }
100
101 /// Grow the DenseMap so that it can contain at least \p NumEntries items
102 /// before resizing again.
103 void reserve(size_type NumEntries) {
104 auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
105 incrementEpoch();
106 if (NumBuckets > getNumBuckets())
107 grow(NumBuckets);
108 }
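// Editorial usage sketch (not part of the original header): reserve() lets a
// caller size the table once before a bulk insertion so grow() is not hit
// repeatedly inside the loop. The 'Deps' range below is hypothetical.
//
//   llvm::DenseMap<unsigned, llvm::StringRef> NameByID;
//   NameByID.reserve(Deps.size());       // at most one grow() up front
//   for (const auto &D : Deps)           // no rehashing during this loop
//     NameByID.insert({D.ID, D.Name});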
109
110 void clear() {
111 incrementEpoch();
112 if (getNumEntries() == 0 && getNumTombstones() == 0) return;
113
114 // If the capacity of the array is huge, and the # elements used is small,
115 // shrink the array.
116 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
117 shrink_and_clear();
118 return;
119 }
120
121 const KeyT EmptyKey = getEmptyKey();
122 if (std::is_trivially_destructible<ValueT>::value) {
123 // Use a simpler loop when values don't need destruction.
124 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
125 P->getFirst() = EmptyKey;
126 } else {
127 const KeyT TombstoneKey = getTombstoneKey();
128 unsigned NumEntries = getNumEntries();
129 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
130 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
131 if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
132 P->getSecond().~ValueT();
133 --NumEntries;
134 }
135 P->getFirst() = EmptyKey;
136 }
137 }
138 assert(NumEntries == 0 && "Node count imbalance!");
139 (void)NumEntries;
140 }
141 setNumEntries(0);
142 setNumTombstones(0);
143 }
144
145 /// Return true if the specified key is in the map, false otherwise.
146 bool contains(const_arg_type_t<KeyT> Val) const {
147 return doFind(Val) != nullptr;
148 }
149
150 /// Return 1 if the specified key is in the map, 0 otherwise.
151 size_type count(const_arg_type_t<KeyT> Val) const {
152 return contains(Val) ? 1 : 0;
153 }
154
155 iterator find(const_arg_type_t<KeyT> Val) {
156 if (BucketT *Bucket = doFind(Val))
157 return makeIterator(
158 Bucket, shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
159 *this, true);
160 return end();
161 }
162 const_iterator find(const_arg_type_t<KeyT> Val) const {
163 if (const BucketT *Bucket = doFind(Val))
164 return makeConstIterator(
165 Bucket, shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
166 *this, true);
167 return end();
168 }
169
170 /// Alternate version of find() which allows a different, and possibly
171 /// less expensive, key type.
172 /// The DenseMapInfo is responsible for supplying methods
173 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
174 /// type used.
175 template<class LookupKeyT>
176 iterator find_as(const LookupKeyT &Val) {
177 if (BucketT *Bucket = doFind(Val))
178 return makeIterator(
179 Bucket, shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
180 *this, true);
181 return end();
182 }
183 template<class LookupKeyT>
184 const_iterator find_as(const LookupKeyT &Val) const {
185 if (const BucketT *Bucket = doFind(Val))
186 return makeConstIterator(
187 Bucket, shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
188 *this, true);
189 return end();
190 }
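// Editorial sketch of the find_as() contract (hypothetical 'Symbol' and
// 'SymbolRef' types, not part of LLVM): the map's DenseMapInfo must provide
// getHashValue/isEqual overloads for the cheap lookup type in addition to the
// usual four members, and the two hash functions must agree for equal keys.
//
//   struct SymbolMapInfo {
//     static Symbol getEmptyKey();
//     static Symbol getTombstoneKey();
//     static unsigned getHashValue(const Symbol &S);
//     static bool isEqual(const Symbol &A, const Symbol &B);
//     // Extra overloads used by find_as() / insert_as():
//     static unsigned getHashValue(const SymbolRef &R);
//     static bool isEqual(const SymbolRef &R, const Symbol &S);
//   };
//
//   llvm::DenseMap<Symbol, unsigned, SymbolMapInfo> Table;
//   auto It = Table.find_as(SymbolRef("main")); // no temporary Symbol built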
191
192 /// lookup - Return the entry for the specified key, or a default
193 /// constructed value if no such entry exists.
194 ValueT lookup(const_arg_type_t<KeyT> Val) const {
195 if (const BucketT *Bucket = doFind(Val))
196 return Bucket->getSecond();
197 return ValueT();
198 }
199
200 /// at - Return the entry for the specified key, or abort if no such
201 /// entry exists.
202 const ValueT &at(const_arg_type_t<KeyT> Val) const {
203 auto Iter = this->find(std::move(Val));
204 assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
205 return Iter->second;
206 }
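// Editorial usage sketch contrasting the read-only accessors (values are
// hypothetical): contains() answers membership, find() yields an iterator,
// lookup() returns a copy or a default-constructed ValueT, and at() asserts
// that the key is present.
//
//   llvm::DenseMap<int, std::string> M = {{1, "one"}};
//   bool Has = M.contains(1);           // true
//   auto It = M.find(2);                // It == M.end()
//   std::string S = M.lookup(2);        // "" (default-constructed)
//   const std::string &R = M.at(1);     // would assert if 1 were absent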
207
208 // Inserts key,value pair into the map if the key isn't already in the map.
209 // If the key is already in the map, it returns false and doesn't update the
210 // value.
211 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
212 return try_emplace(KV.first, KV.second);
213 }
214
215 // Inserts key,value pair into the map if the key isn't already in the map.
216 // If the key is already in the map, it returns false and doesn't update the
217 // value.
218 std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
219 return try_emplace(std::move(KV.first), std::move(KV.second));
220 }
221
222 // Inserts key,value pair into the map if the key isn't already in the map.
223 // The value is constructed in-place if the key is not in the map, otherwise
224 // it is not moved.
225 template <typename... Ts>
226 std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
227 BucketT *TheBucket;
228 if (LookupBucketFor(Key, TheBucket))
229 return std::make_pair(makeIterator(TheBucket,
230 shouldReverseIterate<KeyT>()
231 ? getBuckets()
232 : getBucketsEnd(),
233 *this, true),
234 false); // Already in map.
235
236 // Otherwise, insert the new element.
237 TheBucket =
238 InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
239 return std::make_pair(makeIterator(TheBucket,
240 shouldReverseIterate<KeyT>()
241 ? getBuckets()
242 : getBucketsEnd(),
243 *this, true),
244 true);
245 }
246
247 // Inserts key,value pair into the map if the key isn't already in the map.
248 // The value is constructed in-place if the key is not in the map, otherwise
249 // it is not moved.
250 template <typename... Ts>
251 std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
252 BucketT *TheBucket;
253 if (LookupBucketFor(Key, TheBucket))
254 return std::make_pair(makeIterator(TheBucket,
255 shouldReverseIterate<KeyT>()
256 ? getBuckets()
257 : getBucketsEnd(),
258 *this, true),
259 false); // Already in map.
260
261 // Otherwise, insert the new element.
262 TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
263 return std::make_pair(makeIterator(TheBucket,
264 shouldReverseIterate<KeyT>()
265 ? getBuckets()
266 : getBucketsEnd(),
267 *this, true),
268 true);
269 }
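// Editorial usage sketch (hypothetical values): insert() and try_emplace()
// both return a std::pair of an iterator to the element and a bool that is
// true only when a new element was created; neither overwrites an existing
// mapping.
//
//   llvm::DenseMap<int, std::string> M;
//   auto R1 = M.try_emplace(7, "seven");   // R1.second == true
//   auto R2 = M.try_emplace(7, "SEVEN");   // R2.second == false
//   // R2.first->second is still "seven"; the existing value was kept.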
270
271 /// Alternate version of insert() which allows a different, and possibly
272 /// less expensive, key type.
273 /// The DenseMapInfo is responsible for supplying methods
274 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
275 /// type used.
276 template <typename LookupKeyT>
277 std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
278 const LookupKeyT &Val) {
279 BucketT *TheBucket;
280 if (LookupBucketFor(Val, TheBucket))
281 return std::make_pair(makeIterator(TheBucket,
282 shouldReverseIterate<KeyT>()
283 ? getBuckets()
284 : getBucketsEnd(),
285 *this, true),
286 false); // Already in map.
287
288 // Otherwise, insert the new element.
289 TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
290 std::move(KV.second), Val);
291 return std::make_pair(makeIterator(TheBucket,
292 shouldReverseIterate<KeyT>()
293 ? getBuckets()
294 : getBucketsEnd(),
295 *this, true),
296 true);
297 }
298
299 /// insert - Range insertion of pairs.
300 template<typename InputIt>
301 void insert(InputIt I, InputIt E) {
302 for (; I != E; ++I)
303 insert(*I);
304 }
305
306 template <typename V>
307 std::pair<iterator, bool> insert_or_assign(const KeyT &Key, V &&Val) {
308 auto Ret = try_emplace(Key, std::forward<V>(Val));
309 if (!Ret.second)
310 Ret.first->second = std::forward<V>(Val);
311 return Ret;
312 }
313
314 template <typename V>
315 std::pair<iterator, bool> insert_or_assign(KeyT &&Key, V &&Val) {
316 auto Ret = try_emplace(std::move(Key), std::forward<V>(Val));
317 if (!Ret.second)
318 Ret.first->second = std::forward<V>(Val);
319 return Ret;
320 }
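// Editorial usage sketch (hypothetical values): unlike try_emplace(),
// insert_or_assign() overwrites the mapped value when the key already
// exists; the returned bool still reports whether an insertion happened.
//
//   llvm::DenseMap<int, std::string> M;
//   M.insert_or_assign(7, "seven");            // inserted
//   auto R = M.insert_or_assign(7, "SEVEN");   // R.second == false
//   // R.first->second is now "SEVEN"; the old value was replaced.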
321
322 /// Returns the value associated with the key in the map if it exists. If it
323 /// does not exist, emplaces a default value for the key and returns a
324 /// reference to the newly created value.
325 ValueT &getOrInsertDefault(KeyT &&Key) {
326 return try_emplace(Key).first->second;
327 }
328
329 /// Returns the value associated with the key in the map if it exists. If it
330 /// does not exist, emplaces a default value for the key and returns a
331 /// reference to the newly created value.
332 ValueT &getOrInsertDefault(const KeyT &Key) {
333 return try_emplace(Key).first->second;
334 }
335
336 bool erase(const KeyT &Val) {
337 BucketT *TheBucket = doFind(Val);
338 if (!TheBucket)
339 return false; // not in map.
340
341 TheBucket->getSecond().~ValueT();
342 TheBucket->getFirst() = getTombstoneKey();
343 decrementNumEntries();
344 incrementNumTombstones();
345 return true;
346 }
347 void erase(iterator I) {
348 BucketT *TheBucket = &*I;
349 TheBucket->getSecond().~ValueT();
350 TheBucket->getFirst() = getTombstoneKey();
351 decrementNumEntries();
352 incrementNumTombstones();
353 }
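// Editorial usage sketch: erase() by key reports whether anything was
// removed, while erasing through an iterator is unchecked. Either way the
// slot becomes a tombstone rather than being reclaimed immediately; heavy
// erase/insert churn is cleaned up later by the growth heuristics.
//
//   llvm::DenseMap<int, int> M = {{1, 10}, {2, 20}};
//   bool Removed = M.erase(3);            // false, key not present
//   if (auto It = M.find(1); It != M.end())
//     M.erase(It);                        // removes the {1, 10} entry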
354
355 value_type& FindAndConstruct(const KeyT &Key) {
356 BucketT *TheBucket;
357 if (LookupBucketFor(Key, TheBucket))
358 return *TheBucket;
359
360 return *InsertIntoBucket(TheBucket, Key);
361 }
362
363 ValueT &operator[](const KeyT &Key) {
364 return FindAndConstruct(Key).second;
365 }
366
367 value_type& FindAndConstruct(KeyT &&Key) {
368 BucketT *TheBucket;
369 if (LookupBucketFor(Key, TheBucket))
370 return *TheBucket;
371
372 return *InsertIntoBucket(TheBucket, std::move(Key));
373 }
374
375 ValueT &operator[](KeyT &&Key) {
376 return FindAndConstruct(std::move(Key)).second;
377 }
378
379 /// isPointerIntoBucketsArray - Return true if the specified pointer points
380 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
381 /// value in the DenseMap).
382 bool isPointerIntoBucketsArray(const void *Ptr) const {
383 return Ptr >= getBuckets() && Ptr < getBucketsEnd();
384 }
385
386 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
387 /// array. In conjunction with the previous method, this can be used to
388 /// determine whether an insertion caused the DenseMap to reallocate.
389 const void *getPointerIntoBucketsArray() const { return getBuckets(); }
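// Editorial usage sketch of the reallocation check described above ('M' and
// 'NewKey' are hypothetical): capture the opaque bucket pointer, insert, and
// compare afterwards to detect whether the table was reallocated.
//
//   const void *Before = M.getPointerIntoBucketsArray();
//   M.try_emplace(NewKey);
//   bool Reallocated = M.getPointerIntoBucketsArray() != Before;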
390
391protected:
392 DenseMapBase() = default;
393
394 void destroyAll() {
395 if (getNumBuckets() == 0) // Nothing to do.
396 return;
397
398 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
399 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
400 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
401 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
402 P->getSecond().~ValueT();
403 P->getFirst().~KeyT();
404 }
405 }
406
407 void initEmpty() {
408 setNumEntries(0);
409 setNumTombstones(0);
410
411 assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
412 "# initial buckets must be a power of two!");
413 const KeyT EmptyKey = getEmptyKey();
414 for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
415 ::new (&B->getFirst()) KeyT(EmptyKey);
416 }
417
418 /// Returns the number of buckets to allocate to ensure that the DenseMap can
419 /// accommodate \p NumEntries without needing to grow().
420 unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
421 // Ensure that "NumEntries * 4 < NumBuckets * 3"
422 if (NumEntries == 0)
423 return 0;
424 // +1 is required because of the strict equality.
425 // For example if NumEntries is 48, we need to return 128.
426 return NextPowerOf2(NumEntries * 4 / 3 + 1);
427 }
428
429 void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
430 initEmpty();
431
432 // Insert all the old elements.
433 const KeyT EmptyKey = getEmptyKey();
434 const KeyT TombstoneKey = getTombstoneKey();
435 for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
436 if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
437 !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
438 // Insert the key/value into the new table.
439 BucketT *DestBucket;
440 bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
441 (void)FoundVal; // silence warning.
442 assert(!FoundVal && "Key already in new map?");
443 DestBucket->getFirst() = std::move(B->getFirst());
444 ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
445 incrementNumEntries();
446
447 // Free the value.
448 B->getSecond().~ValueT();
449 }
450 B->getFirst().~KeyT();
451 }
452 }
453
454 template <typename OtherBaseT>
455 void copyFrom(
456 const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
457 assert(&other != this);
458 assert(getNumBuckets() == other.getNumBuckets());
459
460 setNumEntries(other.getNumEntries());
461 setNumTombstones(other.getNumTombstones());
462
463 if (std::is_trivially_copyable<KeyT>::value &&
464 std::is_trivially_copyable<ValueT>::value)
465 memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
466 getNumBuckets() * sizeof(BucketT));
467 else
468 for (size_t i = 0; i < getNumBuckets(); ++i) {
469 ::new (&getBuckets()[i].getFirst())
470 KeyT(other.getBuckets()[i].getFirst());
471 if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
472 !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
473 ::new (&getBuckets()[i].getSecond())
474 ValueT(other.getBuckets()[i].getSecond());
475 }
476 }
477
478 static unsigned getHashValue(const KeyT &Val) {
479 return KeyInfoT::getHashValue(Val);
480 }
481
482 template<typename LookupKeyT>
483 static unsigned getHashValue(const LookupKeyT &Val) {
484 return KeyInfoT::getHashValue(Val);
485 }
486
487 static const KeyT getEmptyKey() {
488 static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
489 "Must pass the derived type to this template!");
490 return KeyInfoT::getEmptyKey();
491 }
492
493 static const KeyT getTombstoneKey() {
494 return KeyInfoT::getTombstoneKey();
495 }
496
497private:
498 iterator makeIterator(BucketT *P, BucketT *E,
499 DebugEpochBase &Epoch,
500 bool NoAdvance=false) {
501 if (shouldReverseIterate<KeyT>()) {
502 BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
503 return iterator(B, E, Epoch, NoAdvance);
504 }
505 return iterator(P, E, Epoch, NoAdvance);
506 }
507
508 const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
509 const DebugEpochBase &Epoch,
510 const bool NoAdvance=false) const {
511 if (shouldReverseIterate<KeyT>()) {
512 const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
513 return const_iterator(B, E, Epoch, NoAdvance);
514 }
515 return const_iterator(P, E, Epoch, NoAdvance);
516 }
517
518 unsigned getNumEntries() const {
519 return static_cast<const DerivedT *>(this)->getNumEntries();
520 }
521
522 void setNumEntries(unsigned Num) {
523 static_cast<DerivedT *>(this)->setNumEntries(Num);
524 }
525
526 void incrementNumEntries() {
527 setNumEntries(getNumEntries() + 1);
528 }
529
530 void decrementNumEntries() {
531 setNumEntries(getNumEntries() - 1);
532 }
533
534 unsigned getNumTombstones() const {
535 return static_cast<const DerivedT *>(this)->getNumTombstones();
536 }
537
538 void setNumTombstones(unsigned Num) {
539 static_cast<DerivedT *>(this)->setNumTombstones(Num);
540 }
541
542 void incrementNumTombstones() {
543 setNumTombstones(getNumTombstones() + 1);
544 }
545
546 void decrementNumTombstones() {
547 setNumTombstones(getNumTombstones() - 1);
548 }
549
550 const BucketT *getBuckets() const {
551 return static_cast<const DerivedT *>(this)->getBuckets();
552 }
553
554 BucketT *getBuckets() {
555 return static_cast<DerivedT *>(this)->getBuckets();
556 }
557
558 unsigned getNumBuckets() const {
559 return static_cast<const DerivedT *>(this)->getNumBuckets();
560 }
561
562 BucketT *getBucketsEnd() {
563 return getBuckets() + getNumBuckets();
564 }
565
566 const BucketT *getBucketsEnd() const {
567 return getBuckets() + getNumBuckets();
568 }
569
570 void grow(unsigned AtLeast) {
571 static_cast<DerivedT *>(this)->grow(AtLeast);
572 }
573
574 void shrink_and_clear() {
575 static_cast<DerivedT *>(this)->shrink_and_clear();
576 }
577
578 template <typename KeyArg, typename... ValueArgs>
579 BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
580 ValueArgs &&... Values) {
581 TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
582
583 TheBucket->getFirst() = std::forward<KeyArg>(Key);
584 ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
585 return TheBucket;
586 }
587
588 template <typename LookupKeyT>
589 BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
590 ValueT &&Value, LookupKeyT &Lookup) {
591 TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
592
593 TheBucket->getFirst() = std::move(Key);
594 ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
595 return TheBucket;
596 }
597
598 template <typename LookupKeyT>
599 BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
600 BucketT *TheBucket) {
601 incrementEpoch();
602
603 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
604 // the buckets are empty (meaning that many are filled with tombstones),
605 // grow the table.
606 //
607 // The latter case is tricky. For example, if we had one empty bucket with
608 // tons of tombstones, failing lookups (e.g. for insertion) would have to
609 // probe almost the entire table until it found the empty bucket. If the
610 // table were completely filled with tombstones, no lookup would ever succeed,
611 // causing infinite loops in lookup.
612 unsigned NewNumEntries = getNumEntries() + 1;
613 unsigned NumBuckets = getNumBuckets();
614 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
615 this->grow(NumBuckets * 2);
616 LookupBucketFor(Lookup, TheBucket);
617 NumBuckets = getNumBuckets();
618 } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
619 NumBuckets/8)) {
620 this->grow(NumBuckets);
621 LookupBucketFor(Lookup, TheBucket);
622 }
623 assert(TheBucket);
624
625 // Only update the state after we've grown our bucket space appropriately
626 // so that when growing buckets we have a self-consistent entry count.
627 incrementNumEntries();
628
629 // If we are writing over a tombstone, remember this.
630 const KeyT EmptyKey = getEmptyKey();
631 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
632 decrementNumTombstones();
633
634 return TheBucket;
635 }
636
637 template <typename LookupKeyT> BucketT *doFind(const LookupKeyT &Val) {
638 BucketT *BucketsPtr = getBuckets();
639 const unsigned NumBuckets = getNumBuckets();
640 if (NumBuckets == 0)
641 return nullptr;
642
643 const KeyT EmptyKey = getEmptyKey();
644 unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
645 unsigned ProbeAmt = 1;
646 while (true) {
647 BucketT *Bucket = BucketsPtr + BucketNo;
648 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, Bucket->getFirst())))
649 return Bucket;
650 if (LLVM_LIKELY(KeyInfoT::isEqual(Bucket->getFirst(), EmptyKey)))
651 return nullptr;
652
653 // Otherwise, it's a hash collision or a tombstone, continue quadratic
654 // probing.
655 BucketNo += ProbeAmt++;
656 BucketNo &= NumBuckets - 1;
657 }
658 }
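// Editorial note on the probe sequence above: ProbeAmt grows by one on every
// collision, so after k misses the cumulative offset from the home bucket is
// 1 + 2 + ... + k = k*(k+1)/2, i.e. the probe visits bucket
// (H + k*(k+1)/2) & (NumBuckets - 1) for hash H. Because NumBuckets is a
// power of two, this triangular-number sequence touches every bucket before
// repeating, so the loop terminates whenever at least one empty bucket
// exists.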
659
660 template <typename LookupKeyT>
661 const BucketT *doFind(const LookupKeyT &Val) const {
662 return const_cast<DenseMapBase *>(this)->doFind(Val); // NOLINT
663 }
664
665 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
666 /// FoundBucket. If the bucket contains the key and a value, this returns
667 /// true, otherwise it returns a bucket with an empty marker or tombstone and
668 /// returns false.
669 template<typename LookupKeyT>
670 bool LookupBucketFor(const LookupKeyT &Val,
671 const BucketT *&FoundBucket) const {
672 const BucketT *BucketsPtr = getBuckets();
673 const unsigned NumBuckets = getNumBuckets();
674
675 if (NumBuckets == 0) {
676 FoundBucket = nullptr;
677 return false;
678 }
679
680 // FoundTombstone - Keep track of whether we find a tombstone while probing.
681 const BucketT *FoundTombstone = nullptr;
682 const KeyT EmptyKey = getEmptyKey();
683 const KeyT TombstoneKey = getTombstoneKey();
684 assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
685 !KeyInfoT::isEqual(Val, TombstoneKey) &&
686 "Empty/Tombstone value shouldn't be inserted into map!");
687
688 unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
689 unsigned ProbeAmt = 1;
690 while (true) {
691 const BucketT *ThisBucket = BucketsPtr + BucketNo;
692 // Found Val's bucket? If so, return it.
693 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
694 FoundBucket = ThisBucket;
695 return true;
696 }
697
698 // If we found an empty bucket, the key doesn't exist in the map.
699 // Report the bucket so the caller can insert into it.
700 if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
701 // If we've already seen a tombstone while probing, fill it in instead
702 // of the empty bucket we eventually probed to.
703 FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
704 return false;
705 }
706
707 // If this is a tombstone, remember it. If Val ends up not in the map, we
708 // prefer to return it rather than something that would require more probing.
709 if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
710 !FoundTombstone)
711 FoundTombstone = ThisBucket; // Remember the first tombstone found.
712
713 // Otherwise, it's a hash collision or a tombstone, continue quadratic
714 // probing.
715 BucketNo += ProbeAmt++;
716 BucketNo &= (NumBuckets-1);
717 }
718 }
719
720 template <typename LookupKeyT>
721 bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
722 const BucketT *ConstFoundBucket;
723 bool Result = const_cast<const DenseMapBase *>(this)
724 ->LookupBucketFor(Val, ConstFoundBucket);
725 FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
726 return Result;
727 }
728
729public:
730 /// Return the approximate size (in bytes) of the actual map.
731 /// This is just the raw memory used by DenseMap.
732 /// If entries are pointers to objects, the size of the referenced objects
733 /// are not included.
734 size_t getMemorySize() const {
735 return getNumBuckets() * sizeof(BucketT);
736 }
737};
738
739/// Equality comparison for DenseMap.
740///
741/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
742/// is also in RHS, and that no additional pairs are in RHS.
743/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
744/// complexity is linear, worst case is O(N^2) (if every hash collides).
745template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
746 typename BucketT>
747bool operator==(
748 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
749 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
750 if (LHS.size() != RHS.size())
751 return false;
752
753 for (auto &KV : LHS) {
754 auto I = RHS.find(KV.first);
755 if (I == RHS.end() || I->second != KV.second)
756 return false;
757 }
758
759 return true;
760}
761
762/// Inequality comparison for DenseMap.
763///
764/// Equivalent to !(LHS == RHS). See operator== for performance notes.
765template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
766 typename BucketT>
767bool operator!=(
768 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
769 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
770 return !(LHS == RHS);
771}
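// Editorial usage sketch: operator== compares contents, not bucket layout or
// insertion order, so two maps built differently compare equal when they
// hold the same (key, value) pairs.
//
//   llvm::DenseMap<int, int> A = {{1, 10}, {2, 20}};
//   llvm::DenseMap<int, int> B;
//   B.insert({2, 20});
//   B.insert({1, 10});
//   // A == B is true and A != B is false.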
772
773template <typename KeyT, typename ValueT,
774 typename KeyInfoT = DenseMapInfo<KeyT>,
775 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
776class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
777 KeyT, ValueT, KeyInfoT, BucketT> {
778 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
779
780 // Lift some types from the dependent base class into this class for
781 // simplicity of referring to them.
782 using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
783
784 BucketT *Buckets;
785 unsigned NumEntries;
786 unsigned NumTombstones;
787 unsigned NumBuckets;
788
789public:
790 /// Create a DenseMap with an optional \p InitialReserve that guarantees that
791 /// this number of elements can be inserted in the map without grow().
792 explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
793
794 DenseMap(const DenseMap &other) : BaseT() {
795 init(0);
796 copyFrom(other);
797 }
798
799 DenseMap(DenseMap &&other) : BaseT() {
800 init(0);
801 swap(other);
802 }
803
804 template<typename InputIt>
805 DenseMap(const InputIt &I, const InputIt &E) {
806 init(std::distance(I, E));
807 this->insert(I, E);
808 }
809
810 DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
811 init(Vals.size());
812 this->insert(Vals.begin(), Vals.end());
813 }
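// Editorial usage sketch of the constructors above (hypothetical data): the
// first form reserves room for InitialReserve entries, the range form sizes
// the table from std::distance(I, E), and the initializer-list form from the
// list's size.
//
//   llvm::DenseMap<int, int> A(/*InitialReserve=*/32);
//   std::vector<std::pair<int, int>> Src = {{1, 10}, {2, 20}};
//   llvm::DenseMap<int, int> B(Src.begin(), Src.end());
//   llvm::DenseMap<int, int> C = {{3, 30}, {4, 40}};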
814
815 ~DenseMap() {
816 this->destroyAll();
817 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
818 }
819
820 void swap(DenseMap &RHS) {
821 this->incrementEpoch();
822 RHS.incrementEpoch();
823 std::swap(Buckets, RHS.Buckets);
824 std::swap(NumEntries, RHS.NumEntries);
825 std::swap(NumTombstones, RHS.NumTombstones);
826 std::swap(NumBuckets, RHS.NumBuckets);
827 }
828
829 DenseMap& operator=(const DenseMap& other) {
830 if (&other != this)
831 copyFrom(other);
832 return *this;
833 }
834
835 DenseMap &operator=(DenseMap &&other) {
836 this->destroyAll();
837 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
838 init(0);
839 swap(other);
840 return *this;
841 }
842
843 void copyFrom(const DenseMap& other) {
844 this->destroyAll();
845 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
846 if (allocateBuckets(other.NumBuckets)) {
847 this->BaseT::copyFrom(other);
848 } else {
849 NumEntries = 0;
850 NumTombstones = 0;
851 }
852 }
853
854 void init(unsigned InitNumEntries) {
855 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
856 if (allocateBuckets(InitBuckets)) {
857 this->BaseT::initEmpty();
858 } else {
859 NumEntries = 0;
860 NumTombstones = 0;
861 }
862 }
863
864 void grow(unsigned AtLeast) {
865 unsigned OldNumBuckets = NumBuckets;
866 BucketT *OldBuckets = Buckets;
867
868 allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
869 assert(Buckets);
870 if (!OldBuckets) {
871 this->BaseT::initEmpty();
872 return;
873 }
874
875 this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
876
877 // Free the old table.
878 deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
879 alignof(BucketT));
880 }
881
882 void shrink_and_clear() {
883 unsigned OldNumBuckets = NumBuckets;
884 unsigned OldNumEntries = NumEntries;
885 this->destroyAll();
886
887 // Reduce the number of buckets.
888 unsigned NewNumBuckets = 0;
889 if (OldNumEntries)
890 NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
891 if (NewNumBuckets == NumBuckets) {
892 this->BaseT::initEmpty();
893 return;
894 }
895
896 deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
897 alignof(BucketT));
898 init(NewNumBuckets);
899 }
900
901private:
902 unsigned getNumEntries() const {
903 return NumEntries;
904 }
905
906 void setNumEntries(unsigned Num) {
907 NumEntries = Num;
908 }
909
910 unsigned getNumTombstones() const {
911 return NumTombstones;
912 }
913
914 void setNumTombstones(unsigned Num) {
915 NumTombstones = Num;
916 }
917
918 BucketT *getBuckets() const {
919 return Buckets;
920 }
921
922 unsigned getNumBuckets() const {
923 return NumBuckets;
924 }
925
926 bool allocateBuckets(unsigned Num) {
927 NumBuckets = Num;
928 if (NumBuckets == 0) {
929 Buckets = nullptr;
930 return false;
931 }
932
933 Buckets = static_cast<BucketT *>(
934 allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
935 return true;
936 }
937};
938
939template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
940 typename KeyInfoT = DenseMapInfo<KeyT>,
941 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
942class SmallDenseMap
943 : public DenseMapBase<
944 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
945 ValueT, KeyInfoT, BucketT> {
946 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
947
948 // Lift some types from the dependent base class into this class for
949 // simplicity of referring to them.
950 using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
951
952 static_assert(isPowerOf2_64(InlineBuckets),
953 "InlineBuckets must be a power of 2.");
954
955 unsigned Small : 1;
956 unsigned NumEntries : 31;
957 unsigned NumTombstones;
958
959 struct LargeRep {
960 BucketT *Buckets;
961 unsigned NumBuckets;
962 };
963
964 /// A "union" of an inline bucket array and the struct representing
965 /// a large bucket. This union will be discriminated by the 'Small' bit.
967
968public:
969 explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
970 if (NumInitBuckets > InlineBuckets)
971 NumInitBuckets = llvm::bit_ceil(NumInitBuckets);
972 init(NumInitBuckets);
973 }
974
975 SmallDenseMap(const SmallDenseMap &other) : BaseT() {
976 init(0);
977 copyFrom(other);
978 }
979
980 SmallDenseMap(SmallDenseMap &&other) : BaseT() {
981 init(0);
982 swap(other);
983 }
984
985 template<typename InputIt>
986 SmallDenseMap(const InputIt &I, const InputIt &E) {
987 init(NextPowerOf2(std::distance(I, E)));
988 this->insert(I, E);
989 }
990
991 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
992 : SmallDenseMap(Vals.begin(), Vals.end()) {}
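// Editorial usage sketch: SmallDenseMap keeps up to InlineBuckets buckets
// in-object and only heap-allocates once the table must grow past them, so
// it suits maps that usually hold just a handful of entries.
//
//   llvm::SmallDenseMap<unsigned, unsigned, 8> Flags; // 8 inline buckets
//   Flags.try_emplace(42u, 1u);   // small maps stay in the inline storage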
993
994 ~SmallDenseMap() {
995 this->destroyAll();
996 deallocateBuckets();
997 }
998
999 void swap(SmallDenseMap &RHS) {
1000 unsigned TmpNumEntries = RHS.NumEntries;
1001 RHS.NumEntries = NumEntries;
1002 NumEntries = TmpNumEntries;
1003 std::swap(NumTombstones, RHS.NumTombstones);
1004
1005 const KeyT EmptyKey = this->getEmptyKey();
1006 const KeyT TombstoneKey = this->getTombstoneKey();
1007 if (Small && RHS.Small) {
1008 // If we're swapping inline bucket arrays, we have to cope with some of
1009 // the tricky bits of DenseMap's storage system: the buckets are not
1010 // fully initialized. Thus we swap every key, but we may have
1011 // a one-directional move of the value.
1012 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
1013 BucketT *LHSB = &getInlineBuckets()[i],
1014 *RHSB = &RHS.getInlineBuckets()[i];
1015 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
1016 !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
1017 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
1018 !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
1019 if (hasLHSValue && hasRHSValue) {
1020 // Swap together if we can...
1021 std::swap(*LHSB, *RHSB);
1022 continue;
1023 }
1024 // Swap separately and handle any asymmetry.
1025 std::swap(LHSB->getFirst(), RHSB->getFirst());
1026 if (hasLHSValue) {
1027 ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
1028 LHSB->getSecond().~ValueT();
1029 } else if (hasRHSValue) {
1030 ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
1031 RHSB->getSecond().~ValueT();
1032 }
1033 }
1034 return;
1035 }
1036 if (!Small && !RHS.Small) {
1037 std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
1038 std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
1039 return;
1040 }
1041
1042 SmallDenseMap &SmallSide = Small ? *this : RHS;
1043 SmallDenseMap &LargeSide = Small ? RHS : *this;
1044
1045 // First stash the large side's rep and move the small side across.
1046 LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
1047 LargeSide.getLargeRep()->~LargeRep();
1048 LargeSide.Small = true;
1049 // This is similar to the standard move-from-old-buckets, but the bucket
1050 // count hasn't actually rotated in this case. So we have to carefully
1051 // move construct the keys and values into their new locations, but there
1052 // is no need to re-hash things.
1053 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
1054 BucketT *NewB = &LargeSide.getInlineBuckets()[i],
1055 *OldB = &SmallSide.getInlineBuckets()[i];
1056 ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
1057 OldB->getFirst().~KeyT();
1058 if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
1059 !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
1060 ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
1061 OldB->getSecond().~ValueT();
1062 }
1063 }
1064
1065 // The hard part of moving the small buckets across is done, just move
1066 // the TmpRep into its new home.
1067 SmallSide.Small = false;
1068 new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
1069 }
1070
1071 SmallDenseMap &operator=(const SmallDenseMap &other) {
1072 if (&other != this)
1073 copyFrom(other);
1074 return *this;
1075 }
1076
1077 SmallDenseMap &operator=(SmallDenseMap &&other) {
1078 this->destroyAll();
1079 deallocateBuckets();
1080 init(0);
1081 swap(other);
1082 return *this;
1083 }
1084
1085 void copyFrom(const SmallDenseMap& other) {
1086 this->destroyAll();
1087 deallocateBuckets();
1088 Small = true;
1089 if (other.getNumBuckets() > InlineBuckets) {
1090 Small = false;
1091 new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1092 }
1093 this->BaseT::copyFrom(other);
1094 }
1095
1096 void init(unsigned InitBuckets) {
1097 Small = true;
1098 if (InitBuckets > InlineBuckets) {
1099 Small = false;
1100 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1101 }
1102 this->BaseT::initEmpty();
1103 }
1104
1105 void grow(unsigned AtLeast) {
1106 if (AtLeast > InlineBuckets)
1107 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
1108
1109 if (Small) {
1110 // First move the inline buckets into a temporary storage.
1111 AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1112 BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1113 BucketT *TmpEnd = TmpBegin;
1114
1115 // Loop over the buckets, moving non-empty, non-tombstones into the
1116 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1117 const KeyT EmptyKey = this->getEmptyKey();
1118 const KeyT TombstoneKey = this->getTombstoneKey();
1119 for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
1120 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
1121 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
1122 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1123 "Too many inline buckets!");
1124 ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
1125 ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
1126 ++TmpEnd;
1127 P->getSecond().~ValueT();
1128 }
1129 P->getFirst().~KeyT();
1130 }
1131
1132 // AtLeast == InlineBuckets can happen if there are many tombstones,
1133 // and grow() is used to remove them. Normally we switch to the
1134 // large rep here.
1135 if (AtLeast > InlineBuckets) {
1136 Small = false;
1137 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1138 }
1139 this->moveFromOldBuckets(TmpBegin, TmpEnd);
1140 return;
1141 }
1142
1143 LargeRep OldRep = std::move(*getLargeRep());
1144 getLargeRep()->~LargeRep();
1145 if (AtLeast <= InlineBuckets) {
1146 Small = true;
1147 } else {
1148 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1149 }
1150
1151 this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1152
1153 // Free the old table.
1154 deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1155 alignof(BucketT));
1156 }
1157
1158 void shrink_and_clear() {
1159 unsigned OldSize = this->size();
1160 this->destroyAll();
1161
1162 // Reduce the number of buckets.
1163 unsigned NewNumBuckets = 0;
1164 if (OldSize) {
1165 NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1166 if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1167 NewNumBuckets = 64;
1168 }
1169 if ((Small && NewNumBuckets <= InlineBuckets) ||
1170 (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1171 this->BaseT::initEmpty();
1172 return;
1173 }
1174
1175 deallocateBuckets();
1176 init(NewNumBuckets);
1177 }
1178
1179private:
1180 unsigned getNumEntries() const {
1181 return NumEntries;
1182 }
1183
1184 void setNumEntries(unsigned Num) {
1185 // NumEntries is hardcoded to be 31 bits wide.
1186 assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1187 NumEntries = Num;
1188 }
1189
1190 unsigned getNumTombstones() const {
1191 return NumTombstones;
1192 }
1193
1194 void setNumTombstones(unsigned Num) {
1195 NumTombstones = Num;
1196 }
1197
1198 const BucketT *getInlineBuckets() const {
1199 assert(Small);
1200 // Note that this cast does not violate aliasing rules as we assert that
1201 // the memory's dynamic type is the small, inline bucket buffer, and the
1202 // 'storage' is a POD containing a char buffer.
1203 return reinterpret_cast<const BucketT *>(&storage);
1204 }
1205
1206 BucketT *getInlineBuckets() {
1207 return const_cast<BucketT *>(
1208 const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1209 }
1210
1211 const LargeRep *getLargeRep() const {
1212 assert(!Small);
1213 // Note, same rule about aliasing as with getInlineBuckets.
1214 return reinterpret_cast<const LargeRep *>(&storage);
1215 }
1216
1217 LargeRep *getLargeRep() {
1218 return const_cast<LargeRep *>(
1219 const_cast<const SmallDenseMap *>(this)->getLargeRep());
1220 }
1221
1222 const BucketT *getBuckets() const {
1223 return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1224 }
1225
1226 BucketT *getBuckets() {
1227 return const_cast<BucketT *>(
1228 const_cast<const SmallDenseMap *>(this)->getBuckets());
1229 }
1230
1231 unsigned getNumBuckets() const {
1232 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1233 }
1234
1235 void deallocateBuckets() {
1236 if (Small)
1237 return;
1238
1239 deallocate_buffer(getLargeRep()->Buckets,
1240 sizeof(BucketT) * getLargeRep()->NumBuckets,
1241 alignof(BucketT));
1242 getLargeRep()->~LargeRep();
1243 }
1244
1245 LargeRep allocateBuckets(unsigned Num) {
1246 assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1247 LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1248 sizeof(BucketT) * Num, alignof(BucketT))),
1249 Num};
1250 return Rep;
1251 }
1252};
1253
1254template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1255 bool IsConst>
1256class DenseMapIterator : DebugEpochBase::HandleBase {
1257 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1258 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1259
1260public:
1261 using difference_type = ptrdiff_t;
1262 using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
1263 using pointer = value_type *;
1264 using reference = value_type &;
1265 using iterator_category = std::forward_iterator_tag;
1266
1267private:
1268 pointer Ptr = nullptr;
1269 pointer End = nullptr;
1270
1271public:
1272 DenseMapIterator() = default;
1273
1274 DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
1275 bool NoAdvance = false)
1276 : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1277 assert(isHandleInSync() && "invalid construction!");
1278
1279 if (NoAdvance) return;
1280 if (shouldReverseIterate<KeyT>()) {
1281 RetreatPastEmptyBuckets();
1282 return;
1283 }
1284 AdvancePastEmptyBuckets();
1285 }
1286
1287 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1288 // for const iterator destinations so it doesn't end up as a user defined copy
1289 // constructor.
1290 template <bool IsConstSrc,
1291 typename = std::enable_if_t<!IsConstSrc && IsConst>>
1292 DenseMapIterator(
1293 const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1294 : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1295
1296 reference operator*() const {
1297 assert(isHandleInSync() && "invalid iterator access!");
1298 assert(Ptr != End && "dereferencing end() iterator");
1299 if (shouldReverseIterate<KeyT>())
1300 return Ptr[-1];
1301 return *Ptr;
1302 }
1303 pointer operator->() const {
1304 assert(isHandleInSync() && "invalid iterator access!");
1305 assert(Ptr != End && "dereferencing end() iterator");
1306 if (shouldReverseIterate<KeyT>())
1307 return &(Ptr[-1]);
1308 return Ptr;
1309 }
1310
1311 friend bool operator==(const DenseMapIterator &LHS,
1312 const DenseMapIterator &RHS) {
1313 assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
1314 assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1315 assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1316 "comparing incomparable iterators!");
1317 return LHS.Ptr == RHS.Ptr;
1318 }
1319
1320 friend bool operator!=(const DenseMapIterator &LHS,
1321 const DenseMapIterator &RHS) {
1322 return !(LHS == RHS);
1323 }
1324
1325 inline DenseMapIterator& operator++() { // Preincrement
1326 assert(isHandleInSync() && "invalid iterator access!");
1327 assert(Ptr != End && "incrementing end() iterator");
1328 if (shouldReverseIterate<KeyT>()) {
1329 --Ptr;
1330 RetreatPastEmptyBuckets();
1331 return *this;
1332 }
1333 ++Ptr;
1334 AdvancePastEmptyBuckets();
1335 return *this;
1336 }
1337 DenseMapIterator operator++(int) { // Postincrement
1338 assert(isHandleInSync() && "invalid iterator access!");
1339 DenseMapIterator tmp = *this; ++*this; return tmp;
1340 }
1341
1342private:
1343 void AdvancePastEmptyBuckets() {
1344 assert(Ptr <= End);
1345 const KeyT Empty = KeyInfoT::getEmptyKey();
1346 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1347
1348 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1349 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1350 ++Ptr;
1351 }
1352
1353 void RetreatPastEmptyBuckets() {
1354 assert(Ptr >= End);
1355 const KeyT Empty = KeyInfoT::getEmptyKey();
1356 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1357
1358 while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1359 KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1360 --Ptr;
1361 }
1362};
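// Editorial usage sketch: iteration order over a DenseMap is unspecified and
// changes when the table grows. When LLVM_ENABLE_ABI_BREAKING_CHECKS is on,
// the epoch handles above assert if an iterator is used after a potentially
// invalidating mutation. 'process' below is a hypothetical callback.
//
//   llvm::DenseMap<int, int> M = {{1, 10}, {2, 20}};
//   for (const auto &Entry : M)
//     process(Entry.first, Entry.second);
//   // Do not insert into M inside such a loop; growth invalidates iterators.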
1363
1364template <typename KeyT, typename ValueT, typename KeyInfoT>
1365inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1366 return X.getMemorySize();
1367}
1368
1369} // end namespace llvm
1370
1371#endif // LLVM_ADT_DENSEMAP_H