DenseMap.h (LLVM 19.0.0git)
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/DenseMapInfo.h"
18#include "llvm/ADT/EpochTracker.h"
19#include "llvm/Support/AlignOf.h"
20#include "llvm/Support/Compiler.h"
21#include "llvm/Support/MathExtras.h"
22#include "llvm/Support/MemAlloc.h"
23#include "llvm/Support/ReverseIteration.h"
24#include "llvm/Support/type_traits.h"
25#include <algorithm>
26#include <cassert>
27#include <cstddef>
28#include <cstring>
29#include <initializer_list>
30#include <iterator>
31#include <new>
32#include <type_traits>
33#include <utility>
34
35namespace llvm {
36
37namespace detail {
38
39// We extend a pair to allow users to override the bucket type with their own
40// implementation without requiring two members.
41template <typename KeyT, typename ValueT>
42struct DenseMapPair : public std::pair<KeyT, ValueT> {
43 using std::pair<KeyT, ValueT>::pair;
44
45 KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
46 const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
47 ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
48 const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
49};
50
51} // end namespace detail
52
53template <typename KeyT, typename ValueT,
54 typename KeyInfoT = DenseMapInfo<KeyT>,
55 typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
56 bool IsConst = false>
57class DenseMapIterator;
58
59template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
60 typename BucketT>
61class DenseMapBase : public DebugEpochBase {
62 template <typename T>
63 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
64
65public:
66 using size_type = unsigned;
67 using key_type = KeyT;
68 using mapped_type = ValueT;
69 using value_type = BucketT;
70
71 using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
72 using const_iterator =
73 DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
74
75 inline iterator begin() {
76 // When the map is empty, avoid the overhead of advancing/retreating past
77 // empty buckets.
78 if (empty())
79 return end();
80 if (shouldReverseIterate<KeyT>())
81 return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
82 return makeIterator(getBuckets(), getBucketsEnd(), *this);
83 }
84 inline iterator end() {
85 return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
86 }
87 inline const_iterator begin() const {
88 if (empty())
89 return end();
90 if (shouldReverseIterate<KeyT>())
91 return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
92 return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
93 }
94 inline const_iterator end() const {
95 return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
96 }
97
98 [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
99 unsigned size() const { return getNumEntries(); }
100
101 /// Grow the densemap so that it can contain at least \p NumEntries items
102 /// before resizing again.
103 void reserve(size_type NumEntries) {
104 auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
105 incrementEpoch();
106 if (NumBuckets > getNumBuckets())
107 grow(NumBuckets);
108 }
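// Usage sketch (illustrative): reserving up front sizes the table once, so
// populating a map of known size never rehashes.
//
//   llvm::DenseMap<unsigned, unsigned> Squares;
//   Squares.reserve(1024);                // allocate enough buckets once
//   for (unsigned I = 0; I != 1024; ++I)
//     Squares[I] = I * I;                 // no grow() inside this loop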
109
110 void clear() {
111 incrementEpoch();
112 if (getNumEntries() == 0 && getNumTombstones() == 0) return;
113
114 // If the capacity of the array is huge, and the # elements used is small,
115 // shrink the array.
116 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
117 shrink_and_clear();
118 return;
119 }
120
121 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
122 if (std::is_trivially_destructible<ValueT>::value) {
123 // Use a simpler loop when values don't need destruction.
124 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
125 P->getFirst() = EmptyKey;
126 } else {
127 unsigned NumEntries = getNumEntries();
128 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
129 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
130 if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
131 P->getSecond().~ValueT();
132 --NumEntries;
133 }
134 P->getFirst() = EmptyKey;
135 }
136 }
137 assert(NumEntries == 0 && "Node count imbalance!");
138 (void)NumEntries;
139 }
140 setNumEntries(0);
141 setNumTombstones(0);
142 }
143
144 /// Return true if the specified key is in the map, false otherwise.
145 bool contains(const_arg_type_t<KeyT> Val) const {
146 const BucketT *TheBucket;
147 return LookupBucketFor(Val, TheBucket);
148 }
149
150 /// Return 1 if the specified key is in the map, 0 otherwise.
151 size_type count(const_arg_type_t<KeyT> Val) const {
152 return contains(Val) ? 1 : 0;
153 }
154
155 iterator find(const_arg_type_t<KeyT> Val) {
156 BucketT *TheBucket;
157 if (LookupBucketFor(Val, TheBucket))
158 return makeIterator(TheBucket,
159 shouldReverseIterate<KeyT>() ? getBuckets()
160 : getBucketsEnd(),
161 *this, true);
162 return end();
163 }
164 const_iterator find(const_arg_type_t<KeyT> Val) const {
165 const BucketT *TheBucket;
166 if (LookupBucketFor(Val, TheBucket))
167 return makeConstIterator(TheBucket,
168 shouldReverseIterate<KeyT>() ? getBuckets()
169 : getBucketsEnd(),
170 *this, true);
171 return end();
172 }
173
174 /// Alternate version of find() which allows a different, and possibly
175 /// less expensive, key type.
176 /// The DenseMapInfo is responsible for supplying methods
177 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
178 /// type used.
179 template<class LookupKeyT>
180 iterator find_as(const LookupKeyT &Val) {
181 BucketT *TheBucket;
182 if (LookupBucketFor(Val, TheBucket))
183 return makeIterator(TheBucket,
184 shouldReverseIterate<KeyT>() ? getBuckets()
185 : getBucketsEnd(),
186 *this, true);
187 return end();
188 }
189 template<class LookupKeyT>
190 const_iterator find_as(const LookupKeyT &Val) const {
191 const BucketT *TheBucket;
192 if (LookupBucketFor(Val, TheBucket))
193 return makeConstIterator(TheBucket,
194 shouldReverseIterate<KeyT>() ? getBuckets()
195 : getBucketsEnd(),
196 *this, true);
197 return end();
198 }
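// Illustrative sketch of the requirement above. StringKeyInfo is a hypothetical
// trait (not provided by LLVM), shown only to illustrate the extra overloads a
// KeyInfo needs so that a std::string-keyed map can be queried with a StringRef
// via find_as() without building a temporary std::string. Assumes
// llvm/ADT/StringRef.h (for StringRef and hash_value) is available.
//
//   struct StringKeyInfo {
//     static std::string getEmptyKey() { return std::string("\x01"); }
//     static std::string getTombstoneKey() { return std::string("\x02"); }
//     static unsigned getHashValue(const std::string &S) {
//       return (unsigned)llvm::hash_value(llvm::StringRef(S));
//     }
//     static unsigned getHashValue(llvm::StringRef S) {        // LookupKeyT overload
//       return (unsigned)llvm::hash_value(S);
//     }
//     static bool isEqual(const std::string &A, const std::string &B) { return A == B; }
//     static bool isEqual(llvm::StringRef A, const std::string &B) { return A == B; }
//   };
//
//   llvm::DenseMap<std::string, int, StringKeyInfo> M;
//   auto It = M.find_as(llvm::StringRef("key"));   // no std::string constructed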
199
200 /// lookup - Return the entry for the specified key, or a default
201 /// constructed value if no such entry exists.
202 ValueT lookup(const_arg_type_t<KeyT> Val) const {
203 const BucketT *TheBucket;
204 if (LookupBucketFor(Val, TheBucket))
205 return TheBucket->getSecond();
206 return ValueT();
207 }
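// Example (illustrative): unlike operator[], lookup() never inserts.
//
//   llvm::DenseMap<int, int> M;
//   int A = M.lookup(7);   // key absent: returns a value-initialized int (0), M stays empty
//   M[7] = 42;             // operator[] default-constructs the value, then assigns
//   int B = M.lookup(7);   // now returns 42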
208
209 /// at - Return the entry for the specified key, or abort if no such
210 /// entry exists.
211 const ValueT &at(const_arg_type_t<KeyT> Val) const {
212 auto Iter = this->find(std::move(Val));
213 assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
214 return Iter->second;
215 }
216
217 // Inserts key,value pair into the map if the key isn't already in the map.
218 // If the key is already in the map, it returns false and doesn't update the
219 // value.
220 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
221 return try_emplace(KV.first, KV.second);
222 }
223
224 // Inserts key,value pair into the map if the key isn't already in the map.
225 // If the key is already in the map, it returns false and doesn't update the
226 // value.
227 std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
228 return try_emplace(std::move(KV.first), std::move(KV.second));
229 }
230
231 // Inserts key,value pair into the map if the key isn't already in the map.
232 // The value is constructed in-place if the key is not in the map, otherwise
233 // it is not moved.
234 template <typename... Ts>
235 std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
236 BucketT *TheBucket;
237 if (LookupBucketFor(Key, TheBucket))
238 return std::make_pair(makeIterator(TheBucket,
239 shouldReverseIterate<KeyT>()
240 ? getBuckets()
241 : getBucketsEnd(),
242 *this, true),
243 false); // Already in map.
244
245 // Otherwise, insert the new element.
246 TheBucket =
247 InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
248 return std::make_pair(makeIterator(TheBucket,
249 shouldReverseIterate<KeyT>()
250 ? getBuckets()
251 : getBucketsEnd(),
252 *this, true),
253 true);
254 }
255
256 // Inserts key,value pair into the map if the key isn't already in the map.
257 // The value is constructed in-place if the key is not in the map, otherwise
258 // it is not moved.
259 template <typename... Ts>
260 std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
261 BucketT *TheBucket;
262 if (LookupBucketFor(Key, TheBucket))
263 return std::make_pair(makeIterator(TheBucket,
264 shouldReverseIterate<KeyT>()
265 ? getBuckets()
266 : getBucketsEnd(),
267 *this, true),
268 false); // Already in map.
269
270 // Otherwise, insert the new element.
271 TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
272 return std::make_pair(makeIterator(TheBucket,
273 shouldReverseIterate<KeyT>()
274 ? getBuckets()
275 : getBucketsEnd(),
276 *this, true),
277 true);
278 }
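// Example (illustrative): try_emplace constructs the value only when the key is
// new and never overwrites an existing mapping.
//
//   llvm::DenseMap<unsigned, std::string> M;
//   auto [I1, New1] = M.try_emplace(1, "first");    // inserted, New1 == true
//   auto [I2, New2] = M.try_emplace(1, "second");   // New2 == false, value stays "first"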
279
280 /// Alternate version of insert() which allows a different, and possibly
281 /// less expensive, key type.
282 /// The DenseMapInfo is responsible for supplying methods
283 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
284 /// type used.
285 template <typename LookupKeyT>
286 std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
287 const LookupKeyT &Val) {
288 BucketT *TheBucket;
289 if (LookupBucketFor(Val, TheBucket))
290 return std::make_pair(makeIterator(TheBucket,
291 shouldReverseIterate<KeyT>()
292 ? getBuckets()
293 : getBucketsEnd(),
294 *this, true),
295 false); // Already in map.
296
297 // Otherwise, insert the new element.
298 TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
299 std::move(KV.second), Val);
300 return std::make_pair(makeIterator(TheBucket,
301 shouldReverseIterate<KeyT>()
302 ? getBuckets()
303 : getBucketsEnd(),
304 *this, true),
305 true);
306 }
307
308 /// insert - Range insertion of pairs.
309 template<typename InputIt>
310 void insert(InputIt I, InputIt E) {
311 for (; I != E; ++I)
312 insert(*I);
313 }
314
315 template <typename V>
316 std::pair<iterator, bool> insert_or_assign(const KeyT &Key, V &&Val) {
317 auto Ret = try_emplace(Key, std::forward<V>(Val));
318 if (!Ret.second)
319 Ret.first->second = std::forward<V>(Val);
320 return Ret;
321 }
322
323 template <typename V>
324 std::pair<iterator, bool> insert_or_assign(KeyT &&Key, V &&Val) {
325 auto Ret = try_emplace(std::move(Key), std::forward<V>(Val));
326 if (!Ret.second)
327 Ret.first->second = std::forward<V>(Val);
328 return Ret;
329 }
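// Example (illustrative): insert_or_assign() overwrites the mapped value when
// the key already exists, unlike insert() and try_emplace().
//
//   llvm::DenseMap<int, int> M;
//   M.insert_or_assign(3, 10);                   // inserts {3, 10}
//   auto [It, New] = M.insert_or_assign(3, 20);  // New == false, value is now 20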
330
331 /// Returns the value associated to the key in the map if it exists. If it
332 /// does not exist, emplace a default value for the key and returns a
333 /// reference to the newly created value.
334 ValueT &getOrInsertDefault(KeyT &&Key) {
335 return try_emplace(Key).first->second;
336 }
337
338 /// Returns the value associated to the key in the map if it exists. If it
339 /// does not exist, emplace a default value for the key and returns a
340 /// reference to the newly created value.
341 ValueT &getOrInsertDefault(const KeyT &Key) {
342 return try_emplace(Key).first->second;
343 }
344
345 bool erase(const KeyT &Val) {
346 BucketT *TheBucket;
347 if (!LookupBucketFor(Val, TheBucket))
348 return false; // not in map.
349
350 TheBucket->getSecond().~ValueT();
351 TheBucket->getFirst() = getTombstoneKey();
352 decrementNumEntries();
353 incrementNumTombstones();
354 return true;
355 }
356 void erase(iterator I) {
357 BucketT *TheBucket = &*I;
358 TheBucket->getSecond().~ValueT();
359 TheBucket->getFirst() = getTombstoneKey();
360 decrementNumEntries();
361 incrementNumTombstones();
362 }
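// Example (illustrative): erase() removes by key (returning whether anything
// was removed) or by iterator; the freed bucket becomes a tombstone, so the
// bucket count does not shrink.
//
//   llvm::DenseMap<int, int> M = {{1, 2}, {3, 4}};
//   bool Removed = M.erase(1);              // true
//   if (auto It = M.find(3); It != M.end())
//     M.erase(It);                          // erase by iterator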
363
364 value_type& FindAndConstruct(const KeyT &Key) {
365 BucketT *TheBucket;
366 if (LookupBucketFor(Key, TheBucket))
367 return *TheBucket;
368
369 return *InsertIntoBucket(TheBucket, Key);
370 }
371
372 ValueT &operator[](const KeyT &Key) {
373 return FindAndConstruct(Key).second;
374 }
375
376 value_type& FindAndConstruct(KeyT &&Key) {
377 BucketT *TheBucket;
378 if (LookupBucketFor(Key, TheBucket))
379 return *TheBucket;
380
381 return *InsertIntoBucket(TheBucket, std::move(Key));
382 }
383
384 ValueT &operator[](KeyT &&Key) {
385 return FindAndConstruct(std::move(Key)).second;
386 }
387
388 /// isPointerIntoBucketsArray - Return true if the specified pointer points
389 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
390 /// value in the DenseMap).
391 bool isPointerIntoBucketsArray(const void *Ptr) const {
392 return Ptr >= getBuckets() && Ptr < getBucketsEnd();
393 }
394
395 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
396 /// array. In conjunction with the previous method, this can be used to
397 /// determine whether an insertion caused the DenseMap to reallocate.
398 const void *getPointerIntoBucketsArray() const { return getBuckets(); }
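// Usage sketch for the two helpers above (M, K, and V are placeholders):
// detecting whether an insertion reallocated the bucket array.
//
//   const void *Probe = M.getPointerIntoBucketsArray();
//   M.try_emplace(K, V);                                // may trigger grow()
//   bool Reallocated = !M.isPointerIntoBucketsArray(Probe);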
399
400protected:
401 DenseMapBase() = default;
402
403 void destroyAll() {
404 if (getNumBuckets() == 0) // Nothing to do.
405 return;
406
407 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
408 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
409 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
410 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
411 P->getSecond().~ValueT();
412 P->getFirst().~KeyT();
413 }
414 }
415
416 void initEmpty() {
417 setNumEntries(0);
418 setNumTombstones(0);
419
420 assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
421 "# initial buckets must be a power of two!");
422 const KeyT EmptyKey = getEmptyKey();
423 for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
424 ::new (&B->getFirst()) KeyT(EmptyKey);
425 }
426
427 /// Returns the number of buckets to allocate to ensure that the DenseMap can
428 /// accommodate \p NumEntries without need to grow().
429 unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
430 // Ensure that "NumEntries * 4 < NumBuckets * 3"
431 if (NumEntries == 0)
432 return 0;
433 // +1 is required because the inequality above is strict.
434 // For example, if NumEntries is 48, then 48 * 4 / 3 == 64 buckets would give 192 < 192, which fails, so at least 65 buckets are needed and NextPowerOf2 returns 128.
435 return NextPowerOf2(NumEntries * 4 / 3 + 1);
436 }
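// Worked example (illustrative): reserve(48) asks this function for
// NextPowerOf2(48 * 4 / 3 + 1) == NextPowerOf2(65) == 128 buckets, so 48
// entries fill 37.5% of the table, comfortably below the 3/4 growth threshold.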
437
438 void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
439 initEmpty();
440
441 // Insert all the old elements.
442 const KeyT EmptyKey = getEmptyKey();
443 const KeyT TombstoneKey = getTombstoneKey();
444 for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
445 if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
446 !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
447 // Insert the key/value into the new table.
448 BucketT *DestBucket;
449 bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
450 (void)FoundVal; // silence warning.
451 assert(!FoundVal && "Key already in new map?");
452 DestBucket->getFirst() = std::move(B->getFirst());
453 ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
454 incrementNumEntries();
455
456 // Free the value.
457 B->getSecond().~ValueT();
458 }
459 B->getFirst().~KeyT();
460 }
461 }
462
463 template <typename OtherBaseT>
464 void copyFrom(
465 const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
466 assert(&other != this);
467 assert(getNumBuckets() == other.getNumBuckets());
468
469 setNumEntries(other.getNumEntries());
470 setNumTombstones(other.getNumTombstones());
471
472 if (std::is_trivially_copyable<KeyT>::value &&
473 std::is_trivially_copyable<ValueT>::value)
474 memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
475 getNumBuckets() * sizeof(BucketT));
476 else
477 for (size_t i = 0; i < getNumBuckets(); ++i) {
478 ::new (&getBuckets()[i].getFirst())
479 KeyT(other.getBuckets()[i].getFirst());
480 if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
481 !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
482 ::new (&getBuckets()[i].getSecond())
483 ValueT(other.getBuckets()[i].getSecond());
484 }
485 }
486
487 static unsigned getHashValue(const KeyT &Val) {
488 return KeyInfoT::getHashValue(Val);
489 }
490
491 template<typename LookupKeyT>
492 static unsigned getHashValue(const LookupKeyT &Val) {
493 return KeyInfoT::getHashValue(Val);
494 }
495
496 static const KeyT getEmptyKey() {
497 static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
498 "Must pass the derived type to this template!");
499 return KeyInfoT::getEmptyKey();
500 }
501
502 static const KeyT getTombstoneKey() {
503 return KeyInfoT::getTombstoneKey();
504 }
505
506private:
507 iterator makeIterator(BucketT *P, BucketT *E,
508 DebugEpochBase &Epoch,
509 bool NoAdvance=false) {
510 if (shouldReverseIterate<KeyT>()) {
511 BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
512 return iterator(B, E, Epoch, NoAdvance);
513 }
514 return iterator(P, E, Epoch, NoAdvance);
515 }
516
517 const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
518 const DebugEpochBase &Epoch,
519 const bool NoAdvance=false) const {
520 if (shouldReverseIterate<KeyT>()) {
521 const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
522 return const_iterator(B, E, Epoch, NoAdvance);
523 }
524 return const_iterator(P, E, Epoch, NoAdvance);
525 }
526
527 unsigned getNumEntries() const {
528 return static_cast<const DerivedT *>(this)->getNumEntries();
529 }
530
531 void setNumEntries(unsigned Num) {
532 static_cast<DerivedT *>(this)->setNumEntries(Num);
533 }
534
535 void incrementNumEntries() {
536 setNumEntries(getNumEntries() + 1);
537 }
538
539 void decrementNumEntries() {
540 setNumEntries(getNumEntries() - 1);
541 }
542
543 unsigned getNumTombstones() const {
544 return static_cast<const DerivedT *>(this)->getNumTombstones();
545 }
546
547 void setNumTombstones(unsigned Num) {
548 static_cast<DerivedT *>(this)->setNumTombstones(Num);
549 }
550
551 void incrementNumTombstones() {
552 setNumTombstones(getNumTombstones() + 1);
553 }
554
555 void decrementNumTombstones() {
556 setNumTombstones(getNumTombstones() - 1);
557 }
558
559 const BucketT *getBuckets() const {
560 return static_cast<const DerivedT *>(this)->getBuckets();
561 }
562
563 BucketT *getBuckets() {
564 return static_cast<DerivedT *>(this)->getBuckets();
565 }
566
567 unsigned getNumBuckets() const {
568 return static_cast<const DerivedT *>(this)->getNumBuckets();
569 }
570
571 BucketT *getBucketsEnd() {
572 return getBuckets() + getNumBuckets();
573 }
574
575 const BucketT *getBucketsEnd() const {
576 return getBuckets() + getNumBuckets();
577 }
578
579 void grow(unsigned AtLeast) {
580 static_cast<DerivedT *>(this)->grow(AtLeast);
581 }
582
583 void shrink_and_clear() {
584 static_cast<DerivedT *>(this)->shrink_and_clear();
585 }
586
587 template <typename KeyArg, typename... ValueArgs>
588 BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
589 ValueArgs &&... Values) {
590 TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
591
592 TheBucket->getFirst() = std::forward<KeyArg>(Key);
593 ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
594 return TheBucket;
595 }
596
597 template <typename LookupKeyT>
598 BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
599 ValueT &&Value, LookupKeyT &Lookup) {
600 TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
601
602 TheBucket->getFirst() = std::move(Key);
603 ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
604 return TheBucket;
605 }
606
607 template <typename LookupKeyT>
608 BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
609 BucketT *TheBucket) {
610 incrementEpoch();
611
612 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
613 // the buckets are empty (meaning that many are filled with tombstones),
614 // grow the table.
615 //
616 // The latter case is tricky. For example, if we had one empty bucket with
617 // tons of tombstones, failing lookups (e.g. for insertion) would have to
618 // probe almost the entire table until it found the empty bucket. If the
619 // table were completely filled with tombstones, no lookup would ever succeed,
620 // causing infinite loops in lookup.
621 unsigned NewNumEntries = getNumEntries() + 1;
622 unsigned NumBuckets = getNumBuckets();
623 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
624 this->grow(NumBuckets * 2);
625 LookupBucketFor(Lookup, TheBucket);
626 NumBuckets = getNumBuckets();
627 } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
628 NumBuckets/8)) {
629 this->grow(NumBuckets);
630 LookupBucketFor(Lookup, TheBucket);
631 }
632 assert(TheBucket);
633
634 // Only update the state after we've grown our bucket space appropriately
635 // so that when growing buckets we have self-consistent entry count.
636 incrementNumEntries();
637
638 // If we are writing over a tombstone, remember this.
639 const KeyT EmptyKey = getEmptyKey();
640 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
641 decrementNumTombstones();
642
643 return TheBucket;
644 }
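// Illustrative numbers for the policy above: with 64 buckets, inserting the
// 48th live entry triggers grow(128), because 48 * 4 >= 64 * 3. Failing that,
// if live entries plus tombstones (counting the incoming entry) leave at most
// 64/8 == 8 buckets empty, grow(64) rehashes at the same size to flush the
// tombstones.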
645
646 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
647 /// FoundBucket. If the bucket contains the key and a value, this returns
648 /// true, otherwise it returns a bucket with an empty marker or tombstone and
649 /// returns false.
650 template<typename LookupKeyT>
651 bool LookupBucketFor(const LookupKeyT &Val,
652 const BucketT *&FoundBucket) const {
653 const BucketT *BucketsPtr = getBuckets();
654 const unsigned NumBuckets = getNumBuckets();
655
656 if (NumBuckets == 0) {
657 FoundBucket = nullptr;
658 return false;
659 }
660
661 // FoundTombstone - Keep track of whether we find a tombstone while probing.
662 const BucketT *FoundTombstone = nullptr;
663 const KeyT EmptyKey = getEmptyKey();
664 const KeyT TombstoneKey = getTombstoneKey();
665 assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
666 !KeyInfoT::isEqual(Val, TombstoneKey) &&
667 "Empty/Tombstone value shouldn't be inserted into map!");
668
669 unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
670 unsigned ProbeAmt = 1;
671 while (true) {
672 const BucketT *ThisBucket = BucketsPtr + BucketNo;
673 // Found Val's bucket? If so, return it.
674 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
675 FoundBucket = ThisBucket;
676 return true;
677 }
678
679 // If we found an empty bucket, the key doesn't exist in the set.
680 // Insert it and return the default value.
681 if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
682 // If we've already seen a tombstone while probing, fill it in instead
683 // of the empty bucket we eventually probed to.
684 FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
685 return false;
686 }
687
688 // If this is a tombstone, remember it. If Val ends up not in the map, we
689 // prefer to return it rather than something that would require more probing.
690 if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
691 !FoundTombstone)
692 FoundTombstone = ThisBucket; // Remember the first tombstone found.
693
694 // Otherwise, it's a hash collision or a tombstone, continue quadratic
695 // probing.
696 BucketNo += ProbeAmt++;
697 BucketNo &= (NumBuckets-1);
698 }
699 }
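// Illustrative probe sequence: with ProbeAmt starting at 1, a key that hashes
// to bucket H is checked at H, H+1, H+3, H+6, H+10, ... (mod NumBuckets), the
// triangular-number offsets of quadratic probing. Because NumBuckets is a power
// of two, this sequence eventually visits every bucket, so the loop terminates
// as long as at least one bucket is empty.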
700
701 template <typename LookupKeyT>
702 bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
703 const BucketT *ConstFoundBucket;
704 bool Result = const_cast<const DenseMapBase *>(this)
705 ->LookupBucketFor(Val, ConstFoundBucket);
706 FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
707 return Result;
708 }
709
710public:
711 /// Return the approximate size (in bytes) of the actual map.
712 /// This is just the raw memory used by DenseMap.
713 /// If entries are pointers to objects, the size of the referenced objects
714 /// are not included.
715 size_t getMemorySize() const {
716 return getNumBuckets() * sizeof(BucketT);
717 }
718};
719
720/// Equality comparison for DenseMap.
721///
722/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
723/// is also in RHS, and that no additional pairs are in RHS.
724/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
725/// complexity is linear, worst case is O(N^2) (if every hash collides).
726template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
727 typename BucketT>
728bool operator==(
729 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
730 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
731 if (LHS.size() != RHS.size())
732 return false;
733
734 for (auto &KV : LHS) {
735 auto I = RHS.find(KV.first);
736 if (I == RHS.end() || I->second != KV.second)
737 return false;
738 }
739
740 return true;
741}
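// Example (illustrative): equality is element-wise and independent of bucket
// layout or insertion order.
//
//   llvm::DenseMap<int, int> A = {{1, 10}, {2, 20}};
//   llvm::DenseMap<int, int> B;
//   B[2] = 20;
//   B[1] = 10;
//   bool Same = (A == B);   // true: same key/value pairs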
742
743/// Inequality comparison for DenseMap.
744///
745/// Equivalent to !(LHS == RHS). See operator== for performance notes.
746template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
747 typename BucketT>
748bool operator!=(
749 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
750 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
751 return !(LHS == RHS);
752}
753
754template <typename KeyT, typename ValueT,
755 typename KeyInfoT = DenseMapInfo<KeyT>,
756 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
757class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
758 KeyT, ValueT, KeyInfoT, BucketT> {
759 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
760
761 // Lift some types from the dependent base class into this class for
762 // simplicity of referring to them.
763 using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
764
765 BucketT *Buckets;
766 unsigned NumEntries;
767 unsigned NumTombstones;
768 unsigned NumBuckets;
769
770public:
771 /// Create a DenseMap with an optional \p InitialReserve that guarantees that
772 /// this number of elements can be inserted into the map without needing to grow().
773 explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
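// Example (illustrative): an initial reserve sizes the table at construction,
// so the first 100 insertions below cannot trigger grow().
//
//   llvm::DenseMap<unsigned, double> Weights(/*InitialReserve=*/100);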
774
775 DenseMap(const DenseMap &other) : BaseT() {
776 init(0);
777 copyFrom(other);
778 }
779
780 DenseMap(DenseMap &&other) : BaseT() {
781 init(0);
782 swap(other);
783 }
784
785 template<typename InputIt>
786 DenseMap(const InputIt &I, const InputIt &E) {
787 init(std::distance(I, E));
788 this->insert(I, E);
789 }
790
791 DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
792 init(Vals.size());
793 this->insert(Vals.begin(), Vals.end());
794 }
795
796 ~DenseMap() {
797 this->destroyAll();
798 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
799 }
800
801 void swap(DenseMap& RHS) {
802 this->incrementEpoch();
803 RHS.incrementEpoch();
804 std::swap(Buckets, RHS.Buckets);
805 std::swap(NumEntries, RHS.NumEntries);
806 std::swap(NumTombstones, RHS.NumTombstones);
807 std::swap(NumBuckets, RHS.NumBuckets);
808 }
809
810 DenseMap& operator=(const DenseMap& other) {
811 if (&other != this)
812 copyFrom(other);
813 return *this;
814 }
815
816 DenseMap& operator=(DenseMap &&other) {
817 this->destroyAll();
818 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
819 init(0);
820 swap(other);
821 return *this;
822 }
823
824 void copyFrom(const DenseMap& other) {
825 this->destroyAll();
826 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
827 if (allocateBuckets(other.NumBuckets)) {
828 this->BaseT::copyFrom(other);
829 } else {
830 NumEntries = 0;
831 NumTombstones = 0;
832 }
833 }
834
835 void init(unsigned InitNumEntries) {
836 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
837 if (allocateBuckets(InitBuckets)) {
838 this->BaseT::initEmpty();
839 } else {
840 NumEntries = 0;
841 NumTombstones = 0;
842 }
843 }
844
845 void grow(unsigned AtLeast) {
846 unsigned OldNumBuckets = NumBuckets;
847 BucketT *OldBuckets = Buckets;
848
849 allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
850 assert(Buckets);
851 if (!OldBuckets) {
852 this->BaseT::initEmpty();
853 return;
854 }
855
856 this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
857
858 // Free the old table.
859 deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
860 alignof(BucketT));
861 }
862
863 void shrink_and_clear() {
864 unsigned OldNumBuckets = NumBuckets;
865 unsigned OldNumEntries = NumEntries;
866 this->destroyAll();
867
868 // Reduce the number of buckets.
869 unsigned NewNumBuckets = 0;
870 if (OldNumEntries)
871 NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
872 if (NewNumBuckets == NumBuckets) {
873 this->BaseT::initEmpty();
874 return;
875 }
876
877 deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
878 alignof(BucketT));
879 init(NewNumBuckets);
880 }
881
882private:
883 unsigned getNumEntries() const {
884 return NumEntries;
885 }
886
887 void setNumEntries(unsigned Num) {
888 NumEntries = Num;
889 }
890
891 unsigned getNumTombstones() const {
892 return NumTombstones;
893 }
894
895 void setNumTombstones(unsigned Num) {
896 NumTombstones = Num;
897 }
898
899 BucketT *getBuckets() const {
900 return Buckets;
901 }
902
903 unsigned getNumBuckets() const {
904 return NumBuckets;
905 }
906
907 bool allocateBuckets(unsigned Num) {
908 NumBuckets = Num;
909 if (NumBuckets == 0) {
910 Buckets = nullptr;
911 return false;
912 }
913
914 Buckets = static_cast<BucketT *>(
915 allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
916 return true;
917 }
918};
919
920template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
921 typename KeyInfoT = DenseMapInfo<KeyT>,
922 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
923class SmallDenseMap
924 : public DenseMapBase<
925 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
926 ValueT, KeyInfoT, BucketT> {
927 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
928
929 // Lift some types from the dependent base class into this class for
930 // simplicity of referring to them.
931 using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
932
933 static_assert(isPowerOf2_64(InlineBuckets),
934 "InlineBuckets must be a power of 2.");
935
936 unsigned Small : 1;
937 unsigned NumEntries : 31;
938 unsigned NumTombstones;
939
940 struct LargeRep {
941 BucketT *Buckets;
942 unsigned NumBuckets;
943 };
944
945 /// A "union" of an inline bucket array and the struct representing
946 /// a large bucket. This union will be discriminated by the 'Small' bit.
947 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
948
949public:
950 explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
951 if (NumInitBuckets > InlineBuckets)
952 NumInitBuckets = llvm::bit_ceil(NumInitBuckets);
953 init(NumInitBuckets);
954 }
955
956 SmallDenseMap(const SmallDenseMap &other) : BaseT() {
957 init(0);
958 copyFrom(other);
959 }
960
961 SmallDenseMap(SmallDenseMap &&other) : BaseT() {
962 init(0);
963 swap(other);
964 }
965
966 template<typename InputIt>
967 SmallDenseMap(const InputIt &I, const InputIt &E) {
968 init(NextPowerOf2(std::distance(I, E)));
969 this->insert(I, E);
970 }
971
972 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
973 : SmallDenseMap(Vals.begin(), Vals.end()) {}
974
975 ~SmallDenseMap() {
976 this->destroyAll();
977 deallocateBuckets();
978 }
979
980 void swap(SmallDenseMap& RHS) {
981 unsigned TmpNumEntries = RHS.NumEntries;
982 RHS.NumEntries = NumEntries;
983 NumEntries = TmpNumEntries;
984 std::swap(NumTombstones, RHS.NumTombstones);
985
986 const KeyT EmptyKey = this->getEmptyKey();
987 const KeyT TombstoneKey = this->getTombstoneKey();
988 if (Small && RHS.Small) {
989 // If we're swapping inline bucket arrays, we have to cope with some of
990 // the tricky bits of DenseMap's storage system: the buckets are not
991 // fully initialized. Thus we swap every key, but we may have
992 // a one-directional move of the value.
993 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
994 BucketT *LHSB = &getInlineBuckets()[i],
995 *RHSB = &RHS.getInlineBuckets()[i];
996 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
997 !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
998 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
999 !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
1000 if (hasLHSValue && hasRHSValue) {
1001 // Swap together if we can...
1002 std::swap(*LHSB, *RHSB);
1003 continue;
1004 }
1005 // Swap separately and handle any asymmetry.
1006 std::swap(LHSB->getFirst(), RHSB->getFirst());
1007 if (hasLHSValue) {
1008 ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
1009 LHSB->getSecond().~ValueT();
1010 } else if (hasRHSValue) {
1011 ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
1012 RHSB->getSecond().~ValueT();
1013 }
1014 }
1015 return;
1016 }
1017 if (!Small && !RHS.Small) {
1018 std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
1019 std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
1020 return;
1021 }
1022
1023 SmallDenseMap &SmallSide = Small ? *this : RHS;
1024 SmallDenseMap &LargeSide = Small ? RHS : *this;
1025
1026 // First stash the large side's rep and move the small side across.
1027 LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
1028 LargeSide.getLargeRep()->~LargeRep();
1029 LargeSide.Small = true;
1030 // This is similar to the standard move-from-old-buckets, but the bucket
1031 // count hasn't actually rotated in this case. So we have to carefully
1032 // move construct the keys and values into their new locations, but there
1033 // is no need to re-hash things.
1034 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
1035 BucketT *NewB = &LargeSide.getInlineBuckets()[i],
1036 *OldB = &SmallSide.getInlineBuckets()[i];
1037 ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
1038 OldB->getFirst().~KeyT();
1039 if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
1040 !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
1041 ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
1042 OldB->getSecond().~ValueT();
1043 }
1044 }
1045
1046 // The hard part of moving the small buckets across is done, just move
1047 // the TmpRep into its new home.
1048 SmallSide.Small = false;
1049 new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
1050 }
1051
1052 SmallDenseMap& operator=(const SmallDenseMap& other) {
1053 if (&other != this)
1054 copyFrom(other);
1055 return *this;
1056 }
1057
1058 SmallDenseMap& operator=(SmallDenseMap &&other) {
1059 this->destroyAll();
1060 deallocateBuckets();
1061 init(0);
1062 swap(other);
1063 return *this;
1064 }
1065
1066 void copyFrom(const SmallDenseMap& other) {
1067 this->destroyAll();
1068 deallocateBuckets();
1069 Small = true;
1070 if (other.getNumBuckets() > InlineBuckets) {
1071 Small = false;
1072 new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1073 }
1074 this->BaseT::copyFrom(other);
1075 }
1076
1077 void init(unsigned InitBuckets) {
1078 Small = true;
1079 if (InitBuckets > InlineBuckets) {
1080 Small = false;
1081 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1082 }
1083 this->BaseT::initEmpty();
1084 }
1085
1086 void grow(unsigned AtLeast) {
1087 if (AtLeast > InlineBuckets)
1088 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
1089
1090 if (Small) {
1091 // First move the inline buckets into a temporary storage.
1092 AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1093 BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1094 BucketT *TmpEnd = TmpBegin;
1095
1096 // Loop over the buckets, moving non-empty, non-tombstones into the
1097 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1098 const KeyT EmptyKey = this->getEmptyKey();
1099 const KeyT TombstoneKey = this->getTombstoneKey();
1100 for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
1101 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
1102 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
1103 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1104 "Too many inline buckets!");
1105 ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
1106 ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
1107 ++TmpEnd;
1108 P->getSecond().~ValueT();
1109 }
1110 P->getFirst().~KeyT();
1111 }
1112
1113 // AtLeast == InlineBuckets can happen if there are many tombstones,
1114 // and grow() is used to remove them. In that case we stay small; otherwise
1115 // we switch to the large rep here.
1116 if (AtLeast > InlineBuckets) {
1117 Small = false;
1118 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1119 }
1120 this->moveFromOldBuckets(TmpBegin, TmpEnd);
1121 return;
1122 }
1123
1124 LargeRep OldRep = std::move(*getLargeRep());
1125 getLargeRep()->~LargeRep();
1126 if (AtLeast <= InlineBuckets) {
1127 Small = true;
1128 } else {
1129 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1130 }
1131
1132 this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1133
1134 // Free the old table.
1135 deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1136 alignof(BucketT));
1137 }
1138
1139 void shrink_and_clear() {
1140 unsigned OldSize = this->size();
1141 this->destroyAll();
1142
1143 // Reduce the number of buckets.
1144 unsigned NewNumBuckets = 0;
1145 if (OldSize) {
1146 NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1147 if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1148 NewNumBuckets = 64;
1149 }
1150 if ((Small && NewNumBuckets <= InlineBuckets) ||
1151 (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1152 this->BaseT::initEmpty();
1153 return;
1154 }
1155
1156 deallocateBuckets();
1157 init(NewNumBuckets);
1158 }
1159
1160private:
1161 unsigned getNumEntries() const {
1162 return NumEntries;
1163 }
1164
1165 void setNumEntries(unsigned Num) {
1166 // NumEntries is hardcoded to be 31 bits wide.
1167 assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1168 NumEntries = Num;
1169 }
1170
1171 unsigned getNumTombstones() const {
1172 return NumTombstones;
1173 }
1174
1175 void setNumTombstones(unsigned Num) {
1176 NumTombstones = Num;
1177 }
1178
1179 const BucketT *getInlineBuckets() const {
1180 assert(Small);
1181 // Note that this cast does not violate aliasing rules as we assert that
1182 // the memory's dynamic type is the small, inline bucket buffer, and the
1183 // 'storage' is a POD containing a char buffer.
1184 return reinterpret_cast<const BucketT *>(&storage);
1185 }
1186
1187 BucketT *getInlineBuckets() {
1188 return const_cast<BucketT *>(
1189 const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1190 }
1191
1192 const LargeRep *getLargeRep() const {
1193 assert(!Small);
1194 // Note, same rule about aliasing as with getInlineBuckets.
1195 return reinterpret_cast<const LargeRep *>(&storage);
1196 }
1197
1198 LargeRep *getLargeRep() {
1199 return const_cast<LargeRep *>(
1200 const_cast<const SmallDenseMap *>(this)->getLargeRep());
1201 }
1202
1203 const BucketT *getBuckets() const {
1204 return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1205 }
1206
1207 BucketT *getBuckets() {
1208 return const_cast<BucketT *>(
1209 const_cast<const SmallDenseMap *>(this)->getBuckets());
1210 }
1211
1212 unsigned getNumBuckets() const {
1213 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1214 }
1215
1216 void deallocateBuckets() {
1217 if (Small)
1218 return;
1219
1220 deallocate_buffer(getLargeRep()->Buckets,
1221 sizeof(BucketT) * getLargeRep()->NumBuckets,
1222 alignof(BucketT));
1223 getLargeRep()->~LargeRep();
1224 }
1225
1226 LargeRep allocateBuckets(unsigned Num) {
1227 assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1228 LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1229 sizeof(BucketT) * Num, alignof(BucketT))),
1230 Num};
1231 return Rep;
1232 }
1233};
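// Example (illustrative): SmallDenseMap stores up to InlineBuckets buckets
// inside the object itself and only heap-allocates once it must grow past them.
//
//   llvm::SmallDenseMap<int, int, 8> M;   // 8 inline buckets, no allocation yet
//   for (int I = 0; I < 4; ++I)
//     M[I] = I * I;                       // still within the inline storage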
1234
1235template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1236 bool IsConst>
1237class DenseMapIterator : DebugEpochBase::HandleBase {
1238 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1239 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1240
1241public:
1242 using difference_type = ptrdiff_t;
1243 using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
1244 using pointer = value_type *;
1245 using reference = value_type &;
1246 using iterator_category = std::forward_iterator_tag;
1247
1248private:
1249 pointer Ptr = nullptr;
1250 pointer End = nullptr;
1251
1252public:
1253 DenseMapIterator() = default;
1254
1255 DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
1256 bool NoAdvance = false)
1257 : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1258 assert(isHandleInSync() && "invalid construction!");
1259
1260 if (NoAdvance) return;
1261 if (shouldReverseIterate<KeyT>()) {
1262 RetreatPastEmptyBuckets();
1263 return;
1264 }
1265 AdvancePastEmptyBuckets();
1266 }
1267
1268 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1269 // for const iterator destinations so it doesn't end up as a user defined copy
1270 // constructor.
1271 template <bool IsConstSrc,
1272 typename = std::enable_if_t<!IsConstSrc && IsConst>>
1273 DenseMapIterator(
1274 const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1275 : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1276
1277 reference operator*() const {
1278 assert(isHandleInSync() && "invalid iterator access!");
1279 assert(Ptr != End && "dereferencing end() iterator");
1280 if (shouldReverseIterate<KeyT>())
1281 return Ptr[-1];
1282 return *Ptr;
1283 }
1284 pointer operator->() const {
1285 assert(isHandleInSync() && "invalid iterator access!");
1286 assert(Ptr != End && "dereferencing end() iterator");
1287 if (shouldReverseIterate<KeyT>())
1288 return &(Ptr[-1]);
1289 return Ptr;
1290 }
1291
1292 friend bool operator==(const DenseMapIterator &LHS,
1293 const DenseMapIterator &RHS) {
1294 assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
1295 assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1296 assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1297 "comparing incomparable iterators!");
1298 return LHS.Ptr == RHS.Ptr;
1299 }
1300
1301 friend bool operator!=(const DenseMapIterator &LHS,
1302 const DenseMapIterator &RHS) {
1303 return !(LHS == RHS);
1304 }
1305
1306 inline DenseMapIterator& operator++() { // Preincrement
1307 assert(isHandleInSync() && "invalid iterator access!");
1308 assert(Ptr != End && "incrementing end() iterator");
1309 if (shouldReverseIterate<KeyT>()) {
1310 --Ptr;
1311 RetreatPastEmptyBuckets();
1312 return *this;
1313 }
1314 ++Ptr;
1315 AdvancePastEmptyBuckets();
1316 return *this;
1317 }
1318 DenseMapIterator operator++(int) { // Postincrement
1319 assert(isHandleInSync() && "invalid iterator access!");
1320 DenseMapIterator tmp = *this; ++*this; return tmp;
1321 }
1322
1323private:
1324 void AdvancePastEmptyBuckets() {
1325 assert(Ptr <= End);
1326 const KeyT Empty = KeyInfoT::getEmptyKey();
1327 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1328
1329 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1330 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1331 ++Ptr;
1332 }
1333
1334 void RetreatPastEmptyBuckets() {
1335 assert(Ptr >= End);
1336 const KeyT Empty = KeyInfoT::getEmptyKey();
1337 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1338
1339 while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1340 KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1341 --Ptr;
1342 }
1343};
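// Example (illustrative, M is a placeholder llvm::DenseMap<int, unsigned>):
// iteration visits only live entries; when LLVM's ABI-breaking checks are
// enabled, the epoch handle above asserts on use of stale iterators, since any
// operation that can grow the table invalidates them.
//
//   unsigned Sum = 0;
//   for (const auto &KV : M)
//     Sum += KV.second;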
1344
1345template <typename KeyT, typename ValueT, typename KeyInfoT>
1346inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1347 return X.getMemorySize();
1348}
1349
1350} // end namespace llvm
1351
1352#endif // LLVM_ADT_DENSEMAP_H