LLVM 23.0.0git
DenseMap.h
Go to the documentation of this file.
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/ADL.h"
20#include "llvm/ADT/STLExtras.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <cstring>
32#include <initializer_list>
33#include <iterator>
34#include <new>
35#include <type_traits>
36#include <utility>
37
38namespace llvm {
39
40namespace detail {
41
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : std::pair<KeyT, ValueT> {
  // Shorthand for the base so the accessors below stay readable.
  using Base = std::pair<KeyT, ValueT>;
  using Base::pair;

  // Named accessors mirroring .first/.second; bucket overrides provide the
  // same interface without necessarily storing a std::pair.
  KeyT &getFirst() { return Base::first; }
  const KeyT &getFirst() const { return Base::first; }
  ValueT &getSecond() { return Base::second; }
  const ValueT &getSecond() const { return Base::second; }
};
53
54} // end namespace detail
55
56template <typename KeyT, typename ValueT,
57 typename KeyInfoT = DenseMapInfo<KeyT>,
59 bool IsConst = false>
60class DenseMapIterator;
61
62template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
63 typename BucketT>
65 template <typename T>
66 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
67
68public:
70 using key_type = KeyT;
71 using mapped_type = ValueT;
72 using value_type = BucketT;
73
77
  // Iteration over occupied buckets. makeBegin skips past empty/tombstone
  // buckets (and takes the empty() fast path); makeEnd is one past the array.
  [[nodiscard]] inline iterator begin() {
    return iterator::makeBegin(buckets(), empty(), *this);
  }
  [[nodiscard]] inline iterator end() {
    return iterator::makeEnd(buckets(), *this);
  }
  [[nodiscard]] inline const_iterator begin() const {
    return const_iterator::makeBegin(buckets(), empty(), *this);
  }
  [[nodiscard]] inline const_iterator end() const {
    return const_iterator::makeEnd(buckets(), *this);
  }

  // Return an iterator to iterate over keys in the map.
  // NOTE: the projection lambda returns by value, so this range yields
  // copies of the keys, not references.
  [[nodiscard]] inline auto keys() {
    return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
  }

  // Return an iterator to iterate over values in the map.
  // NOTE: yields copies of the values (lambda returns by value).
  [[nodiscard]] inline auto values() {
    return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
  }

  [[nodiscard]] inline auto keys() const {
    return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
  }

  [[nodiscard]] inline auto values() const {
    return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
  }

  /// Return true if the map holds no entries (tombstones don't count).
  [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
  /// Return the number of entries currently in the map.
  [[nodiscard]] unsigned size() const { return getNumEntries(); }
111
112 /// Grow the densemap so that it can contain at least \p NumEntries items
113 /// before resizing again.
114 void reserve(size_type NumEntries) {
115 auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
117 if (NumBuckets > getNumBuckets())
118 grow(NumBuckets);
119 }
120
121 void clear() {
123 if (getNumEntries() == 0 && getNumTombstones() == 0)
124 return;
125
126 // If the capacity of the array is huge, and the # elements used is small,
127 // shrink the array.
128 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
130 return;
131 }
132
133 const KeyT EmptyKey = KeyInfoT::getEmptyKey();
134 if constexpr (std::is_trivially_destructible_v<ValueT>) {
135 // Use a simpler loop when values don't need destruction.
136 for (BucketT &B : buckets())
137 B.getFirst() = EmptyKey;
138 } else {
139 const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
140 unsigned NumEntries = getNumEntries();
141 for (BucketT &B : buckets()) {
142 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey)) {
143 if (!KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
144 B.getSecond().~ValueT();
145 --NumEntries;
146 }
147 B.getFirst() = EmptyKey;
148 }
149 }
150 assert(NumEntries == 0 && "Node count imbalance!");
151 (void)NumEntries;
152 }
153 setNumEntries(0);
154 setNumTombstones(0);
155 }
156
158 auto [Reallocate, NewNumBuckets] = derived().planShrinkAndClear();
159 destroyAll();
160 if (!Reallocate) {
161 initEmpty();
162 return;
163 }
164 derived().deallocateBuckets();
165 initWithExactBucketCount(NewNumBuckets);
166 }
167
  /// Return true if the specified key is in the map, false otherwise.
  [[nodiscard]] bool contains(const_arg_type_t<KeyT> Val) const {
    return doFind(Val) != nullptr;
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  [[nodiscard]] size_type count(const_arg_type_t<KeyT> Val) const {
    return contains(Val) ? 1 : 0;
  }

  /// Return an iterator to the entry for \p Val, or end() if absent.
  [[nodiscard]] iterator find(const_arg_type_t<KeyT> Val) {
    return find_as(Val);
  }
  [[nodiscard]] const_iterator find(const_arg_type_t<KeyT> Val) const {
    return find_as(Val);
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <class LookupKeyT>
  [[nodiscard]] iterator find_as(const LookupKeyT &Val) {
    if (BucketT *Bucket = doFind(Val))
      return makeIterator(Bucket);
    return end();
  }
  template <class LookupKeyT>
  [[nodiscard]] const_iterator find_as(const LookupKeyT &Val) const {
    if (const BucketT *Bucket = doFind(Val))
      return makeConstIterator(Bucket);
    return end();
  }

  /// Return the entry for the specified key, or a default constructed value if
  /// no such entry exists. Note: returns the value by copy.
  [[nodiscard]] ValueT lookup(const_arg_type_t<KeyT> Val) const {
    if (const BucketT *Bucket = doFind(Val))
      return Bucket->getSecond();
    return ValueT();
  }

  // Return the entry with the specified key, or \p Default. This variant is
  // useful, because `lookup` cannot be used with non-default-constructible
  // values.
  template <typename U = std::remove_cv_t<ValueT>>
  [[nodiscard]] ValueT lookup_or(const_arg_type_t<KeyT> Val,
                                 U &&Default) const {
    if (const BucketT *Bucket = doFind(Val))
      return Bucket->getSecond();
    return Default;
  }

  /// Return the entry for the specified key, or abort if no such entry exists.
  /// (The abort is via assert, i.e. only in builds with assertions enabled.)
  [[nodiscard]] ValueT &at(const_arg_type_t<KeyT> Val) {
    auto Iter = this->find(std::move(Val));
    assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
    return Iter->second;
  }

  /// Return the entry for the specified key, or abort if no such entry exists.
  [[nodiscard]] const ValueT &at(const_arg_type_t<KeyT> Val) const {
    auto Iter = this->find(std::move(Val));
    assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
    return Iter->second;
  }
235
  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    return try_emplace_impl(KV.first, KV.second);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    return try_emplace_impl(std::move(KV.first), std::move(KV.second));
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&...Args) {
    return try_emplace_impl(std::move(Key), std::forward<Ts>(Args)...);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&...Args) {
    return try_emplace_impl(Key, std::forward<Ts>(Args)...);
  }

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                      const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return {makeIterator(TheBucket), false}; // Already in map.

    // Otherwise, insert the new element.
    // The key slot already holds a live marker KeyT (so it is assigned);
    // the value slot is raw storage and must be placement-new constructed.
    TheBucket = findBucketForInsertion(Val, TheBucket);
    TheBucket->getFirst() = std::move(KV.first);
    ::new (&TheBucket->getSecond()) ValueT(std::move(KV.second));
    return {makeIterator(TheBucket), true};
  }

  /// Range insertion of pairs.
  template <typename InputIt> void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }

  /// Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
  template <typename Range> void insert_range(Range &&R) {
    insert(adl_begin(R), adl_end(R));
  }

  // Insert if absent, otherwise assign \p Val over the existing mapped value.
  // Forwarding Val twice is safe: try_emplace only consumes it on insertion,
  // and the assignment below runs only when insertion did not happen.
  template <typename V>
  std::pair<iterator, bool> insert_or_assign(const KeyT &Key, V &&Val) {
    auto Ret = try_emplace(Key, std::forward<V>(Val));
    if (!Ret.second)
      Ret.first->second = std::forward<V>(Val);
    return Ret;
  }

  template <typename V>
  std::pair<iterator, bool> insert_or_assign(KeyT &&Key, V &&Val) {
    auto Ret = try_emplace(std::move(Key), std::forward<V>(Val));
    if (!Ret.second)
      Ret.first->second = std::forward<V>(Val);
    return Ret;
  }

  // Emplace if absent, otherwise replace the mapped value with a freshly
  // constructed ValueT. Args are only consumed on one of the two paths.
  template <typename... Ts>
  std::pair<iterator, bool> emplace_or_assign(const KeyT &Key, Ts &&...Args) {
    auto Ret = try_emplace(Key, std::forward<Ts>(Args)...);
    if (!Ret.second)
      Ret.first->second = ValueT(std::forward<Ts>(Args)...);
    return Ret;
  }

  template <typename... Ts>
  std::pair<iterator, bool> emplace_or_assign(KeyT &&Key, Ts &&...Args) {
    auto Ret = try_emplace(std::move(Key), std::forward<Ts>(Args)...);
    if (!Ret.second)
      Ret.first->second = ValueT(std::forward<Ts>(Args)...);
    return Ret;
  }

  /// Erase \p Val if present; returns true if an entry was removed. The slot
  /// becomes a tombstone (not empty) so existing probe chains stay intact.
  bool erase(const KeyT &Val) {
    BucketT *TheBucket = doFind(Val);
    if (!TheBucket)
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = KeyInfoT::getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
340 BucketT *TheBucket = &*I;
341 TheBucket->getSecond().~ValueT();
342 TheBucket->getFirst() = KeyInfoT::getTombstoneKey();
343 decrementNumEntries();
344 incrementNumTombstones();
345 }
346
  /// std::map-style subscript: return a reference to the value for \p Key,
  /// value-initializing and inserting it first if absent.
  ValueT &operator[](const KeyT &Key) {
    return lookupOrInsertIntoBucket(Key).first->second;
  }

  ValueT &operator[](KeyT &&Key) {
    return lookupOrInsertIntoBucket(std::move(Key)).first->second;
  }

  /// Return true if the specified pointer points somewhere into the DenseMap's
  /// array of buckets (i.e. either to a key or value in the DenseMap).
  [[nodiscard]] bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array. In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  [[nodiscard]] const void *getPointerIntoBucketsArray() const {
    return getBuckets();
  }

  /// Exchange contents with \p RHS. Both epochs are bumped first (presumably
  /// invalidating outstanding handles in epoch-tracking debug builds — see
  /// DebugEpochBase), then the derived class swaps its representation.
  void swap(DerivedT &RHS) {
    this->incrementEpoch();
    RHS.incrementEpoch();
    derived().swapImpl(RHS);
  }
373
374protected:
375 DenseMapBase() = default;
376
378
  /// Allocate exactly \p NewNumBuckets buckets and reset to the empty state.
  /// If allocateBuckets reports no storage (zero buckets), only the counters
  /// are reset.
  void initWithExactBucketCount(unsigned NewNumBuckets) {
    if (derived().allocateBuckets(NewNumBuckets)) {
      initEmpty();
    } else {
      setNumEntries(0);
      setNumTombstones(0);
    }
  }

  /// Run destructors: values of occupied buckets, and the key objects of
  /// every bucket (empty/tombstone markers are live KeyT objects too).
  /// Does not free the bucket storage.
  void destroyAll() {
    // No need to iterate through the buckets if both KeyT and ValueT are
    // trivially destructible.
    if constexpr (std::is_trivially_destructible_v<KeyT> &&
                  std::is_trivially_destructible_v<ValueT>)
      return;

    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = KeyInfoT::getEmptyKey();
    const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
    for (BucketT &B : buckets()) {
      // Only occupied buckets hold a constructed ValueT.
      if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B.getFirst(), TombstoneKey))
        B.getSecond().~ValueT();
      B.getFirst().~KeyT();
    }
  }

  /// Mark every bucket empty. Treats the key slots as raw memory: each key is
  /// placement-new constructed with the empty marker.
  void initEmpty() {
    static_assert(std::is_base_of_v<DenseMapBase, DerivedT>,
                  "Must pass the derived type to this template!");
    setNumEntries(0);
    setNumTombstones(0);

    // Probing uses mask-based indexing, so the count must be a power of two.
    assert((getNumBuckets() & (getNumBuckets() - 1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = KeyInfoT::getEmptyKey();
    for (BucketT &B : buckets())
      ::new (&B.getFirst()) KeyT(EmptyKey);
  }

  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without need to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality.
    // For example, if NumEntries is 48, we need to return 128.
    return NextPowerOf2(NumEntries * 4 / 3 + 1);
  }

  // Move key/value from Other to *this.
  // Other is left in a valid but empty state.
  void moveFrom(DerivedT &Other) {
    // Insert all the old elements.
    const KeyT EmptyKey = KeyInfoT::getEmptyKey();
    const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
    for (BucketT &B : Other.buckets()) {
      if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B.getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        // Key slot holds a live marker (assign); value slot is raw memory
        // (placement new).
        DestBucket->getFirst() = std::move(B.getFirst());
        ::new (&DestBucket->getSecond()) ValueT(std::move(B.getSecond()));
        incrementNumEntries();

        // Free the value.
        B.getSecond().~ValueT();
      }
      B.getFirst().~KeyT();
    }
    // Release Other's storage and reset it to a known-good empty state.
    Other.derived().kill();
  }

  /// Replace this map's contents with a copy of \p other. Callers must ensure
  /// &other != this (operator= checks); note the assert below only fires
  /// after the old storage has already been torn down.
  void copyFrom(const DerivedT &other) {
    this->destroyAll();
    derived().deallocateBuckets();
    setNumEntries(0);
    setNumTombstones(0);
    if (!derived().allocateBuckets(other.getNumBuckets())) {
      // The bucket list is empty. No work to do.
      return;
    }

    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    BucketT *Buckets = getBuckets();
    const BucketT *OtherBuckets = other.getBuckets();
    const size_t NumBuckets = getNumBuckets();
    if constexpr (std::is_trivially_copyable_v<KeyT> &&
                  std::is_trivially_copyable_v<ValueT>) {
      // Trivially copyable contents: one memcpy covers keys, values, and
      // empty/tombstone markers alike.
      memcpy(reinterpret_cast<void *>(Buckets), OtherBuckets,
             NumBuckets * sizeof(BucketT));
    } else {
      const KeyT EmptyKey = KeyInfoT::getEmptyKey();
      const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
      for (size_t I = 0; I < NumBuckets; ++I) {
        ::new (&Buckets[I].getFirst()) KeyT(OtherBuckets[I].getFirst());
        // Values are copy-constructed only for occupied buckets.
        if (!KeyInfoT::isEqual(Buckets[I].getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(Buckets[I].getFirst(), TombstoneKey))
          ::new (&Buckets[I].getSecond()) ValueT(OtherBuckets[I].getSecond());
      }
    }
  }
492
493private:
  // CRTP accessors for the concrete map type (e.g. DenseMap / SmallDenseMap).
  DerivedT &derived() { return *static_cast<DerivedT *>(this); }
  const DerivedT &derived() const {
    return *static_cast<const DerivedT *>(this);
  }

  /// Find the bucket for \p Key, inserting a new entry constructed from
  /// \p Args if the key is absent. Returns {bucket, inserted}.
  template <typename KeyArgT, typename... Ts>
  std::pair<BucketT *, bool> lookupOrInsertIntoBucket(KeyArgT &&Key,
                                                      Ts &&...Args) {
    BucketT *TheBucket = nullptr;
    if (LookupBucketFor(Key, TheBucket))
      return {TheBucket, false}; // Already in the map.

    // Otherwise, insert the new element.
    // Key slot holds a live marker KeyT (assign); value slot is raw memory
    // (placement new).
    TheBucket = findBucketForInsertion(Key, TheBucket);
    TheBucket->getFirst() = std::forward<KeyArgT>(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::forward<Ts>(Args)...);
    return {TheBucket, true};
  }

  /// Shared implementation behind insert()/try_emplace(): wraps the bucket
  /// result of lookupOrInsertIntoBucket in an iterator.
  template <typename KeyArgT, typename... Ts>
  std::pair<iterator, bool> try_emplace_impl(KeyArgT &&Key, Ts &&...Args) {
    auto [Bucket, Inserted] = lookupOrInsertIntoBucket(
        std::forward<KeyArgT>(Key), std::forward<Ts>(Args)...);
    return {makeIterator(Bucket), Inserted};
  }

  iterator makeIterator(BucketT *TheBucket) {
    return iterator::makeIterator(TheBucket, buckets(), *this);
  }

  const_iterator makeConstIterator(const BucketT *TheBucket) const {
    return const_iterator::makeIterator(TheBucket, buckets(), *this);
  }

  // Entry/tombstone counters and bucket storage live in the derived class;
  // everything below delegates through the CRTP hooks.
  unsigned getNumEntries() const { return derived().getNumEntries(); }

  void setNumEntries(unsigned Num) { derived().setNumEntries(Num); }

  void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }

  void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }

  unsigned getNumTombstones() const { return derived().getNumTombstones(); }

  void setNumTombstones(unsigned Num) { derived().setNumTombstones(Num); }

  void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }

  void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }

  const BucketT *getBuckets() const { return derived().getBuckets(); }

  BucketT *getBuckets() { return derived().getBuckets(); }

  unsigned getNumBuckets() const { return derived().getNumBuckets(); }

  BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  // The whole bucket array (occupied and empty/tombstone alike) as a range.
  iterator_range<BucketT *> buckets() {
    return llvm::make_range(getBuckets(), getBucketsEnd());
  }

  iterator_range<const BucketT *> buckets() const {
    return llvm::make_range(getBuckets(), getBucketsEnd());
  }
563
  /// Grow the bucket table so it holds at least \p MinNumBuckets buckets
  /// (rounded up by the derived class), re-inserting all current entries.
  void grow(unsigned MinNumBuckets) {
    unsigned NumBuckets = DerivedT::roundUpNumBuckets(MinNumBuckets);
    // Build the resized table in Tmp and move our elements into it; moveFrom
    // also kills *this's old storage.
    DerivedT Tmp(NumBuckets, ExactBucketCount{});
    Tmp.moveFrom(derived());
    // Fast path: adopt Tmp's representation wholesale (e.g. pointer swap).
    if (derived().maybeMoveFast(std::move(Tmp)))
      return;
    // Slow path (e.g. inline small storage): reallocate here, move back.
    initWithExactBucketCount(NumBuckets);
    moveFrom(Tmp);
  }
573
574 template <typename LookupKeyT>
575 BucketT *findBucketForInsertion(const LookupKeyT &Lookup,
576 BucketT *TheBucket) {
578
579 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
580 // the buckets are empty (meaning that many are filled with tombstones),
581 // grow the table.
582 //
583 // The later case is tricky. For example, if we had one empty bucket with
584 // tons of tombstones, failing lookups (e.g. for insertion) would have to
585 // probe almost the entire table until it found the empty bucket. If the
586 // table completely filled with tombstones, no lookup would ever succeed,
587 // causing infinite loops in lookup.
588 unsigned NewNumEntries = getNumEntries() + 1;
589 unsigned NumBuckets = getNumBuckets();
590 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
591 this->grow(NumBuckets * 2);
592 LookupBucketFor(Lookup, TheBucket);
593 } else if (LLVM_UNLIKELY(NumBuckets -
594 (NewNumEntries + getNumTombstones()) <=
595 NumBuckets / 8)) {
596 this->grow(NumBuckets);
597 LookupBucketFor(Lookup, TheBucket);
598 }
599 assert(TheBucket);
600
601 // Only update the state after we've grown our bucket space appropriately
602 // so that when growing buckets we have self-consistent entry count.
603 incrementNumEntries();
604
605 // If we are writing over a tombstone, remember this.
606 const KeyT EmptyKey = KeyInfoT::getEmptyKey();
607 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
608 decrementNumTombstones();
609
610 return TheBucket;
611 }
612
  /// Probe for \p Val. Returns the bucket holding it, or null if absent.
  template <typename LookupKeyT>
  const BucketT *doFind(const LookupKeyT &Val) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();
    if (NumBuckets == 0)
      return nullptr;

    const KeyT EmptyKey = KeyInfoT::getEmptyKey();
    // NumBuckets is a power of two, so masking is equivalent to modulo.
    unsigned BucketNo = KeyInfoT::getHashValue(Val) & (NumBuckets - 1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *Bucket = BucketsPtr + BucketNo;
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, Bucket->getFirst())))
        return Bucket;
      // An empty bucket terminates the probe chain: the key is not present.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Bucket->getFirst(), EmptyKey)))
        return nullptr;

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing. Termination relies on the grow policy keeping at least one
      // empty bucket in the table.
      BucketNo += ProbeAmt++;
      BucketNo &= NumBuckets - 1;
    }
  }

  /// Non-const overload, implemented in terms of the const one.
  template <typename LookupKeyT> BucketT *doFind(const LookupKeyT &Val) {
    return const_cast<BucketT *>(
        static_cast<const DenseMapBase *>(this)->doFind(Val));
  }
641
  /// Lookup the appropriate bucket for Val, returning it in FoundBucket. If the
  /// bucket contains the key and a value, this returns true, otherwise it
  /// returns a bucket with an empty marker or tombstone and returns false.
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    // No storage yet: report "not found" with no insertable bucket.
    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = KeyInfoT::getEmptyKey();
    const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    // NumBuckets is a power of two, so masking is equivalent to modulo.
    unsigned BucketNo = KeyInfoT::getHashValue(Val) & (NumBuckets - 1);
    unsigned ProbeAmt = 1;
    while (true) {
      BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket? If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it. If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket; // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets - 1);
    }
  }
694
695public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included. sizeof(*this) is also excluded.
  [[nodiscard]] size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
703};
704
705/// Equality comparison for DenseMap.
706///
707/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
708/// is also in RHS, and that no additional pairs are in RHS.
709/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
710/// complexity is linear, worst case is O(N^2) (if every hash collides).
711template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
712 typename BucketT>
713[[nodiscard]] bool
716 if (LHS.size() != RHS.size())
717 return false;
718
719 for (auto &KV : LHS) {
720 auto I = RHS.find(KV.first);
721 if (I == RHS.end() || I->second != KV.second)
722 return false;
723 }
724
725 return true;
726}
727
728/// Inequality comparison for DenseMap.
729///
730/// Equivalent to !(LHS == RHS). See operator== for performance notes.
731template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
732 typename BucketT>
733[[nodiscard]] bool
738
739template <typename KeyT, typename ValueT,
740 typename KeyInfoT = DenseMapInfo<KeyT>,
742class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
743 KeyT, ValueT, KeyInfoT, BucketT> {
744 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
745
746 // Lift some types from the dependent base class into this class for
747 // simplicity of referring to them.
749
750 BucketT *Buckets = nullptr;
751 unsigned NumEntries = 0;
752 unsigned NumTombstones = 0;
753 unsigned NumBuckets = 0;
754
  /// Internal: construct with an exact (already rounded) bucket count.
  explicit DenseMap(unsigned NumBuckets, typename BaseT::ExactBucketCount) {
    this->initWithExactBucketCount(NumBuckets);
  }

public:
  /// Create a DenseMap with an optional \p NumElementsToReserve to guarantee
  /// that this number of elements can be inserted in the map without grow().
  explicit DenseMap(unsigned NumElementsToReserve = 0)
      : DenseMap(BaseT::getMinBucketToReserveForEntries(NumElementsToReserve),
                 typename BaseT::ExactBucketCount{}) {}

  DenseMap(const DenseMap &other) : DenseMap() { this->copyFrom(other); }

  // Move construction: start empty, then exchange representations.
  DenseMap(DenseMap &&other) : DenseMap() { this->swap(other); }

  /// Construct from an iterator range, reserving std::distance(I, E) slots.
  /// NOTE(review): std::distance consumes single-pass input iterators, so this
  /// effectively requires forward (multipass) iterators — confirm.
  template <typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) : DenseMap(std::distance(I, E)) {
    this->insert(I, E);
  }
774
775 template <typename RangeT>
777 : DenseMap(adl_begin(Range), adl_end(Range)) {}
778
  /// Construct from an initializer list of key/value pairs.
  DenseMap(std::initializer_list<typename BaseT::value_type> Vals)
      : DenseMap(Vals.begin(), Vals.end()) {}
781
783 this->destroyAll();
784 deallocateBuckets();
785 }
786
  DenseMap &operator=(const DenseMap &other) {
    // Self-assignment guard is required: copyFrom tears down *this first.
    if (&other != this)
      this->copyFrom(other);
    return *this;
  }
792
793 DenseMap &operator=(DenseMap &&other) {
794 this->destroyAll();
795 deallocateBuckets();
797 this->swap(other);
798 return *this;
799 }
800
801private:
  // Exchange all four members with RHS; reached via DenseMapBase::swap.
  void swapImpl(DenseMap &RHS) {
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  // CRTP hooks consumed by DenseMapBase: plain field accessors.
  unsigned getNumEntries() const { return NumEntries; }

  void setNumEntries(unsigned Num) { NumEntries = Num; }

  unsigned getNumTombstones() const { return NumTombstones; }

  void setNumTombstones(unsigned Num) { NumTombstones = Num; }

  BucketT *getBuckets() const { return Buckets; }

  unsigned getNumBuckets() const { return NumBuckets; }

  // Free the bucket array. Does not run element destructors (callers invoke
  // destroyAll() first) and does not reset the members (see kill()).
  // NOTE(review): assumes deallocate_buffer tolerates the (nullptr, 0) state
  // produced by allocateBuckets(0) — confirm.
  void deallocateBuckets() {
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
  }
824
  // Allocate raw storage for \p Num buckets; returns false (with a null
  // pointer) when Num == 0. The storage is uninitialized — callers run
  // initEmpty() to plant the empty-key markers.
  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    Buckets = static_cast<BucketT *>(
        allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
    return true;
  }

  // Put the zombie instance in a known good state after a move.
  void kill() {
    deallocateBuckets();
    Buckets = nullptr;
    NumBuckets = 0;
  }

  // Round up to a power of two with a floor of 64 buckets.
  // NOTE(review): for MinNumBuckets == 0 the subtraction wraps; the truncating
  // cast appears to make the result clamp to 64 anyway — confirm NextPowerOf2
  // semantics for the wrapped value.
  static unsigned roundUpNumBuckets(unsigned MinNumBuckets) {
    return std::max(64u,
                    static_cast<unsigned>(NextPowerOf2(MinNumBuckets - 1)));
  }

  // Heap-allocated tables can always adopt a grown table by swapping
  // representations, so grow() never needs the slow fallback here.
  bool maybeMoveFast(DenseMap &&Other) {
    swapImpl(Other);
    return true;
  }

  // Plan how to shrink the bucket table. Return:
  // - {false, 0} to reuse the existing bucket table
  // - {true, N} to reallocate a bucket table with N entries
  std::pair<bool, unsigned> planShrinkAndClear() const {
    unsigned NewNumBuckets = 0;
    if (NumEntries)
      NewNumBuckets = std::max(64u, 1u << (Log2_32_Ceil(NumEntries) + 1));
    if (NewNumBuckets == NumBuckets)
      return {false, 0}; // Reuse.
    return {true, NewNumBuckets}; // Reallocate.
  }
865};
866
867template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
868 typename KeyInfoT = DenseMapInfo<KeyT>,
870class SmallDenseMap
871 : public DenseMapBase<
872 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
873 ValueT, KeyInfoT, BucketT> {
874 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
875
876 // Lift some types from the dependent base class into this class for
877 // simplicity of referring to them.
879
880 static_assert(isPowerOf2_64(InlineBuckets),
881 "InlineBuckets must be a power of 2.");
882
883 unsigned Small : 1;
884 unsigned NumEntries : 31;
885 unsigned NumTombstones;
886
887 struct LargeRep {
888 BucketT *Buckets;
889 unsigned NumBuckets;
891 return llvm::make_range(Buckets, Buckets + NumBuckets);
892 }
893 };
894
895 /// A "union" of an inline bucket array and the struct representing
896 /// a large bucket. This union will be discriminated by the 'Small' bit.
897 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
898
  /// Internal: construct with an exact (already rounded) bucket count.
  SmallDenseMap(unsigned NumBuckets, typename BaseT::ExactBucketCount) {
    this->initWithExactBucketCount(NumBuckets);
  }

public:
  /// Create a SmallDenseMap sized so \p NumElementsToReserve elements can be
  /// inserted without growing.
  explicit SmallDenseMap(unsigned NumElementsToReserve = 0)
      : SmallDenseMap(
            BaseT::getMinBucketToReserveForEntries(NumElementsToReserve),
            typename BaseT::ExactBucketCount{}) {}

  SmallDenseMap(const SmallDenseMap &other) : SmallDenseMap() {
    this->copyFrom(other);
  }

  // Move construction: start empty, then exchange representations.
  SmallDenseMap(SmallDenseMap &&other) : SmallDenseMap() { this->swap(other); }

  /// Construct from an iterator range, reserving std::distance(I, E) slots.
  /// NOTE(review): std::distance consumes single-pass input iterators, so this
  /// effectively requires forward (multipass) iterators — confirm.
  template <typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E)
      : SmallDenseMap(std::distance(I, E)) {
    this->insert(I, E);
  }
920
921 template <typename RangeT>
923 : SmallDenseMap(adl_begin(Range), adl_end(Range)) {}
924
  /// Construct from an initializer list of key/value pairs.
  SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
      : SmallDenseMap(Vals.begin(), Vals.end()) {}
927
929 this->destroyAll();
930 deallocateBuckets();
931 }
932
  SmallDenseMap &operator=(const SmallDenseMap &other) {
    // Self-assignment guard is required: copyFrom tears down *this first.
    if (&other != this)
      this->copyFrom(other);
    return *this;
  }
938
939 SmallDenseMap &operator=(SmallDenseMap &&other) {
940 this->destroyAll();
941 deallocateBuckets();
943 this->swap(other);
944 return *this;
945 }
946
947private:
  /// Swap this map's contents with \p RHS, handling all four combinations of
  /// inline (small) and heap-allocated (large) bucket storage.
  void swapImpl(SmallDenseMap &RHS) {
    // NumEntries is swapped by hand; per setNumEntries it is only 31 bits
    // wide, so std::swap cannot be used directly on it.
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = KeyInfoT::getEmptyKey();
    const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          // Value lives only on the LHS: move it into the RHS bucket and
          // destroy the moved-from source.
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      // Both sides are heap-allocated: swapping the LargeRep headers swaps
      // ownership of the two bucket arrays.
      std::swap(*getLargeRep(), *RHS.getLargeRep());
      return;
    }

    // Mixed case: exactly one side uses inline storage.
    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }
1018
  // Number of live (non-empty, non-tombstone) entries.
  unsigned getNumEntries() const { return NumEntries; }
1020
  // Set the live-entry count; the count must fit in 31 bits.
  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }
1026
  // Number of tombstone (erased) buckets.
  unsigned getNumTombstones() const { return NumTombstones; }
1028
  // Set the tombstone count.
  void setNumTombstones(unsigned Num) { NumTombstones = Num; }
1030
  // Pointer to the fixed-size inline bucket array; only valid while in the
  // small representation.
  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage' is a POD containing a char buffer.
#if defined(__clang__) && \
    (defined(__SANITIZE_ADDRESS__) || defined(__SANITIZE_HWADDRESS__))
    // Unless it's a sanitizer with container overflow detection. In this case
    // some items in buckets can be partially poisoned, triggering sanitizer
    // report on load.
    __asm__ volatile("" ::: "memory");
#endif
    return reinterpret_cast<const BucketT *>(&storage);
  }
1045
  // Non-const overload; forwards to the const implementation above.
  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }
1050
  // Header describing the heap-allocated bucket table; only valid while in
  // the large representation. It occupies the same inline 'storage' area the
  // small buckets would use.
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
#if defined(__clang__) && \
    (defined(__SANITIZE_ADDRESS__) || defined(__SANITIZE_HWADDRESS__))
    __asm__ volatile("" ::: "memory");
#endif
    return reinterpret_cast<const LargeRep *>(&storage);
  }
1060
  // Non-const overload; forwards to the const implementation above.
  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
        const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }
1065
  // Bucket array of whichever representation is currently active.
  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }
1069
1070 BucketT *getBuckets() {
1071 return const_cast<BucketT *>(
1072 const_cast<const SmallDenseMap *>(this)->getBuckets());
1073 }
1074
1075 unsigned getNumBuckets() const {
1076 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1077 }
1078
1079 iterator_range<BucketT *> inlineBuckets() {
1080 BucketT *Begin = getInlineBuckets();
1081 return llvm::make_range(Begin, Begin + InlineBuckets);
1082 }
1083
  // Free the heap-allocated bucket table, if any. Also safe on zombie
  // (moved-from) instances whose large rep holds zero buckets.
  void deallocateBuckets() {
    // Fast path to deallocateBuckets in case getLargeRep()->NumBuckets == 0,
    // just like destroyAll. This path is used to destruct zombie instances
    // after moves.
    if (Small || getLargeRep()->NumBuckets == 0)
      return;

    deallocate_buffer(getLargeRep()->Buckets,
                      sizeof(BucketT) * getLargeRep()->NumBuckets,
                      alignof(BucketT));
    getLargeRep()->~LargeRep();
  }
1096
1097 bool allocateBuckets(unsigned Num) {
1098 if (Num <= InlineBuckets) {
1099 Small = true;
1100 } else {
1101 Small = false;
1102 BucketT *NewBuckets = static_cast<BucketT *>(
1103 allocate_buffer(sizeof(BucketT) * Num, alignof(BucketT)));
1104 new (getLargeRep()) LargeRep{NewBuckets, Num};
1105 }
1106 return true;
1107 }
1108
  // Put the zombie instance in a known good state after a move.
  void kill() {
    deallocateBuckets();
    // Install an empty large rep (null buckets, zero count) so destruction
    // and later deallocateBuckets() calls are no-ops.
    Small = false;
    new (getLargeRep()) LargeRep{nullptr, 0};
  }
1115
1116 static unsigned roundUpNumBuckets(unsigned MinNumBuckets) {
1117 if (MinNumBuckets <= InlineBuckets)
1118 return MinNumBuckets;
1119 return std::max(64u,
1120 static_cast<unsigned>(NextPowerOf2(MinNumBuckets - 1)));
1121 }
1122
  /// Attempt an O(1) move: steal \p Other's heap-allocated bucket table.
  /// Returns false without modifying anything when Other uses inline storage
  /// (the caller must then move buckets element-wise).
  bool maybeMoveFast(SmallDenseMap &&Other) {
    if (Other.Small)
      return false;

    Small = false;
    NumEntries = Other.NumEntries;
    NumTombstones = Other.NumTombstones;
    *getLargeRep() = std::move(*Other.getLargeRep());
    // Leave Other with a zero-bucket rep so its destructor and
    // deallocateBuckets() do nothing.
    Other.getLargeRep()->NumBuckets = 0;
    return true;
  }
1134
1135 // Plan how to shrink the bucket table. Return:
1136 // - {false, 0} to reuse the existing bucket table
1137 // - {true, N} to reallocate a bucket table with N entries
1138 std::pair<bool, unsigned> planShrinkAndClear() const {
1139 unsigned NewNumBuckets = 0;
1140 if (!this->empty()) {
1141 NewNumBuckets = 1u << (Log2_32_Ceil(this->size()) + 1);
1142 if (NewNumBuckets > InlineBuckets)
1143 NewNumBuckets = std::max(64u, NewNumBuckets);
1144 }
1145 bool Reuse = Small ? NewNumBuckets <= InlineBuckets
1146 : NewNumBuckets == getLargeRep()->NumBuckets;
1147 if (Reuse)
1148 return {false, 0}; // Reuse.
1149 return {true, NewNumBuckets}; // Reallocate.
1150 }
1151};
1152
1153template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1154 bool IsConst>
1155class DenseMapIterator : DebugEpochBase::HandleBase {
1156 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1157 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1158
1159public:
1161 using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
1164 using iterator_category = std::forward_iterator_tag;
1165
1166private:
1167 using BucketItTy =
1168 std::conditional_t<shouldReverseIterate<KeyT>(),
1169 std::reverse_iterator<pointer>, pointer>;
1170
1171 BucketItTy Ptr = {};
1172 BucketItTy End = {};
1173
1174 DenseMapIterator(BucketItTy Pos, BucketItTy E, const DebugEpochBase &Epoch)
1175 : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1176 assert(isHandleInSync() && "invalid construction!");
1177 }
1178
1179public:
1180 DenseMapIterator() = default;
1181
1182 static DenseMapIterator makeBegin(iterator_range<pointer> Buckets,
1183 bool IsEmpty, const DebugEpochBase &Epoch) {
1184 // When the map is empty, avoid the overhead of advancing/retreating past
1185 // empty buckets.
1186 if (IsEmpty)
1187 return makeEnd(Buckets, Epoch);
1188 auto R = maybeReverse(Buckets);
1189 DenseMapIterator Iter(R.begin(), R.end(), Epoch);
1190 Iter.AdvancePastEmptyBuckets();
1191 return Iter;
1192 }
1193
1194 static DenseMapIterator makeEnd(iterator_range<pointer> Buckets,
1195 const DebugEpochBase &Epoch) {
1196 auto R = maybeReverse(Buckets);
1197 return DenseMapIterator(R.end(), R.end(), Epoch);
1198 }
1199
1200 static DenseMapIterator makeIterator(pointer P,
1202 const DebugEpochBase &Epoch) {
1203 auto R = maybeReverse(Buckets);
1204 constexpr int Offset = shouldReverseIterate<KeyT>() ? 1 : 0;
1205 return DenseMapIterator(BucketItTy(P + Offset), R.end(), Epoch);
1206 }
1207
1208 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1209 // for const iterator destinations so it doesn't end up as a user defined copy
1210 // constructor.
1211 template <bool IsConstSrc,
1212 typename = std::enable_if_t<!IsConstSrc && IsConst>>
1214 const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1215 : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1216
1217 [[nodiscard]] reference operator*() const {
1218 assert(isHandleInSync() && "invalid iterator access!");
1219 assert(Ptr != End && "dereferencing end() iterator");
1220 return *Ptr;
1221 }
1222 [[nodiscard]] pointer operator->() const { return &operator*(); }
1223
1224 [[nodiscard]] friend bool operator==(const DenseMapIterator &LHS,
1225 const DenseMapIterator &RHS) {
1226 assert((!LHS.getEpochAddress() || LHS.isHandleInSync()) &&
1227 "handle not in sync!");
1228 assert((!RHS.getEpochAddress() || RHS.isHandleInSync()) &&
1229 "handle not in sync!");
1230 assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1231 "comparing incomparable iterators!");
1232 return LHS.Ptr == RHS.Ptr;
1233 }
1234
1235 [[nodiscard]] friend bool operator!=(const DenseMapIterator &LHS,
1236 const DenseMapIterator &RHS) {
1237 return !(LHS == RHS);
1238 }
1239
1240 inline DenseMapIterator &operator++() { // Preincrement
1241 assert(isHandleInSync() && "invalid iterator access!");
1242 assert(Ptr != End && "incrementing end() iterator");
1243 ++Ptr;
1244 AdvancePastEmptyBuckets();
1245 return *this;
1246 }
1247 DenseMapIterator operator++(int) { // Postincrement
1248 assert(isHandleInSync() && "invalid iterator access!");
1249 DenseMapIterator tmp = *this;
1250 ++*this;
1251 return tmp;
1252 }
1253
1254private:
1255 void AdvancePastEmptyBuckets() {
1256 assert(Ptr <= End);
1257 const KeyT Empty = KeyInfoT::getEmptyKey();
1258 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1259
1260 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1261 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1262 ++Ptr;
1263 }
1264
1265 static auto maybeReverse(iterator_range<pointer> Range) {
1266 if constexpr (shouldReverseIterate<KeyT>())
1267 return reverse(Range);
1268 else
1269 return Range;
1270 }
1271};
1272
1273template <typename KeyT, typename ValueT, typename KeyInfoT>
1274[[nodiscard]] inline size_t
1276 return X.getMemorySize();
1277}
1278
1279} // end namespace llvm
1280
1281#endif // LLVM_ADT_DENSEMAP_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_UNLIKELY(EXPR)
Definition Compiler.h:336
#define LLVM_LIKELY(EXPR)
Definition Compiler.h:335
This file defines DenseMapInfo traits for DenseMap.
This file defines the DebugEpochBase and DebugEpochBase::HandleBase classes.
#define I(x, y, z)
Definition MD5.cpp:57
This file defines counterparts of C library allocation functions defined in the namespace 'std'.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
#define P(N)
if(PassOpts->AAPipeline)
This file contains some templates that are useful if you are working with the STL at all.
This file contains library features backported from future STL versions.
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
LocallyHashedType DenseMapInfo< LocallyHashedType >::Tombstone
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
Value * RHS
Value * LHS
ValueT & at(const_arg_type_t< KeyT > Val)
Return the entry for the specified key, or abort if no such entry exists.
Definition DenseMap.h:223
ValueT lookup(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
void copyFrom(const DerivedT &other)
Definition DenseMap.h:458
unsigned size_type
Definition DenseMap.h:69
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:254
std::pair< iterator, bool > insert(std::pair< KeyT, ValueT > &&KV)
Definition DenseMap.h:246
bool erase(const KeyT &Val)
Definition DenseMap.h:328
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition DenseMap.h:74
std::pair< iterator, bool > insert_as(std::pair< KeyT, ValueT > &&KV, const LookupKeyT &Val)
Alternate version of insert() which allows a different, and possibly less expensive,...
Definition DenseMap.h:272
void moveFrom(DerivedT &Other)
Definition DenseMap.h:434
DenseMapBase()=default
const_iterator find_as(const LookupKeyT &Val) const
Definition DenseMap.h:197
const_iterator end() const
Definition DenseMap.h:87
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
Definition DenseMap.h:191
unsigned size() const
Definition DenseMap.h:110
const_iterator find(const_arg_type_t< KeyT > Val) const
Definition DenseMap.h:181
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > emplace_or_assign(const KeyT &Key, Ts &&...Args)
Definition DenseMap.h:313
void insert(InputIt I, InputIt E)
Range insertion of pairs.
Definition DenseMap.h:286
iterator begin()
Definition DenseMap.h:78
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition DenseMap.h:174
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition DenseMap.h:75
iterator end()
Definition DenseMap.h:81
const ValueT & at(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or abort if no such entry exists.
Definition DenseMap.h:230
bool isPointerIntoBucketsArray(const void *Ptr) const
Return true if the specified pointer points somewhere into the DenseMap's array of buckets (i....
Definition DenseMap.h:357
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Definition DenseMap.h:262
const_iterator begin() const
Definition DenseMap.h:84
std::pair< iterator, bool > emplace_or_assign(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:321
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:292
const void * getPointerIntoBucketsArray() const
getPointerIntoBucketsArray() - Return an opaque pointer into the buckets array.
Definition DenseMap.h:364
std::pair< iterator, bool > insert_or_assign(KeyT &&Key, V &&Val)
Definition DenseMap.h:305
ValueT lookup_or(const_arg_type_t< KeyT > Val, U &&Default) const
Definition DenseMap.h:215
unsigned getMinBucketToReserveForEntries(unsigned NumEntries)
Returns the number of buckets to allocate to ensure that the DenseMap can accommodate NumEntries with...
Definition DenseMap.h:423
void swap(DerivedT &RHS)
Definition DenseMap.h:368
ValueT & operator[](const KeyT &Key)
Definition DenseMap.h:347
BucketT value_type
Definition DenseMap.h:72
auto keys() const
Definition DenseMap.h:101
void initWithExactBucketCount(unsigned NewNumBuckets)
Definition DenseMap.h:379
void shrink_and_clear()
Definition DenseMap.h:157
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:239
void erase(iterator I)
Definition DenseMap.h:339
std::pair< iterator, bool > insert_or_assign(const KeyT &Key, V &&Val)
Definition DenseMap.h:297
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition DenseMap.h:114
ValueT & operator[](KeyT &&Key)
Definition DenseMap.h:351
auto values() const
Definition DenseMap.h:105
size_t getMemorySize() const
Return the approximate size (in bytes) of the actual map.
Definition DenseMap.h:700
std::conditional_t< IsConst, const BucketT, BucketT > value_type
Definition DenseMap.h:1161
static DenseMapIterator makeIterator(pointer P, iterator_range< pointer > Buckets, const DebugEpochBase &Epoch)
Definition DenseMap.h:1200
friend bool operator!=(const DenseMapIterator &LHS, const DenseMapIterator &RHS)
Definition DenseMap.h:1235
DenseMapIterator & operator++()
Definition DenseMap.h:1240
pointer operator->() const
Definition DenseMap.h:1222
reference operator*() const
Definition DenseMap.h:1217
static DenseMapIterator makeBegin(iterator_range< pointer > Buckets, bool IsEmpty, const DebugEpochBase &Epoch)
Definition DenseMap.h:1182
DenseMapIterator operator++(int)
Definition DenseMap.h:1247
DenseMapIterator(const DenseMapIterator< KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc > &I)
Definition DenseMap.h:1213
friend bool operator==(const DenseMapIterator &LHS, const DenseMapIterator &RHS)
Definition DenseMap.h:1224
static DenseMapIterator makeEnd(iterator_range< pointer > Buckets, const DebugEpochBase &Epoch)
Definition DenseMap.h:1194
DenseMap(std::initializer_list< typename BaseT::value_type > Vals)
Definition DenseMap.h:779
DenseMap(unsigned NumElementsToReserve=0)
Create a DenseMap with an optional NumElementsToReserve to guarantee that this number of elements can...
Definition DenseMap.h:762
DenseMap & operator=(DenseMap &&other)
Definition DenseMap.h:793
DenseMap(llvm::from_range_t, const RangeT &Range)
Definition DenseMap.h:776
DenseMap(const DenseMap &other)
Definition DenseMap.h:766
DenseMap(const InputIt &I, const InputIt &E)
Definition DenseMap.h:771
DenseMap(DenseMap &&other)
Definition DenseMap.h:768
DenseMap & operator=(const DenseMap &other)
Definition DenseMap.h:787
SmallDenseMap(const InputIt &I, const InputIt &E)
Definition DenseMap.h:916
SmallDenseMap & operator=(SmallDenseMap &&other)
Definition DenseMap.h:939
SmallDenseMap & operator=(const SmallDenseMap &other)
Definition DenseMap.h:933
SmallDenseMap(unsigned NumElementsToReserve=0)
Definition DenseMap.h:904
SmallDenseMap(std::initializer_list< typename BaseT::value_type > Vals)
Definition DenseMap.h:925
SmallDenseMap(SmallDenseMap &&other)
Definition DenseMap.h:913
SmallDenseMap(const SmallDenseMap &other)
Definition DenseMap.h:909
SmallDenseMap(llvm::from_range_t, const RangeT &Range)
Definition DenseMap.h:922
A range adaptor for a pair of iterators.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
A self-contained host- and target-independent arbitrary-precision floating-point software implementat...
Definition ADL.h:123
constexpr double e
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:344
@ Offset
Definition DWP.cpp:557
bool isEqual(const GCNRPTracker::LiveRegSet &S1, const GCNRPTracker::LiveRegSet &S2)
constexpr auto adl_begin(RangeT &&range) -> decltype(adl_detail::begin_impl(std::forward< RangeT >(range)))
Returns the begin iterator to range using std::begin and function found through Argument-Dependent Lo...
Definition ADL.h:78
BitVector::size_type capacity_in_bytes(const BitVector &X)
Definition BitVector.h:851
bool operator!=(uint64_t V1, const APInt &V2)
Definition APInt.h:2142
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
constexpr auto adl_end(RangeT &&range) -> decltype(adl_detail::end_impl(std::forward< RangeT >(range)))
Returns the end iterator to range using std::end and functions found through Argument-Dependent Looku...
Definition ADL.h:86
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
auto map_range(ContainerTy &&C, FuncTy F)
Return a range that applies F to the elements of C.
Definition STLExtras.h:365
LLVM_ABI LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void * allocate_buffer(size_t Size, size_t Alignment)
Allocate a buffer of memory with the given size and alignment.
Definition MemAlloc.cpp:15
LLVM_ABI void deallocate_buffer(void *Ptr, size_t Size, size_t Alignment)
Deallocate a buffer of memory with the given size and alignment.
Definition MemAlloc.cpp:27
constexpr bool shouldReverseIterate()
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
iterator_range(Container &&) -> iterator_range< llvm::detail::IterOfRange< Container > >
@ Other
Any other memory.
Definition ModRef.h:68
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1916
@ Default
The result value is uniform if and only if all operands are uniform.
Definition Uniformity.h:20
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition MathExtras.h:373
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:874
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:876
std::conditional_t< std::is_pointer_v< T >, typename add_const_past_pointer< T >::type, const T & > type
Definition type_traits.h:53
const ValueT & getSecond() const
Definition DenseMap.h:51
const KeyT & getFirst() const
Definition DenseMap.h:49