LLVM 6.0.0svn
DenseMap.h
1 //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the DenseMap class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_ADT_DENSEMAP_H
15 #define LLVM_ADT_DENSEMAP_H
16 
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/EpochTracker.h"
19 #include "llvm/Support/AlignOf.h"
20 #include "llvm/Support/Compiler.h"
21 #include "llvm/Support/MathExtras.h"
22 #include "llvm/Support/ReverseIteration.h"
23 #include "llvm/Support/type_traits.h"
24 #include <algorithm>
25 #include <cassert>
26 #include <cstddef>
27 #include <cstring>
28 #include <iterator>
29 #include <new>
30 #include <type_traits>
31 #include <utility>
32 
33 namespace llvm {
34 
35 namespace detail {
36 
37 // We extend a pair to allow users to override the bucket type with their own
38 // implementation without requiring two members.
39 template <typename KeyT, typename ValueT>
40 struct DenseMapPair : public std::pair<KeyT, ValueT> {
41  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
42  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
43  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
44  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
45 };
46 
47 } // end namespace detail
48 
49 template <
50  typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,
51  typename Bucket = detail::DenseMapPair<KeyT, ValueT>, bool IsConst = false>
52 class DenseMapIterator;
53 
54 template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
55  typename BucketT>
56 class DenseMapBase : public DebugEpochBase {
57  template <typename T>
58  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
59 
60 public:
61  using size_type = unsigned;
62  using key_type = KeyT;
63  using mapped_type = ValueT;
64  using value_type = BucketT;
65 
66  using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
67  using const_iterator =
68      DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
69 
70  inline iterator begin() {
71  // When the map is empty, avoid the overhead of advancing/retreating past
72  // empty buckets.
73  if (empty())
74  return end();
75  if (shouldReverseIterate<KeyT>())
76  return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
77  return makeIterator(getBuckets(), getBucketsEnd(), *this);
78  }
79  inline iterator end() {
80  return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
81  }
82  inline const_iterator begin() const {
83  if (empty())
84  return end();
85  if (shouldReverseIterate<KeyT>())
86  return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
87  return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
88  }
89  inline const_iterator end() const {
90  return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
91  }
92 
93  LLVM_NODISCARD bool empty() const {
94  return getNumEntries() == 0;
95  }
96  unsigned size() const { return getNumEntries(); }
97 
98  /// Grow the densemap so that it can contain at least \p NumEntries items
99  /// before resizing again.
100  void reserve(size_type NumEntries) {
101  auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
102  incrementEpoch();
103  if (NumBuckets > getNumBuckets())
104  grow(NumBuckets);
105  }
106 
107  void clear() {
108  incrementEpoch();
109  if (getNumEntries() == 0 && getNumTombstones() == 0) return;
110 
111  // If the capacity of the array is huge, and the # elements used is small,
112  // shrink the array.
113  if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
114  shrink_and_clear();
115  return;
116  }
117 
118  const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
119  if (isPodLike<KeyT>::value && isPodLike<ValueT>::value) {
120  // Use a simpler loop when these are trivial types.
121  for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
122  P->getFirst() = EmptyKey;
123  } else {
124  unsigned NumEntries = getNumEntries();
125  for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
126  if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
127  if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
128  P->getSecond().~ValueT();
129  --NumEntries;
130  }
131  P->getFirst() = EmptyKey;
132  }
133  }
134  assert(NumEntries == 0 && "Node count imbalance!");
135  }
136  setNumEntries(0);
137  setNumTombstones(0);
138  }
139 
140  /// Return 1 if the specified key is in the map, 0 otherwise.
141  size_type count(const_arg_type_t<KeyT> Val) const {
142  const BucketT *TheBucket;
143  return LookupBucketFor(Val, TheBucket) ? 1 : 0;
144  }
145 
146  iterator find(const_arg_type_t<KeyT> Val) {
147  BucketT *TheBucket;
148  if (LookupBucketFor(Val, TheBucket))
149  return makeIterator(TheBucket, getBucketsEnd(), *this, true);
150  return end();
151  }
152  const_iterator find(const_arg_type_t<KeyT> Val) const {
153  const BucketT *TheBucket;
154  if (LookupBucketFor(Val, TheBucket))
155  return makeConstIterator(TheBucket, getBucketsEnd(), *this, true);
156  return end();
157  }
158 
159  /// Alternate version of find() which allows a different, and possibly
160  /// less expensive, key type.
161  /// The DenseMapInfo is responsible for supplying methods
162  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
163  /// type used.
164  template<class LookupKeyT>
165  iterator find_as(const LookupKeyT &Val) {
166  BucketT *TheBucket;
167  if (LookupBucketFor(Val, TheBucket))
168  return makeIterator(TheBucket, getBucketsEnd(), *this, true);
169  return end();
170  }
171  template<class LookupKeyT>
172  const_iterator find_as(const LookupKeyT &Val) const {
173  const BucketT *TheBucket;
174  if (LookupBucketFor(Val, TheBucket))
175  return makeConstIterator(TheBucket, getBucketsEnd(), *this, true);
176  return end();
177  }
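A sketch, not part of this header, of the key-info contract that find_as() and insert_as() rely on. Symbol, SymbolKeyInfo, and the use of llvm::hash_value are illustrative assumptions; the only requirement stated above is that the key-info type adds getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) overloads that are consistent with the ordinary ones.

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include <string>

struct Symbol { std::string Name; };

struct SymbolKeyInfo {
  static inline Symbol *getEmptyKey() {
    return llvm::DenseMapInfo<Symbol *>::getEmptyKey();
  }
  static inline Symbol *getTombstoneKey() {
    return llvm::DenseMapInfo<Symbol *>::getTombstoneKey();
  }
  static unsigned getHashValue(const Symbol *S) {
    return llvm::hash_value(llvm::StringRef(S->Name));
  }
  static unsigned getHashValue(llvm::StringRef Name) {          // extra overload for find_as()
    return llvm::hash_value(Name);
  }
  static bool isEqual(const Symbol *LHS, const Symbol *RHS) { return LHS == RHS; }
  static bool isEqual(llvm::StringRef Name, const Symbol *S) {  // extra overload for find_as()
    return S != getEmptyKey() && S != getTombstoneKey() && S->Name == Name;
  }
};

// A lookup by name never has to materialize a Symbol:
//   llvm::DenseMap<Symbol *, unsigned, SymbolKeyInfo> Ids;
//   auto It = Ids.find_as(llvm::StringRef("main"));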
178 
179  /// lookup - Return the entry for the specified key, or a default
180  /// constructed value if no such entry exists.
181  ValueT lookup(const_arg_type_t<KeyT> Val) const {
182  const BucketT *TheBucket;
183  if (LookupBucketFor(Val, TheBucket))
184  return TheBucket->getSecond();
185  return ValueT();
186  }
187 
188  // Inserts key,value pair into the map if the key isn't already in the map.
189  // If the key is already in the map, it returns false and doesn't update the
190  // value.
191  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
192  return try_emplace(KV.first, KV.second);
193  }
194 
195  // Inserts key,value pair into the map if the key isn't already in the map.
196  // If the key is already in the map, it returns false and doesn't update the
197  // value.
198  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
199  return try_emplace(std::move(KV.first), std::move(KV.second));
200  }
201 
202  // Inserts key,value pair into the map if the key isn't already in the map.
203  // The value is constructed in-place if the key is not in the map, otherwise
204  // it is not moved.
205  template <typename... Ts>
206  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
207  BucketT *TheBucket;
208  if (LookupBucketFor(Key, TheBucket))
209  return std::make_pair(
210  makeIterator(TheBucket, getBucketsEnd(), *this, true),
211  false); // Already in map.
212 
213  // Otherwise, insert the new element.
214  TheBucket =
215  InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
216  return std::make_pair(
217  makeIterator(TheBucket, getBucketsEnd(), *this, true),
218  true);
219  }
220 
221  // Inserts key,value pair into the map if the key isn't already in the map.
222  // The value is constructed in-place if the key is not in the map, otherwise
223  // it is not moved.
224  template <typename... Ts>
225  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
226  BucketT *TheBucket;
227  if (LookupBucketFor(Key, TheBucket))
228  return std::make_pair(
229  makeIterator(TheBucket, getBucketsEnd(), *this, true),
230  false); // Already in map.
231 
232  // Otherwise, insert the new element.
233  TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
234  return std::make_pair(
235  makeIterator(TheBucket, getBucketsEnd(), *this, true),
236  true);
237  }
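A sketch of why try_emplace() is preferable to insert() for expensive or move-only mapped types: the forwarded arguments are only consumed when a bucket is actually filled. The identifiers and the std::unique_ptr payload are illustrative.

#include "llvm/ADT/DenseMap.h"
#include <memory>

void cacheExample(llvm::DenseMap<unsigned, std::unique_ptr<int>> &Cache) {
  auto Payload = std::make_unique<int>(42);
  auto Res = Cache.try_emplace(7, std::move(Payload));
  if (!Res.second) {
    // Key 7 was already present: the early return above fires before
    // InsertIntoBucket(), so Payload was never moved from and still owns
    // its allocation.
  }
}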
238 
239  /// Alternate version of insert() which allows a different, and possibly
240  /// less expensive, key type.
241  /// The DenseMapInfo is responsible for supplying methods
242  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
243  /// type used.
244  template <typename LookupKeyT>
245  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
246  const LookupKeyT &Val) {
247  BucketT *TheBucket;
248  if (LookupBucketFor(Val, TheBucket))
249  return std::make_pair(
250  makeIterator(TheBucket, getBucketsEnd(), *this, true),
251  false); // Already in map.
252 
253  // Otherwise, insert the new element.
254  TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
255  std::move(KV.second), Val);
256  return std::make_pair(
257  makeIterator(TheBucket, getBucketsEnd(), *this, true),
258  true);
259  }
260 
261  /// insert - Range insertion of pairs.
262  template<typename InputIt>
263  void insert(InputIt I, InputIt E) {
264  for (; I != E; ++I)
265  insert(*I);
266  }
267 
268  bool erase(const KeyT &Val) {
269  BucketT *TheBucket;
270  if (!LookupBucketFor(Val, TheBucket))
271  return false; // not in map.
272 
273  TheBucket->getSecond().~ValueT();
274  TheBucket->getFirst() = getTombstoneKey();
275  decrementNumEntries();
276  incrementNumTombstones();
277  return true;
278  }
279  void erase(iterator I) {
280  BucketT *TheBucket = &*I;
281  TheBucket->getSecond().~ValueT();
282  TheBucket->getFirst() = getTombstoneKey();
283  decrementNumEntries();
284  incrementNumTombstones();
285  }
286 
287  value_type& FindAndConstruct(const KeyT &Key) {
288  BucketT *TheBucket;
289  if (LookupBucketFor(Key, TheBucket))
290  return *TheBucket;
291 
292  return *InsertIntoBucket(TheBucket, Key);
293  }
294 
295  ValueT &operator[](const KeyT &Key) {
296  return FindAndConstruct(Key).second;
297  }
298 
299  value_type& FindAndConstruct(KeyT &&Key) {
300  BucketT *TheBucket;
301  if (LookupBucketFor(Key, TheBucket))
302  return *TheBucket;
303 
304  return *InsertIntoBucket(TheBucket, std::move(Key));
305  }
306 
307  ValueT &operator[](KeyT &&Key) {
308  return FindAndConstruct(std::move(Key)).second;
309  }
310 
311  /// isPointerIntoBucketsArray - Return true if the specified pointer points
312  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
313  /// value in the DenseMap).
314  bool isPointerIntoBucketsArray(const void *Ptr) const {
315  return Ptr >= getBuckets() && Ptr < getBucketsEnd();
316  }
317 
318  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
319  /// array. In conjunction with the previous method, this can be used to
320  /// determine whether an insertion caused the DenseMap to reallocate.
321  const void *getPointerIntoBucketsArray() const { return getBuckets(); }
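A sketch of the pattern these two methods support; the map type and key are illustrative.

void insertAndCheck(llvm::DenseMap<int, int> &M) {
  const void *Before = M.getPointerIntoBucketsArray();
  M.try_emplace(42, 0);
  if (!M.isPointerIntoBucketsArray(Before)) {
    // The insertion triggered grow(): the bucket array was reallocated, so
    // iterators and pointers into the old array are no longer valid.
  }
}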
322 
323 protected:
324  DenseMapBase() = default;
325 
326  void destroyAll() {
327  if (getNumBuckets() == 0) // Nothing to do.
328  return;
329 
330  const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
331  for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
332  if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
333  !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
334  P->getSecond().~ValueT();
335  P->getFirst().~KeyT();
336  }
337  }
338 
339  void initEmpty() {
340  setNumEntries(0);
341  setNumTombstones(0);
342 
343  assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
344  "# initial buckets must be a power of two!");
345  const KeyT EmptyKey = getEmptyKey();
346  for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
347  ::new (&B->getFirst()) KeyT(EmptyKey);
348  }
349 
350  /// Returns the number of buckets to allocate to ensure that the DenseMap can
351  /// accommodate \p NumEntries without need to grow().
352  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
353  // Ensure that "NumEntries * 4 < NumBuckets * 3"
354  if (NumEntries == 0)
355  return 0;
356  // +1 is required because of the strict inequality.
357  // For example if NumEntries is 48, we need to return 128.
358  return NextPowerOf2(NumEntries * 4 / 3 + 1);
359  }
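To make the computation above concrete (NextPowerOf2 returns the power of two strictly greater than its argument, per MathExtras.h): reserving 47 entries yields 47 * 4 / 3 + 1 = 63 and NextPowerOf2(63) = 64, and indeed 47 * 4 = 188 < 64 * 3 = 192; reserving 48 entries yields 48 * 4 / 3 + 1 = 65 and NextPowerOf2(65) = 128, because with only 64 buckets 48 * 4 == 64 * 3 would violate the strict inequality.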
360 
361  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
362  initEmpty();
363 
364  // Insert all the old elements.
365  const KeyT EmptyKey = getEmptyKey();
366  const KeyT TombstoneKey = getTombstoneKey();
367  for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
368  if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
369  !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
370  // Insert the key/value into the new table.
371  BucketT *DestBucket;
372  bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
373  (void)FoundVal; // silence warning.
374  assert(!FoundVal && "Key already in new map?");
375  DestBucket->getFirst() = std::move(B->getFirst());
376  ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
377  incrementNumEntries();
378 
379  // Free the value.
380  B->getSecond().~ValueT();
381  }
382  B->getFirst().~KeyT();
383  }
384  }
385 
386  template <typename OtherBaseT>
387  void copyFrom(
388      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
389  assert(&other != this);
390  assert(getNumBuckets() == other.getNumBuckets());
391 
392  setNumEntries(other.getNumEntries());
393  setNumTombstones(other.getNumTombstones());
394 
395  if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
396  memcpy(getBuckets(), other.getBuckets(),
397  getNumBuckets() * sizeof(BucketT));
398  else
399  for (size_t i = 0; i < getNumBuckets(); ++i) {
400  ::new (&getBuckets()[i].getFirst())
401  KeyT(other.getBuckets()[i].getFirst());
402  if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
403  !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
404  ::new (&getBuckets()[i].getSecond())
405  ValueT(other.getBuckets()[i].getSecond());
406  }
407  }
408 
409  static unsigned getHashValue(const KeyT &Val) {
410  return KeyInfoT::getHashValue(Val);
411  }
412 
413  template<typename LookupKeyT>
414  static unsigned getHashValue(const LookupKeyT &Val) {
415  return KeyInfoT::getHashValue(Val);
416  }
417 
418  static const KeyT getEmptyKey() {
419  static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
420  "Must pass the derived type to this template!");
421  return KeyInfoT::getEmptyKey();
422  }
423 
424  static const KeyT getTombstoneKey() {
425  return KeyInfoT::getTombstoneKey();
426  }
427 
428 private:
429  iterator makeIterator(BucketT *P, BucketT *E,
430  DebugEpochBase &Epoch,
431  bool NoAdvance=false) {
432  if (shouldReverseIterate<KeyT>()) {
433  BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
434  return iterator(B, E, Epoch, NoAdvance);
435  }
436  return iterator(P, E, Epoch, NoAdvance);
437  }
438 
439  const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
440  const DebugEpochBase &Epoch,
441  const bool NoAdvance=false) const {
442  if (shouldReverseIterate<KeyT>()) {
443  const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
444  return const_iterator(B, E, Epoch, NoAdvance);
445  }
446  return const_iterator(P, E, Epoch, NoAdvance);
447  }
448 
449  unsigned getNumEntries() const {
450  return static_cast<const DerivedT *>(this)->getNumEntries();
451  }
452 
453  void setNumEntries(unsigned Num) {
454  static_cast<DerivedT *>(this)->setNumEntries(Num);
455  }
456 
457  void incrementNumEntries() {
458  setNumEntries(getNumEntries() + 1);
459  }
460 
461  void decrementNumEntries() {
462  setNumEntries(getNumEntries() - 1);
463  }
464 
465  unsigned getNumTombstones() const {
466  return static_cast<const DerivedT *>(this)->getNumTombstones();
467  }
468 
469  void setNumTombstones(unsigned Num) {
470  static_cast<DerivedT *>(this)->setNumTombstones(Num);
471  }
472 
473  void incrementNumTombstones() {
474  setNumTombstones(getNumTombstones() + 1);
475  }
476 
477  void decrementNumTombstones() {
478  setNumTombstones(getNumTombstones() - 1);
479  }
480 
481  const BucketT *getBuckets() const {
482  return static_cast<const DerivedT *>(this)->getBuckets();
483  }
484 
485  BucketT *getBuckets() {
486  return static_cast<DerivedT *>(this)->getBuckets();
487  }
488 
489  unsigned getNumBuckets() const {
490  return static_cast<const DerivedT *>(this)->getNumBuckets();
491  }
492 
493  BucketT *getBucketsEnd() {
494  return getBuckets() + getNumBuckets();
495  }
496 
497  const BucketT *getBucketsEnd() const {
498  return getBuckets() + getNumBuckets();
499  }
500 
501  void grow(unsigned AtLeast) {
502  static_cast<DerivedT *>(this)->grow(AtLeast);
503  }
504 
505  void shrink_and_clear() {
506  static_cast<DerivedT *>(this)->shrink_and_clear();
507  }
508 
509  template <typename KeyArg, typename... ValueArgs>
510  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
511  ValueArgs &&... Values) {
512  TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
513 
514  TheBucket->getFirst() = std::forward<KeyArg>(Key);
515  ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
516  return TheBucket;
517  }
518 
519  template <typename LookupKeyT>
520  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
521  ValueT &&Value, LookupKeyT &Lookup) {
522  TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
523 
524  TheBucket->getFirst() = std::move(Key);
525  ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
526  return TheBucket;
527  }
528 
529  template <typename LookupKeyT>
530  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
531  BucketT *TheBucket) {
532  incrementEpoch();
533 
534  // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
535  // the buckets are empty (meaning that many are filled with tombstones),
536  // grow the table.
537  //
538  // The latter case is tricky. For example, if we had one empty bucket with
539  // tons of tombstones, failing lookups (e.g. for insertion) would have to
540  // probe almost the entire table until it found the empty bucket. If the
541  // table were completely filled with tombstones, no lookup would ever succeed,
542  // causing infinite loops in lookup.
543  unsigned NewNumEntries = getNumEntries() + 1;
544  unsigned NumBuckets = getNumBuckets();
545  if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
546  this->grow(NumBuckets * 2);
547  LookupBucketFor(Lookup, TheBucket);
548  NumBuckets = getNumBuckets();
549  } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
550  NumBuckets/8)) {
551  this->grow(NumBuckets);
552  LookupBucketFor(Lookup, TheBucket);
553  }
554  assert(TheBucket);
555 
556  // Only update the state after we've grown our bucket space appropriately
557  // so that when growing buckets we have self-consistent entry count.
558  incrementNumEntries();
559 
560  // If we are writing over a tombstone, remember this.
561  const KeyT EmptyKey = getEmptyKey();
562  if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
563  decrementNumTombstones();
564 
565  return TheBucket;
566  }
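To put numbers on the two triggers above (64 buckets chosen purely for illustration): the first branch doubles the table to 128 buckets as soon as NewNumEntries * 4 >= 64 * 3 = 192, i.e. on the 48th insertion; the second branch fires when at most 64 / 8 = 8 buckets remain empty because of accumulated tombstones, and grow(64) then rebuilds the table at the same size simply to turn tombstones back into empty buckets.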
567 
568  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
569  /// FoundBucket. If the bucket contains the key and a value, this returns
570  /// true, otherwise it returns a bucket with an empty marker or tombstone and
571  /// returns false.
572  template<typename LookupKeyT>
573  bool LookupBucketFor(const LookupKeyT &Val,
574  const BucketT *&FoundBucket) const {
575  const BucketT *BucketsPtr = getBuckets();
576  const unsigned NumBuckets = getNumBuckets();
577 
578  if (NumBuckets == 0) {
579  FoundBucket = nullptr;
580  return false;
581  }
582 
583  // FoundTombstone - Keep track of whether we find a tombstone while probing.
584  const BucketT *FoundTombstone = nullptr;
585  const KeyT EmptyKey = getEmptyKey();
586  const KeyT TombstoneKey = getTombstoneKey();
587  assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
588  !KeyInfoT::isEqual(Val, TombstoneKey) &&
589  "Empty/Tombstone value shouldn't be inserted into map!");
590 
591  unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
592  unsigned ProbeAmt = 1;
593  while (true) {
594  const BucketT *ThisBucket = BucketsPtr + BucketNo;
595  // Found Val's bucket? If so, return it.
596  if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
597  FoundBucket = ThisBucket;
598  return true;
599  }
600 
601  // If we found an empty bucket, the key doesn't exist in the set.
602  // Insert it and return the default value.
603  if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
604  // If we've already seen a tombstone while probing, fill it in instead
605  // of the empty bucket we eventually probed to.
606  FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
607  return false;
608  }
609 
610  // If this is a tombstone, remember it. If Val ends up not in the map, we
611  // prefer to return it rather than something that would require more probing.
612  if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
613  !FoundTombstone)
614  FoundTombstone = ThisBucket; // Remember the first tombstone found.
615 
616  // Otherwise, it's a hash collision or a tombstone, continue quadratic
617  // probing.
618  BucketNo += ProbeAmt++;
619  BucketNo &= (NumBuckets-1);
620  }
621  }
622 
623  template <typename LookupKeyT>
624  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
625  const BucketT *ConstFoundBucket;
626  bool Result = const_cast<const DenseMapBase *>(this)
627  ->LookupBucketFor(Val, ConstFoundBucket);
628  FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
629  return Result;
630  }
631 
632 public:
633  /// Return the approximate size (in bytes) of the actual map.
634  /// This is just the raw memory used by DenseMap.
635  /// If entries are pointers to objects, the size of the referenced objects
636  /// are not included.
637  size_t getMemorySize() const {
638  return getNumBuckets() * sizeof(BucketT);
639  }
640 };
641 
642 template <typename KeyT, typename ValueT,
643  typename KeyInfoT = DenseMapInfo<KeyT>,
644  typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
645 class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
646  KeyT, ValueT, KeyInfoT, BucketT> {
647  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
648 
649  // Lift some types from the dependent base class into this class for
650  // simplicity of referring to them.
651  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
652 
653  BucketT *Buckets;
654  unsigned NumEntries;
655  unsigned NumTombstones;
656  unsigned NumBuckets;
657 
658 public:
659  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
660  /// this number of elements can be inserted in the map without grow().
661  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
662 
663  DenseMap(const DenseMap &other) : BaseT() {
664  init(0);
665  copyFrom(other);
666  }
667 
668  DenseMap(DenseMap &&other) : BaseT() {
669  init(0);
670  swap(other);
671  }
672 
673  template<typename InputIt>
674  DenseMap(const InputIt &I, const InputIt &E) {
675  init(std::distance(I, E));
676  this->insert(I, E);
677  }
678 
679  ~DenseMap() {
680  this->destroyAll();
681  operator delete(Buckets);
682  }
683 
684  void swap(DenseMap& RHS) {
685  this->incrementEpoch();
686  RHS.incrementEpoch();
687  std::swap(Buckets, RHS.Buckets);
688  std::swap(NumEntries, RHS.NumEntries);
689  std::swap(NumTombstones, RHS.NumTombstones);
690  std::swap(NumBuckets, RHS.NumBuckets);
691  }
692 
693  DenseMap& operator=(const DenseMap& other) {
694  if (&other != this)
695  copyFrom(other);
696  return *this;
697  }
698 
699  DenseMap& operator=(DenseMap &&other) {
700  this->destroyAll();
701  operator delete(Buckets);
702  init(0);
703  swap(other);
704  return *this;
705  }
706 
707  void copyFrom(const DenseMap& other) {
708  this->destroyAll();
709  operator delete(Buckets);
710  if (allocateBuckets(other.NumBuckets)) {
711  this->BaseT::copyFrom(other);
712  } else {
713  NumEntries = 0;
714  NumTombstones = 0;
715  }
716  }
717 
718  void init(unsigned InitNumEntries) {
719  auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
720  if (allocateBuckets(InitBuckets)) {
721  this->BaseT::initEmpty();
722  } else {
723  NumEntries = 0;
724  NumTombstones = 0;
725  }
726  }
727 
728  void grow(unsigned AtLeast) {
729  unsigned OldNumBuckets = NumBuckets;
730  BucketT *OldBuckets = Buckets;
731 
732  allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
733  assert(Buckets);
734  if (!OldBuckets) {
735  this->BaseT::initEmpty();
736  return;
737  }
738 
739  this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
740 
741  // Free the old table.
742  operator delete(OldBuckets);
743  }
744 
745  void shrink_and_clear() {
746  unsigned OldNumEntries = NumEntries;
747  this->destroyAll();
748 
749  // Reduce the number of buckets.
750  unsigned NewNumBuckets = 0;
751  if (OldNumEntries)
752  NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
753  if (NewNumBuckets == NumBuckets) {
754  this->BaseT::initEmpty();
755  return;
756  }
757 
758  operator delete(Buckets);
759  init(NewNumBuckets);
760  }
761 
762 private:
763  unsigned getNumEntries() const {
764  return NumEntries;
765  }
766 
767  void setNumEntries(unsigned Num) {
768  NumEntries = Num;
769  }
770 
771  unsigned getNumTombstones() const {
772  return NumTombstones;
773  }
774 
775  void setNumTombstones(unsigned Num) {
776  NumTombstones = Num;
777  }
778 
779  BucketT *getBuckets() const {
780  return Buckets;
781  }
782 
783  unsigned getNumBuckets() const {
784  return NumBuckets;
785  }
786 
787  bool allocateBuckets(unsigned Num) {
788  NumBuckets = Num;
789  if (NumBuckets == 0) {
790  Buckets = nullptr;
791  return false;
792  }
793 
794  Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
795  return true;
796  }
797 };
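A minimal usage sketch for the DenseMap defined above; the function and variable names are illustrative only.

#include "llvm/ADT/DenseMap.h"

void usage() {
  llvm::DenseMap<unsigned, const char *> Names;
  Names.reserve(16);                      // pre-size so the first inserts never grow()
  Names[1] = "entry";                     // operator[] default-constructs, then assigns
  Names.insert({2, "exit"});              // does nothing if key 2 is already present
  auto It = Names.find(2);
  if (It != Names.end())
    It->second = "ret";                   // buckets derive from std::pair, so .second works
  const char *Missing = Names.lookup(3);  // nullptr: default-constructed ValueT for absent keys
  (void)Missing;
  Names.erase(1);                         // replaces the key with the tombstone marker
}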
798 
799 template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
800  typename KeyInfoT = DenseMapInfo<KeyT>,
801  typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
802 class SmallDenseMap
803     : public DenseMapBase<
804  SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
805  ValueT, KeyInfoT, BucketT> {
806  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
807 
808  // Lift some types from the dependent base class into this class for
809  // simplicity of referring to them.
810  using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
811 
812  static_assert(isPowerOf2_64(InlineBuckets),
813  "InlineBuckets must be a power of 2.");
814 
815  unsigned Small : 1;
816  unsigned NumEntries : 31;
817  unsigned NumTombstones;
818 
819  struct LargeRep {
820  BucketT *Buckets;
821  unsigned NumBuckets;
822  };
823 
824  /// A "union" of an inline bucket array and the struct representing
825  /// a large bucket. This union will be discriminated by the 'Small' bit.
826  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
827 
828 public:
829  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
830  init(NumInitBuckets);
831  }
832 
833  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
834  init(0);
835  copyFrom(other);
836  }
837 
838  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
839  init(0);
840  swap(other);
841  }
842 
843  template<typename InputIt>
844  SmallDenseMap(const InputIt &I, const InputIt &E) {
845  init(NextPowerOf2(std::distance(I, E)));
846  this->insert(I, E);
847  }
848 
849  ~SmallDenseMap() {
850  this->destroyAll();
851  deallocateBuckets();
852  }
853 
854  void swap(SmallDenseMap& RHS) {
855  unsigned TmpNumEntries = RHS.NumEntries;
856  RHS.NumEntries = NumEntries;
857  NumEntries = TmpNumEntries;
858  std::swap(NumTombstones, RHS.NumTombstones);
859 
860  const KeyT EmptyKey = this->getEmptyKey();
861  const KeyT TombstoneKey = this->getTombstoneKey();
862  if (Small && RHS.Small) {
863  // If we're swapping inline bucket arrays, we have to cope with some of
864  // the tricky bits of DenseMap's storage system: the buckets are not
865  // fully initialized. Thus we swap every key, but we may have
866  // a one-directional move of the value.
867  for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
868  BucketT *LHSB = &getInlineBuckets()[i],
869  *RHSB = &RHS.getInlineBuckets()[i];
870  bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
871  !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
872  bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
873  !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
874  if (hasLHSValue && hasRHSValue) {
875  // Swap together if we can...
876  std::swap(*LHSB, *RHSB);
877  continue;
878  }
879  // Swap separately and handle any asymmetry.
880  std::swap(LHSB->getFirst(), RHSB->getFirst());
881  if (hasLHSValue) {
882  ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
883  LHSB->getSecond().~ValueT();
884  } else if (hasRHSValue) {
885  ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
886  RHSB->getSecond().~ValueT();
887  }
888  }
889  return;
890  }
891  if (!Small && !RHS.Small) {
892  std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
893  std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
894  return;
895  }
896 
897  SmallDenseMap &SmallSide = Small ? *this : RHS;
898  SmallDenseMap &LargeSide = Small ? RHS : *this;
899 
900  // First stash the large side's rep and move the small side across.
901  LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
902  LargeSide.getLargeRep()->~LargeRep();
903  LargeSide.Small = true;
904  // This is similar to the standard move-from-old-buckets, but the bucket
905  // count hasn't actually rotated in this case. So we have to carefully
906  // move construct the keys and values into their new locations, but there
907  // is no need to re-hash things.
908  for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
909  BucketT *NewB = &LargeSide.getInlineBuckets()[i],
910  *OldB = &SmallSide.getInlineBuckets()[i];
911  ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
912  OldB->getFirst().~KeyT();
913  if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
914  !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
915  ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
916  OldB->getSecond().~ValueT();
917  }
918  }
919 
920  // The hard part of moving the small buckets across is done, just move
921  // the TmpRep into its new home.
922  SmallSide.Small = false;
923  new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
924  }
925 
926  SmallDenseMap& operator=(const SmallDenseMap& other) {
927  if (&other != this)
928  copyFrom(other);
929  return *this;
930  }
931 
932  SmallDenseMap& operator=(SmallDenseMap &&other) {
933  this->destroyAll();
934  deallocateBuckets();
935  init(0);
936  swap(other);
937  return *this;
938  }
939 
940  void copyFrom(const SmallDenseMap& other) {
941  this->destroyAll();
942  deallocateBuckets();
943  Small = true;
944  if (other.getNumBuckets() > InlineBuckets) {
945  Small = false;
946  new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
947  }
948  this->BaseT::copyFrom(other);
949  }
950 
951  void init(unsigned InitBuckets) {
952  Small = true;
953  if (InitBuckets > InlineBuckets) {
954  Small = false;
955  new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
956  }
957  this->BaseT::initEmpty();
958  }
959 
960  void grow(unsigned AtLeast) {
961  if (AtLeast >= InlineBuckets)
962  AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
963 
964  if (Small) {
965  if (AtLeast < InlineBuckets)
966  return; // Nothing to do.
967 
968  // First move the inline buckets into a temporary storage.
969  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> TmpStorage;
970  BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
971  BucketT *TmpEnd = TmpBegin;
972 
973  // Loop over the buckets, moving non-empty, non-tombstones into the
974  // temporary storage. Have the loop move the TmpEnd forward as it goes.
975  const KeyT EmptyKey = this->getEmptyKey();
976  const KeyT TombstoneKey = this->getTombstoneKey();
977  for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
978  if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
979  !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
980  assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
981  "Too many inline buckets!");
982  ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
983  ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
984  ++TmpEnd;
985  P->getSecond().~ValueT();
986  }
987  P->getFirst().~KeyT();
988  }
989 
990  // Now make this map use the large rep, and move all the entries back
991  // into it.
992  Small = false;
993  new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
994  this->moveFromOldBuckets(TmpBegin, TmpEnd);
995  return;
996  }
997 
998  LargeRep OldRep = std::move(*getLargeRep());
999  getLargeRep()->~LargeRep();
1000  if (AtLeast <= InlineBuckets) {
1001  Small = true;
1002  } else {
1003  new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1004  }
1005 
1006  this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1007 
1008  // Free the old table.
1009  operator delete(OldRep.Buckets);
1010  }
1011 
1012  void shrink_and_clear() {
1013  unsigned OldSize = this->size();
1014  this->destroyAll();
1015 
1016  // Reduce the number of buckets.
1017  unsigned NewNumBuckets = 0;
1018  if (OldSize) {
1019  NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1020  if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1021  NewNumBuckets = 64;
1022  }
1023  if ((Small && NewNumBuckets <= InlineBuckets) ||
1024  (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1025  this->BaseT::initEmpty();
1026  return;
1027  }
1028 
1029  deallocateBuckets();
1030  init(NewNumBuckets);
1031  }
1032 
1033 private:
1034  unsigned getNumEntries() const {
1035  return NumEntries;
1036  }
1037 
1038  void setNumEntries(unsigned Num) {
1039  // NumEntries is hardcoded to be 31 bits wide.
1040  assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1041  NumEntries = Num;
1042  }
1043 
1044  unsigned getNumTombstones() const {
1045  return NumTombstones;
1046  }
1047 
1048  void setNumTombstones(unsigned Num) {
1049  NumTombstones = Num;
1050  }
1051 
1052  const BucketT *getInlineBuckets() const {
1053  assert(Small);
1054  // Note that this cast does not violate aliasing rules as we assert that
1055  // the memory's dynamic type is the small, inline bucket buffer, and the
1056  // 'storage.buffer' static type is 'char *'.
1057  return reinterpret_cast<const BucketT *>(storage.buffer);
1058  }
1059 
1060  BucketT *getInlineBuckets() {
1061  return const_cast<BucketT *>(
1062  const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1063  }
1064 
1065  const LargeRep *getLargeRep() const {
1066  assert(!Small);
1067  // Note, same rule about aliasing as with getInlineBuckets.
1068  return reinterpret_cast<const LargeRep *>(storage.buffer);
1069  }
1070 
1071  LargeRep *getLargeRep() {
1072  return const_cast<LargeRep *>(
1073  const_cast<const SmallDenseMap *>(this)->getLargeRep());
1074  }
1075 
1076  const BucketT *getBuckets() const {
1077  return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1078  }
1079 
1080  BucketT *getBuckets() {
1081  return const_cast<BucketT *>(
1082  const_cast<const SmallDenseMap *>(this)->getBuckets());
1083  }
1084 
1085  unsigned getNumBuckets() const {
1086  return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1087  }
1088 
1089  void deallocateBuckets() {
1090  if (Small)
1091  return;
1092 
1093  operator delete(getLargeRep()->Buckets);
1094  getLargeRep()->~LargeRep();
1095  }
1096 
1097  LargeRep allocateBuckets(unsigned Num) {
1098  assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1099  LargeRep Rep = {
1100  static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
1101  };
1102  return Rep;
1103  }
1104 };
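A sketch of when SmallDenseMap is the better choice; the inline bucket count of 8 and the variable name are illustrative.

// Maps that are almost always tiny: the first 8 buckets live inside the object
// itself, so small instances never touch the heap. Given the 3/4 load factor
// enforced in DenseMapBase, that covers about five live entries (ignoring
// tombstone churn) before the map switches to a heap-allocated LargeRep.
llvm::SmallDenseMap<const void *, unsigned, 8> SlotNumbers;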
1105 
1106 template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1107  bool IsConst>
1108 class DenseMapIterator : DebugEpochBase::HandleBase {
1109  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1110  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1111 
1112  using ConstIterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1113 
1114 public:
1115  using difference_type = ptrdiff_t;
1116  using value_type =
1117  typename std::conditional<IsConst, const Bucket, Bucket>::type;
1118  using pointer = value_type *;
1119  using reference = value_type &;
1120  using iterator_category = std::forward_iterator_tag;
1121 
1122 private:
1123  pointer Ptr = nullptr;
1124  pointer End = nullptr;
1125 
1126 public:
1127  DenseMapIterator() = default;
1128 
1129  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
1130  bool NoAdvance = false)
1131  : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1132  assert(isHandleInSync() && "invalid construction!");
1133 
1134  if (NoAdvance) return;
1135  if (shouldReverseIterate<KeyT>()) {
1136  RetreatPastEmptyBuckets();
1137  return;
1138  }
1139  AdvancePastEmptyBuckets();
1140  }
1141 
1142  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1143  // for const iterator destinations so it doesn't end up as a user defined copy
1144  // constructor.
1145  template <bool IsConstSrc,
1146  typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
1147  DenseMapIterator(
1148      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1149      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1150 
1151  reference operator*() const {
1152  assert(isHandleInSync() && "invalid iterator access!");
1153  if (shouldReverseIterate<KeyT>())
1154  return Ptr[-1];
1155  return *Ptr;
1156  }
1157  pointer operator->() const {
1158  assert(isHandleInSync() && "invalid iterator access!");
1159  if (shouldReverseIterate<KeyT>())
1160  return &(Ptr[-1]);
1161  return Ptr;
1162  }
1163 
1164  bool operator==(const ConstIterator &RHS) const {
1165  assert((!Ptr || isHandleInSync()) && "handle not in sync!");
1166  assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1167  assert(getEpochAddress() == RHS.getEpochAddress() &&
1168  "comparing incomparable iterators!");
1169  return Ptr == RHS.Ptr;
1170  }
1171  bool operator!=(const ConstIterator &RHS) const {
1172  assert((!Ptr || isHandleInSync()) && "handle not in sync!");
1173  assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1174  assert(getEpochAddress() == RHS.getEpochAddress() &&
1175  "comparing incomparable iterators!");
1176  return Ptr != RHS.Ptr;
1177  }
1178 
1179  inline DenseMapIterator& operator++() { // Preincrement
1180  assert(isHandleInSync() && "invalid iterator access!");
1181  if (shouldReverseIterate<KeyT>()) {
1182  --Ptr;
1183  RetreatPastEmptyBuckets();
1184  return *this;
1185  }
1186  ++Ptr;
1187  AdvancePastEmptyBuckets();
1188  return *this;
1189  }
1190  DenseMapIterator operator++(int) { // Postincrement
1191  assert(isHandleInSync() && "invalid iterator access!");
1192  DenseMapIterator tmp = *this; ++*this; return tmp;
1193  }
1194 
1195 private:
1196  void AdvancePastEmptyBuckets() {
1197  assert(Ptr <= End);
1198  const KeyT Empty = KeyInfoT::getEmptyKey();
1199  const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1200 
1201  while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1202  KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1203  ++Ptr;
1204  }
1205 
1206  void RetreatPastEmptyBuckets() {
1207  assert(Ptr >= End);
1208  const KeyT Empty = KeyInfoT::getEmptyKey();
1209  const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1210 
1211  while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1212  KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1213  --Ptr;
1214  }
1215 };
1216 
1217 template<typename KeyT, typename ValueT, typename KeyInfoT>
1218 static inline size_t
1219 capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1220  return X.getMemorySize();
1221 }
1222 
1223 } // end namespace llvm
1224 
1225 #endif // LLVM_ADT_DENSEMAP_H
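Finally, a sketch of the minimal DenseMapInfo specialization a user-defined key type needs before it can be used with the containers in this file; the Coord type and its reserved sentinel values are illustrative assumptions (the actual trait requirements live in llvm/ADT/DenseMapInfo.h).

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include <climits>

struct Coord { int X, Y; };

namespace llvm {
template <> struct DenseMapInfo<Coord> {
  // Two values that can never appear as real keys, used to mark empty and
  // tombstone buckets respectively.
  static inline Coord getEmptyKey() { return {INT_MIN, INT_MIN}; }
  static inline Coord getTombstoneKey() { return {INT_MIN + 1, INT_MIN}; }
  static unsigned getHashValue(const Coord &C) {
    return hash_combine(C.X, C.Y);
  }
  static bool isEqual(const Coord &L, const Coord &R) {
    return L.X == R.X && L.Y == R.Y;
  }
};
} // end namespace llvm

// llvm::DenseMap<Coord, unsigned> now works as usual.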