#define DEBUG_TYPE "on-disk-cas"

    return ID.takeError();

      "corrupt object '" + toHex(*ID) + "'");

  enum class StorageKind : uint8_t {
    StandaloneLeaf0 = 12,

  static StringRef getStandaloneFilePrefix(StorageKind SK) {
    case TrieRecord::StorageKind::Standalone:
    case TrieRecord::StorageKind::StandaloneLeaf:
    case TrieRecord::StorageKind::StandaloneLeaf0:

  enum Limits : int64_t {
    MaxEmbeddedSize = 64LL * 1024LL - 1,

    StorageKind SK = StorageKind::Unknown;

  static uint64_t pack(Data D) {
    assert(D.Offset.get() < (int64_t)(1ULL << 56));
    uint64_t Packed = uint64_t(D.SK) << 56 | D.Offset.get();
    assert(D.SK != StorageKind::Unknown || Packed == 0);
    Data RoundTrip = unpack(Packed);
    assert(D.Offset.get() == RoundTrip.Offset.get());

  static Data unpack(uint64_t Packed) {
    D.SK = (StorageKind)(Packed >> 56);

  TrieRecord() : Storage(0) {}

  Data load() const { return unpack(Storage); }
  bool compare_exchange_strong(Data &Existing, Data New);

  std::atomic<uint64_t> Storage;
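// A minimal standalone sketch of the packing scheme above, under stated
// assumptions: an 8-bit storage kind in the top byte and a 56-bit file offset
// in the low bytes, round-tripped through a single uint64_t so the whole
// record fits in one atomic word. The names (PackedRecord, Kind) and the enum
// values are illustrative, not the actual LLVM types.
#include <cassert>
#include <cstdint>

namespace sketch {
enum class Kind : uint8_t { Unknown = 0, DataPool = 1, Standalone = 10 };

struct PackedRecord {
  Kind K = Kind::Unknown;
  int64_t Offset = 0; // must fit in 56 bits

  static uint64_t pack(PackedRecord R) {
    assert(R.Offset >= 0 && R.Offset < (int64_t)(1ULL << 56));
    return (uint64_t(R.K) << 56) | uint64_t(R.Offset);
  }
  static PackedRecord unpack(uint64_t Bits) {
    return {Kind(Bits >> 56), int64_t(Bits & ((1ULL << 56) - 1))};
  }
};

// Usage: pack/unpack must round-trip, mirroring the asserts in the real code.
inline void roundTripCheck() {
  PackedRecord R{Kind::DataPool, 0x1234};
  PackedRecord RT = PackedRecord::unpack(PackedRecord::pack(R));
  assert(RT.K == R.K && RT.Offset == R.Offset);
}
} // namespace sketch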
struct DataRecordHandle {
  enum class NumRefsFlags : uint8_t {
  enum class DataSizeFlags {
  enum class RefKindFlags {

    DataSizeShift = NumRefsShift + NumRefsBits,
    RefKindShift = DataSizeShift + DataSizeBits,

  static_assert(((UINT32_MAX << NumRefsBits) & (uint32_t)NumRefsFlags::Max) ==
  static_assert(((UINT32_MAX << DataSizeBits) & (uint32_t)DataSizeFlags::Max) ==
  static_assert(((UINT32_MAX << RefKindBits) & (uint32_t)RefKindFlags::Max) ==

    NumRefsFlags NumRefs;
    DataSizeFlags DataSize;
    RefKindFlags RefKind;

    static uint64_t pack(LayoutFlags LF) {
      unsigned Packed = ((unsigned)LF.NumRefs << NumRefsShift) |
                        ((unsigned)LF.DataSize << DataSizeShift) |
                        ((unsigned)LF.RefKind << RefKindShift);
      LayoutFlags RoundTrip = unpack(Packed);
      assert(LF.NumRefs == RoundTrip.NumRefs);
      assert(LF.DataSize == RoundTrip.DataSize);
      assert(LF.RefKind == RoundTrip.RefKind);

    static LayoutFlags unpack(uint64_t Storage) {
      assert(Storage <= UINT8_MAX && "Expect storage to fit in a byte");
          (NumRefsFlags)((Storage >> NumRefsShift) & ((1U << NumRefsBits) - 1));
      LF.DataSize = (DataSizeFlags)((Storage >> DataSizeShift) &
                                    ((1U << DataSizeBits) - 1));
          (RefKindFlags)((Storage >> RefKindShift) & ((1U << RefKindBits) - 1));
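// Simplified sketch of the LayoutFlags idea: three tiny enums packed into one
// byte via fixed shifts, which then rides in the top byte of the 32-bit record
// header. The field widths here (2+2+1 bits) and all names are assumptions for
// illustration, not the exact bit budget used above.
#include <cstdint>

namespace sketch {
enum class NumRefsWidth : uint8_t { B0, B1, B2, B4 };  // 2 bits
enum class DataSizeWidth : uint8_t { B1, B2, B4, B8 }; // 2 bits
enum class RefKind : uint8_t { Ref8B, Ref4B };         // 1 bit

struct Flags {
  NumRefsWidth NumRefs;
  DataSizeWidth DataSize;
  RefKind Kind;
};

constexpr unsigned NumRefsShift = 0, DataSizeShift = 2, RefKindShift = 4;

inline uint8_t packFlags(Flags F) {
  return uint8_t((unsigned(F.NumRefs) << NumRefsShift) |
                 (unsigned(F.DataSize) << DataSizeShift) |
                 (unsigned(F.Kind) << RefKindShift));
}
inline Flags unpackFlags(uint8_t Byte) {
  return {NumRefsWidth((Byte >> NumRefsShift) & 0x3),
          DataSizeWidth((Byte >> DataSizeShift) & 0x3),
          RefKind((Byte >> RefKindShift) & 0x1)};
}

// The packed byte occupies the most significant byte of a 32-bit header word,
// analogous to Header::LayoutFlagsShift below.
inline uint32_t makeHeader(Flags F) {
  return uint32_t(packFlags(F)) << ((sizeof(uint32_t) - 1) * 8);
}
} // namespace sketch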
    using PackTy = uint32_t;
    static constexpr unsigned LayoutFlagsShift =
        (sizeof(PackTy) - 1) * CHAR_BIT;

    InternalRefArrayRef Refs;

  LayoutFlags getLayoutFlags() const {
    return LayoutFlags::unpack(H->Packed >> Header::LayoutFlagsShift);

  void skipDataSize(LayoutFlags LF, int64_t &RelOffset) const;
  uint32_t getNumRefs() const;
  void skipNumRefs(LayoutFlags LF, int64_t &RelOffset) const;
  int64_t getRefsRelOffset() const;
  int64_t getDataRelOffset() const;

  static uint64_t getTotalSize(uint64_t DataRelOffset, uint64_t DataSize) {
    return DataRelOffset + DataSize + 1;

  uint64_t getTotalSize() const {

    explicit Layout(const Input &I);

    uint64_t DataSize = 0;
    uint32_t NumRefs = 0;
    int64_t RefsRelOffset = 0;
    int64_t DataRelOffset = 0;
    uint64_t getTotalSize() const {
      return DataRecordHandle::getTotalSize(DataRelOffset, DataSize);

  InternalRefArrayRef getRefs() const {
    assert(H && "Expected valid handle");
    auto *BeginByte = reinterpret_cast<const char *>(H) + getRefsRelOffset();
    size_t Size = getNumRefs();
      return InternalRefArrayRef();
    if (getLayoutFlags().RefKind == RefKindFlags::InternalRef4B)
      return ArrayRef(reinterpret_cast<const InternalRef4B *>(BeginByte), Size);
    return ArrayRef(reinterpret_cast<const InternalRef *>(BeginByte), Size);

  ArrayRef<char> getData() const {
    assert(H && "Expected valid handle");
    return ArrayRef(reinterpret_cast<const char *>(H) + getDataRelOffset(),

  static DataRecordHandle create(function_ref<char *(size_t Size)> Alloc,
  static Expected<DataRecordHandle>
  createWithError(function_ref<Expected<char *>(size_t Size)> Alloc,
  static DataRecordHandle construct(char *Mem, const Input &I);

  static DataRecordHandle get(const char *Mem) {
    return DataRecordHandle(
        *reinterpret_cast<const DataRecordHandle::Header *>(Mem));

  static Expected<DataRecordHandle>
  getFromDataPool(const OnDiskDataAllocator &Pool, FileOffset Offset);

  explicit operator bool() const { return H; }
  const Header &getHeader() const { return *H; }

  DataRecordHandle() = default;
  explicit DataRecordHandle(const Header &H) : H(&H) {}

  static DataRecordHandle constructImpl(char *Mem, const Input &I,

  const Header *H = nullptr;
struct OnDiskContent {
  std::optional<DataRecordHandle> Record;
  std::optional<ArrayRef<char>> Bytes;

class StandaloneDataInMemory {
  OnDiskContent getContent() const;

  StandaloneDataInMemory(std::unique_ptr<sys::fs::mapped_file_region> Region,
                         TrieRecord::StorageKind SK)
      : Region(std::move(Region)), SK(SK) {
    bool IsStandalone = false;
    case TrieRecord::StorageKind::Standalone:
    case TrieRecord::StorageKind::StandaloneLeaf:
    case TrieRecord::StorageKind::StandaloneLeaf0:

  std::unique_ptr<sys::fs::mapped_file_region> Region;
  TrieRecord::StorageKind SK;

template <size_t NumShards> class StandaloneDataMap {
  static_assert(isPowerOf2_64(NumShards), "Expected power of 2");

  uintptr_t insert(ArrayRef<uint8_t> Hash, TrieRecord::StorageKind SK,
                   std::unique_ptr<sys::fs::mapped_file_region> Region);

  const StandaloneDataInMemory *lookup(ArrayRef<uint8_t> Hash) const;
  bool count(ArrayRef<uint8_t> Hash) const { return bool(lookup(Hash)); }

    DenseMap<const uint8_t *, std::unique_ptr<StandaloneDataInMemory>> Map;
    mutable std::mutex Mutex;

  Shard &getShard(ArrayRef<uint8_t> Hash) {
    return const_cast<Shard &>(
        const_cast<const StandaloneDataMap *>(this)->getShard(Hash));

  const Shard &getShard(ArrayRef<uint8_t> Hash) const {
    static_assert(NumShards <= 256, "Expected only 8 bits of shard");
    return Shards[Hash[0] % NumShards];

  Shard Shards[NumShards];

using StandaloneDataMapTy = StandaloneDataMap<16>;
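// A reduced sketch of the sharded-map technique above: pick a shard from the
// first byte of the hash and guard each shard with its own mutex so unrelated
// inserts do not contend. Standard-library containers stand in for the LLVM
// types; ShardedMap and its members are hypothetical names.
#include <array>
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

namespace sketch {
template <size_t NumShards> class ShardedMap {
  static_assert((NumShards & (NumShards - 1)) == 0, "Expected power of 2");
  static_assert(NumShards <= 256, "Expected only 8 bits of shard");

  struct Shard {
    std::map<std::vector<uint8_t>, std::shared_ptr<std::string>> Map;
    mutable std::mutex Mutex;
  };
  std::array<Shard, NumShards> Shards;

  Shard &getShard(const std::vector<uint8_t> &Hash) {
    return Shards[Hash.at(0) % NumShards];
  }

public:
  // Insert-if-absent; returns the stored value either way, so racing callers
  // converge on one entry per hash.
  std::shared_ptr<std::string> insert(const std::vector<uint8_t> &Hash,
                                      std::string Value) {
    Shard &S = getShard(Hash);
    std::lock_guard<std::mutex> Lock(S.Mutex);
    auto &V = S.Map[Hash];
    if (!V)
      V = std::make_shared<std::string>(std::move(Value));
    return V;
  }
};
using ShardedMap16 = ShardedMap<16>;
} // namespace sketch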
class InternalRefVector {
  void push_back(InternalRef Ref) {
      return FullRefs.push_back(Ref);
      return SmallRefs.push_back(*Small);
    FullRefs.reserve(SmallRefs.size() + 1);
    for (InternalRef4B Small : SmallRefs)
      FullRefs.push_back(Small);
    FullRefs.push_back(Ref);

  operator InternalRefArrayRef() const {
    assert(SmallRefs.empty() || FullRefs.empty());
    return NeedsFull ? InternalRefArrayRef(FullRefs)
                     : InternalRefArrayRef(SmallRefs);

  bool NeedsFull = false;
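// A standalone sketch of the InternalRefVector strategy: keep references in a
// compact 4-byte form while every value fits, and re-encode the whole list as
// 8-byte values the first time one does not. Plain integers stand in for
// InternalRef/InternalRef4B; CompactRefVector is a hypothetical name.
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

namespace sketch {
class CompactRefVector {
public:
  void push_back(uint64_t Ref) {
    if (NeedsFull)
      return Full.push_back(Ref);
    if (std::optional<uint32_t> Small = tryToShrink(Ref))
      return Small4B.push_back(*Small);
    // First reference that does not fit in 4 bytes: widen everything once.
    NeedsFull = true;
    Full.reserve(Small4B.size() + 1);
    for (uint32_t S : Small4B)
      Full.push_back(S);
    Small4B.clear();
    Full.push_back(Ref);
  }

  std::size_t size() const { return NeedsFull ? Full.size() : Small4B.size(); }

private:
  static std::optional<uint32_t> tryToShrink(uint64_t Ref) {
    if (Ref <= UINT32_MAX)
      return uint32_t(Ref);
    return std::nullopt;
  }

  bool NeedsFull = false;
  std::vector<uint32_t> Small4B;
  std::vector<uint64_t> Full;
};
} // namespace sketch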
  if (Expected<char *> Mem = Alloc(L.getTotalSize()))
    return constructImpl(*Mem, I, L);
  return Mem.takeError();

DataRecordHandle::create(function_ref<char *(size_t Size)> Alloc,
  return constructImpl(Alloc(L.getTotalSize()), I, L);

uintptr_t StandaloneDataMap<N>::insert(
    std::unique_ptr<sys::fs::mapped_file_region> Region) {
  auto &S = getShard(Hash);
  std::lock_guard<std::mutex> Lock(S.Mutex);
  auto &V = S.Map[Hash.data()];
    V = std::make_unique<StandaloneDataInMemory>(std::move(Region), SK);
  return reinterpret_cast<uintptr_t>(V.get());

const StandaloneDataInMemory *
  auto &S = getShard(Hash);
  std::lock_guard<std::mutex> Lock(S.Mutex);
  auto I = S.Map.find(Hash.data());
  if (I == S.Map.end())
  TempFile(StringRef Name, int FD) : TmpName(std::string(Name)), FD(FD) {}

  TempFile(TempFile &&Other) { *this = std::move(Other); }
  TempFile &operator=(TempFile &&Other) {
    TmpName = std::move(Other.TmpName);

  Error keep(const Twine &Name);

class MappedTempFile {
  char *data() const { return Map.data(); }
  size_t size() const { return Map.size(); }

    assert(Map && "Map already destroyed");
    return Temp.discard();

  Error keep(const Twine &Name) {
    assert(Map && "Map already destroyed");
    return Temp.keep(Name);

  MappedTempFile(TempFile Temp, sys::fs::mapped_file_region Map)

  sys::fs::mapped_file_region Map;

  std::error_code RemoveEC;

  TempFile Ret(ResultPath, FD);
  return std::move(Ret);

bool TrieRecord::compare_exchange_strong(Data &Existing, Data New) {
  uint64_t ExistingPacked = pack(Existing);
  if (Storage.compare_exchange_strong(ExistingPacked, NewPacked))
  Existing = unpack(ExistingPacked);
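// A minimal sketch of the lock-free publication pattern above: the record is a
// single packed 64-bit atomic, and a writer installs its value only if the
// slot still holds the empty/"Unknown" encoding. On failure, the caller sees
// the value that won the race. This mirrors the technique, not the exact LLVM
// code; PackedSlot and publishOnce are illustrative names.
#include <atomic>
#include <cstdint>

namespace sketch {
struct PackedSlot {
  std::atomic<uint64_t> Storage{0};

  // Returns true if New was installed; otherwise Existing is updated to the
  // packed value currently stored (the competing writer's record).
  bool tryPublish(uint64_t &Existing, uint64_t New) {
    return Storage.compare_exchange_strong(Existing, New);
  }
};

inline uint64_t publishOnce(PackedSlot &Slot, uint64_t New) {
  uint64_t Existing = 0; // expect the empty/Unknown encoding
  if (Slot.tryPublish(Existing, New))
    return New;      // we won the race
  return Existing;   // someone else already stored an equivalent record
}
} // namespace sketch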
DataRecordHandle DataRecordHandle::construct(char *Mem, const Input &I) {
  return constructImpl(Mem, I, Layout(I));

  auto HeaderData = Pool.get(Offset, sizeof(DataRecordHandle::Header));
    return HeaderData.takeError();
  auto Record = DataRecordHandle::get(HeaderData->data());
      "data record span passed the end of the data pool");
DataRecordHandle DataRecordHandle::constructImpl(char *Mem, const Input &I,
  char *Next = Mem + sizeof(Header);

  Header::PackTy Packed = 0;
  Packed |= LayoutFlags::pack(L.Flags) << Header::LayoutFlagsShift;

  switch (L.Flags.DataSize) {
  case DataSizeFlags::Uses1B:
    assert(I.Data.size() <= UINT8_MAX);
    Packed |= (Header::PackTy)I.Data.size()
              << ((sizeof(Packed) - 2) * CHAR_BIT);
  case DataSizeFlags::Uses2B:
    assert(I.Data.size() <= UINT16_MAX);
    Packed |= (Header::PackTy)I.Data.size()
              << ((sizeof(Packed) - 4) * CHAR_BIT);
  case DataSizeFlags::Uses4B:
  case DataSizeFlags::Uses8B:

  switch (L.Flags.NumRefs) {
  case NumRefsFlags::Uses0B:
  case NumRefsFlags::Uses1B:
    assert(I.Refs.size() <= UINT8_MAX);
    Packed |= (Header::PackTy)I.Refs.size()
              << ((sizeof(Packed) - 2) * CHAR_BIT);
  case NumRefsFlags::Uses2B:
    assert(I.Refs.size() <= UINT16_MAX);
    Packed |= (Header::PackTy)I.Refs.size()
              << ((sizeof(Packed) - 4) * CHAR_BIT);
  case NumRefsFlags::Uses4B:
  case NumRefsFlags::Uses8B:

  if (!I.Refs.empty()) {
    assert((L.Flags.RefKind == RefKindFlags::InternalRef4B) == I.Refs.is4B());
    ArrayRef<uint8_t> RefsBuffer = I.Refs.getBuffer();

  Next[I.Data.size()] = 0;

  Header *H = new (Mem) Header{Packed};
  DataRecordHandle Record(*H);
  assert(Record.getData() == I.Data);
  assert(Record.getNumRefs() == I.Refs.size());
  assert(Record.getRefs() == I.Refs);
  assert(Record.getLayoutFlags().DataSize == L.Flags.DataSize);
  assert(Record.getLayoutFlags().NumRefs == L.Flags.NumRefs);
  assert(Record.getLayoutFlags().RefKind == L.Flags.RefKind);
DataRecordHandle::Layout::Layout(const Input &I) {
  uint64_t RelOffset = sizeof(Header);
  NumRefs = I.Refs.size();
      I.Refs.is4B() ? RefKindFlags::InternalRef4B : RefKindFlags::InternalRef;

  if (DataSize <= UINT8_MAX && Has1B) {
    Flags.DataSize = DataSizeFlags::Uses1B;
  } else if (DataSize <= UINT16_MAX && Has2B) {
    Flags.DataSize = DataSizeFlags::Uses2B;
  } else if (DataSize <= UINT32_MAX) {
    Flags.DataSize = DataSizeFlags::Uses4B;
    Flags.DataSize = DataSizeFlags::Uses8B;

    Flags.NumRefs = NumRefsFlags::Uses0B;
  } else if (NumRefs <= UINT8_MAX && Has1B) {
    Flags.NumRefs = NumRefsFlags::Uses1B;
  } else if (NumRefs <= UINT16_MAX && Has2B) {
    Flags.NumRefs = NumRefsFlags::Uses2B;
    Flags.NumRefs = NumRefsFlags::Uses4B;

  auto GrowSizeFieldsBy4B = [&]() {
    assert(Flags.NumRefs != NumRefsFlags::Uses8B &&
           "Expected to be able to grow NumRefs8B");
    if (Flags.DataSize < DataSizeFlags::Uses4B)
      Flags.DataSize = DataSizeFlags::Uses4B;
    else if (Flags.DataSize < DataSizeFlags::Uses8B)
      Flags.DataSize = DataSizeFlags::Uses8B;
    else if (Flags.NumRefs < NumRefsFlags::Uses4B)
      Flags.NumRefs = NumRefsFlags::Uses4B;
      Flags.NumRefs = NumRefsFlags::Uses8B;

  if (Flags.RefKind == RefKindFlags::InternalRef) {
      GrowSizeFieldsBy4B();
    RefsRelOffset = RelOffset;
    RelOffset += 8 * NumRefs;

    uint64_t RefListSize = 4 * NumRefs;
      GrowSizeFieldsBy4B();
    RefsRelOffset = RelOffset;
    RelOffset += RefListSize;

  DataRelOffset = RelOffset;
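// A small sketch of the layout decision above: pick the narrowest encoding for
// a size field (1, 2, 4, or 8 bytes) based on the value and on whether a spare
// 1- or 2-byte slot is still free in the packed header. Has1B/Has2B stand in
// for the "is that header slot still unused" checks in the real code; names
// here are illustrative.
#include <cstdint>

namespace sketch {
enum class Width : uint8_t { B1 = 1, B2 = 2, B4 = 4, B8 = 8 };

inline Width chooseWidth(uint64_t Value, bool Has1B, bool Has2B) {
  if (Value <= UINT8_MAX && Has1B)
    return Width::B1; // fits in the header's spare byte
  if (Value <= UINT16_MAX && Has2B)
    return Width::B2; // fits in the header's spare 2-byte slot
  if (Value <= UINT32_MAX)
    return Width::B4; // stored out-of-line after the header
  return Width::B8;
}
} // namespace sketch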
uint64_t DataRecordHandle::getDataSize() const {
  int64_t RelOffset = sizeof(Header);
  auto *DataSizePtr = reinterpret_cast<const char *>(H) + RelOffset;
  switch (getLayoutFlags().DataSize) {
  case DataSizeFlags::Uses1B:
    return (H->Packed >> ((sizeof(Header::PackTy) - 2) * CHAR_BIT)) & UINT8_MAX;
  case DataSizeFlags::Uses2B:
    return (H->Packed >> ((sizeof(Header::PackTy) - 4) * CHAR_BIT)) &
  case DataSizeFlags::Uses4B:
  case DataSizeFlags::Uses8B:

void DataRecordHandle::skipDataSize(LayoutFlags LF, int64_t &RelOffset) const {
  if (LF.DataSize >= DataSizeFlags::Uses4B)
  if (LF.DataSize >= DataSizeFlags::Uses8B)

uint32_t DataRecordHandle::getNumRefs() const {
  LayoutFlags LF = getLayoutFlags();
  int64_t RelOffset = sizeof(Header);
  skipDataSize(LF, RelOffset);
  auto *NumRefsPtr = reinterpret_cast<const char *>(H) + RelOffset;
  switch (LF.NumRefs) {
  case NumRefsFlags::Uses0B:
  case NumRefsFlags::Uses1B:
    return (H->Packed >> ((sizeof(Header::PackTy) - 2) * CHAR_BIT)) & UINT8_MAX;
  case NumRefsFlags::Uses2B:
    return (H->Packed >> ((sizeof(Header::PackTy) - 4) * CHAR_BIT)) &
  case NumRefsFlags::Uses4B:
  case NumRefsFlags::Uses8B:

void DataRecordHandle::skipNumRefs(LayoutFlags LF, int64_t &RelOffset) const {
  if (LF.NumRefs >= NumRefsFlags::Uses4B)
  if (LF.NumRefs >= NumRefsFlags::Uses8B)

int64_t DataRecordHandle::getRefsRelOffset() const {
  LayoutFlags LF = getLayoutFlags();
  int64_t RelOffset = sizeof(Header);
  skipDataSize(LF, RelOffset);
  skipNumRefs(LF, RelOffset);

int64_t DataRecordHandle::getDataRelOffset() const {
  LayoutFlags LF = getLayoutFlags();
  int64_t RelOffset = sizeof(Header);
  skipDataSize(LF, RelOffset);
  skipNumRefs(LF, RelOffset);
  uint32_t RefSize = LF.RefKind == RefKindFlags::InternalRef4B ? 4 : 8;
  RelOffset += RefSize * getNumRefs();
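// A compact sketch of the offset-walking accessors above: the record stores no
// absolute positions; each getter starts right after the fixed header and
// skips the optional out-of-line size fields that the layout flags say are
// present. Field widths and names here are illustrative assumptions.
#include <cstdint>

namespace sketch {
struct RecordLayout {
  bool DataSizeOutOfLine4B = false; // 4-byte data-size field follows header
  bool DataSizeOutOfLine8B = false; // 8-byte data-size field follows header
  bool NumRefsOutOfLine4B = false;  // 4-byte ref-count field follows it
  uint32_t NumRefs = 0;
  bool Refs4B = true;               // references stored as 4 or 8 bytes
};

inline int64_t refsOffset(const RecordLayout &L, int64_t HeaderSize) {
  int64_t Off = HeaderSize;
  if (L.DataSizeOutOfLine4B)
    Off += 4;
  if (L.DataSizeOutOfLine8B)
    Off += 8;
  if (L.NumRefsOutOfLine4B)
    Off += 4;
  return Off;
}

inline int64_t dataOffset(const RecordLayout &L, int64_t HeaderSize) {
  // Data begins right after the reference list.
  return refsOffset(L, HeaderSize) + int64_t(L.Refs4B ? 4 : 8) * L.NumRefs;
}
} // namespace sketch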
  auto formatError = [&](Twine Msg) {

    if (Record.Data.size() != sizeof(TrieRecord))
      return formatError("wrong data record size");
      return formatError("wrong data record alignment");

    auto *R = reinterpret_cast<const TrieRecord *>(Record.Data.data());
    TrieRecord::Data D = R->load();
    std::unique_ptr<MemoryBuffer> FileBuffer;
      return formatError("invalid record kind value");

    auto I = getIndexProxyFromRef(Ref);
      return I.takeError();

    case TrieRecord::StorageKind::Unknown:
    case TrieRecord::StorageKind::DataPool:
      if (D.Offset.get() <= 0 ||
          (uint64_t)D.Offset.get() + sizeof(DataRecordHandle::Header) >=
        return formatError("datapool record out of bound");
    case TrieRecord::StorageKind::Standalone:
    case TrieRecord::StorageKind::StandaloneLeaf:
    case TrieRecord::StorageKind::StandaloneLeaf0:
      getStandalonePath(TrieRecord::getStandaloneFilePrefix(D.SK), *I, Path);
        return formatError("record file '" + Path + "' does not exist");
      FileBuffer = std::move(*File);
        return formatError("record file '" + Path + "' does not exist");

    auto dataError = [&](Twine Msg) {
                               "bad data for digest '" + toHex(I->Hash) +

    case TrieRecord::StorageKind::Unknown:
    case TrieRecord::StorageKind::DataPool: {
      auto DataRecord = DataRecordHandle::getFromDataPool(DataPool, D.Offset);
        return dataError(toString(DataRecord.takeError()));
      for (auto InternRef : DataRecord->getRefs()) {
        auto Index = getIndexProxyFromRef(InternRef);
          return Index.takeError();
      StoredData = DataRecord->getData();
    case TrieRecord::StorageKind::Standalone: {
      if (FileBuffer->getBufferSize() < sizeof(DataRecordHandle::Header))
        return dataError("data record is not big enough to read the header");
      auto DataRecord = DataRecordHandle::get(FileBuffer->getBufferStart());
      if (DataRecord.getTotalSize() < FileBuffer->getBufferSize())
            "data record span passed the end of the standalone file");
      for (auto InternRef : DataRecord.getRefs()) {
        auto Index = getIndexProxyFromRef(InternRef);
          return Index.takeError();
      StoredData = DataRecord.getData();
    case TrieRecord::StorageKind::StandaloneLeaf:
    case TrieRecord::StorageKind::StandaloneLeaf0: {
      if (D.SK == TrieRecord::StorageKind::StandaloneLeaf0) {
        if (!FileBuffer->getBuffer().ends_with('\0'))
          return dataError("standalone file is not zero terminated");

    Hasher(Refs, StoredData, ComputedHash);
      return dataError("hash mismatch, got '" + toHex(ComputedHash) +
  OS << "on-disk-root-path: " << RootPath << "\n";

    auto *R = reinterpret_cast<const TrieRecord *>(Data.data());
    TrieRecord::Data D = R->load();
    case TrieRecord::StorageKind::Unknown:
    case TrieRecord::StorageKind::DataPool:
    case TrieRecord::StorageKind::Standalone:
      OS << "standalone-data ";
    case TrieRecord::StorageKind::StandaloneLeaf:
      OS << "standalone-leaf ";
    case TrieRecord::StorageKind::StandaloneLeaf0:
      OS << "standalone-leaf+0";
    OS << " Offset=" << (void *)D.Offset.get();

      Pool, [](PoolInfo LHS, PoolInfo RHS) { return LHS.Offset < RHS.Offset; });
  for (PoolInfo PI : Pool) {
    OS << "- addr=" << (void *)PI.Offset << " ";
    auto D = DataRecordHandle::getFromDataPool(DataPool, FileOffset(PI.Offset));
      OS << "error: " << toString(D.takeError());
    OS << "record refs=" << D->getNumRefs() << " data=" << D->getDataSize()
       << " size=" << D->getTotalSize()
       << " end=" << (void *)(PI.Offset + D->getTotalSize()) << "\n";
  auto P = Index.insertLazy(
        new (TentativeValue.Data.data()) TrieRecord();
    return P.takeError();
  assert(*P && "Expected insertion");
  return getIndexProxyFromPointer(*P);

  return IndexProxy{P.getOffset(), P->Hash,
                    *const_cast<TrieRecord *>(
                        reinterpret_cast<const TrieRecord *>(P->Data.data()))};

  auto I = indexHash(Hash);
    return I.takeError();
  return getExternalReference(*I);
ObjectID OnDiskGraphDB::getExternalReference(const IndexProxy &I) {
  return getExternalReference(makeInternalRef(I.Offset));

std::optional<ObjectID>
      [&](std::optional<IndexProxy> I) -> std::optional<ObjectID> {
        return std::nullopt;
    std::optional<ObjectID> UpstreamID =
        UpstreamDB->getExistingReference(Digest);
      return std::nullopt;
      return std::nullopt;
    return getExternalReference(*I);

    return tryUpstream(std::nullopt);
  TrieRecord::Data Obj = I.Ref.load();
  if (Obj.SK == TrieRecord::StorageKind::Unknown)
    return tryUpstream(I);
  return getExternalReference(makeInternalRef(I.Offset));

  auto P = Index.recoverFromFileOffset(Ref.getFileOffset());
    return P.takeError();
  return getIndexProxyFromPointer(*P);

  auto I = getIndexProxyFromRef(Ref);
    return I.takeError();

      reinterpret_cast<const StandaloneDataInMemory *>(Data & (-1ULL << 1));
  return SDIM->getContent();

  assert(DataHandle.getData().end()[0] == 0 && "Null termination");
  return OnDiskContent{DataHandle, std::nullopt};

    return *Content.Bytes;
  assert(Content.Record && "Expected record or bytes");
  return Content.Record->getData();

  if (std::optional<DataRecordHandle> Record =
    return Record->getRefs();
  return std::nullopt;
  auto I = getIndexProxyFromRef(Ref);
    return I.takeError();
  TrieRecord::Data Object = I->Ref.load();

  if (Object.SK == TrieRecord::StorageKind::Unknown) {
      return std::nullopt;
    return faultInFromUpstream(ExternalRef);

  if (Object.SK == TrieRecord::StorageKind::DataPool)

  switch (Object.SK) {
  case TrieRecord::StorageKind::Unknown:
  case TrieRecord::StorageKind::DataPool:
  case TrieRecord::StorageKind::Standalone:
  case TrieRecord::StorageKind::StandaloneLeaf0:
  case TrieRecord::StorageKind::StandaloneLeaf:

  getStandalonePath(TrieRecord::getStandaloneFilePrefix(Object.SK), *I, Path);

  auto Region = std::make_unique<sys::fs::mapped_file_region>(

      static_cast<StandaloneDataMapTy *>(StandaloneData)
          ->insert(I->Hash, Object.SK, std::move(Region)));

  auto Presence = getObjectPresence(Ref, true);
    return Presence.takeError();
  switch (*Presence) {
  case ObjectPresence::Missing:
  case ObjectPresence::InPrimaryDB:
  case ObjectPresence::OnlyInUpstreamDB:
    if (auto FaultInResult = faultInFromUpstream(Ref); !FaultInResult)
      return FaultInResult.takeError();
OnDiskGraphDB::getObjectPresence(ObjectID ExternalRef,
                                 bool CheckUpstream) const {
  auto I = getIndexProxyFromRef(Ref);
    return I.takeError();

  TrieRecord::Data Object = I->Ref.load();
  if (Object.SK != TrieRecord::StorageKind::Unknown)
    return ObjectPresence::InPrimaryDB;
  if (!CheckUpstream || !UpstreamDB)
    return ObjectPresence::Missing;
  std::optional<ObjectID> UpstreamID =
      UpstreamDB->getExistingReference(getDigest(*I));
  return UpstreamID.has_value() ? ObjectPresence::OnlyInUpstreamDB
                                : ObjectPresence::Missing;
void OnDiskGraphDB::getStandalonePath(StringRef Prefix, const IndexProxy &I,
  Path.assign(RootPath.begin(), RootPath.end());

OnDiskContent StandaloneDataInMemory::getContent() const {
  case TrieRecord::StorageKind::Standalone:
  case TrieRecord::StorageKind::StandaloneLeaf0:
    Leaf = Leaf0 = true;
  case TrieRecord::StorageKind::StandaloneLeaf:

    assert(Data.drop_back(Leaf0).end()[0] == 0 &&
           "Standalone node data missing null termination");
    return OnDiskContent{std::nullopt,

  DataRecordHandle Record = DataRecordHandle::get(Region->data());
  assert(Record.getData().end()[0] == 0 &&
         "Standalone object record missing null termination for data");
  return OnDiskContent{Record, std::nullopt};

  assert(Size && "Unexpected request for an empty temp file");
    return File.takeError();
  return MappedTempFile(std::move(*File), std::move(Map));
Error OnDiskGraphDB::createStandaloneLeaf(IndexProxy &I, ArrayRef<char> Data) {
  assert(Data.size() > TrieRecord::MaxEmbeddedSize &&
         "Expected a bigger file for external content...");

  TrieRecord::StorageKind SK = Leaf0 ? TrieRecord::StorageKind::StandaloneLeaf0
                                     : TrieRecord::StorageKind::StandaloneLeaf;

  SmallString<256> Path;
  int64_t FileSize = Data.size() + Leaf0;
  getStandalonePath(TrieRecord::getStandaloneFilePrefix(SK), I, Path);
    return File.takeError();

  TrieRecord::Data Existing;
  TrieRecord::Data Leaf{SK, FileOffset()};
  if (I.Ref.compare_exchange_strong(Existing, Leaf)) {
    recordStandaloneSizeIncrease(FileSize);

  if (Existing.SK == TrieRecord::StorageKind::Unknown)
  auto I = getIndexProxyFromRef(getInternalRef(ID));
    return I.takeError();

  TrieRecord::Data Existing = I->Ref.load();
  if (Existing.SK != TrieRecord::StorageKind::Unknown)

  if (Refs.empty() && Data.size() > TrieRecord::MaxEmbeddedSize)
    return createStandaloneLeaf(*I, Data);

  InternalRefVector InternalRefs;
    InternalRefs.push_back(getInternalRef(Ref));

  DataRecordHandle::Input Input{InternalRefs, Data};

  TrieRecord::StorageKind SK = TrieRecord::StorageKind::Unknown;

  std::optional<MappedTempFile> File;
  std::optional<uint64_t> FileSize;
    getStandalonePath(TrieRecord::getStandaloneFilePrefix(
                          TrieRecord::StorageKind::Standalone),
      return std::move(E);
    SK = TrieRecord::StorageKind::Standalone;
    return File->data();

    if (Size <= TrieRecord::MaxEmbeddedSize) {
      SK = TrieRecord::StorageKind::DataPool;
      auto P = DataPool.allocate(Size);
        char *NewAlloc = nullptr;
            P.takeError(), [&](std::unique_ptr<StringError> E) -> Error {
              if (E->convertToErrorCode() == std::errc::not_enough_memory)
                return AllocStandaloneFile(Size).moveInto(NewAlloc);
              return Error(std::move(E));
          return std::move(NewE);

      PoolOffset = P->getOffset();
        dbgs() << "pool-alloc addr=" << (void *)PoolOffset.get()
               << " end=" << (void *)(PoolOffset.get() + Size) << "\n";
      return (*P)->data();
    return AllocStandaloneFile(Size);

  assert(Record.getData().end()[0] == 0 && "Expected null-termination");
  assert(SK != TrieRecord::StorageKind::Unknown);
  assert(bool(File) != bool(PoolOffset) &&
         "Expected either a mapped file or a pooled offset");

  TrieRecord::Data Existing = I->Ref.load();
  TrieRecord::Data NewObject{SK, PoolOffset};
  if (Existing.SK == TrieRecord::StorageKind::Unknown) {
    if (Error E = File->keep(Path))

  if (Existing.SK == TrieRecord::StorageKind::Unknown) {
    if (I->Ref.compare_exchange_strong(Existing, NewObject)) {
      recordStandaloneSizeIncrease(*FileSize);

  if (Existing.SK == TrieRecord::StorageKind::Unknown)
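// A simplified sketch of the storage decision in store(): small records go
// into the shared data pool, while oversized ones (or pool-allocation
// failures) go to a standalone file that is published afterwards. The
// 64KB-1 threshold mirrors MaxEmbeddedSize above; PoolAlloc/FileAlloc and
// placeRecord are hypothetical stand-ins, not the real allocators.
#include <cstddef>
#include <functional>

namespace sketch {
constexpr std::size_t MaxEmbeddedSize = 64 * 1024 - 1;

enum class StorageKind { DataPool, StandaloneFile };

struct Placement {
  StorageKind Kind;
  char *Memory; // where the record should be serialized
};

// PoolAlloc returns nullptr when the pool is out of space, in which case we
// fall back to a standalone file, like the not_enough_memory path above.
inline Placement
placeRecord(std::size_t TotalSize,
            const std::function<char *(std::size_t)> &PoolAlloc,
            const std::function<char *(std::size_t)> &FileAlloc) {
  if (TotalSize <= MaxEmbeddedSize) {
    if (char *Mem = PoolAlloc(TotalSize))
      return {StorageKind::DataPool, Mem};
  }
  return {StorageKind::StandaloneFile, FileAlloc(TotalSize)};
}
} // namespace sketch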
void OnDiskGraphDB::recordStandaloneSizeIncrease(size_t SizeIncrease) {
  standaloneStorageSize().fetch_add(SizeIncrease, std::memory_order_relaxed);

std::atomic<uint64_t> &OnDiskGraphDB::standaloneStorageSize() const {
  assert(UserHeader.size() == sizeof(std::atomic<uint64_t>));
  return *reinterpret_cast<std::atomic<uint64_t> *>(UserHeader.data());

uint64_t OnDiskGraphDB::getStandaloneStorageSize() const {
  return standaloneStorageSize().load(std::memory_order_relaxed);

  return Index.size() + DataPool.size() + getStandaloneStorageSize();

  unsigned IndexPercent = Index.size() * 100ULL / Index.capacity();
  unsigned DataPercent = DataPool.size() * 100ULL / DataPool.capacity();
  return std::max(IndexPercent, DataPercent);
    std::unique_ptr<OnDiskGraphDB> UpstreamDB, FaultInPolicy Policy) {
  constexpr uint64_t MB = 1024ull * 1024ull;
  constexpr uint64_t GB = 1024ull * 1024ull * 1024ull;

  uint64_t MaxDataPoolSize = 24 * GB;
    MaxIndexSize = 1 * GB;
    MaxDataPoolSize = 2 * GB;

      return CustomSize.takeError();
      MaxIndexSize = MaxDataPoolSize = **CustomSize;

  std::optional<OnDiskTrieRawHashMap> Index;
          HashByteSize * CHAR_BIT,
          sizeof(TrieRecord), MaxIndexSize,
    return std::move(E);

  uint32_t UserHeaderSize = sizeof(std::atomic<uint64_t>);

  std::optional<OnDiskDataAllocator> DataPool;
          MaxDataPoolSize, MB, UserHeaderSize,
          [](void *UserHeaderPtr) {
            new (UserHeaderPtr) std::atomic<uint64_t>(0);
          .moveInto(DataPool))
    return std::move(E);
  if (DataPool->getUserHeader().size() != UserHeaderSize)
        "unexpected user header in '" + DataPoolPath +

  return std::unique_ptr<OnDiskGraphDB>(
      new OnDiskGraphDB(AbsPath, std::move(*Index), std::move(*DataPool),
                        std::move(UpstreamDB), Policy));

    std::unique_ptr<OnDiskGraphDB> UpstreamDB, FaultInPolicy Policy)
    : RootPath(RootPath.str()), UpstreamDB(std::move(UpstreamDB)),
  StandaloneData = new StandaloneDataMapTy();

  delete static_cast<StandaloneDataMapTy *>(StandaloneData);
  struct UpstreamCursor {

  auto enqueueNode = [&](ObjectID PrimaryID, std::optional<ObjectHandle> Node) {
    auto Refs = UpstreamDB->getObjectRefs(*Node);
        (size_t)std::distance(Refs.begin(), Refs.end()),
        Refs.begin(), Refs.end()});

  enqueueNode(PrimaryID, UpstreamNode);

  while (!CursorStack.empty()) {
    UpstreamCursor &Cur = CursorStack.back();
    if (Cur.RefI == Cur.RefE) {
      assert(PrimaryNodesStack.size() >= Cur.RefsCount + 1);
      ObjectID PrimaryID = *(PrimaryNodesStack.end() - Cur.RefsCount - 1);
      auto PrimaryRefs = ArrayRef(PrimaryNodesStack)
                             .slice(PrimaryNodesStack.size() - Cur.RefsCount);
      auto Data = UpstreamDB->getObjectData(Cur.Node);
      PrimaryNodesStack.truncate(PrimaryNodesStack.size() - Cur.RefsCount);

    ObjectID UpstreamID = *(Cur.RefI++);
    auto PrimaryID = getReference(UpstreamDB->getDigest(UpstreamID));
      return PrimaryID.takeError();
      enqueueNode(*PrimaryID, std::nullopt);

    Expected<std::optional<ObjectHandle>> UpstreamNode =
        UpstreamDB->load(UpstreamID);
    enqueueNode(*PrimaryID, *UpstreamNode);
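// A reduced sketch of the importFullTree strategy: walk the upstream DAG with
// an explicit cursor stack instead of recursion, and only "store" a node after
// all of its children have been imported, so references in the primary store
// always point at materialized objects. The graph, ids, and Store callback
// here are toy stand-ins; the real code tracks cursors and primary ids on
// parallel stacks. Assumes the input is a DAG (no cycles).
#include <cstddef>
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>

namespace sketch {
struct UpstreamNode {
  std::string Data;
  std::vector<int> Refs; // child node ids
};
using UpstreamGraph = std::unordered_map<int, UpstreamNode>;

// Store receives a node id, its data, and its (already imported) children,
// bottom-up.
inline void importFullTree(
    const UpstreamGraph &G, int RootID,
    const std::function<void(int, const std::string &,
                             const std::vector<int> &)> &Store) {
  struct Cursor {
    int ID;
    std::size_t NextRef = 0;
  };
  std::vector<Cursor> Stack{{RootID}};
  std::unordered_map<int, bool> Imported;

  while (!Stack.empty()) {
    Cursor &Cur = Stack.back();
    const UpstreamNode &Node = G.at(Cur.ID);
    if (Cur.NextRef == Node.Refs.size()) {
      // All children handled: safe to publish this node.
      if (!Imported[Cur.ID])
        Store(Cur.ID, Node.Data, Node.Refs);
      Imported[Cur.ID] = true;
      Stack.pop_back();
      continue;
    }
    int Child = Node.Refs[Cur.NextRef++];
    if (!Imported[Child])
      Stack.push_back({Child});
  }
}
} // namespace sketch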
  auto Data = UpstreamDB->getObjectData(UpstreamNode);
  auto UpstreamRefs = UpstreamDB->getObjectRefs(UpstreamNode);
  Refs.reserve(std::distance(UpstreamRefs.begin(), UpstreamRefs.end()));
  for (ObjectID UpstreamRef : UpstreamRefs) {
      return Ref.takeError();

Expected<std::optional<ObjectHandle>>
OnDiskGraphDB::faultInFromUpstream(ObjectID PrimaryID) {
  auto UpstreamID = UpstreamDB->getReference(getDigest(PrimaryID));
    return UpstreamID.takeError();

  Expected<std::optional<ObjectHandle>> UpstreamNode =
      UpstreamDB->load(*UpstreamID);
    return std::nullopt;

               ? importSingleNode(PrimaryID, **UpstreamNode)
               : importFullTree(PrimaryID, **UpstreamNode))
    return std::move(E);
  return load(PrimaryID);