#define DEBUG_TYPE "on-disk-cas"

    return ID.takeError();
                           "corrupt object '" + toHex(*ID) + "'");

  enum class StorageKind : uint8_t {
    StandaloneLeaf0 = 12,

  static StringRef getStandaloneFilePrefix(StorageKind SK) {
    case TrieRecord::StorageKind::Standalone:
    case TrieRecord::StorageKind::StandaloneLeaf:
    case TrieRecord::StorageKind::StandaloneLeaf0:

  enum Limits : int64_t {
    MaxEmbeddedSize = 64LL * 1024LL - 1,

    StorageKind SK = StorageKind::Unknown;

  static uint64_t pack(Data D) {
    assert(D.Offset.get() < (int64_t)(1ULL << 56));
    uint64_t Packed = uint64_t(D.SK) << 56 | D.Offset.get();
    assert(D.SK != StorageKind::Unknown || Packed == 0);
    Data RoundTrip = unpack(Packed);
    assert(D.Offset.get() == RoundTrip.Offset.get());

  static Data unpack(uint64_t Packed) {
    D.SK = (StorageKind)(Packed >> 56);

  TrieRecord() : Storage(0) {}
  Data load() const { return unpack(Storage); }
  bool compare_exchange_strong(Data &Existing, Data New);

  std::atomic<uint64_t> Storage;
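  // Illustrative sketch (not in the original source): pack()/unpack() keep the
  // StorageKind in the top 8 bits and the record's file offset in the low 56
  // bits of a single uint64_t, so a record can be published atomically:
  //
  //   TrieRecord::Data D{TrieRecord::StorageKind::DataPool, FileOffset(0x1000)};
  //   uint64_t Packed = TrieRecord::pack(D); // (uint64_t)D.SK << 56 | 0x1000
  //   TrieRecord::Data RT = TrieRecord::unpack(Packed);
  //   assert(RT.SK == D.SK && RT.Offset.get() == D.Offset.get());
  //
  // Only the 8/56-bit split is taken from pack()/unpack() above; the concrete
  // offset value is made up for the example.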
struct DataRecordHandle {
  enum class NumRefsFlags : uint8_t {
  enum class DataSizeFlags {
  enum class RefKindFlags {

    DataSizeShift = NumRefsShift + NumRefsBits,
    RefKindShift = DataSizeShift + DataSizeBits,

  static_assert(((UINT32_MAX << NumRefsBits) & (uint32_t)NumRefsFlags::Max) ==
  static_assert(((UINT32_MAX << DataSizeBits) & (uint32_t)DataSizeFlags::Max) ==
  static_assert(((UINT32_MAX << RefKindBits) & (uint32_t)RefKindFlags::Max) ==

    NumRefsFlags NumRefs;
    DataSizeFlags DataSize;
    RefKindFlags RefKind;

    static uint64_t pack(LayoutFlags LF) {
      unsigned Packed = ((unsigned)LF.NumRefs << NumRefsShift) |
                        ((unsigned)LF.DataSize << DataSizeShift) |
                        ((unsigned)LF.RefKind << RefKindShift);
      LayoutFlags RoundTrip = unpack(Packed);
      assert(LF.NumRefs == RoundTrip.NumRefs);
      assert(LF.DataSize == RoundTrip.DataSize);
      assert(LF.RefKind == RoundTrip.RefKind);

    static LayoutFlags unpack(uint64_t Storage) {
      assert(Storage <= UINT8_MAX && "Expect storage to fit in a byte");
      LayoutFlags LF;
      LF.NumRefs =
          (NumRefsFlags)((Storage >> NumRefsShift) & ((1U << NumRefsBits) - 1));
      LF.DataSize = (DataSizeFlags)((Storage >> DataSizeShift) &
                                    ((1U << DataSizeBits) - 1));
      LF.RefKind =
          (RefKindFlags)((Storage >> RefKindShift) & ((1U << RefKindBits) - 1));
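    // Illustrative sketch (assumptions flagged inline): all three flag fields
    // fit in a single byte, which is what the assert in unpack() checks; the
    // byte itself ends up in the top byte of Header::Packed via
    // LayoutFlagsShift below. Assuming LayoutFlags is an aggregate with its
    // members in declaration order:
    //
    //   LayoutFlags LF{NumRefsFlags::Uses1B, DataSizeFlags::Uses2B,
    //                  RefKindFlags::InternalRef4B};
    //   uint64_t Byte = LayoutFlags::pack(LF);         // <= UINT8_MAX
    //   assert(LayoutFlags::unpack(Byte).DataSize == DataSizeFlags::Uses2B);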
  using PackTy = uint32_t;
  static constexpr unsigned LayoutFlagsShift =
      (sizeof(PackTy) - 1) * CHAR_BIT;

    InternalRefArrayRef Refs;

  LayoutFlags getLayoutFlags() const {
    return LayoutFlags::unpack(H->Packed >> Header::LayoutFlagsShift);

  void skipDataSize(LayoutFlags LF, int64_t &RelOffset) const;
  uint32_t getNumRefs() const;
  void skipNumRefs(LayoutFlags LF, int64_t &RelOffset) const;
  int64_t getRefsRelOffset() const;
  int64_t getDataRelOffset() const;

  static uint64_t getTotalSize(uint64_t DataRelOffset, uint64_t DataSize) {
    return DataRelOffset + DataSize + 1;

  uint64_t getTotalSize() const {

    explicit Layout(const Input &I);

    uint64_t DataSize = 0;
    uint32_t NumRefs = 0;
    int64_t RefsRelOffset = 0;
    int64_t DataRelOffset = 0;
    uint64_t getTotalSize() const {
      return DataRecordHandle::getTotalSize(DataRelOffset, DataSize);

  InternalRefArrayRef getRefs() const {
    assert(H && "Expected valid handle");
    auto *BeginByte = reinterpret_cast<const char *>(H) + getRefsRelOffset();
    size_t Size = getNumRefs();
      return InternalRefArrayRef();
    if (getLayoutFlags().RefKind == RefKindFlags::InternalRef4B)
      return ArrayRef(reinterpret_cast<const InternalRef4B *>(BeginByte), Size);
    return ArrayRef(reinterpret_cast<const InternalRef *>(BeginByte), Size);

  ArrayRef<char> getData() const {
    assert(H && "Expected valid handle");
    return ArrayRef(reinterpret_cast<const char *>(H) + getDataRelOffset(),
                    getDataSize());

  static DataRecordHandle create(function_ref<char *(size_t Size)> Alloc,
  static Expected<DataRecordHandle>
  createWithError(function_ref<Expected<char *>(size_t Size)> Alloc,
  static DataRecordHandle construct(char *Mem, const Input &I);

  static DataRecordHandle get(const char *Mem) {
    return DataRecordHandle(
        *reinterpret_cast<const DataRecordHandle::Header *>(Mem));

  static Expected<DataRecordHandle>
  getFromDataPool(const OnDiskDataAllocator &Pool, FileOffset Offset);

  explicit operator bool() const { return H; }
  const Header &getHeader() const { return *H; }

  DataRecordHandle() = default;
  explicit DataRecordHandle(const Header &H) : H(&H) {}

  static DataRecordHandle constructImpl(char *Mem, const Input &I,
                                        const Layout &L);

  const Header *H = nullptr;
struct OnDiskContent {
  std::optional<DataRecordHandle> Record;
  std::optional<ArrayRef<char>> Bytes;

class StandaloneDataInMemory {
  OnDiskContent getContent() const;

  StandaloneDataInMemory(std::unique_ptr<sys::fs::mapped_file_region> Region,
                         TrieRecord::StorageKind SK)
      : Region(std::move(Region)), SK(SK) {
    bool IsStandalone = false;
    case TrieRecord::StorageKind::Standalone:
    case TrieRecord::StorageKind::StandaloneLeaf:
    case TrieRecord::StorageKind::StandaloneLeaf0:

  std::unique_ptr<sys::fs::mapped_file_region> Region;
  TrieRecord::StorageKind SK;
template <size_t NumShards> class StandaloneDataMap {
  static_assert(isPowerOf2_64(NumShards), "Expected power of 2");

  uintptr_t insert(ArrayRef<uint8_t> Hash, TrieRecord::StorageKind SK,
                   std::unique_ptr<sys::fs::mapped_file_region> Region);

  const StandaloneDataInMemory *lookup(ArrayRef<uint8_t> Hash) const;
  bool count(ArrayRef<uint8_t> Hash) const { return bool(lookup(Hash)); }

    DenseMap<const uint8_t *, std::unique_ptr<StandaloneDataInMemory>> Map;
    mutable std::mutex Mutex;

  Shard &getShard(ArrayRef<uint8_t> Hash) {
    return const_cast<Shard &>(
        const_cast<const StandaloneDataMap *>(this)->getShard(Hash));
  const Shard &getShard(ArrayRef<uint8_t> Hash) const {
    static_assert(NumShards <= 256, "Expected only 8 bits of shard");
    return Shards[Hash[0] % NumShards];

  Shard Shards[NumShards];

using StandaloneDataMapTy = StandaloneDataMap<16>;
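// Illustrative sketch (not in the original source): the map is sharded by the
// first byte of the object hash, so concurrent inserts and lookups for
// unrelated hashes typically contend on different mutexes. With the 16-shard
// alias above, a hash whose first byte is 0x2A lands in shard 0x2A % 16 == 10:
//
//   StandaloneDataMapTy SDM;
//   uint8_t Hash[20] = {0x2A /* remaining bytes... */};
//   bool Present = SDM.count(Hash); // locks only Shards[10].Mutex
//
// The 20-byte hash length here is an arbitrary choice for the example.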
class InternalRefVector {
  void push_back(InternalRef Ref) {
      return FullRefs.push_back(Ref);
      return SmallRefs.push_back(*Small);
    FullRefs.reserve(SmallRefs.size() + 1);
    for (InternalRef4B Small : SmallRefs)
      FullRefs.push_back(Small);
    FullRefs.push_back(Ref);

  operator InternalRefArrayRef() const {
    assert(SmallRefs.empty() || FullRefs.empty());
    return NeedsFull ? InternalRefArrayRef(FullRefs)
                     : InternalRefArrayRef(SmallRefs);

  bool NeedsFull = false;
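// Reading of push_back() above (its branch conditions are elided in this
// listing, so this is a hedged reconstruction): refs are kept as 4-byte
// InternalRef4B values while every pushed ref still shrinks; the first ref
// that only fits in 8 bytes flips NeedsFull, copies the accumulated SmallRefs
// into FullRefs, and appends there from then on. That is why the conversion
// operator asserts that at most one of the two vectors is non-empty.
//
//   InternalRefVector Refs;
//   Refs.push_back(SmallRef);        // hypothetical 4-byte-encodable ref
//   Refs.push_back(BigRef);          // hypothetical ref needing 8 bytes
//   InternalRefArrayRef All = Refs;  // views whichever vector is in use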
  if (Expected<char *> Mem = Alloc(L.getTotalSize()))
    return constructImpl(*Mem, I, L);
  return Mem.takeError();

template <size_t N>
uintptr_t StandaloneDataMap<N>::insert(
    ArrayRef<uint8_t> Hash, TrieRecord::StorageKind SK,
    std::unique_ptr<sys::fs::mapped_file_region> Region) {
  auto &S = getShard(Hash);
  std::lock_guard<std::mutex> Lock(S.Mutex);
  auto &V = S.Map[Hash.data()];
    V = std::make_unique<StandaloneDataInMemory>(std::move(Region), SK);
  return reinterpret_cast<uintptr_t>(V.get());

template <size_t N>
const StandaloneDataInMemory *
StandaloneDataMap<N>::lookup(ArrayRef<uint8_t> Hash) const {
  auto &S = getShard(Hash);
  std::lock_guard<std::mutex> Lock(S.Mutex);
  auto I = S.Map.find(Hash.data());
  if (I == S.Map.end())
  TempFile(StringRef Name, int FD) : TmpName(std::string(Name)), FD(FD) {}

  TempFile(TempFile &&Other) { *this = std::move(Other); }
  TempFile &operator=(TempFile &&Other) {
    TmpName = std::move(Other.TmpName);

  Error keep(const Twine &Name);

class MappedTempFile {
  char *data() const { return Map.data(); }
  size_t size() const { return Map.size(); }

    assert(Map && "Map already destroyed");
    return Temp.discard();

  Error keep(const Twine &Name) {
    assert(Map && "Map already destroyed");
    return Temp.keep(Name);

  MappedTempFile(TempFile Temp, sys::fs::mapped_file_region Map)

  sys::fs::mapped_file_region Map;

  std::error_code RemoveEC;

  TempFile Ret(ResultPath, FD);
  return std::move(Ret);
bool TrieRecord::compare_exchange_strong(Data &Existing, Data New) {
  uint64_t ExistingPacked = pack(Existing);
  uint64_t NewPacked = pack(New);
  if (Storage.compare_exchange_strong(ExistingPacked, NewPacked))
    return true;
  Existing = unpack(ExistingPacked);
  return false;
}
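// Usage sketch (hedged; mirrors how store() further below publishes an
// object): a writer prepares the data first, then attempts a single atomic
// publish of the packed {StorageKind, Offset} pair; on failure Existing is
// refreshed so the caller can see what another thread already committed.
//
//   TrieRecord::Data Existing;                       // Unknown / zero
//   TrieRecord::Data New{TrieRecord::StorageKind::DataPool, PoolOffset};
//   if (!R.compare_exchange_strong(Existing, New)) {
//     // Lost the race: Existing now holds the winner's kind and offset.
//   }
//
// R and PoolOffset are hypothetical names for this example.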
  auto HeaderData = Pool.get(Offset, sizeof(DataRecordHandle::Header));
    return HeaderData.takeError();
  auto Record = DataRecordHandle::get(HeaderData->data());
        "data record span past the end of the data pool");
DataRecordHandle DataRecordHandle::constructImpl(char *Mem, const Input &I,
                                                 const Layout &L) {
  char *Next = Mem + sizeof(Header);

  Header::PackTy Packed = 0;
  Packed |= LayoutFlags::pack(L.Flags) << Header::LayoutFlagsShift;

  switch (L.Flags.DataSize) {
  case DataSizeFlags::Uses1B:
    assert(I.Data.size() <= UINT8_MAX);
    Packed |= (Header::PackTy)I.Data.size()
              << ((sizeof(Packed) - 2) * CHAR_BIT);
  case DataSizeFlags::Uses2B:
    assert(I.Data.size() <= UINT16_MAX);
    Packed |= (Header::PackTy)I.Data.size()
              << ((sizeof(Packed) - 4) * CHAR_BIT);
  case DataSizeFlags::Uses4B:
  case DataSizeFlags::Uses8B:

  switch (L.Flags.NumRefs) {
  case NumRefsFlags::Uses0B:
  case NumRefsFlags::Uses1B:
    assert(I.Refs.size() <= UINT8_MAX);
    Packed |= (Header::PackTy)I.Refs.size()
              << ((sizeof(Packed) - 2) * CHAR_BIT);
  case NumRefsFlags::Uses2B:
    assert(I.Refs.size() <= UINT16_MAX);
    Packed |= (Header::PackTy)I.Refs.size()
              << ((sizeof(Packed) - 4) * CHAR_BIT);
  case NumRefsFlags::Uses4B:
  case NumRefsFlags::Uses8B:

  if (!I.Refs.empty()) {
    assert((L.Flags.RefKind == RefKindFlags::InternalRef4B) == I.Refs.is4B());
    ArrayRef<uint8_t> RefsBuffer = I.Refs.getBuffer();

  Next[I.Data.size()] = 0;

  Header *H = new (Mem) Header{Packed};
  DataRecordHandle Record(*H);
  assert(Record.getData() == I.Data);
  assert(Record.getNumRefs() == I.Refs.size());
  assert(Record.getRefs() == I.Refs);
  assert(Record.getLayoutFlags().DataSize == L.Flags.DataSize);
  assert(Record.getLayoutFlags().NumRefs == L.Flags.NumRefs);
  assert(Record.getLayoutFlags().RefKind == L.Flags.RefKind);
DataRecordHandle::Layout::Layout(const Input &I) {
  uint64_t RelOffset = sizeof(Header);

  NumRefs = I.Refs.size();
  Flags.RefKind =
      I.Refs.is4B() ? RefKindFlags::InternalRef4B : RefKindFlags::InternalRef;

  if (DataSize <= UINT8_MAX && Has1B) {
    Flags.DataSize = DataSizeFlags::Uses1B;
  } else if (DataSize <= UINT16_MAX && Has2B) {
    Flags.DataSize = DataSizeFlags::Uses2B;
  } else if (DataSize <= UINT32_MAX) {
    Flags.DataSize = DataSizeFlags::Uses4B;
    Flags.DataSize = DataSizeFlags::Uses8B;

    Flags.NumRefs = NumRefsFlags::Uses0B;
  } else if (NumRefs <= UINT8_MAX && Has1B) {
    Flags.NumRefs = NumRefsFlags::Uses1B;
  } else if (NumRefs <= UINT16_MAX && Has2B) {
    Flags.NumRefs = NumRefsFlags::Uses2B;
    Flags.NumRefs = NumRefsFlags::Uses4B;

  auto GrowSizeFieldsBy4B = [&]() {
    assert(Flags.NumRefs != NumRefsFlags::Uses8B &&
           "Expected to be able to grow NumRefs8B");
    if (Flags.DataSize < DataSizeFlags::Uses4B)
      Flags.DataSize = DataSizeFlags::Uses4B;
    else if (Flags.DataSize < DataSizeFlags::Uses8B)
      Flags.DataSize = DataSizeFlags::Uses8B;
    else if (Flags.NumRefs < NumRefsFlags::Uses4B)
      Flags.NumRefs = NumRefsFlags::Uses4B;
      Flags.NumRefs = NumRefsFlags::Uses8B;

  if (Flags.RefKind == RefKindFlags::InternalRef) {
      GrowSizeFieldsBy4B();
    RefsRelOffset = RelOffset;
    RelOffset += 8 * NumRefs;
    uint64_t RefListSize = 4 * NumRefs;
      GrowSizeFieldsBy4B();
    RefsRelOffset = RelOffset;
    RelOffset += RefListSize;

  DataRelOffset = RelOffset;
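// Summary of the layout policy above (a hedged reading, since several branch
// conditions are elided in this listing): DataSize and NumRefs are each
// encoded in the smallest field that can hold them, packed directly into the
// header when they fit in 1 or 2 bytes, otherwise stored in a trailing 4- or
// 8-byte field; GrowSizeFieldsBy4B() widens the smallest size field by 4
// bytes when the offsets that follow need realignment. The refs array is then
// placed at RefsRelOffset and the data at DataRelOffset, with getTotalSize()
// reserving one extra byte for the null terminator written by constructImpl().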
uint64_t DataRecordHandle::getDataSize() const {
  int64_t RelOffset = sizeof(Header);
  auto *DataSizePtr = reinterpret_cast<const char *>(H) + RelOffset;
  switch (getLayoutFlags().DataSize) {
  case DataSizeFlags::Uses1B:
    return (H->Packed >> ((sizeof(Header::PackTy) - 2) * CHAR_BIT)) & UINT8_MAX;
  case DataSizeFlags::Uses2B:
    return (H->Packed >> ((sizeof(Header::PackTy) - 4) * CHAR_BIT)) &
           UINT16_MAX;
  case DataSizeFlags::Uses4B:
  case DataSizeFlags::Uses8B:

void DataRecordHandle::skipDataSize(LayoutFlags LF, int64_t &RelOffset) const {
  if (LF.DataSize >= DataSizeFlags::Uses4B)
  if (LF.DataSize >= DataSizeFlags::Uses8B)
uint32_t DataRecordHandle::getNumRefs() const {
  LayoutFlags LF = getLayoutFlags();
  int64_t RelOffset = sizeof(Header);
  skipDataSize(LF, RelOffset);
  auto *NumRefsPtr = reinterpret_cast<const char *>(H) + RelOffset;
  switch (LF.NumRefs) {
  case NumRefsFlags::Uses0B:
  case NumRefsFlags::Uses1B:
    return (H->Packed >> ((sizeof(Header::PackTy) - 2) * CHAR_BIT)) & UINT8_MAX;
  case NumRefsFlags::Uses2B:
    return (H->Packed >> ((sizeof(Header::PackTy) - 4) * CHAR_BIT)) &
           UINT16_MAX;
  case NumRefsFlags::Uses4B:
  case NumRefsFlags::Uses8B:

void DataRecordHandle::skipNumRefs(LayoutFlags LF, int64_t &RelOffset) const {
  if (LF.NumRefs >= NumRefsFlags::Uses4B)
  if (LF.NumRefs >= NumRefsFlags::Uses8B)

int64_t DataRecordHandle::getRefsRelOffset() const {
  LayoutFlags LF = getLayoutFlags();
  int64_t RelOffset = sizeof(Header);
  skipDataSize(LF, RelOffset);
  skipNumRefs(LF, RelOffset);

int64_t DataRecordHandle::getDataRelOffset() const {
  LayoutFlags LF = getLayoutFlags();
  int64_t RelOffset = sizeof(Header);
  skipDataSize(LF, RelOffset);
  skipNumRefs(LF, RelOffset);
  uint32_t RefSize = LF.RefKind == RefKindFlags::InternalRef4B ? 4 : 8;
  RelOffset += RefSize * getNumRefs();
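// Physical record layout implied by the accessors above (a sketch; the 4/8
// byte increments inside skipDataSize()/skipNumRefs() are elided in this
// listing):
//
//   [Header::Packed: layout-flags byte plus any 1/2-byte size fields]
//   [optional 4- or 8-byte DataSize field, when too large for the header]
//   [optional 4- or 8-byte NumRefs field, when too large for the header]
//   [NumRefs x (4 or 8)-byte refs]   <- getRefsRelOffset()
//   [DataSize bytes of data]         <- getDataRelOffset()
//   [one zero byte]                  <- the "+ 1" in getTotalSize()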
  if (auto E = UpstreamDB->validate(Deep, Hasher))

  auto formatError = [&](Twine Msg) {

    if (Record.Data.size() != sizeof(TrieRecord))
      return formatError("wrong data record size");
      return formatError("wrong data record alignment");

    auto *R = reinterpret_cast<const TrieRecord *>(Record.Data.data());
    TrieRecord::Data D = R->load();
    std::unique_ptr<MemoryBuffer> FileBuffer;
      return formatError("invalid record kind value");

    auto I = getIndexProxyFromRef(Ref);
      return I.takeError();

    case TrieRecord::StorageKind::Unknown:
    case TrieRecord::StorageKind::DataPool:
      if (D.Offset.get() <= 0 ||
          D.Offset.get() + sizeof(DataRecordHandle::Header) >= DataPool.size())
        return formatError("datapool record out of bounds");
    case TrieRecord::StorageKind::Standalone:
    case TrieRecord::StorageKind::StandaloneLeaf:
    case TrieRecord::StorageKind::StandaloneLeaf0:
      getStandalonePath(TrieRecord::getStandaloneFilePrefix(D.SK), *I, Path);
        return formatError("record file '" + Path + "' does not exist");
      FileBuffer = std::move(*File);
        return formatError("record file '" + Path + "' does not exist");

    auto dataError = [&](Twine Msg) {
                               "bad data for digest '" + toHex(I->Hash) +

    case TrieRecord::StorageKind::Unknown:
    case TrieRecord::StorageKind::DataPool: {
      auto DataRecord = DataRecordHandle::getFromDataPool(DataPool, D.Offset);
        return dataError(toString(DataRecord.takeError()));
      for (auto InternRef : DataRecord->getRefs()) {
        auto Index = getIndexProxyFromRef(InternRef);
          return Index.takeError();
      StoredData = DataRecord->getData();
    case TrieRecord::StorageKind::Standalone: {
      if (FileBuffer->getBufferSize() < sizeof(DataRecordHandle::Header))
        return dataError("data record is not big enough to read the header");
      auto DataRecord = DataRecordHandle::get(FileBuffer->getBufferStart());
      if (DataRecord.getTotalSize() < FileBuffer->getBufferSize())
            "data record span past the end of the standalone file");
      for (auto InternRef : DataRecord.getRefs()) {
        auto Index = getIndexProxyFromRef(InternRef);
          return Index.takeError();
      StoredData = DataRecord.getData();
    case TrieRecord::StorageKind::StandaloneLeaf:
    case TrieRecord::StorageKind::StandaloneLeaf0: {
      if (D.SK == TrieRecord::StorageKind::StandaloneLeaf0) {
        if (!FileBuffer->getBuffer().ends_with('\0'))
          return dataError("standalone file is not zero terminated");

    Hasher(Refs, StoredData, ComputedHash);
      return dataError("hash mismatch, got '" + toHex(ComputedHash) +
  OS << "on-disk-root-path: " << RootPath << "\n";

    auto *R = reinterpret_cast<const TrieRecord *>(Data.data());
    TrieRecord::Data D = R->load();
    case TrieRecord::StorageKind::Unknown:
    case TrieRecord::StorageKind::DataPool:
    case TrieRecord::StorageKind::Standalone:
      OS << "standalone-data ";
    case TrieRecord::StorageKind::StandaloneLeaf:
      OS << "standalone-leaf ";
    case TrieRecord::StorageKind::StandaloneLeaf0:
      OS << "standalone-leaf+0";
    OS << " Offset=" << (void *)D.Offset.get();

      Pool, [](PoolInfo LHS, PoolInfo RHS) { return LHS.Offset < RHS.Offset; });
  for (PoolInfo PI : Pool) {
    OS << "- addr=" << (void *)PI.Offset << " ";
    auto D = DataRecordHandle::getFromDataPool(DataPool, FileOffset(PI.Offset));
      OS << "error: " << toString(D.takeError());

    OS << "record refs=" << D->getNumRefs() << " data=" << D->getDataSize()
       << " size=" << D->getTotalSize()
       << " end=" << (void *)(PI.Offset + D->getTotalSize()) << "\n";
  auto P = Index.insertLazy(
        new (TentativeValue.Data.data()) TrieRecord();
    return P.takeError();
  assert(*P && "Expected insertion");
  return getIndexProxyFromPointer(*P);

  return IndexProxy{P.getOffset(), P->Hash,
                    *const_cast<TrieRecord *>(
                        reinterpret_cast<const TrieRecord *>(P->Data.data()))};

  auto I = indexHash(Hash);
    return I.takeError();
  return getExternalReference(*I);
ObjectID OnDiskGraphDB::getExternalReference(const IndexProxy &I) {
  return getExternalReference(makeInternalRef(I.Offset));

std::optional<ObjectID>
OnDiskGraphDB::getExistingReference(ArrayRef<uint8_t> Digest,
                                    bool CheckUpstream) {
  auto tryUpstream =
      [&](std::optional<IndexProxy> I) -> std::optional<ObjectID> {
    if (!CheckUpstream || !UpstreamDB)
      return std::nullopt;
    std::optional<ObjectID> UpstreamID =
        UpstreamDB->getExistingReference(Digest);
      return std::nullopt;
      return std::nullopt;
    return getExternalReference(*I);

    return tryUpstream(std::nullopt);
  TrieRecord::Data Obj = I.Ref.load();
  if (Obj.SK == TrieRecord::StorageKind::Unknown)
    return tryUpstream(I);
  return getExternalReference(makeInternalRef(I.Offset));

  auto P = Index.recoverFromFileOffset(Ref.getFileOffset());
    return P.takeError();
  return getIndexProxyFromPointer(*P);
  auto I = getIndexProxyFromRef(Ref);
    return I.takeError();

  auto *SDIM =
      reinterpret_cast<const StandaloneDataInMemory *>(Data & (-1ULL << 1));
  return SDIM->getContent();

  assert(DataHandle.getData().end()[0] == 0 && "Null termination");
  return OnDiskContent{DataHandle, std::nullopt};

    return *Content.Bytes;
  assert(Content.Record && "Expected record or bytes");
  return Content.Record->getData();

  if (std::optional<DataRecordHandle> Record =
    return Record->getRefs();
  return std::nullopt;

  auto I = getIndexProxyFromRef(Ref);
    return I.takeError();

  TrieRecord::Data Object = I->Ref.load();

  if (Object.SK == TrieRecord::StorageKind::Unknown)
    return faultInFromUpstream(ExternalRef);

  if (Object.SK == TrieRecord::StorageKind::DataPool)
  switch (Object.SK) {
  case TrieRecord::StorageKind::Unknown:
  case TrieRecord::StorageKind::DataPool:
  case TrieRecord::StorageKind::Standalone:
  case TrieRecord::StorageKind::StandaloneLeaf0:
  case TrieRecord::StorageKind::StandaloneLeaf:

  getStandalonePath(TrieRecord::getStandaloneFilePrefix(Object.SK), *I, Path);

  auto Region = std::make_unique<sys::fs::mapped_file_region>(

      static_cast<StandaloneDataMapTy *>(StandaloneData)
          ->insert(I->Hash, Object.SK, std::move(Region)));

  auto Presence = getObjectPresence(Ref, true);
    return Presence.takeError();

  switch (*Presence) {
  case ObjectPresence::Missing:
  case ObjectPresence::InPrimaryDB:
  case ObjectPresence::OnlyInUpstreamDB:
    if (auto FaultInResult = faultInFromUpstream(Ref); !FaultInResult)
      return FaultInResult.takeError();
OnDiskGraphDB::getObjectPresence(ObjectID ExternalRef,
                                 bool CheckUpstream) const {
  auto I = getIndexProxyFromRef(Ref);
    return I.takeError();

  TrieRecord::Data Object = I->Ref.load();
  if (Object.SK != TrieRecord::StorageKind::Unknown)
    return ObjectPresence::InPrimaryDB;
  if (!CheckUpstream || !UpstreamDB)
    return ObjectPresence::Missing;
  std::optional<ObjectID> UpstreamID =
      UpstreamDB->getExistingReference(getDigest(*I));
  return UpstreamID.has_value() ? ObjectPresence::OnlyInUpstreamDB
                                : ObjectPresence::Missing;

void OnDiskGraphDB::getStandalonePath(StringRef Prefix, const IndexProxy &I,
  Path.assign(RootPath.begin(), RootPath.end());
OnDiskContent StandaloneDataInMemory::getContent() const {
  case TrieRecord::StorageKind::Standalone:
  case TrieRecord::StorageKind::StandaloneLeaf0:
    Leaf = Leaf0 = true;
  case TrieRecord::StorageKind::StandaloneLeaf:

    assert(Data.drop_back(Leaf0).end()[0] == 0 &&
           "Standalone node data missing null termination");
    return OnDiskContent{std::nullopt,

  DataRecordHandle Record = DataRecordHandle::get(Region->data());
  assert(Record.getData().end()[0] == 0 &&
         "Standalone object record missing null termination for data");
  return OnDiskContent{Record, std::nullopt};

  assert(Size && "Unexpected request for an empty temp file");
    return File.takeError();
  return MappedTempFile(std::move(*File), std::move(Map));
Error OnDiskGraphDB::createStandaloneLeaf(IndexProxy &I, ArrayRef<char> Data) {
  assert(Data.size() > TrieRecord::MaxEmbeddedSize &&
         "Expected a bigger file for external content...");

  TrieRecord::StorageKind SK = Leaf0 ? TrieRecord::StorageKind::StandaloneLeaf0
                                     : TrieRecord::StorageKind::StandaloneLeaf;

  SmallString<256> Path;
  int64_t FileSize = Data.size() + Leaf0;
  getStandalonePath(TrieRecord::getStandaloneFilePrefix(SK), I, Path);
    return File.takeError();

  TrieRecord::Data Existing;
  TrieRecord::Data Leaf{SK, FileOffset()};
  if (I.Ref.compare_exchange_strong(Existing, Leaf)) {
    recordStandaloneSizeIncrease(FileSize);

  if (Existing.SK == TrieRecord::StorageKind::Unknown)

  auto I = getIndexProxyFromRef(getInternalRef(ID));
    return I.takeError();

  TrieRecord::Data Existing = I->Ref.load();
  if (Existing.SK != TrieRecord::StorageKind::Unknown)

  if (Refs.empty() && Data.size() > TrieRecord::MaxEmbeddedSize)
    return createStandaloneLeaf(*I, Data);
  InternalRefVector InternalRefs;
    InternalRefs.push_back(getInternalRef(Ref));

  DataRecordHandle::Input Input{InternalRefs, Data};

  TrieRecord::StorageKind SK = TrieRecord::StorageKind::Unknown;

  std::optional<MappedTempFile> File;
  std::optional<uint64_t> FileSize;
    getStandalonePath(TrieRecord::getStandaloneFilePrefix(
                          TrieRecord::StorageKind::Standalone),
      return std::move(E);
    SK = TrieRecord::StorageKind::Standalone;
    return File->data();

  if (Size <= TrieRecord::MaxEmbeddedSize) {
    SK = TrieRecord::StorageKind::DataPool;
    auto P = DataPool.allocate(Size);
      char *NewAlloc = nullptr;
          P.takeError(), [&](std::unique_ptr<StringError> E) -> Error {
            if (E->convertToErrorCode() == std::errc::not_enough_memory)
              return AllocStandaloneFile(Size).moveInto(NewAlloc);
            return Error(std::move(E));
        return std::move(NewE);
    PoolOffset = P->getOffset();
      dbgs() << "pool-alloc addr=" << (void *)PoolOffset.get()
             << " end=" << (void *)(PoolOffset.get() + Size) << "\n";
    return (*P)->data();
  return AllocStandaloneFile(Size);

  assert(Record.getData().end()[0] == 0 && "Expected null-termination");

  assert(SK != TrieRecord::StorageKind::Unknown);
  assert(bool(File) != bool(PoolOffset) &&
         "Expected either a mapped file or a pooled offset");

  TrieRecord::Data Existing = I->Ref.load();
  TrieRecord::Data NewObject{SK, PoolOffset};
  if (Existing.SK == TrieRecord::StorageKind::Unknown) {
    if (Error E = File->keep(Path))
  if (Existing.SK == TrieRecord::StorageKind::Unknown) {
    if (I->Ref.compare_exchange_strong(Existing, NewObject)) {
      recordStandaloneSizeIncrease(*FileSize);
  if (Existing.SK == TrieRecord::StorageKind::Unknown)
void OnDiskGraphDB::recordStandaloneSizeIncrease(size_t SizeIncrease) {
  standaloneStorageSize().fetch_add(SizeIncrease, std::memory_order_relaxed);

std::atomic<uint64_t> &OnDiskGraphDB::standaloneStorageSize() const {
  assert(UserHeader.size() == sizeof(std::atomic<uint64_t>));
  return *reinterpret_cast<std::atomic<uint64_t> *>(UserHeader.data());

uint64_t OnDiskGraphDB::getStandaloneStorageSize() const {
  return standaloneStorageSize().load(std::memory_order_relaxed);

  return Index.size() + DataPool.size() + getStandaloneStorageSize();

  unsigned IndexPercent = Index.size() * 100ULL / Index.capacity();
  unsigned DataPercent = DataPool.size() * 100ULL / DataPool.capacity();
  return std::max(IndexPercent, DataPercent);
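// Worked example (numbers invented for illustration): with an index file that
// is 512 MB out of a 1 GB capacity (50%) and a data pool that is 1.5 GB out of
// a 2 GB capacity (75%), the utilization computed above is max(50, 75) == 75,
// i.e. the database is considered 75% of the way to its hard storage limit.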
                                       unsigned HashByteSize,
                                       OnDiskGraphDB *UpstreamDB,

  constexpr uint64_t MB = 1024ull * 1024ull;
  constexpr uint64_t GB = 1024ull * 1024ull * 1024ull;

  uint64_t MaxDataPoolSize = 24 * GB;

    MaxIndexSize = 1 * GB;
    MaxDataPoolSize = 2 * GB;

    return CustomSize.takeError();
    MaxIndexSize = MaxDataPoolSize = **CustomSize;

  std::optional<OnDiskTrieRawHashMap> Index;
                                        HashByteSize * CHAR_BIT,
                                        sizeof(TrieRecord), MaxIndexSize,
    return std::move(E);

  uint32_t UserHeaderSize = sizeof(std::atomic<uint64_t>);

  std::optional<OnDiskDataAllocator> DataPool;
          MaxDataPoolSize, MB, UserHeaderSize,
          [](void *UserHeaderPtr) {
            new (UserHeaderPtr) std::atomic<uint64_t>(0);
           .moveInto(DataPool))
    return std::move(E);
  if (DataPool->getUserHeader().size() != UserHeaderSize)
                             "unexpected user header in '" + DataPoolPath +

  return std::unique_ptr<OnDiskGraphDB>(new OnDiskGraphDB(
      AbsPath, std::move(*Index), std::move(*DataPool), UpstreamDB, Policy));

      RootPath(RootPath.str()), UpstreamDB(UpstreamDB), FIPolicy(Policy) {

  StandaloneData = new StandaloneDataMapTy();

  delete static_cast<StandaloneDataMapTy *>(StandaloneData);
  struct UpstreamCursor {

  auto enqueueNode = [&](ObjectID PrimaryID, std::optional<ObjectHandle> Node) {

  enqueueNode(PrimaryID, UpstreamNode);

  while (!CursorStack.empty()) {
    UpstreamCursor &Cur = CursorStack.back();

    if (Cur.RefI == Cur.RefE) {
      assert(PrimaryNodesStack.size() >= Cur.RefsCount + 1);
      ObjectID PrimaryID = *(PrimaryNodesStack.end() - Cur.RefsCount - 1);
      auto PrimaryRefs = ArrayRef(PrimaryNodesStack)
                             .slice(PrimaryNodesStack.size() - Cur.RefsCount);
      PrimaryNodesStack.truncate(PrimaryNodesStack.size() - Cur.RefsCount);

    ObjectID UpstreamID = *(Cur.RefI++);
    auto PrimaryID = getReference(UpstreamDB->getDigest(UpstreamID));
      return PrimaryID.takeError();
      enqueueNode(*PrimaryID, std::nullopt);

    Expected<std::optional<ObjectHandle>> UpstreamNode =
        UpstreamDB->load(UpstreamID);
    enqueueNode(*PrimaryID, *UpstreamNode);

  auto Data = UpstreamDB->getObjectData(UpstreamNode);
  auto UpstreamRefs = UpstreamDB->getObjectRefs(UpstreamNode);
  for (ObjectID UpstreamRef : UpstreamRefs) {
      return Ref.takeError();

Expected<std::optional<ObjectHandle>>
OnDiskGraphDB::faultInFromUpstream(ObjectID PrimaryID) {
    return std::nullopt;

  auto UpstreamID = UpstreamDB->getReference(getDigest(PrimaryID));
    return UpstreamID.takeError();

  Expected<std::optional<ObjectHandle>> UpstreamNode =
      UpstreamDB->load(*UpstreamID);
    return std::nullopt;

             ? importSingleNode(PrimaryID, **UpstreamNode)
             : importFullTree(PrimaryID, **UpstreamNode))
    return std::move(E);
  return load(PrimaryID);