#define DEBUG_TYPE "atomic-expand"
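// AtomicExpandImpl is the worker behind this pass: it rewrites atomic loads,
// stores, RMW and cmpxchg instructions that the target cannot lower directly
// into LL/SC loops, cmpxchg loops, masked intrinsics, or __atomic_* libcalls,
// as selected by the TargetLowering hooks.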
class AtomicExpandImpl {
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  void expandAtomicOpToLLSC(/* ... */);
  void expandPartwordAtomicRMW(/* ... */);
  static Value *insertRMWCmpXchgLoop(/* ... */);
  void expandAtomicLoadToLibcall(LoadInst *LI);
  void expandAtomicStoreToLibcall(StoreInst *LI);
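// IRBuilder variant used for every replacement sequence below: it propagates
// metadata that stays valid on the new instructions (MMRAs in particular) and
// switches to constrained-FP mode when the enclosing function is strictfp.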
struct ReplacementIRBuilder
    : IRBuilder<InstSimplifyFolder, IRBuilderCallbackInserter> {

  if (BB->getParent()->getAttributes().hasFnAttr(Attribute::StrictFP))
    this->setIsFPConstrained(true);

  MMRAMD = I->getMetadata(LLVMContext::MD_mmra);

  I->setMetadata(LLVMContext::MD_mmra, MMRAMD);
};

char AtomicExpandLegacy::ID = 0;
169 "Expand Atomic instructions",
false,
false)
  return DL.getTypeStoreSize(LI->getType());

  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
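// copyMetadataForAtomic: when rewriting one atomic into another, copy only
// the metadata kinds that remain valid on the new operation (debug location,
// TBAA, alias scopes, access groups, MMRAs, ...); anything else is dropped.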
  Source.getAllMetadata(MD);

  for (auto [ID, N] : MD) {
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_noalias_addrspace:
    case LLVMContext::MD_access_group:
    case LLVMContext::MD_mmra:

    else if (ID == Ctx.getMDKindID("amdgpu.no.fine.grained.memory"))
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  Align Alignment = I->getAlign();
  unsigned Size = getAtomicOpSize(I);
  return Alignment >= Size &&
         Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}
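// processAtomicInstr: per-instruction driver. It classifies the instruction
// (load / store / RMW / cmpxchg), falls back to __atomic_* libcalls for
// unsupported sizes or alignments, casts FP/pointer operands to integers
// where the target asks for it, inserts fences where required, and finally
// dispatches to the expansion strategy the target selects.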
bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
  auto *LI = dyn_cast<LoadInst>(I);
  auto *SI = dyn_cast<StoreInst>(I);
  auto *RMWI = dyn_cast<AtomicRMWInst>(I);
  auto *CASI = dyn_cast<AtomicCmpXchgInst>(I);

  bool MadeChange = false;

  expandAtomicLoadToLibcall(LI);

  if (TLI->shouldCastAtomicLoadInIR(LI) ==
      TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
    I = LI = convertAtomicLoadToIntegerType(LI);

  expandAtomicStoreToLibcall(SI);

  if (TLI->shouldCastAtomicStoreInIR(SI) ==
      TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
    I = SI = convertAtomicStoreToIntegerType(SI);

  expandAtomicRMWToLibcall(RMWI);

  if (TLI->shouldCastAtomicRMWIInIR(RMWI) ==
      TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
    I = RMWI = convertAtomicXchgToIntegerType(RMWI);

  expandAtomicCASToLibcall(CASI);

  if (CASI->getCompareOperand()->getType()->isPointerTy()) {
    I = CASI = convertCmpXchgToIntegerType(CASI);
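  // If the target wants explicit fences, the access itself is downgraded to
  // monotonic ordering and bracketed with emitLeadingFence/emitTrailingFence;
  // the fences then carry the acquire/release semantics.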
  if (TLI->shouldInsertFencesForAtomic(I)) {
    auto FenceOrdering = AtomicOrdering::Monotonic;

    FenceOrdering = LI->getOrdering();
    LI->setOrdering(AtomicOrdering::Monotonic);

    FenceOrdering = SI->getOrdering();
    SI->setOrdering(AtomicOrdering::Monotonic);

    FenceOrdering = RMWI->getOrdering();
    RMWI->setOrdering(AtomicOrdering::Monotonic);

    TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
        TargetLoweringBase::AtomicExpansionKind::None &&

    FenceOrdering = CASI->getMergedOrdering();
    CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
    CASI->setFailureOrdering(AtomicOrdering::Monotonic);

    if (FenceOrdering != AtomicOrdering::Monotonic) {
      MadeChange |= bracketInstWithFences(I, FenceOrdering);
    }
  } else if (I->hasAtomicStore() &&
             TLI->shouldInsertTrailingFenceForAtomicStore(I)) {
    auto FenceOrdering = AtomicOrdering::Monotonic;

    FenceOrdering = SI->getOrdering();

    FenceOrdering = RMWI->getOrdering();
    else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI) !=
                         TargetLoweringBase::AtomicExpansionKind::LLSC)
      FenceOrdering = CASI->getSuccessOrdering();

    if (auto TrailingFence =
            TLI->emitTrailingFence(Builder, I, FenceOrdering)) {
      TrailingFence->moveAfter(I);

  MadeChange |= tryExpandAtomicLoad(LI);

  MadeChange |= tryExpandAtomicStore(SI);

  if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {

  MadeChange |= tryExpandAtomicRMW(RMWI);

  MadeChange |= tryExpandAtomicCmpXchg(CASI);
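// run(): skip functions whose subtarget disables atomic expansion, cache TLI
// and the DataLayout, then walk every instruction and hand each one to
// processAtomicInstr.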
  const auto *Subtarget = TM->getSubtargetImpl(F);
  if (!Subtarget->enableAtomicExpand())
    return false;

  TLI = Subtarget->getTargetLowering();
  DL = &F.getDataLayout();

  bool MadeChange = false;

  if (processAtomicInstr(&Inst)) {

bool AtomicExpandLegacy::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();

  return AE.run(F, TM);

  return new AtomicExpandLegacy();

  bool Changed = AE.run(F, TM);
bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
                                             AtomicOrdering Order) {
  ReplacementIRBuilder Builder(I, *DL);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}
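// Targets that only handle integer atomics get FP and pointer accesses
// rewritten as integer loads/stores of the same store size, with bitcast
// (or ptrtoint/inttoptr) fixups around them.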
  EVT VT = TLI->getMemValueType(DL, T);

  Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(LI, *DL);

  auto *NewLI = Builder.CreateLoad(NewTy, Addr);
  NewLI->setAlignment(LI->getAlign());

  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());

AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
      getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(RMWI, *DL);

      ? Builder.CreatePtrToInt(Val, NewTy)
      : Builder.CreateBitCast(Val, NewTy);

  LLVM_DEBUG(dbgs() << "Replaced " << *RMWI << " with " << *NewRMWI << "\n");

      ? Builder.CreateIntToPtr(NewRMWI, RMWI->getType())
      : Builder.CreateBitCast(NewRMWI, RMWI->getType());
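// tryExpandAtomicLoad: dispatch on shouldExpandAtomicLoadInIR(). Depending
// on the target this emits an LL/SC loop, a bare load-linked, or a
// cmpxchg-based load, or leaves the instruction alone.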
bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(/* ... */);
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  }
}

bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
  switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
  case TargetLoweringBase::AtomicExpansionKind::Expand:
    expandAtomicStore(SI);
  }
}

bool AtomicExpandImpl::expandAtomicLoadToLL(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);

  Value *Val = TLI->emitLoadLinked(Builder, LI->getType(),
                                   LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

bool AtomicExpandImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);

  Value *Pair = Builder.CreateAtomicCmpXchg(/* ... */);
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  ReplacementIRBuilder Builder(SI, *DL);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();

void AtomicExpandImpl::expandAtomicStore(StoreInst *SI) {
  ReplacementIRBuilder Builder(SI, *DL);

      SI->getAlign(), RMWOrdering);
  SI->eraseFromParent();

  tryExpandAtomicRMW(AI);

      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
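// tryExpandAtomicRMW: pick the expansion for an atomicrmw. Operations
// narrower than the minimum cmpxchg width take the partword path; otherwise
// the target can request an LL/SC loop, a cmpxchg loop, masked or bit-test
// intrinsics, or a fully custom expansion.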
bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  if (ValueSize < MinCASSize) {
    expandPartwordAtomicRMW(AI,

  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  if (ValueSize < MinCASSize) {
    expandPartwordAtomicRMW(AI,

           << "A compare and swap loop was generated for an atomic "
           /* ... */
           << MemScope << " memory scope";

  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  if (ValueSize < MinCASSize) {

  tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));

  expandAtomicRMWToMaskedIntrinsic(AI);

  TLI->emitBitTestAtomicRMWIntrinsic(AI);

  TLI->emitCmpArithAtomicRMWIntrinsic(AI);

  TLI->emitExpandAtomicRMW(AI);
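// Partword lowering: to update an i8/i16 that the hardware can only access
// as a full word, compute the containing aligned word, a shift amount, and
// Mask/Inv_Mask. PartwordMaskValues carries these intermediates. For
// example, an i8 atomicrmw on a word-only LL/SC target becomes a loop over
// the surrounding 32-bit word that only changes the selected byte.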
struct PartwordMaskValues {
  Type *WordType = nullptr;
  Type *IntValueType = nullptr;
  Value *AlignedAddr = nullptr;
  Align AlignedAddrAlignment;
  Value *ShiftAmt = nullptr;
  Value *Inv_Mask = nullptr;

  auto PrintObj = [&O](auto *V) {

  O << "PartwordMaskValues {\n";
  PrintObj(PMV.WordType);
  PrintObj(PMV.ValueType);
  O << "  AlignedAddr: ";
  PrintObj(PMV.AlignedAddr);
  O << "  AlignedAddrAlignment: " << PMV.AlignedAddrAlignment.value() << '\n';
  PrintObj(PMV.ShiftAmt);
  PrintObj(PMV.Inv_Mask);

                                           unsigned MinWordSize) {
  PartwordMaskValues PMV;

  unsigned ValueSize = DL.getTypeStoreSize(ValueType);

  PMV.ValueType = PMV.IntValueType = ValueType;
  if (PMV.ValueType->isFloatingPointTy() || PMV.ValueType->isVectorTy())

  PMV.WordType = MinWordSize > ValueSize ? Type::getIntNTy(Ctx, MinWordSize * 8)
                                         : PMV.IntValueType;

  if (PMV.ValueType == PMV.WordType) {
    PMV.AlignedAddr = Addr;
    PMV.AlignedAddrAlignment = AddrAlign;
    PMV.ShiftAmt = ConstantInt::get(PMV.ValueType, 0);
    PMV.Mask = ConstantInt::get(PMV.ValueType, ~0, true);

  PMV.AlignedAddrAlignment = Align(MinWordSize);

  assert(ValueSize < MinWordSize);

  IntegerType *IntTy = DL.getIndexType(Ctx, PtrTy->getAddressSpace());

  if (AddrAlign < MinWordSize) {
    PMV.AlignedAddr = Builder.CreateIntrinsic(
        Intrinsic::ptrmask, {PtrTy, IntTy},
        {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))}, nullptr,
        "AlignedAddr");

    PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
  } else {
    PMV.AlignedAddr = Addr;
  }

  if (DL.isLittleEndian()) {
    PMV.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    PMV.ShiftAmt = Builder.CreateShl(
        Builder.CreateXor(PtrLSB, MinWordSize - ValueSize), 3);
  }

  PMV.ShiftAmt = Builder.CreateTrunc(PMV.ShiftAmt, PMV.WordType, "ShiftAmt");
  PMV.Mask = Builder.CreateShl(
      ConstantInt::get(PMV.WordType, (1 << (ValueSize * 8)) - 1), PMV.ShiftAmt,
      "Mask");
  PMV.Inv_Mask = Builder.CreateNot(PMV.Mask, "Inv_Mask");
                                  const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return WideWord;

                                 Value *Updated,
                                 const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  assert(Updated->getType() == PMV.ValueType && "Value type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return Updated;

      Builder.CreateShl(ZExt, PMV.ShiftAmt, "shifted", true);

                             const PartwordMaskValues &PMV) {
  Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);

  Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
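// expandPartwordAtomicRMW: and/or/xor are simply widened to the full word
// (widenPartwordAtomicRMW); every other operation runs a cmpxchg or LL/SC
// loop over the aligned word, applying the operation to the masked lane only.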
void AtomicExpandImpl::expandPartwordAtomicRMW(
    tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));

  ReplacementIRBuilder Builder(AI, *DL);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted = nullptr;
    ValOperand_Shifted =
        Builder.CreateShl(Builder.CreateZExt(ValOp, PMV.WordType), PMV.ShiftAmt,
                          "ValOperand_Shifted");

    OldResult = insertRMWCmpXchgLoop(
        Builder, PMV.WordType, PMV.AlignedAddr, PMV.AlignedAddrAlignment,

    OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                  PMV.AlignedAddrAlignment, MemOpOrder,

  ReplacementIRBuilder Builder(AI, *DL);

         "Unable to widen operation");

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(ValOperand_Shifted, PMV.Inv_Mask, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

      Op, PMV.AlignedAddr, NewOperand, PMV.AlignedAddrAlignment,
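// expandPartwordCmpXchg: compare-and-swap the full aligned word, but only
// the masked lane participates in the comparison; bits outside the lane are
// re-merged from the last loaded value on each failed attempt so unrelated
// stores to neighboring bytes cannot make the loop spin forever.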
  ReplacementIRBuilder Builder(CI, *DL);

  std::prev(BB->end())->eraseFromParent();

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *NewVal_Shifted =
      Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
  Value *Cmp_Shifted =
      Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);

  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);

  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);

  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
      PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, PMV.AlignedAddrAlignment,

  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);
void AtomicExpandImpl::expandAtomicOpToLLSC(
  ReplacementIRBuilder Builder(I, *DL);
  Value *Loaded = insertRMWLLSCLoop(Builder, ResultType, Addr, AddrAlign,
                                    MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}

void AtomicExpandImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

    CastOp = Instruction::SExt;

      PMV.ShiftAmt, "ValOperand_Shifted");
  Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
      Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,

void AtomicExpandImpl::expandAtomicCmpXchgToMaskedIntrinsic(
  ReplacementIRBuilder Builder(CI, *DL);

      CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
      Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,

      CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
Value *AtomicExpandImpl::insertRMWLLSCLoop(
  assert(AddrAlign >= F->getDataLayout().getTypeStoreSize(ResultTy) &&
         "Expected at least natural alignment at this point.");

  std::prev(BB->end())->eraseFromParent();

  Value *Loaded = TLI->emitLoadLinked(Builder, ResultTy, Addr, MemOpOrder);
  Value *NewVal = PerformOp(Builder, Loaded);
  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);

      M->getDataLayout());

  ReplacementIRBuilder Builder(CI, *DL);

  LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);

  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&

  bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();

  auto ReleasedLoadBB =
  auto ReleasingStoreBB =

  ReplacementIRBuilder Builder(CI, *DL);

  std::prev(BB->end())->eraseFromParent();

  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *UnreleasedLoad =
      TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
  Value *UnreleasedLoadExtract =

  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);

  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);

  PHINode *LoadedTryStore =
      Builder.CreatePHI(PMV.WordType, 2, "loaded.trystore");
  LoadedTryStore->addIncoming(UnreleasedLoad, ReleasingStoreBB);
  Value *NewValueInsert =
  Value *StoreSuccess = TLI->emitStoreConditional(Builder, NewValueInsert,
                                                  PMV.AlignedAddr, MemOpOrder);

  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
                       CI->isWeak() ? FailureBB : RetryBB);

  if (HasReleasedLoadBB) {
      TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);

    Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);

    LoadedTryStore->addIncoming(SecondLoad, ReleasedLoadBB);

  if (ShouldInsertFencesForAtomic ||
      TLI->shouldInsertTrailingFenceForAtomicStore(CI))
    TLI->emitTrailingFence(Builder, CI, SuccessOrder);

  LoadedNoStore->addIncoming(UnreleasedLoad, StartBB);
  if (HasReleasedLoadBB)
    LoadedNoStore->addIncoming(SecondLoad, ReleasedLoadBB);

  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LoadedFailure->addIncoming(LoadedNoStore, NoStoreBB);
  LoadedFailure->addIncoming(LoadedTryStore, TryStoreBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, FailureOrder);

  LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
  LoadedExit->addIncoming(LoadedFailure, FailureBB);

  Value *LoadedFull = LoadedExit;

         "weird extraction from { iN, i1 }");

  for (auto *EV : PrunedInsts)
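// isIdempotentRMW / simplifyIdempotentRMW: an atomicrmw whose operand makes
// it a no-op (e.g. `or X, 0` or `and X, -1`) only matters for its ordering
// effect, so the target may lower it to a fenced atomic load instead.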
bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
    return C->isMinusOne();

bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
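// insertRMWCmpXchgLoop: generic compare-exchange retry loop; roughly:
//
//     %init_loaded = load atomic %addr
//     br label %loop
//   loop:
//     %loaded = phi [ %init_loaded, %entry ], [ %new_loaded, %loop ]
//     %new = some_op %loaded, %incr
//     %pair = cmpxchg %addr, %loaded, %new
//     %new_loaded = extractvalue %pair, 0
//     %success = extractvalue %pair, 1
//     br %success, label %atomicrmw.end, label %loop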
Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
  std::prev(BB->end())->eraseFromParent();

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *NewLoaded = nullptr;
  CreateCmpXchg(Builder, Addr, Loaded, NewVal, AddrAlign,
                /* ... */ SSID, Success, NewLoaded, MetadataSrc);
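// tryExpandAtomicCmpXchg mirrors tryExpandAtomicRMW: partword expansion for
// sizes below the minimum cmpxchg width, then an LL/SC expansion, a masked
// intrinsic, or a custom expansion as the target requests.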
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;

  switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
    if (ValueSize < MinCASSize)
      return expandPartwordCmpXchg(CI);
    return expandAtomicCmpXchg(CI);
    expandAtomicCmpXchgToMaskedIntrinsic(CI);
    TLI->emitExpandAtomicCmpXchg(CI);

  Builder.setIsFPConstrained(
      AI->getFunction()->hasFnAttribute(Attribute::StrictFP));
  Value *Loaded = AtomicExpandImpl::insertRMWCmpXchgLoop(
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
  unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
  return Alignment >= Size &&
         (Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
         Size <= LargestSize;
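// Libcall lowering: each operation has a table of RTLIB entries, the generic
// call first and then the _1/_2/_4/_8/_16 sized variants that
// canUseSizedAtomicCall selects among.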
void AtomicExpandImpl::expandAtomicLoadToLibcall(LoadInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_LOAD, RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
      RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), nullptr, nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

void AtomicExpandImpl::expandAtomicStoreToLibcall(StoreInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_STORE, RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
      RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getValueOperand(),
      nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_COMPARE_EXCHANGE, RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getNewValOperand(),
      I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
      Libcalls);

  static const RTLIB::Libcall LibcallsXchg[6] = {
      RTLIB::ATOMIC_EXCHANGE, RTLIB::ATOMIC_EXCHANGE_1,
      RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
      RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
  static const RTLIB::Libcall LibcallsAdd[6] = {
      RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_ADD_1,
      RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
      RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
  static const RTLIB::Libcall LibcallsSub[6] = {
      RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_SUB_1,
      RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
      RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
  static const RTLIB::Libcall LibcallsAnd[6] = {
      RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_AND_1,
      RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
      RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
  static const RTLIB::Libcall LibcallsOr[6] = {
      RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_OR_1,
      RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
      RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
  static const RTLIB::Libcall LibcallsXor[6] = {
      RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_XOR_1,
      RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
      RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
  static const RTLIB::Libcall LibcallsNand[6] = {
      RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_NAND_1,
      RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
      RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};

void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
  if (!Libcalls.empty())
    Success = expandAtomicOpToLibcall(
        I, Size, I->getAlign(), I->getPointerOperand(), I->getValOperand(),
        nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

      Addr, Loaded, NewVal, Alignment, MemOpOrder,

  expandAtomicCASToLibcall(Pair);
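// expandAtomicOpToLibcall: generic lowering to the libatomic ABI. Sized
// variants pass the value in registers; the generic __atomic_* calls take
// pointers, so operands are spilled to allocas in the entry block and the
// result is loaded back out.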
bool AtomicExpandImpl::expandAtomicOpToLibcall(
  IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());

  const Align AllocaAlignment = DL.getPrefTypeAlign(SizedIntTy);

  if (UseSizedLibcall) {
    switch (Size) {
    case 1:
      RTLibType = Libcalls[1];
      break;
    case 2:
      RTLibType = Libcalls[2];
      break;
    case 4:
      RTLibType = Libcalls[3];
      break;
    case 8:
      RTLibType = Libcalls[4];
      break;
    case 16:
      RTLibType = Libcalls[5];
      break;
    }
  } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
    RTLibType = Libcalls[0];
  }

  if (!TLI->getLibcallName(RTLibType)) {

  if (!UseSizedLibcall) {
    Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));

  Value *PtrVal = PointerOperand;
  Args.push_back(PtrVal);

  AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
  Args.push_back(AllocaCASExpected);

  if (UseSizedLibcall) {
    Args.push_back(IntValue);
  } else {
    AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
    Args.push_back(AllocaValue);
  }

  if (!CASExpected && HasResult && !UseSizedLibcall) {
    AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
    Args.push_back(AllocaResult);
  }

  Args.push_back(OrderingVal);

  Args.push_back(Ordering2Val);

  } else if (HasResult && UseSizedLibcall)
    ResultTy = SizedIntTy;

  for (Value *Arg : Args)

  M->getOrInsertFunction(TLI->getLibcallName(RTLibType), FnType, Attr);
  Call->setAttributes(Attr);

  if (ValueOperand && !UseSizedLibcall)

  Type *FinalResultTy = I->getType();

      CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
    I->replaceAllUsesWith(V);
  } else if (HasResult) {
    if (UseSizedLibcall)
    I->replaceAllUsesWith(V);

  I->eraseFromParent();