#define DEBUG_TYPE "atomic-expand"
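// At the IR level this pass replaces each atomic instruction with a
// target-legal form: fences bracketing a weaker operation, an LL/SC or
// cmpxchg loop, a masked word-sized operation for partword atomics, or an
// __atomic_* libcall, as selected by the TargetLowering hooks queried below.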
class AtomicExpandImpl {
  const TargetLowering *TLI = nullptr;
  const LibcallLoweringInfo *LibcallLowering = nullptr;
  const DataLayout *DL = nullptr;
  // ...

  void handleFailure(Instruction &FailedInst, const Twine &Msg,
                     Instruction *DiagnosticInst = nullptr) const {
    LLVMContext &Ctx = FailedInst.getContext();
    Ctx.emitError(DiagnosticInst ? DiagnosticInst : &FailedInst, Msg);

    if (!FailedInst.getType()->isVoidTy())
      FailedInst.replaceAllUsesWith(PoisonValue::get(FailedInst.getType()));
    FailedInst.eraseFromParent();
  }
  template <typename Inst>
  void handleUnsupportedAtomicSize(Inst *I, const Twine &AtomicOpName,
                                   Instruction *DiagnosticInst = nullptr);
  bool tryInsertTrailingSeqCstFence(Instruction *AtomicI);
  template <typename AtomicInst>
  bool tryInsertFencesForAtomic(AtomicInst *AtomicI,
                                bool OrderingRequiresFence /*...*/);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  // ...
  void expandAtomicOpToLLSC(
      Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder,
      function_ref<Value *(IRBuilderBase &, Value *)> PerformOp);
  void expandPartwordAtomicRMW(
      AtomicRMWInst *I, TargetLoweringBase::AtomicExpansionKind ExpansionKind);
  // ...
  Value *insertRMWCmpXchgLoop(
      IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder, SyncScope::ID SSID, bool IsVolatile,
      function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
      CreateCmpXchgInstFun CreateCmpXchg, Instruction *MetadataSrc);
  // ...
  void expandAtomicLoadToLibcall(LoadInst *LI);
  void expandAtomicStoreToLibcall(StoreInst *SI);
  // ...
  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I,
                                const Twine &AtomicOpName = "cmpxchg",
                                Instruction *DiagnosticInst = nullptr);
  // ...
  bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                CreateCmpXchgInstFun CreateCmpXchg);
};

// ...
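// All IR rewriting in this pass goes through this builder: it folds new
// instructions with InstSimplifyFolder, copies !pcsections metadata from the
// instruction being replaced, propagates MMRA metadata onto every inserted
// instruction, and switches to constrained-FP mode inside strictfp functions.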
struct ReplacementIRBuilder
    : IRBuilder<InstSimplifyFolder, IRBuilderCallbackInserter> {
  MDNode *MMRAMD = nullptr;

  explicit ReplacementIRBuilder(Instruction *I, const DataLayout &DL) /*...*/ {
    SetInsertPoint(I);
    this->CollectMetadataToCopy(I, {LLVMContext::MD_pcsections});
    if (BB->getParent()->getAttributes().hasFnAttr(Attribute::StrictFP))
      this->setIsFPConstrained(true);

    MMRAMD = I->getMetadata(LLVMContext::MD_mmra);
  }

  void addMMRAMD(Instruction *I) {
    if (canInstructionHaveMMRAs(*I))
      I->setMetadata(LLVMContext::MD_mmra, MMRAMD);
  }
};
char AtomicExpandLegacy::ID = 0;

INITIALIZE_PASS_BEGIN(AtomicExpandLegacy, DEBUG_TYPE,
                      "Expand Atomic instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(LibcallLoweringInfoWrapper)
INITIALIZE_PASS_END(AtomicExpandLegacy, DEBUG_TYPE,
                    "Expand Atomic instructions", false, false)
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
  const DataLayout &DL = RMWI->getDataLayout();
  return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
  const DataLayout &DL = CASI->getDataLayout();
  return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}
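// Copy metadata that's safe to preserve when widening atomics.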
static void copyMetadataForAtomic(Instruction &Dest,
                                  const Instruction &Source) {
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  Source.getAllMetadata(MD);
  LLVMContext &Ctx = Dest.getContext();

  for (auto [ID, N] : MD) {
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_noalias_addrspace:
    case LLVMContext::MD_access_group:
    case LLVMContext::MD_mmra:
      Dest.setMetadata(ID, N);
      break;
    default:
      if (ID == Ctx.getMDKindID("amdgpu.no.remote.memory"))
        Dest.setMetadata(ID, N);
      else if (ID == Ctx.getMDKindID("amdgpu.no.fine.grained.memory"))
        Dest.setMetadata(ID, N);
      break;
    }
  }
}
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  unsigned MaxSize = TLI->getMaxAtomicSizeInBitsSupported() / 8;
  Align Alignment = I->getAlign();
  return Alignment >= Size && Size <= MaxSize;
}
template <typename Inst>
static void writeUnsupportedAtomicSizeReason(const TargetLowering *TLI,
                                             Inst *I, raw_ostream &OS) {
  unsigned Size = getAtomicOpSize(I);
  unsigned MaxSize = TLI->getMaxAtomicSizeInBitsSupported() / 8;
  Align Alignment = I->getAlign();
  bool NeedSeparator = false;

  if (Alignment < Size) {
    OS << "instruction alignment " << Alignment.value()
       << " is smaller than the required " << Size
       << "-byte alignment for this atomic operation";
    NeedSeparator = true;
  }

  if (Size > MaxSize) {
    if (NeedSeparator)
      OS << "; ";
    OS << "target supports atomics up to " << MaxSize
       << " bytes, but this atomic accesses " << Size << " bytes";
  }
}
template <typename Inst>
void AtomicExpandImpl::handleUnsupportedAtomicSize(
    Inst *I, const Twine &AtomicOpName, Instruction *DiagnosticInst) {
  SmallString<128> FailureReason;
  raw_svector_ostream OS(FailureReason);
  writeUnsupportedAtomicSizeReason(TLI, I, OS);
  handleFailure(*I, Twine("unsupported ") + AtomicOpName + ": " + FailureReason,
                DiagnosticInst);
}
bool AtomicExpandImpl::tryInsertTrailingSeqCstFence(Instruction *AtomicI) {
  // ...
  ReplacementIRBuilder Builder(AtomicI, *DL);
  if (Instruction *TrailingFence = TLI->emitTrailingFence(
          Builder, AtomicI, AtomicOrdering::SequentiallyConsistent)) {
    TrailingFence->moveAfter(AtomicI);
    return true;
  }
  return false;
}
template <typename AtomicInst>
bool AtomicExpandImpl::tryInsertFencesForAtomic(AtomicInst *AtomicI,
                                                bool OrderingRequiresFence
                                                /*...*/) {
  // ...
  if (OrderingRequiresFence && ShouldInsertFences) {
    AtomicOrdering FenceOrdering = AtomicI->getOrdering();
    AtomicI->setOrdering(NewOrdering);
    return bracketInstWithFences(AtomicI, FenceOrdering);
  }

  if (!ShouldInsertFences)
    return tryInsertTrailingSeqCstFence(AtomicI);
  return false;
}
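// Dispatch one atomic instruction: loads, stores, atomicrmw, and cmpxchg each
// get a size/alignment check (falling back to a libcall), an optional cast to
// an equivalent integer form, fence insertion, and finally the target-chosen
// expansion. Returns true if anything was changed.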
bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!atomicSizeSupported(TLI, LI)) {
      expandAtomicLoadToLibcall(LI);
      return true;
    }

    bool MadeChange = false;
    if (TLI->shouldCastAtomicLoadInIR(LI) ==
        TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      LI = convertAtomicLoadToIntegerType(LI);
      MadeChange = true;
    }
    // ...
    MadeChange |= tryInsertFencesForAtomic(LI /*...*/);
    MadeChange |= tryExpandAtomicLoad(LI);
    return MadeChange;
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!atomicSizeSupported(TLI, SI)) {
      expandAtomicStoreToLibcall(SI);
      return true;
    }

    bool MadeChange = false;
    if (TLI->shouldCastAtomicStoreInIR(SI) ==
        TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      SI = convertAtomicStoreToIntegerType(SI);
      MadeChange = true;
    }
    // ...
    MadeChange |= tryInsertFencesForAtomic(SI /*...*/);
    MadeChange |= tryExpandAtomicStore(SI);
    return MadeChange;
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    if (!atomicSizeSupported(TLI, RMWI)) {
      expandAtomicRMWToLibcall(RMWI);
      return true;
    }

    bool MadeChange = false;
    if (TLI->shouldCastAtomicRMWIInIR(RMWI) ==
        TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      RMWI = convertAtomicXchgToIntegerType(RMWI);
      MadeChange = true;
    }
    // ...
    MadeChange |= tryInsertFencesForAtomic(RMWI /*...*/);
    // ...
    MadeChange |= (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) ||
                  tryExpandAtomicRMW(RMWI);
    return MadeChange;
  }

  if (auto *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!atomicSizeSupported(TLI, CASI)) {
      expandAtomicCASToLibcall(CASI);
      return true;
    }

    bool MadeChange = false;
    if (CASI->getCompareOperand()->getType()->isPointerTy()) {
      CASI = convertCmpXchgToIntegerType(CASI);
      MadeChange = true;
    }

    auto CmpXchgExpansion = TLI->shouldExpandAtomicCmpXchgInIR(CASI);
    if (CmpXchgExpansion == TargetLoweringBase::AtomicExpansionKind::None &&
        TLI->shouldInsertFencesForAtomic(CASI)) {
      // ... (split the fences out and run the cmpxchg at a weaker ordering)
      CASI->setSuccessOrdering(CASOrdering);
      CASI->setFailureOrdering(CASOrdering);
      MadeChange |= bracketInstWithFences(CASI, FenceOrdering);
    } else if (CmpXchgExpansion !=
               TargetLoweringBase::AtomicExpansionKind::LLSC) {
      MadeChange |= tryInsertTrailingSeqCstFence(CASI);
    }

    MadeChange |= tryExpandAtomicCmpXchg(CASI);
    return MadeChange;
  }

  return false;
}
bool AtomicExpandImpl::run(
    Function &F, const LibcallLoweringModuleAnalysisResult &LibcallResult,
    const TargetMachine *TM) {
  const auto *Subtarget = TM->getSubtargetImpl(F);
  if (!Subtarget->enableAtomicExpand())
    return false;

  TLI = Subtarget->getTargetLowering();
  LibcallLowering = &LibcallResult.getLibcallLowering(*Subtarget);
  DL = &F.getDataLayout();

  bool MadeChange = false;
  // ...
  for (Instruction &Inst : make_early_inc_range(instructions(F)))
    if (processAtomicInstr(&Inst)) {
      MadeChange = true;
      // ...
    }
  // ...
  return MadeChange;
}
bool AtomicExpandLegacy::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;
  auto *TM = &TPC->getTM<TargetMachine>();

  const LibcallLoweringModuleAnalysisResult &LibcallResult =
      getAnalysis<LibcallLoweringInfoWrapper>().getResult(*F.getParent());

  AtomicExpandImpl AE;
  return AE.run(F, LibcallResult, TM);
}

FunctionPass *llvm::createAtomicExpandLegacyPass() {
  return new AtomicExpandLegacy();
}
PreservedAnalyses AtomicExpandPass::run(Function &F,
                                        FunctionAnalysisManager &FAM) {
  // ...
  if (!LibcallResult) {
    F.getContext().emitError(Twine("'") + /*...*/ "' analysis required");
    return PreservedAnalyses::all();
  }
  // ...
  AtomicExpandImpl AE;
  bool Changed = AE.run(F, *LibcallResult, TM);
  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}
bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
                                             AtomicOrdering Order) {
  ReplacementIRBuilder Builder(I, *DL);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence)
    TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}
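// Convert an atomic load of a non-integral type to an integer load of the
// equivalent bitwidth, bitcasting the result back afterwards; later expansion
// code then only has to deal with integer loads.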
LoadInst *AtomicExpandImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(LI, *DL);

  Value *Addr = LI->getPointerOperand();

  auto *NewLI = Builder.CreateLoad(NewTy, Addr);
  NewLI->setAlignment(LI->getAlign());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}
AtomicRMWInst *
AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
  auto *M = RMWI->getModule();
  Type *NewTy =
      getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(RMWI, *DL);

  Value *Addr = RMWI->getPointerOperand();
  Value *Val = RMWI->getValOperand();
  Value *NewVal = Val->getType()->isPointerTy()
                      ? Builder.CreatePtrToInt(Val, NewTy)
                      : Builder.CreateBitCast(Val, NewTy);

  auto *NewRMWI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, Addr, NewVal, RMWI->getAlign(), RMWI->getOrdering(),
      RMWI->getSyncScopeID());
  NewRMWI->setVolatile(RMWI->isVolatile());
  LLVM_DEBUG(dbgs() << "Replaced " << *RMWI << " with " << *NewRMWI << "\n");

  Value *NewRVal = RMWI->getType()->isPointerTy()
                       ? Builder.CreateIntToPtr(NewRMWI, RMWI->getType())
                       : Builder.CreateBitCast(NewRMWI, RMWI->getType());
  RMWI->replaceAllUsesWith(NewRVal);
  RMWI->eraseFromParent();
  return NewRMWI;
}
bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getAlign(),
        LI->getOrdering(),
        [](IRBuilderBase &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    LI->setAtomic(AtomicOrdering::NotAtomic);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
    TLI->emitExpandAtomicLoad(LI);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
  }
}
bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
  switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
    TLI->emitExpandAtomicStore(SI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::Expand:
    expandAtomicStoreToXChg(SI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    SI->setAtomic(AtomicOrdering::NotAtomic);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicStore");
  }
}
bool AtomicExpandImpl::expandAtomicLoadToLL(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd.
  Value *Val = TLI->emitLoadLinked(Builder, LI->getType(),
                                   LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}
bool AtomicExpandImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);
  AtomicOrdering Order = LI->getOrdering();
  if (Order == AtomicOrdering::Unordered)
    Order = AtomicOrdering::Monotonic;

  Value *Addr = LI->getPointerOperand();
  Type *Ty = LI->getType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, LI->getAlign(), Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
  ReplacementIRBuilder Builder(SI, *DL);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();

  StoreInst *NewSI = Builder.CreateStore(NewVal, Addr);
  NewSI->setAlignment(SI->getAlign());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}
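// Lower an atomic store to an atomicrmw xchg whose result is dead; the
// backend can pattern-match the swap directly, or the generic expansions
// below will take the xchg apart further.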
void AtomicExpandImpl::expandAtomicStoreToXChg(StoreInst *SI) {
  ReplacementIRBuilder Builder(SI, *DL);
  AtomicOrdering Ordering = SI->getOrdering();
  assert(Ordering != AtomicOrdering::NotAtomic);
  AtomicOrdering RMWOrdering = Ordering == AtomicOrdering::Unordered
                                   ? AtomicOrdering::Monotonic
                                   : Ordering;
  AtomicRMWInst *AI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
      SI->getAlign(), RMWOrdering);
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  tryExpandAtomicRMW(AI);
}
static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal, Align AddrAlign,
                                 AtomicOrdering MemOpOrder, SyncScope::ID SSID,
                                 bool IsVolatile, Value *&Success,
                                 Value *&NewLoaded, Instruction *MetadataSrc) {
  Type *OrigTy = NewVal->getType();

  // FP and vector payloads have to round-trip through an integer for the
  // cmpxchg itself.
  bool NeedBitcast = OrigTy->isFloatingPointTy() || OrigTy->isVectorTy();
  if (NeedBitcast) {
    IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
    NewVal = Builder.CreateBitCast(NewVal, IntTy);
    Loaded = Builder.CreateBitCast(Loaded, IntTy);
  }

  AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
  if (MetadataSrc)
    copyMetadataForAtomic(*Pair, *MetadataSrc);
  Pair->setVolatile(IsVolatile);

  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

  if (NeedBitcast)
    NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
}
bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  LLVMContext &Ctx = AI->getModule()->getContext();
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::LLSC);
    } else {
      auto PerformOp = [&](IRBuilderBase &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      };
      expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
                           AI->getAlign(), AI->getOrdering(), PerformOp);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::CmpXChg);
    } else {
      SmallVector<StringRef> SSNs;
      Ctx.getSyncScopeNames(SSNs);
      auto MemScope = SSNs[AI->getSyncScopeID()].empty()
                          ? "system"
                          : SSNs[AI->getSyncScopeID()];
      OptimizationRemarkEmitter ORE(AI->getFunction());
      ORE.emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "Passed", AI)
               << "A compare and swap loop was generated for an atomic "
               << AtomicRMWInst::getOperationName(AI->getOperation())
               << " operation at " << MemScope << " memory scope";
      });
      expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      AtomicRMWInst::BinOp Op = AI->getOperation();
      // Widen And/Or/Xor and give the target another chance at expanding it.
      if (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) {
        tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
        return true;
      }
    }
    expandAtomicRMWToMaskedIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::BitTestIntrinsic: {
    TLI->emitBitTestAtomicRMWIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpArithIntrinsic: {
    TLI->emitCmpArithAtomicRMWIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    return lowerAtomicRMWInst(AI);
  case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
    TLI->emitExpandAtomicRMW(AI);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
  }
}
struct PartwordMaskValues {
  Type *WordType = nullptr;
  Type *ValueType = nullptr;
  Type *IntValueType = nullptr;
  Value *AlignedAddr = nullptr;
  Align AlignedAddrAlignment;
  Value *ShiftAmt = nullptr;
  Value *Mask = nullptr;
  Value *Inv_Mask = nullptr;
};
raw_ostream &operator<<(raw_ostream &O, const PartwordMaskValues &PMV) {
  auto PrintObj = [&O](auto *V) {
    if (V)
      O << *V;
    else
      O << "nullptr";
    O << '\n';
  };
  O << "PartwordMaskValues {\n";
  O << "  WordType: ";
  PrintObj(PMV.WordType);
  O << "  ValueType: ";
  PrintObj(PMV.ValueType);
  O << "  AlignedAddr: ";
  PrintObj(PMV.AlignedAddr);
  O << "  AlignedAddrAlignment: " << PMV.AlignedAddrAlignment.value() << '\n';
  O << "  ShiftAmt: ";
  PrintObj(PMV.ShiftAmt);
  O << "  Mask: ";
  PrintObj(PMV.Mask);
  O << "  Inv_Mask: ";
  PrintObj(PMV.Inv_Mask);
  O << "}\n";
  return O;
}
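// This is a helper function which builds instructions to provide values
// necessary for partword atomic operations. It takes an incoming address,
// Addr, and ValueType, and returns the relevant word-aligned address, the
// amount by which the value must be shifted to land in the addressed byte
// lanes, and the masks that select (Mask) or clear (Inv_Mask) those lanes.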
static PartwordMaskValues createMaskInstrs(IRBuilderBase &Builder,
                                           Instruction *I, Type *ValueType,
                                           Value *Addr, Align AddrAlign,
                                           unsigned MinWordSize) {
  PartwordMaskValues PMV;

  Module *M = I->getModule();
  LLVMContext &Ctx = M->getContext();
  const DataLayout &DL = M->getDataLayout();
  unsigned ValueSize = DL.getTypeStoreSize(ValueType);

  PMV.ValueType = PMV.IntValueType = ValueType;
  if (PMV.ValueType->isFloatingPointTy() || PMV.ValueType->isVectorTy())
    PMV.IntValueType =
        Type::getIntNTy(Ctx, ValueType->getPrimitiveSizeInBits());

  PMV.WordType = MinWordSize > ValueSize ? Type::getIntNTy(Ctx, MinWordSize * 8)
                                         : ValueType;
  if (PMV.ValueType == PMV.WordType) {
    PMV.AlignedAddr = Addr;
    PMV.AlignedAddrAlignment = AddrAlign;
    PMV.ShiftAmt = ConstantInt::get(PMV.ValueType, 0);
    PMV.Mask = ConstantInt::get(PMV.ValueType, ~0, /*isSigned*/ true);
    return PMV;
  }

  PMV.AlignedAddrAlignment = Align(MinWordSize);

  assert(ValueSize < MinWordSize);

  PointerType *PtrTy = cast<PointerType>(Addr->getType());
  IntegerType *IntTy = DL.getIndexType(Ctx, PtrTy->getAddressSpace());
  Value *PtrLSB;

  if (AddrAlign < MinWordSize) {
    PMV.AlignedAddr = Builder.CreateIntrinsic(
        Intrinsic::ptrmask, {PtrTy, IntTy},
        {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))},
        nullptr, "AlignedAddr");

    Value *AddrInt = Builder.CreatePtrToInt(Addr, IntTy);
    PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
  } else {
    // If the alignment is high enough, the low bits are known zero.
    PMV.AlignedAddr = Addr;
    PtrLSB = Constant::getNullValue(IntTy);
  }

  if (DL.isLittleEndian()) {
    // turn bytes to bits
    PMV.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    // turn bytes to bits and invert byte order
    PMV.ShiftAmt = Builder.CreateShl(
        Builder.CreateXor(PtrLSB, MinWordSize - ValueSize), 3);
  }

  PMV.ShiftAmt = Builder.CreateTrunc(PMV.ShiftAmt, PMV.WordType, "ShiftAmt");
  PMV.Mask = Builder.CreateShl(
      ConstantInt::get(PMV.WordType, (1 << (ValueSize * 8)) - 1), PMV.ShiftAmt,
      "Mask");

  PMV.Inv_Mask = Builder.CreateNot(PMV.Mask, "Inv_Mask");
  return PMV;
}
static Value *extractMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                 const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return WideWord;

  Value *Shift = Builder.CreateLShr(WideWord, PMV.ShiftAmt, "shifted");
  Value *Trunc = Builder.CreateTrunc(Shift, PMV.IntValueType, "extracted");
  return Builder.CreateBitCast(Trunc, PMV.ValueType);
}
static Value *insertMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                Value *Updated,
                                const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  assert(Updated->getType() == PMV.ValueType && "Value type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return Updated;

  Updated = Builder.CreateBitCast(Updated, PMV.IntValueType);

  Value *ZExt = Builder.CreateZExt(Updated, PMV.WordType, "extended");
  Value *Shift =
      Builder.CreateShl(ZExt, PMV.ShiftAmt, "shifted", /*HasNUW*/ true);
  Value *And = Builder.CreateAnd(WideWord, PMV.Inv_Mask, "unmasked");
  Value *Or = Builder.CreateOr(And, Shift, "inserted");
  return Or;
}
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilderBase &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
    return FinalVal;
  }
  // ...
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Nand: {
    // The arithmetic result has to be masked back into place within the word.
    Value *NewVal = buildAtomicRMWValue(Op, Builder, Loaded, Shifted_Inc);
    Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
    return FinalVal;
  }
  // ... (min/max and the FP operations extract the value with
  // extractMaskedValue, operate on Inc, and re-insert the result)
  }
}
void AtomicExpandImpl::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
  AtomicRMWInst::BinOp Op = AI->getOperation();
  // Widen And/Or/Xor and give the target another chance at expanding it.
  if (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
      Op == AtomicRMWInst::And) {
    tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
    return;
  }

  AtomicOrdering MemOpOrder = AI->getOrdering();
  SyncScope::ID SSID = AI->getSyncScopeID();

  ReplacementIRBuilder Builder(AI, *DL);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted = nullptr;
  if (Op == AtomicRMWInst::Xchg || Op == AtomicRMWInst::Add ||
      Op == AtomicRMWInst::Sub || Op == AtomicRMWInst::Nand) {
    Value *ValOp = Builder.CreateBitCast(AI->getValOperand(), PMV.IntValueType);
    ValOperand_Shifted =
        Builder.CreateShl(Builder.CreateZExt(ValOp, PMV.WordType), PMV.ShiftAmt,
                          "ValOperand_Shifted");
  }

  auto PerformPartwordOp = [&](IRBuilderBase &Builder, Value *Loaded) {
    return performMaskedAtomicOp(Op, Builder, Loaded, ValOperand_Shifted,
                                 AI->getValOperand(), PMV);
  };

  Value *OldResult;
  if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
    OldResult = insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                     PMV.AlignedAddrAlignment, MemOpOrder, SSID,
                                     AI->isVolatile(), PerformPartwordOp,
                                     createCmpXchgInstFun, /*MetadataSrc=*/AI);
  } else {
    assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
    OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                  PMV.AlignedAddrAlignment, MemOpOrder,
                                  PerformPartwordOp);
  }

  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
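// Widen a partword and/or/xor to the containing word: And gets the inverse
// mask OR'd into its operand (so the untouched lanes stay set), while Or and
// Xor can use the zero-extended, shifted operand directly.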
AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);
  AtomicRMWInst::BinOp Op = AI->getOperation();

  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) &&
         "Unable to widen operation");

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  Value *NewOperand;

  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(ValOperand_Shifted, PMV.Inv_Mask, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

  AtomicRMWInst *NewAI = Builder.CreateAtomicRMW(
      Op, PMV.AlignedAddr, NewOperand, PMV.AlignedAddrAlignment,
      AI->getOrdering(), AI->getSyncScopeID());

  copyMetadataForAtomic(*NewAI, *AI);

  Value *FinalOldResult = extractMaskedValue(Builder, NewAI, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
  return NewAI;
}
bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // The basic idea: expand a cmpxchg of a small type into a word-sized
  // cmpxchg loop on the containing word, retrying whenever only the bytes
  // *outside* the operand changed underneath us.
  // ...
  Value *Cmp = CI->getCompareOperand();
  Value *NewVal = CI->getNewValOperand();

  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = BB->getContext();

  BasicBlock *EndBB =
      BB->splitBasicBlock(CI->getIterator(), "partword.cmpxchg.end");
  auto FailureBB =
      BasicBlock::Create(Ctx, "partword.cmpxchg.failure", F, EndBB);
  auto LoopBB = BasicBlock::Create(Ctx, "partword.cmpxchg.loop", F, FailureBB);

  ReplacementIRBuilder Builder(CI, *DL);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(),
                       CI->getPointerOperand(), CI->getAlign(),
                       TLI->getMinCmpXchgSizeInBits() / 8);

  // Shift the incoming values over, into the right location in the word.
  Value *NewVal_Shifted =
      Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
  Value *Cmp_Shifted =
      Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);

  // Load the entire current word, and mask out the bytes being replaced.
  LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
  Builder.CreateBr(LoopBB);

  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);
  // ...
  processAtomicInstr(InitLoaded);
  // ...

  // Mask/Or the expected and new values into place in the full word.
  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
  AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
      PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, PMV.AlignedAddrAlignment,
      CI->getSuccessOrdering(), CI->getFailureOrdering(),
      CI->getSyncScopeID());
  // ...
  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Success = Builder.CreateExtractValue(NewCI, 1);
  Builder.CreateCondBr(Success, EndBB, FailureBB);

  Builder.SetInsertPoint(FailureBB);
  // If the untouched bytes of the word changed, another thread raced us;
  // retry with the newly observed value of those bytes.
  Value *OldVal_MaskOut = Builder.CreateAnd(OldVal, PMV.Inv_Mask);
  // ...
  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);
  // ... (extract the final {value, success} pair and replace all uses of CI)
  return true;
}
void AtomicExpandImpl::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
  ReplacementIRBuilder Builder(I, *DL);
  Value *Loaded = insertRMWLLSCLoop(Builder, ResultType, Addr, AddrAlign,
                                    MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}
void AtomicExpandImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // Signed min/max need a sign-extended operand so that the target intrinsic
  // compares the value correctly within the wider word.
  Instruction::CastOps CastOp = Instruction::ZExt;
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max)
    CastOp = Instruction::SExt;

  Value *ValOperand_Shifted = Builder.CreateShl(
      Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
      PMV.ShiftAmt, "ValOperand_Shifted");
  Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
      Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
      AI->getOrdering());
  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
void AtomicExpandImpl::expandAtomicCmpXchgToMaskedIntrinsic(
    AtomicCmpXchgInst *CI) {
  ReplacementIRBuilder Builder(CI, *DL);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *CmpVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
      "CmpVal_Shifted");
  Value *NewVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
      "NewVal_Shifted");
  Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
      Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
      CI->getMergedOrdering());
  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = PoisonValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Value *Success = Builder.CreateICmpEQ(
      CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
}
Value *AtomicExpandImpl::insertRMWLLSCLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  assert(AddrAlign >= F->getDataLayout().getTypeStoreSize(ResultTy) &&
         "Expected at least natural alignment at this point.");

  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, ResultTy, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return Loaded;
}
AtomicCmpXchgInst *
AtomicExpandImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *M = CI->getModule();
  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            M->getDataLayout());

  ReplacementIRBuilder Builder(CI, *DL);

  Value *Addr = CI->getPointerOperand();
  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
  Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);

  auto *NewCI = Builder.CreateAtomicCmpXchg(
      Addr, NewCmp, NewNewVal, CI->getAlign(), CI->getSuccessOrdering(),
      CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  NewCI->setWeak(CI->isWeak());
  LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Succ = Builder.CreateExtractValue(NewCI, 1);

  OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());

  Value *Res = PoisonValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, OldVal, 0);
  Res = Builder.CreateInsertValue(Res, Succ, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return NewCI;
}
bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If shouldInsertFencesForAtomic() returns true, the target wants the
  // cmpxchg run at Monotonic ordering, bracketed by explicit fences.
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
  AtomicOrdering MemOpOrder = ShouldInsertFencesForAtomic
                                  ? AtomicOrdering::Monotonic
                                  : CI->getMergedOrdering();

  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
                           SuccessOrder != AtomicOrdering::Monotonic &&
                           SuccessOrder != AtomicOrdering::Acquire &&
                           !F->hasMinSize();

  // There's no overhead for sinking the release barrier in a weak cmpxchg, so
  // do it even on minsize.
  bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();

  // ... (see the full source for the diagram of the CFG produced below)

  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto ReleasedLoadBB =
      BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
  auto TryStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
  auto ReleasingStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
  auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);

  ReplacementIRBuilder Builder(CI, *DL);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
  Builder.CreateBr(StartBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(StartBB);
  Value *UnreleasedLoad =
      TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
  Value *UnreleasedLoadExtract =
      extractMaskedValue(Builder, UnreleasedLoad, PMV);
  Value *ShouldStore = Builder.CreateICmpEQ(
      UnreleasedLoadExtract, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past the release fence (if it exists).
  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB,
                       MDBuilder(F->getContext()).createLikelyBranchWeights());

  Builder.SetInsertPoint(ReleasingStoreBB);
  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(TryStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  PHINode *LoadedTryStore =
      Builder.CreatePHI(PMV.WordType, 2, "loaded.trystore");
  LoadedTryStore->addIncoming(UnreleasedLoad, ReleasingStoreBB);
  Value *NewValueInsert =
      insertMaskedValue(Builder, LoadedTryStore, CI->getNewValOperand(), PMV);
  Value *StoreSuccess = TLI->emitStoreConditional(Builder, NewValueInsert,
                                                  PMV.AlignedAddr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : RetryBB,
                       MDBuilder(F->getContext()).createLikelyBranchWeights());

  Builder.SetInsertPoint(ReleasedLoadBB);
  Value *SecondLoad;
  if (HasReleasedLoadBB) {
    SecondLoad =
        TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
    Value *SecondLoadExtract = extractMaskedValue(Builder, SecondLoad, PMV);
    ShouldStore = Builder.CreateICmpEQ(SecondLoadExtract,
                                       CI->getCompareOperand(), "should_store");
    Builder.CreateCondBr(
        ShouldStore, TryStoreBB, NoStoreBB,
        MDBuilder(F->getContext()).createLikelyBranchWeights());
    // Update PHI node in TryStoreBB.
    LoadedTryStore->addIncoming(SecondLoad, ReleasedLoadBB);
  } else
    Builder.CreateUnreachable();

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  if (ShouldInsertFencesForAtomic ||
      TLI->shouldInsertTrailingSeqCstFenceForAtomicStore(CI))
    TLI->emitTrailingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  PHINode *LoadedNoStore =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.nostore");
  LoadedNoStore->addIncoming(UnreleasedLoad, StartBB);
  if (HasReleasedLoadBB)
    LoadedNoStore->addIncoming(SecondLoad, ReleasedLoadBB);

  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  PHINode *LoadedFailure =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.failure");
  LoadedFailure->addIncoming(LoadedNoStore, NoStoreBB);
  if (CI->isWeak())
    LoadedFailure->addIncoming(LoadedTryStore, TryStoreBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, FailureOrder);
  Builder.CreateBr(ExitBB);

  // Finally, we have control flow based knowledge of whether the cmpxchg
  // succeeded or not, so expose it to later instructions with PHIs.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *LoadedExit =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.exit");
  LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
  LoadedExit->addIncoming(LoadedFailure, FailureBB);
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2, "success");
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  Value *LoadedFull = LoadedExit;
  Builder.SetInsertPoint(ExitBB, std::next(Success->getIterator()));
  Value *Loaded = extractMaskedValue(Builder, LoadedFull, PMV);

  // Look for any users of the cmpxchg that are just extracting the loaded
  // value or the success flag, and rewire them to the CFG-derived values.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto *User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;
    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");
    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);
    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto *EV : PrunedInsts)
    EV->eraseFromParent();
  // ...
  return true;
}
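// Return true if and only if the given atomicrmw cannot modify the memory
// location it references: e.g. add/sub/or/xor of 0, and of -1, min of the
// maximum value, max of the minimum value.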
bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto *C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  switch (RMWI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  case AtomicRMWInst::Min:
    return C->isMaxValue(true);
  case AtomicRMWInst::Max:
    return C->isMinValue(true);
  case AtomicRMWInst::UMin:
    return C->isMaxValue(false);
  case AtomicRMWInst::UMax:
    return C->isMinValue(false);
  default:
    return false;
  }
}
bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto *ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}
Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder, SyncScope::ID SSID, bool IsVolatile,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
    CreateCmpXchgInstFun CreateCmpXchg, Instruction *MetadataSrc) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), and we want a load. It's easiest to just remove the branch
  // entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateAlignedLoad(ResultTy, Addr, AddrAlign);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  if (TLI->shouldIssueAtomicLoadForAtomicEmulationLoop()) {
    InitLoaded->setAtomic(AtomicOrdering::Monotonic, SSID);
    // ...
    processAtomicInstr(InitLoaded);
  }

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, AddrAlign,
                MemOpOrder == AtomicOrdering::Unordered
                    ? AtomicOrdering::Monotonic
                    : MemOpOrder,
                SSID, IsVolatile, Success, NewLoaded, MetadataSrc);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return NewLoaded;
}
bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  unsigned ValueSize = getAtomicOpSize(CI);

  switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    if (ValueSize < MinCASSize)
      return expandPartwordCmpXchg(CI);
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    return expandAtomicCmpXchg(CI);
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
    expandAtomicCmpXchgToMaskedIntrinsic(CI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    return lowerAtomicCmpXchgInst(CI);
  case TargetLoweringBase::AtomicExpansionKind::CustomExpand: {
    TLI->emitExpandAtomicCmpXchg(CI);
    return true;
  }
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
  }
}
bool AtomicExpandImpl::expandAtomicRMWToCmpXchg(
    AtomicRMWInst *AI, CreateCmpXchgInstFun CreateCmpXchg) {
  ReplacementIRBuilder Builder(AI, *DL);
  Value *Loaded = AtomicExpandImpl::insertRMWCmpXchgLoop(
      Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
      AI->getOrdering(), AI->getSyncScopeID(), AI->isVolatile(),
      [&](IRBuilderBase &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      },
      CreateCmpXchg, /*MetadataSrc=*/AI);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return true;
}
static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
                                  const DataLayout &DL) {
  // "LargestSize" approximates the largest integer expressible in the
  // target's C ABI; int128 appears to be supported on all 64-bit platforms,
  // otherwise only up to 64-bit integers are.
  unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
  return Alignment >= Size &&
         (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
         Size <= LargestSize;
}
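// The expand*ToLibcall helpers translate an unsupported atomic instruction
// into the matching __atomic_* runtime call; when expansion fails they report
// the unsupported size via handleUnsupportedAtomicSize.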
void AtomicExpandImpl::expandAtomicLoadToLibcall(LoadInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
      RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
  unsigned Size = getAtomicOpSize(I);

  bool Expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), nullptr, nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!Expanded)
    handleUnsupportedAtomicSize(I, "atomic load");
}
void AtomicExpandImpl::expandAtomicStoreToLibcall(StoreInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
      RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
  unsigned Size = getAtomicOpSize(I);

  bool Expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getValueOperand(),
      nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!Expanded)
    handleUnsupportedAtomicSize(I, "atomic store");
}
void AtomicExpandImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I,
                                                const Twine &AtomicOpName,
                                                Instruction *DiagnosticInst) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
  unsigned Size = getAtomicOpSize(I);

  bool Expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getNewValOperand(),
      I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
      Libcalls);
  if (!Expanded)
    handleUnsupportedAtomicSize(I, AtomicOpName, DiagnosticInst);
}
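// In each libcall table below, entry 0 is the generic (unsized) __atomic_*
// call and entries 1..5 are the sized _1/_2/_4/_8/_16 variants;
// UNKNOWN_LIBCALL in slot 0 marks operations with no generic form.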
static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
  static const RTLIB::Libcall LibcallsXchg[6] = {
      RTLIB::ATOMIC_EXCHANGE,   RTLIB::ATOMIC_EXCHANGE_1,
      RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
      RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
  static const RTLIB::Libcall LibcallsAdd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_ADD_1,
      RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
      RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
  static const RTLIB::Libcall LibcallsSub[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_SUB_1,
      RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
      RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
  static const RTLIB::Libcall LibcallsAnd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_AND_1,
      RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
      RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
  static const RTLIB::Libcall LibcallsOr[6] = {
      RTLIB::UNKNOWN_LIBCALL,   RTLIB::ATOMIC_FETCH_OR_1,
      RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
      RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
  static const RTLIB::Libcall LibcallsXor[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_XOR_1,
      RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
      RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
  static const RTLIB::Libcall LibcallsNand[6] = {
      RTLIB::UNKNOWN_LIBCALL,     RTLIB::ATOMIC_FETCH_NAND_1,
      RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
      RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};

  switch (Op) {
  case AtomicRMWInst::Xchg:
    return ArrayRef(LibcallsXchg);
  case AtomicRMWInst::Add:
    return ArrayRef(LibcallsAdd);
  case AtomicRMWInst::Sub:
    return ArrayRef(LibcallsSub);
  case AtomicRMWInst::And:
    return ArrayRef(LibcallsAnd);
  case AtomicRMWInst::Or:
    return ArrayRef(LibcallsOr);
  case AtomicRMWInst::Xor:
    return ArrayRef(LibcallsXor);
  case AtomicRMWInst::Nand:
    return ArrayRef(LibcallsNand);
  default:
    // No atomic libcalls are available for min/max/umin/umax or the FP ops.
    return {};
  }
}
void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
  ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());
  unsigned Size = getAtomicOpSize(I);

  bool Success = false;
  if (!Libcalls.empty())
    Success = expandAtomicOpToLibcall(
        I, Size, I->getAlign(), I->getPointerOperand(), I->getValOperand(),
        nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

  // The expansion failed: either there were no libcalls at all for the
  // operation (min/max), or there were only size-specialized libcalls
  // (add/sub/etc) and we needed a generic. So, expand to a CAS libcall, via a
  // CAS loop, instead.
  if (!Success) {
    expandAtomicRMWToCmpXchg(
        I, [this, I](IRBuilderBase &Builder, Value *Addr, Value *Loaded,
                     Value *NewVal, Align Alignment, AtomicOrdering MemOpOrder,
                     SyncScope::ID SSID, bool IsVolatile, Value *&Success,
                     Value *&NewLoaded, Instruction *MetadataSrc) {
          // Create the CAS instruction normally, and then convert it into
          // the equivalent libcall.
          AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
              Addr, Loaded, NewVal, Alignment, MemOpOrder,
              AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder),
              SSID);
          // ...
          Success = Builder.CreateExtractValue(Pair, 1, "success");
          NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

          expandAtomicCASToLibcall(Pair, /*...*/ I);
        });
  }
}
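// Emit a call to an __atomic_* runtime function for the operation described
// by the arguments. The sized variants pass the payload directly; the generic
// variants pass a byte count plus pointers to temporaries alloca'd in the
// entry block. Returns false when no suitable libcall exists, so the caller
// can report the unsupported atomic.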
bool AtomicExpandImpl::expandAtomicOpToLibcall(
    Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
    Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
    AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
  assert(Libcalls.size() == 6);

  LLVMContext &Ctx = I->getContext();
  Module *M = I->getModule();
  const DataLayout &DL = M->getDataLayout();
  IRBuilder<> Builder(I);
  IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());

  bool UseSizedLibcall = canUseSizedAtomicCall(Size, Alignment, DL);
  Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);

  if (M->getTargetTriple().isOSWindows() && M->getTargetTriple().isX86_64()
      /*...*/) {
    // ...
  }

  const Align AllocaAlignment = DL.getPrefTypeAlign(SizedIntTy);

  // TODO: the "order" argument type is "int", not int32. So getInt32Ty may be
  // wrong if the arch uses e.g. 16-bit ints.
  assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
  Constant *OrderingVal =
      ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
  Constant *Ordering2Val = nullptr;
  if (CASExpected) {
    assert(Ordering2 != AtomicOrdering::NotAtomic && "expect atomic MO");
    Ordering2Val =
        ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering2));
  }
  bool HasResult = I->getType() != Type::getVoidTy(Ctx);

  RTLIB::Libcall RTLibType;
  if (UseSizedLibcall) {
    switch (Size) {
    case 1:
      RTLibType = Libcalls[1];
      break;
    case 2:
      RTLibType = Libcalls[2];
      break;
    case 4:
      RTLibType = Libcalls[3];
      break;
    case 8:
      RTLibType = Libcalls[4];
      break;
    case 16:
      RTLibType = Libcalls[5];
      break;
    }
  } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
    RTLibType = Libcalls[0];
  } else {
    // Can't use sized function, and there's no generic for this operation,
    // so give up.
    return false;
  }

  RTLIB::LibcallImpl LibcallImpl = LibcallLowering->getLibcallImpl(RTLibType);
  if (LibcallImpl == RTLIB::Unsupported) {
    // This target does not implement the requested atomic libcall, so give
    // up.
    return false;
  }

  // ... (see the full source for the exact layout of the sized and generic
  // __atomic_* calls being built here)

  AllocaInst *AllocaCASExpected = nullptr;
  AllocaInst *AllocaValue = nullptr;
  AllocaInst *AllocaResult = nullptr;

  Type *ResultTy;
  SmallVector<Value *, 6> Args;
  AttributeList Attr;

  // 'size' argument.
  if (!UseSizedLibcall) {
    // Note, getIntPtrType is assumed equivalent to size_t.
    Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
  }

  // 'ptr' argument.
  Value *PtrVal = PointerOperand;
  Args.push_back(PtrVal);

  // 'expected' argument, if present.
  if (CASExpected) {
    AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
    AllocaCASExpected->setAlignment(AllocaAlignment);
    Builder.CreateLifetimeStart(AllocaCASExpected);
    Builder.CreateAlignedStore(CASExpected, AllocaCASExpected,
                               AllocaAlignment);
    Args.push_back(AllocaCASExpected);
  }

  // 'val' argument ('desired' for cas), if present.
  if (ValueOperand) {
    if (UseSizedLibcall) {
      Value *IntValue =
          Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
      Args.push_back(IntValue);
    } else {
      AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
      AllocaValue->setAlignment(AllocaAlignment);
      Builder.CreateLifetimeStart(AllocaValue);
      Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
      Args.push_back(AllocaValue);
    }
  }

  // 'ret' argument.
  if (!CASExpected && HasResult && !UseSizedLibcall) {
    AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
    AllocaResult->setAlignment(AllocaAlignment);
    Builder.CreateLifetimeStart(AllocaResult);
    Args.push_back(AllocaResult);
  }

  // 'ordering' ('success_order' for cas) argument.
  Args.push_back(OrderingVal);

  // 'failure_order' argument, if present.
  if (Ordering2Val)
    Args.push_back(Ordering2Val);

  // Now, the return type.
  if (CASExpected) {
    ResultTy = Type::getInt1Ty(Ctx);
    Attr = Attr.addRetAttribute(Ctx, Attribute::ZExt);
  } else if (HasResult && UseSizedLibcall)
    ResultTy = SizedIntTy;
  else
    ResultTy = Type::getVoidTy(Ctx);

  // Done with setting up arguments and return types, create the call:
  SmallVector<Type *, 6> ArgTys;
  for (Value *Arg : Args)
    ArgTys.push_back(Arg->getType());
  FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
  FunctionCallee LibcallFn = M->getOrInsertFunction(
      RTLIB::RuntimeLibcallsInfo::getLibcallImplName(LibcallImpl), FnType,
      Attr);
  CallInst *Call = Builder.CreateCall(LibcallFn, Args);
  Call->setAttributes(Attr);
  Value *Result = Call;

  // And then, extract the results...
  if (ValueOperand && !UseSizedLibcall)
    Builder.CreateLifetimeEnd(AllocaValue);

  if (CASExpected) {
    // The final result from the CAS is {load of 'expected' alloca, bool
    // result from call}.
    Type *FinalResultTy = I->getType();
    Value *V = PoisonValue::get(FinalResultTy);
    Value *ExpectedOut = Builder.CreateAlignedLoad(
        CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
    Builder.CreateLifetimeEnd(AllocaCASExpected);
    V = Builder.CreateInsertValue(V, ExpectedOut, 0);
    V = Builder.CreateInsertValue(V, Result, 1);
    I->replaceAllUsesWith(V);
  } else if (HasResult) {
    Value *V;
    if (UseSizedLibcall)
      V = Builder.CreateBitOrPointerCast(Result, I->getType());
    else {
      V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
                                    AllocaAlignment);
      Builder.CreateLifetimeEnd(AllocaResult);
    }
    I->replaceAllUsesWith(V);
  }
  I->eraseFromParent();
  return true;
}