#define DEBUG_TYPE "atomic-expand"
    // (private members of the AtomicExpand FunctionPass; declarations the
    // extraction dropped are marked with "...")
    bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                               bool IsStore, bool IsLoad);
    bool tryExpandAtomicLoad(LoadInst *LI);
    bool expandAtomicLoadToLL(LoadInst *LI);
    bool expandAtomicLoadToCmpXchg(LoadInst *LI);
    // ...
    void expandAtomicOpToLLSC(
        Instruction *I, Type *ResultTy, Value *Addr, AtomicOrdering MemOpOrder,
        function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
    void expandPartwordAtomicRMW(
        AtomicRMWInst *I, TargetLoweringBase::AtomicExpansionKind ExpansionKind);
    // ...
    static Value *insertRMWCmpXchgLoop(
        IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
        AtomicOrdering MemOpOrder,
        function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
        CreateCmpXchgInstFun CreateCmpXchg);
    // ...
    bool expandAtomicOpToLibcall(Instruction *I, unsigned Size, unsigned Align,
                                 Value *PointerOperand, Value *ValueOperand,
                                 Value *CASExpected, AtomicOrdering Ordering,
                                 AtomicOrdering Ordering2,
                                 ArrayRef<RTLIB::Libcall> Libcalls);
    void expandAtomicLoadToLibcall(LoadInst *LI);
    void expandAtomicStoreToLibcall(StoreInst *SI);
char AtomicExpand::ID = 0;

INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand", "Expand Atomic instructions",
                   false, false)

FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}
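// Usage sketch (editorial, not part of this file): targets schedule this pass
// from their TargetPassConfig so that oversized or unsupported atomics are
// rewritten before instruction selection. "MyTargetPassConfig" is a
// hypothetical stand-in for a concrete target's config class:
//
//   void MyTargetPassConfig::addIRPasses() {
//     addPass(createAtomicExpandPass(TM)); // TM = the target machine pointer
//     TargetPassConfig::addIRPasses();
//   }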
// Helper functions to retrieve the size of atomic instructions.
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}

// ... (equivalent overloads for AtomicRMWInst and AtomicCmpXchgInst) ...

// Helper functions to retrieve the alignment of atomic instructions.
static unsigned getAtomicOpAlign(LoadInst *LI) {
  unsigned Align = LI->getAlignment();
  assert(Align != 0 && "An atomic LoadInst always has an explicit alignment");
  return Align;
}

static unsigned getAtomicOpAlign(StoreInst *SI) {
  unsigned Align = SI->getAlignment();
  assert(Align != 0 && "An atomic StoreInst always has an explicit alignment");
  return Align;
}
// Determine if a particular atomic operation has a supported size and is of
// appropriate alignment to be passed through for target lowering (versus
// turning into a __atomic libcall).
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  unsigned Align = getAtomicOpAlign(I);
  return Align >= Size && Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}
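// Example (illustrative): on a target reporting
// getMaxAtomicSizeInBitsSupported() == 64, a 16-byte atomic (say, an i128
// load) has Size == 16 > 8, so the check fails and runOnFunction below routes
// it to the __atomic libcalls. An under-aligned atomic (Align < Size) is
// rejected the same way.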
bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();

  // ... collect every atomic load/store/RMW/cmpxchg in F into AtomicInsts ...

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");

    // If the size (or alignment) is not supported, give up and call the
    // __atomic libcalls instead.
    if (LI && !atomicSizeSupported(TLI, LI)) {
      expandAtomicLoadToLibcall(LI);
      MadeChange = true;
      continue;
    }
    if (SI && !atomicSizeSupported(TLI, SI)) {
      expandAtomicStoreToLibcall(SI);
      MadeChange = true;
      continue;
    }
    if (RMWI && !atomicSizeSupported(TLI, RMWI)) {
      expandAtomicRMWToLibcall(RMWI);
      MadeChange = true;
      continue;
    }
    if (CASI && !atomicSizeSupported(TLI, CASI)) {
      expandAtomicCASToLibcall(CASI);
      MadeChange = true;
      continue;
    }

    if (TLI->shouldInsertFencesForAtomic(I)) {
      // Strip a stronger-than-monotonic ordering off the instruction,
      // remember it, and bracket the downgraded operation with explicit
      // fences instead.
      auto FenceOrdering = AtomicOrdering::Monotonic;
      bool IsStore, IsLoad;
      if (LI && isAcquireOrStronger(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(AtomicOrdering::Monotonic);
        IsStore = false;
        IsLoad = true;
      } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(AtomicOrdering::Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
                          isAcquireOrStronger(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(AtomicOrdering::Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
                 (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getSuccessOrdering()))) {
        // A cmpxchg that stays a cmpxchg (not LL/SC) gets its fences here.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
        CASI->setFailureOrdering(AtomicOrdering::Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != AtomicOrdering::Monotonic)
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
    }

    if (LI) {
      if (LI->getType()->isFloatingPointTy()) {
        // Backends only need to handle integer atomics; convert FP loads to
        // integer loads of the same width first.
        LI = convertAtomicLoadToIntegerType(LI);
        MadeChange = true;
      }
      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI) {
      if (SI->getValueOperand()->getType()->isFloatingPointTy()) {
        SI = convertAtomicStoreToIntegerType(SI);
        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }
      if (TLI->shouldExpandAtomicStoreInIR(SI))
        MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // An idempotent RMW (e.g. 'or %p, 0') can become a plain fenced load;
      // otherwise expand it to a cmpxchg or LL/SC loop.
      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI) {
      assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
             "unimplemented - floating point not legal at IR level");
      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        CASI = convertCmpXchgToIntegerType(CASI);
        assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
      unsigned ValueSize = getAtomicOpSize(CASI);
      if (ValueSize < MinCASSize) {
        assert(!TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
               "MinCmpXchgSizeInBits not yet supported for LL/SC expansions.");
        expandPartwordCmpXchg(CASI);
      } else if (TLI->shouldExpandAtomicCmpXchgInIR(CASI)) {
        MadeChange |= expandAtomicCmpXchg(CASI);
      }
    }
  }
  return MadeChange;
}
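// Illustrative end-to-end example: given
//     %v = load atomic i128, i128* %p seq_cst, align 16
// on a target that only supports 64-bit atomics, the loop above calls
// expandAtomicLoadToLibcall, and the load becomes (roughly)
//     call void @__atomic_load(i64 16, i8* %p.cast, i8* %tmp, i32 5)
// where %tmp is an alloca that is then reloaded as the result, and 5 is the
// C ABI encoding of seq_cst.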
bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);

  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
  // The trailing fence was emitted before the instruction (that is where the
  // builder was positioned), so move it to just after I, where it belongs.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}
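// Example (illustrative): for a target that asks for explicit fences, a
// seq_cst store
//     store atomic i32 %v, i32* %p seq_cst, align 4
// has already been downgraded by runOnFunction and is bracketed as
//     fence seq_cst                                   ; emitLeadingFence
//     store atomic i32 %v, i32* %p monotonic, align 4
//     fence seq_cst                                   ; emitTrailingFence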
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}
// Convert an atomic load of a non-integral type to an integer load of the
// equivalent bitwidth.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(LI);

  Value *Addr = LI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  auto *NewLI = Builder.CreateLoad(NewAddr);
  NewLI->setAlignment(LI->getAlignment());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSynchScope());
  DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}
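// Illustrative before/after for the conversion above:
//     %v = load atomic float, float* %p unordered, align 4
// becomes
//     %p.i = bitcast float* %p to i32*
//     %v.i = load atomic i32, i32* %p.i unordered, align 4
//     %v   = bitcast i32 %v.i to float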
bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
}
bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads; e.g. the only 64-bit single-copy-atomic load
  // on ARM is ldrexd.
  Value *Val = TLI->emitLoadLinked(Builder, LI->getPointerOperand(),
                                   LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlignment());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSynchScope());
  DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}

bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // An atomic store that is too wide for a native store is replaced by an
  // atomic xchg whose result is unused; the swap can then be expanded like
  // any other RMW (e.g. to ldrex/strex on ARM or cmpxchg8b/16b on x86).
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, RMW-expand it as usual.
  return tryExpandAtomicRMW(AI);
}
static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal,
                                 AtomicOrdering MemOpOrder,
                                 Value *&Success, Value *&NewLoaded) {
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
}
// Emit IR to implement the given atomicrmw operation on values in registers,
// returning the new value.
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}
bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      llvm_unreachable(
          "MinCmpXchgSizeInBits not yet supported for LL/SC architectures.");
    }
    // ... expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
    //                          AI->getOrdering(), PerformOp) ...
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::CmpXChg);
    } else {
      expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
    }
    return true;
  }
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
  }
}
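// Note (editorial): which case fires is entirely target-driven. LL/SC targets
// such as ARM and AArch64 typically return AtomicExpansionKind::LLSC here,
// while x86 returns None for RMWs it can select directly; CmpXChg is used
// when a compare-exchange loop is the only option. Operations narrower than
// getMinCmpXchgSizeInBits() take the partword path above.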
struct PartwordMaskValues {
  Type *WordType;
  Type *ValueType;
  Value *AlignedAddr;
  Value *ShiftAmt;
  Value *Mask;
  Value *Inv_Mask;
};

// This is a helper function which builds instructions to provide values
// necessary for partword atomic operations: the word-aligned address, the
// in-word shift amount, and the lane masks.
static PartwordMaskValues createMaskInstrs(IRBuilder<> &Builder, Instruction *I,
                                           Type *ValueType, Value *Addr,
                                           unsigned WordSize) {
  PartwordMaskValues Ret;

  // ... (DL, Ctx, ValueSize, Ret.WordType, and WordPtrType are computed in
  // elided code from I's module and ValueType) ...

  assert(ValueSize < WordSize);

  Value *AddrInt = Builder.CreatePtrToInt(Addr, DL.getIntPtrType(Ctx));
  Ret.AlignedAddr = Builder.CreateIntToPtr(
      Builder.CreateAnd(AddrInt, ~(uint64_t)(WordSize - 1)), WordPtrType,
      "AlignedAddr");

  Value *PtrLSB = Builder.CreateAnd(AddrInt, WordSize - 1, "PtrLSB");
  if (DL.isLittleEndian()) {
    // turn bytes into bits
    Ret.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    // big-endian: turn bytes into bits, counting from the other end
    // ...
  }

  Ret.ShiftAmt = Builder.CreateTrunc(Ret.ShiftAmt, Ret.WordType, "ShiftAmt");
  // ... Ret.Mask = (all-ones mask of the value's width) << ShiftAmt ...
  Ret.Inv_Mask = Builder.CreateNot(Ret.Mask, "Inv_Mask");

  return Ret;
}
// Emit IR to implement a masked version of a given atomicrmw operation:
// Loaded is the whole word, Shifted_Inc is the operand already shifted into
// its lane, and Inc is the original (narrow) operand.
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilder<> &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
    return FinalVal;
  }
  // ... Or/Xor cannot disturb neighbouring bits, so they are performed
  // directly on the shifted operand via performAtomicOp ...
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Nand: {
    // These may produce bits outside the value's lane; mask the result back
    // into place.
    Value *NewVal = performAtomicOp(Op, Builder, Loaded, Shifted_Inc);
    Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
    return FinalVal;
  }
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin: {
    // Comparisons need the original narrow value: shift down, operate, then
    // zero-extend and shift back up.
    Value *Loaded_Shiftdown = Builder.CreateTrunc(
        Builder.CreateLShr(Loaded, PMV.ShiftAmt), PMV.ValueType);
    Value *NewVal = performAtomicOp(Op, Builder, Loaded_Shiftdown, Inc);
    Value *NewVal_Shiftup = Builder.CreateShl(
        Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shiftup);
    return FinalVal;
  }
  default:
    llvm_unreachable("Unknown atomic op");
  }
}
void AtomicExpand::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
  assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg);

  AtomicOrdering MemOpOrder = AI->getOrdering();
  IRBuilder<> Builder(AI);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  auto PerformPartwordOp = [&](IRBuilder<> &Builder, Value *Loaded) {
    return performMaskedAtomicOp(AI->getOperation(), Builder, Loaded,
                                 ValOperand_Shifted, AI->getValOperand(), PMV);
  };

  Value *OldResult =
      insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder,
                           PerformPartwordOp, createCmpXchgInstFun);
  Value *FinalOldResult = Builder.CreateTrunc(
      Builder.CreateLShr(OldResult, PMV.ShiftAmt), PMV.ValueType);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
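// Net effect (sketch): an 'atomicrmw add i8* %p, i8 %v' becomes a cmpxchg
// loop over the containing aligned i32 word; the i8 operand is zext'd and
// shifted into its lane, performMaskedAtomicOp combines it with the loaded
// word, and the old byte is shifted/truncated back out as the result.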
void AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // Expand a narrow cmpxchg to a word-sized cmpxchg inside a retry loop, so
  // that changes to the *other* bytes of the word cause a retry rather than
  // a spurious failure of a strong cmpxchg.
  const int WordSize = TLI->getMinCmpXchgSizeInBits() / 8;

  // ... (BB, the loop/failure/end blocks, Builder, PMV, NewVal, Cmp_Shifted,
  // InitLoaded, Loaded_MaskOut, OldVal, OldVal_MaskOut, and FailureBB are
  // created in elided code) ...

  // The block split left an unconditional branch in the wrong place; delete
  // it.
  std::prev(BB->end())->eraseFromParent();

  // Shift the new value into its lane (Cmp_Shifted is built the same way).
  Value *NewVal_Shifted =
      Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);

  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);

  // Loop body: a PHI carries the word with the target lane masked out.
  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);

  // Splice the expected and new narrow values into the most recent word.
  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
  // ... emit the word-sized cmpxchg on PMV.AlignedAddr; on failure, retry
  // only if bits *outside* the mask changed:
  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);

  // End block: shift and truncate the old value back out of the word.
  Value *FinalOldVal = Builder.CreateTrunc(
      Builder.CreateLShr(OldVal, PMV.ShiftAmt), PMV.ValueType);
  // ... rebuild the { iN, i1 } result and replace CI's uses ...
}
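// Subtlety worth noting: the word-sized cmpxchg can fail even when the
// narrow value still matches, because a neighbouring byte in the same word
// changed. The loop above distinguishes that case (retry with the freshly
// masked-out word) from a genuine value mismatch (report failure).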
void AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  IRBuilder<> Builder(I);
  Value *Loaded =
      insertRMWLLSCLoop(Builder, ResultType, Addr, MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}
Value *AtomicExpand::insertRMWLLSCLoop(
    IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  // The standard expansion produces:
  //   atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end

  // ... split the current block into BB / ExitBB and create LoopBB ...

  // The split added a branch to the wrong place; remove it and branch into
  // the loop instead.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  // ... branch back to LoopBB while StoreSuccess != 0, else fall through to
  // ExitBB ...
  return Loaded;
}
AtomicCmpXchgInst *
AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  // ... bitcast the address and both operands to the corresponding integer
  // type, rebuild the cmpxchg as NewCI, and bitcast the results back ...
  DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");
  // ...
}
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  // If the target wants explicit fences, emit them around a monotonic
  // operation; otherwise preserve the orderings on the cmpxchg itself.
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);

  // A strong, fenced cmpxchg gets a separate "released load" block so the
  // retry path does not have to repeat the release barrier.
  bool HasReleasedLoadBB =
      !CI->isWeak() && ShouldInsertFencesForAtomic; // && ... (elided)

  // ... (Ctx, F, BB, Builder, MemOpOrder, UseUnconditionalReleaseBarrier,
  // and the cmpxchg.start/trystore/nostore/success/failure/end blocks are
  // created in elided code) ...
  auto ReleasedLoadBB =
      BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, TryStoreBB);
  auto ReleasingStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, ReleasedLoadBB);

  // splitBasicBlock left a stray branch; remove it.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                          /*IsLoad=*/true);
  Builder.CreateBr(StartBB);

  Builder.SetInsertPoint(StartBB);
  Value *UnreleasedLoad = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore = Builder.CreateICmpEQ(
      UnreleasedLoad, CI->getCompareOperand(), "should_store");
  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);

  Builder.SetInsertPoint(ReleasingStoreBB);
  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                          /*IsLoad=*/true);
  Builder.CreateBr(TryStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  // ... compare StoreSuccess against 0 (store-conditional returns 0 on
  // success). A weak cmpxchg fails outright; a strong one retries:
  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : RetryBB);

  Builder.SetInsertPoint(ReleasedLoadBB);
  Value *SecondLoad;
  if (HasReleasedLoadBB) {
    SecondLoad = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
    ShouldStore = Builder.CreateICmpEQ(SecondLoad, CI->getCompareOperand(),
                                       "should_store");
    Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
  }
  // ...

  Builder.SetInsertPoint(SuccessBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                           /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  // On LL/SC targets, balance the unpaired load-linked before giving up.
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                           /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // ... emit the Success PHI in ExitBB, then merge the loaded value:
  Value *Loaded;
  if (!HasReleasedLoadBB)
    Loaded = UnreleasedLoad;
  else {
    TryStoreLoaded->addIncoming(UnreleasedLoad, ReleasingStoreBB);
    TryStoreLoaded->addIncoming(SecondLoad, ReleasedLoadBB);

    NoStoreLoaded->addIncoming(UnreleasedLoad, StartBB);
    NoStoreLoaded->addIncoming(SecondLoad, ReleasedLoadBB);

    ExitLoaded->addIncoming(TryStoreLoaded, SuccessBB);
    ExitLoaded->addIncoming(NoStoreLoaded, FailureBB);

    Loaded = ExitLoaded;
  }

  // Replace extractvalue uses of the cmpxchg's { iN, i1 } result with the
  // CFG-derived Loaded/Success values.
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;
    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");
    // ... replace EV with Loaded or Success; collect EV into PrunedInsts ...
  }
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();
  // ...
  return true;
}
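// The block structure built above, sketched (strong cmpxchg with fences;
// paraphrased from the scheme the code constructs):
//
//   start:           %unreleased = load.linked %addr
//                    br %should_store, %releasingstore, %nostore
//   releasingstore:  leading (release) fence; br %trystore
//   trystore:        %stored = store.conditional %new, %addr
//                    br %stored, %success, (%releasedload | %start | %failure)
//   releasedload:    second load.linked, re-test, retry or give up
//   success/failure: trailing fence
//   end:             PHIs merge the loaded value and the success bit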
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  switch (RMWI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  default:
    return false;
  }
}

bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}
Value *AtomicExpand::insertRMWCmpXchgLoop(
    IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
    CreateCmpXchgInstFun CreateCmpXchg) {
  // The standard expansion produces:
  //     %init_loaded = load %addr
  //     br label %loop
  //   loop:
  //     %loaded = phi [%init_loaded, %entry], [%new_loaded, %loop]
  //     %new = some_op %loaded, %incr
  //     %pair = cmpxchg %addr, %loaded, %new
  //     %new_loaded = extractvalue %pair, 0
  //     %success = extractvalue %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop

  // ... split the block into BB / ExitBB, create LoopBB, and remove the
  // stray branch the split added:
  std::prev(BB->end())->eraseFromParent();
  // ... emit the initial (non-atomic) load and the loop PHI 'Loaded' ...

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal,
                MemOpOrder == AtomicOrdering::Unordered
                    ? AtomicOrdering::Monotonic
                    : MemOpOrder,
                Success, NewLoaded);
  assert(Success && NewLoaded);

  // ... Loaded->addIncoming(NewLoaded, LoopBB); CondBr on Success ...
  return NewLoaded;
}
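// For intuition, the same loop in portable C++ (illustrative analogue only;
// 'atomic_fetch_op' is a hypothetical helper, not part of LLVM or libc++):
//
//   #include <atomic>
//
//   template <typename T, typename BinaryOp>
//   T atomic_fetch_op(std::atomic<T> &A, T Inc, BinaryOp Op) {
//     T Loaded = A.load(std::memory_order_relaxed);
//     // compare_exchange_weak refreshes Loaded with the current value
//     // whenever the exchange loses a race, exactly like the PHI above.
//     while (!A.compare_exchange_weak(Loaded, Op(Loaded, Inc)))
//       ;
//     return Loaded; // the old value, matching atomicrmw semantics
//   }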
// Expand an atomic RMW instruction into a loop utilizing cmpxchg.
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  IRBuilder<> Builder(AI);
  Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
      Builder, AI->getType(), AI->getPointerOperand(), AI->getOrdering(),
      [&](IRBuilder<> &Builder, Value *Loaded) {
        return performAtomicOp(AI->getOperation(), Builder, Loaded,
                               AI->getValOperand());
      },
      CreateCmpXchg);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return true;
}
// Whether a sized (__atomic_*_N) libcall may be used: the operand must be
// naturally aligned and of a power-of-two size expressible in the target's
// C ABI.
static bool canUseSizedAtomicCall(unsigned Size, unsigned Align,
                                  const DataLayout &DL) {
  unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
  return Align >= Size &&
         (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
         Size <= LargestSize;
}
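// Example (illustrative): Size == 4 with Align == 4 qualifies for
// __atomic_load_4 / __atomic_store_4; a packed (Align == 1) or odd-sized
// (Size == 3) access falls back to the generic, pointer-based __atomic_load.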
void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
      RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
  unsigned Size = getAtomicOpSize(I);
  unsigned Align = getAtomicOpAlign(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, Align, I->getPointerOperand(), nullptr, nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  (void)expanded;
  assert(expanded && "expandAtomicOpToLibcall shouldn't fail for Load");
}
void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
      RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
  unsigned Size = getAtomicOpSize(I);
  unsigned Align = getAtomicOpAlign(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, Align, I->getPointerOperand(), I->getValueOperand(), nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  (void)expanded;
  assert(expanded && "expandAtomicOpToLibcall shouldn't fail for Store");
}
void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
  unsigned Size = getAtomicOpSize(I);
  unsigned Align = getAtomicOpAlign(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, Align, I->getPointerOperand(), I->getNewValOperand(),
      I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
      Libcalls);
  (void)expanded;
  assert(expanded && "expandAtomicOpToLibcall shouldn't fail for CAS");
}
void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
  ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());

  unsigned Size = getAtomicOpSize(I);
  unsigned Align = getAtomicOpAlign(I);

  bool Success = false;
  if (!Libcalls.empty())
    Success = expandAtomicOpToLibcall(
        I, Size, Align, I->getPointerOperand(), I->getValOperand(), nullptr,
        I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

  // The expansion failed: either there were no libcalls at all for the
  // operation (min/max), or only size-specific libcalls exist and a generic
  // one was needed. Expand to a CAS loop instead, with each CAS turned into
  // a __atomic_compare_exchange libcall.
  if (!Success) {
    expandAtomicRMWToCmpXchg(I, [this](IRBuilder<> &Builder, Value *Addr,
                                       Value *Loaded, Value *NewVal,
                                       AtomicOrdering MemOpOrder,
                                       Value *&Success, Value *&NewLoaded) {
      AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
          Addr, Loaded, NewVal, MemOpOrder,
          AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
      Success = Builder.CreateExtractValue(Pair, 1, "success");
      NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
      // Convert the CAS instruction we just created into a libcall.
      expandAtomicCASToLibcall(Pair);
    });
  }
}
// Generic worker that rewrites I into a call to the appropriate __atomic_*
// libcall: a sized variant where legal, otherwise the generic memory-buffer
// form.
bool AtomicExpand::expandAtomicOpToLibcall(
    Instruction *I, unsigned Size, unsigned Align, Value *PointerOperand,
    Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
    AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
  assert(Libcalls.size() == 6);
  // ... (Builder, AllocaBuilder, Args, Ctx, DL, SizedIntTy, HasResult,
  // AllocaAlignment, SizeVal64, and the C-ABI orderings are set up in the
  // elided prologue) ...
  bool UseSizedLibcall = canUseSizedAtomicCall(Size, Align, DL);

  RTLIB::Libcall RTLibType;
  if (UseSizedLibcall) {
    switch (Size) {
    case 1: RTLibType = Libcalls[1]; break;
    case 2: RTLibType = Libcalls[2]; break;
    case 4: RTLibType = Libcalls[3]; break;
    case 8: RTLibType = Libcalls[4]; break;
    case 16: RTLibType = Libcalls[5]; break;
    }
  } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
    RTLibType = Libcalls[0];
  } else {
    // Can't use sized function, and there's no generic for this operation,
    // so give up.
    return false;
  }

  // The generic form passes operands through i8* allocas; the sized form
  // passes them directly.
  AllocaInst *AllocaCASExpected = nullptr;
  Value *AllocaCASExpected_i8 = nullptr;
  AllocaInst *AllocaValue = nullptr;
  Value *AllocaValue_i8 = nullptr;
  AllocaInst *AllocaResult = nullptr;
  Value *AllocaResult_i8 = nullptr;

  // 'size' argument, only for the generic form.
  if (!UseSizedLibcall) {
    Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
  }
  // ... then the (bitcast) pointer operand ...

  // 'expected' argument, if present (always passed by pointer).
  if (CASExpected) {
    AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
    AllocaCASExpected_i8 =
        Builder.CreateBitCast(AllocaCASExpected, Type::getInt8PtrTy(Ctx));
    // ... lifetime.start, store CASExpected into it, push the i8* arg ...
  }

  // 'val' ('desired') argument: by value when sized, else via alloca.
  if (ValueOperand) {
    if (UseSizedLibcall) {
      Value *IntValue =
          Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
      Args.push_back(IntValue);
    } else {
      AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
      // ... bitcast to i8*, lifetime.start, store, push the arg ...
    }
  }

  // 'ret' argument, when the result comes back through memory.
  if (!CASExpected && HasResult && !UseSizedLibcall) {
    AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
    // ...
  }
  // ... append the ordering argument(s) ...

  // Return type: i1 for CAS, the sized integer for sized value-returning
  // calls, void otherwise.
  Type *ResultTy;
  if (CASExpected) {
    ResultTy = Type::getInt1Ty(Ctx);
    // ...
  } else if (HasResult && UseSizedLibcall)
    ResultTy = SizedIntTy;
  else
    ResultTy = Type::getVoidTy(Ctx);

  SmallVector<Type *, 6> ArgTys;
  for (Value *Arg : Args)
    ArgTys.push_back(Arg->getType());
  // ... FunctionType::get(ResultTy, ArgTys, false), getOrInsertFunction with
  // TLI->getLibcallName(RTLibType), then Builder.CreateCall(..., Args) ...

  if (ValueOperand && !UseSizedLibcall)
    Builder.CreateLifetimeEnd(AllocaValue_i8, SizeVal64);

  if (CASExpected) {
    // The final CAS result is { load of the 'expected' alloca, i1 from the
    // call }.
    Value *ExpectedOut =
        Builder.CreateAlignedLoad(AllocaCASExpected, AllocaAlignment);
    // ... insertvalue into { iN, i1 } and replace I's uses ...
  } else if (HasResult) {
    Value *V = nullptr;
    if (UseSizedLibcall)
      V = Builder.CreateBitOrPointerCast(Result, I->getType());
    else {
      // ... V = aligned load from AllocaResult ...
    }
    I->replaceAllUsesWith(V);
  }
  I->eraseFromParent();
  return true;
}
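// Reference for the calling conventions used above (generic forms, per the
// GCC libatomic ABI this code targets):
//   void __atomic_load(size_t size, void *mem, void *ret, int order);
//   void __atomic_store(size_t size, void *mem, void *val, int order);
//   bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
//                                  void *desired, int success, int failure);
// The sized variants (__atomic_load_4, ...) pass and return values directly.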