20#define DEBUG_TYPE "lower-mem-intrinsics"
27 Value *OpSize,
unsigned OpSizeVal) {
30 return B.CreateAnd(Len, OpSizeVal - 1);
31 return B.CreateURem(Len, OpSize);
40 Value *RTLoopRemainder =
nullptr) {
43 return B.CreateSub(Len, RTLoopRemainder);
48struct LoopExpansionInfo {
53 Value *MainLoopIndex =
nullptr;
61 Value *ResidualLoopIndex =
nullptr;
98 Value *Len,
unsigned MainLoopStep,
99 unsigned ResidualLoopStep,
101 assert((ResidualLoopStep == 0 || MainLoopStep % ResidualLoopStep == 0) &&
102 "ResidualLoopStep must divide MainLoopStep if specified");
103 assert(ResidualLoopStep <= MainLoopStep &&
104 "ResidualLoopStep cannot be larger than MainLoopStep");
105 assert(MainLoopStep > 0 &&
"MainLoopStep must be non-zero");
106 LoopExpansionInfo LEI;
109 InsertBefore, BBNamePrefix +
"-post-expansion");
116 Type *LenType = Len->getType();
118 ConstantInt *CIMainLoopStep = ConstantInt::get(ILenType, MainLoopStep);
120 Value *LoopUnits = Len;
121 Value *ResidualUnits =
nullptr;
124 bool MustTakeMainLoop =
false;
125 if (MainLoopStep != 1) {
127 uint64_t TotalUnits = CLen->getZExtValue();
129 uint64_t ResidualCount = TotalUnits - LoopEndCount;
130 LoopUnits = ConstantInt::get(LenType, LoopEndCount);
131 ResidualUnits = ConstantInt::get(LenType, ResidualCount);
132 MustTakeMainLoop = LoopEndCount > 0;
140 CIMainLoopStep, MainLoopStep);
142 MainLoopStep, ResidualUnits);
145 MustTakeMainLoop = CLen->getZExtValue() > 0;
149 Ctx, BBNamePrefix +
"-expansion-main-body", ParentFunc, PostLoopBB);
153 LEI.MainLoopIndex = LoopIndex;
154 LoopIndex->
addIncoming(ConstantInt::get(LenType, 0U), PreLoopBB);
157 LoopBuilder.
CreateAdd(LoopIndex, ConstantInt::get(LenType, MainLoopStep));
164 if (ResidualLoopStep > 0 && ResidualLoopStep < MainLoopStep) {
175 ConstantInt *Zero = ConstantInt::get(ILenType, 0U);
176 if (MustTakeMainLoop)
177 PreLoopBuilder.
CreateBr(MainLoopBB);
180 MainLoopBB, ResidualCondBB);
186 MainLoopBB, ResidualCondBB);
191 ResLoopBB, PostLoopBB);
195 ResBuilder.
CreatePHI(LenType, 2,
"residual-loop-index");
200 Value *FullOffset = ResBuilder.
CreateAdd(LoopUnits, ResidualIndex);
201 LEI.ResidualLoopIndex = FullOffset;
204 ResidualIndex, ConstantInt::get(LenType, ResidualLoopStep));
205 ResidualIndex->
addIncoming(ResNewIndex, ResLoopBB);
213 ResBuilder.
CreateICmpULT(ResNewIndex, ResidualUnits), ResLoopBB,
221 if (MustTakeMainLoop) {
222 PreLoopBuilder.
CreateBr(MainLoopBB);
224 ConstantInt *Zero = ConstantInt::get(ILenType, 0U);
226 MainLoopBB, PostLoopBB);
231 MainLoopBB, PostLoopBB);
240 std::optional<uint32_t> AtomicElementSize) {
258 Type *LoopOpType =
TTI.getMemcpyLoopLoweringType(
259 Ctx, CopyLen, SrcAS, DstAS, SrcAlign, DstAlign, AtomicElementSize);
261 "Atomic memcpy lowering is not supported for vector operand type");
264 unsigned LoopOpSize =
DL.getTypeStoreSize(LoopOpType);
265 assert((!AtomicElementSize || LoopOpSize % *AtomicElementSize == 0) &&
266 "Atomic memcpy lowering is not supported for selected operand size");
271 if (LoopEndCount != 0) {
273 LoopOpSize, 0,
"static-memcpy");
287 LoopOpType, SrcGEP, PartSrcAlign, SrcIsVolatile);
290 Load->setMetadata(LLVMContext::MD_alias_scope,
296 Load, DstGEP, PartDstAlign, DstIsVolatile);
299 Store->setMetadata(LLVMContext::MD_noalias,
MDNode::get(Ctx, NewScope));
301 if (AtomicElementSize) {
305 assert(!LEI.ResidualLoopIP && !LEI.ResidualLoopIndex &&
306 "No residual loop was requested");
310 uint64_t BytesCopied = LoopEndCount;
312 if (RemainingBytes == 0)
317 TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
318 SrcAS, DstAS, SrcAlign, DstAlign,
321 for (
auto *OpTy : RemainingOps) {
325 unsigned OperandSize =
DL.getTypeStoreSize(OpTy);
326 assert((!AtomicElementSize || OperandSize % *AtomicElementSize == 0) &&
327 "Atomic memcpy lowering is not supported for selected operand size");
330 Int8Type, SrcAddr, ConstantInt::get(TypeOfCopyLen, BytesCopied));
335 Load->setMetadata(LLVMContext::MD_alias_scope,
339 Int8Type, DstAddr, ConstantInt::get(TypeOfCopyLen, BytesCopied));
344 Store->setMetadata(LLVMContext::MD_noalias,
MDNode::get(Ctx, NewScope));
346 if (AtomicElementSize) {
350 BytesCopied += OperandSize;
353 "Bytes copied should match size in the call!");
358 Align SrcAlign,
Align DstAlign,
bool SrcIsVolatile,
bool DstIsVolatile,
360 std::optional<uint32_t> AtomicElementSize) {
366 MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain(
"MemCopyDomain");
368 MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
373 Type *LoopOpType =
TTI.getMemcpyLoopLoweringType(
374 Ctx, CopyLen, SrcAS, DstAS, SrcAlign, DstAlign, AtomicElementSize);
376 "Atomic memcpy lowering is not supported for vector operand type");
377 unsigned LoopOpSize =
DL.getTypeStoreSize(LoopOpType);
378 assert((!AtomicElementSize || LoopOpSize % *AtomicElementSize == 0) &&
379 "Atomic memcpy lowering is not supported for selected operand size");
383 Type *ResidualLoopOpType = AtomicElementSize
386 unsigned ResidualLoopOpSize =
DL.getTypeStoreSize(ResidualLoopOpType);
387 assert(ResidualLoopOpSize == (AtomicElementSize ? *AtomicElementSize : 1) &&
388 "Store size is expected to match type size");
391 InsertBefore, CopyLen, LoopOpSize, ResidualLoopOpSize,
"dynamic-memcpy");
403 MainLoopBuilder.CreateInBoundsGEP(Int8Type, SrcAddr, LEI.MainLoopIndex);
404 LoadInst *Load = MainLoopBuilder.CreateAlignedLoad(
405 LoopOpType, SrcGEP, PartSrcAlign, SrcIsVolatile);
408 Load->setMetadata(LLVMContext::MD_alias_scope,
MDNode::get(Ctx, NewScope));
411 MainLoopBuilder.CreateInBoundsGEP(Int8Type, DstAddr, LEI.MainLoopIndex);
413 Load, DstGEP, PartDstAlign, DstIsVolatile);
418 if (AtomicElementSize) {
424 if (!LEI.ResidualLoopIP)
431 Value *ResSrcGEP = ResLoopBuilder.CreateInBoundsGEP(Int8Type, SrcAddr,
432 LEI.ResidualLoopIndex);
433 LoadInst *ResLoad = ResLoopBuilder.CreateAlignedLoad(
434 ResidualLoopOpType, ResSrcGEP, ResSrcAlign, SrcIsVolatile);
440 Value *ResDstGEP = ResLoopBuilder.CreateInBoundsGEP(Int8Type, DstAddr,
441 LEI.ResidualLoopIndex);
442 StoreInst *ResStore = ResLoopBuilder.CreateAlignedStore(
443 ResLoad, ResDstGEP, ResDstAlign, DstIsVolatile);
448 if (AtomicElementSize) {
458static std::pair<Value *, Value *>
461 Value *ResAddr1 = Addr1;
462 Value *ResAddr2 = Addr2;
467 if (
TTI.isValidAddrSpaceCast(AS2, AS1))
468 ResAddr2 =
B.CreateAddrSpaceCast(Addr2, Addr1->
getType());
469 else if (
TTI.isValidAddrSpaceCast(AS1, AS2))
470 ResAddr1 =
B.CreateAddrSpaceCast(Addr1, Addr2->
getType());
473 "support addrspacecast");
475 return {ResAddr1, ResAddr2};
507 Align DstAlign,
bool SrcIsVolatile,
518 Type *LoopOpType =
TTI.getMemcpyLoopLoweringType(Ctx, CopyLen, SrcAS, DstAS,
520 unsigned LoopOpSize =
DL.getTypeStoreSize(LoopOpType);
522 bool LoopOpIsInt8 = LoopOpType == Int8Type;
526 bool RequiresResidual = !LoopOpIsInt8;
528 Type *ResidualLoopOpType = Int8Type;
529 unsigned ResidualLoopOpSize =
DL.getTypeStoreSize(ResidualLoopOpType);
533 ConstantInt *CILoopOpSize = ConstantInt::get(ILengthType, LoopOpSize);
535 ConstantInt::get(ILengthType, ResidualLoopOpSize);
536 ConstantInt *Zero = ConstantInt::get(ILengthType, 0);
540 Value *RuntimeLoopBytes = CopyLen;
541 Value *RuntimeLoopRemainder =
nullptr;
542 Value *SkipResidualCondition =
nullptr;
543 if (RequiresResidual) {
544 RuntimeLoopRemainder =
547 LoopOpSize, RuntimeLoopRemainder);
548 SkipResidualCondition =
549 PLBuilder.
CreateICmpEQ(RuntimeLoopRemainder, Zero,
"skip_residual");
551 Value *SkipMainCondition =
552 PLBuilder.
CreateICmpEQ(RuntimeLoopBytes, Zero,
"skip_main");
563 auto [CmpSrcAddr, CmpDstAddr] =
566 PLBuilder.
CreateICmpULT(CmpSrcAddr, CmpDstAddr,
"compare_src_dst");
569 &ThenTerm, &ElseTerm);
596 CopyBackwardsBB->
setName(
"memmove_copy_backwards");
598 CopyForwardBB->
setName(
"memmove_copy_forward");
600 ExitBB->
setName(
"memmove_done");
613 F->getContext(),
"memmove_bwd_main_loop",
F, CopyForwardBB);
619 if (RequiresResidual) {
622 F->getContext(),
"memmove_bwd_residual_loop",
F, MainLoopBB);
626 ResidualLoopPhi, CIResidualLoopOpSize,
"bwd_residual_index");
634 ResidualLoopOpType, LoadGEP, ResidualSrcAlign, SrcIsVolatile,
639 ResidualDstAlign, DstIsVolatile);
643 F->getContext(),
"memmove_bwd_middle",
F, MainLoopBB);
648 ResidualLoopBuilder.
CreateICmpEQ(ResidualIndex, RuntimeLoopBytes),
649 IntermediateBB, ResidualLoopBB);
651 ResidualLoopPhi->
addIncoming(ResidualIndex, ResidualLoopBB);
652 ResidualLoopPhi->
addIncoming(CopyLen, CopyBackwardsBB);
659 PredBB = IntermediateBB;
666 MainLoopBuilder.
CreateSub(MainLoopPhi, CILoopOpSize,
"bwd_main_index");
670 LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile,
"element");
678 MainLoopPhi->
addIncoming(RuntimeLoopBytes, PredBB);
694 MainLoopBuilder.
CreatePHI(ILengthType, 0,
"fwd_main_index");
698 LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile,
"element");
703 Value *MainIndex = MainLoopBuilder.
CreateAdd(MainLoopPhi, CILoopOpSize);
709 if (RequiresResidual)
715 MainLoopBuilder.
CreateICmpEQ(MainIndex, RuntimeLoopBytes), SuccessorBB,
723 if (RequiresResidual) {
727 F->getContext(),
"memmove_fwd_residual_loop",
F, ExitBB);
728 IntermediateBuilder.
CreateCondBr(SkipResidualCondition, ExitBB,
734 ResidualLoopBuilder.
CreatePHI(ILengthType, 0,
"fwd_residual_index");
738 ResidualLoopOpType, LoadGEP, ResidualSrcAlign, SrcIsVolatile,
743 ResidualDstAlign, DstIsVolatile);
744 Value *ResidualIndex =
745 ResidualLoopBuilder.
CreateAdd(ResidualLoopPhi, CIResidualLoopOpSize);
747 ResidualLoopBuilder.
CreateICmpEQ(ResidualIndex, CopyLen), ExitBB,
749 ResidualLoopPhi->
addIncoming(ResidualIndex, ResidualLoopBB);
750 ResidualLoopPhi->
addIncoming(RuntimeLoopBytes, IntermediateBB);
761 Align DstAlign,
bool SrcIsVolatile,
776 Type *LoopOpType =
TTI.getMemcpyLoopLoweringType(Ctx, CopyLen, SrcAS, DstAS,
778 unsigned LoopOpSize =
DL.getTypeStoreSize(LoopOpType);
786 ConstantInt *Zero = ConstantInt::get(ILengthType, 0);
787 ConstantInt *LoopBound = ConstantInt::get(ILengthType, BytesCopiedInLoop);
788 ConstantInt *CILoopOpSize = ConstantInt::get(ILengthType, LoopOpSize);
792 auto [CmpSrcAddr, CmpDstAddr] =
795 PLBuilder.
CreateICmpULT(CmpSrcAddr, CmpDstAddr,
"compare_src_dst");
798 &ThenTerm, &ElseTerm);
803 ExitBB->
setName(
"memmove_done");
815 unsigned OperandSize =
DL.getTypeStoreSize(OpTy);
821 Value *SrcGEP = Builder.CreateInBoundsGEP(
822 Int8Type, SrcAddr, ConstantInt::get(TypeOfCopyLen, BytesCopied));
824 Builder.CreateAlignedLoad(OpTy, SrcGEP, ResSrcAlign, SrcIsVolatile);
825 Value *DstGEP = Builder.CreateInBoundsGEP(
826 Int8Type, DstAddr, ConstantInt::get(TypeOfCopyLen, BytesCopied));
827 Builder.CreateAlignedStore(Load, DstGEP, ResDstAlign, DstIsVolatile);
828 BytesCopied += OperandSize;
832 if (RemainingBytes != 0) {
833 CopyBackwardsBB->
setName(
"memmove_bwd_residual");
834 uint64_t BytesCopied = BytesCopiedInLoop;
844 TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
845 SrcAS, DstAS, PartSrcAlign,
847 for (
auto *OpTy : RemainingOps) {
851 GenerateResidualLdStPair(OpTy, BwdResBuilder, BytesCopied);
854 if (BytesCopiedInLoop != 0) {
857 if (RemainingBytes != 0) {
861 PredBB = CopyBackwardsBB;
863 CopyBackwardsBB->
setName(
"memmove_bwd_loop");
867 Value *Index = LoopBuilder.
CreateSub(LoopPhi, CILoopOpSize,
"bwd_index");
870 LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile,
"element");
888 if (BytesCopiedInLoop != 0) {
889 CopyForwardBB->
setName(
"memmove_fwd_loop");
892 if (RemainingBytes != 0) {
895 "memmove_fwd_residual");
896 FwdResidualBB = SuccBB;
902 LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile,
"element");
917 if (RemainingBytes != 0) {
918 uint64_t BytesCopied = BytesCopiedInLoop;
924 TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
925 SrcAS, DstAS, PartSrcAlign,
927 for (
auto *OpTy : RemainingOps)
928 GenerateResidualLdStPair(OpTy, FwdResBuilder, BytesCopied);
946 Builder.CreateCondBr(
947 Builder.CreateICmpEQ(ConstantInt::get(TypeOfCopyLen, 0), CopyLen), NewBB,
951 unsigned PartSize =
DL.getTypeStoreSize(
SetValue->getType());
956 LoopIndex->
addIncoming(ConstantInt::get(TypeOfCopyLen, 0), OrigBB);
961 PartAlign, IsVolatile);
964 LoopBuilder.
CreateAdd(LoopIndex, ConstantInt::get(TypeOfCopyLen, 1));
975 const SCEV *DestSCEV = SE->
getSCEV(Memcpy->getRawDest());
1021 bool DstIsVolatile = SrcIsVolatile;
1026 if (SrcAS != DstAS) {
1027 if (!
TTI.addrspacesMayAlias(SrcAS, DstAS)) {
1032 CI, SrcAlign, DstAlign, SrcIsVolatile,
1037 CopyLen, SrcAlign, DstAlign, SrcIsVolatile,
1045 if (!(
TTI.isValidAddrSpaceCast(DstAS, SrcAS) ||
1046 TTI.isValidAddrSpaceCast(SrcAS, DstAS))) {
1051 dbgs() <<
"Do not know how to expand memmove between different "
1052 "address spaces\n");
1059 Memmove, SrcAddr, DstAddr, CI, SrcAlign, DstAlign,
1060 SrcIsVolatile, DstIsVolatile,
TTI);
1063 Memmove, SrcAddr, DstAddr, CopyLen, SrcAlign, DstAlign,
1064 SrcIsVolatile, DstIsVolatile,
TTI);
assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF)
static std::pair< Value *, Value * > tryInsertCastToCommonAddrSpace(IRBuilderBase &B, Value *Addr1, Value *Addr2, const TargetTransformInfo &TTI)
static bool canOverlap(MemTransferBase< T > *Memcpy, ScalarEvolution *SE)
static void createMemMoveLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr, ConstantInt *CopyLen, Align SrcAlign, Align DstAlign, bool SrcIsVolatile, bool DstIsVolatile, const TargetTransformInfo &TTI)
static void createMemSetLoop(Instruction *InsertBefore, Value *DstAddr, Value *CopyLen, Value *SetValue, Align DstAlign, bool IsVolatile)
static Value * getRuntimeLoopRemainder(IRBuilderBase &B, Value *Len, Value *OpSize, unsigned OpSizeVal)
static Value * getRuntimeLoopUnits(IRBuilderBase &B, Value *Len, Value *OpSize, unsigned OpSizeVal, Value *RTLoopRemainder=nullptr)
static LoopExpansionInfo insertLoopExpansion(Instruction *InsertBefore, Value *Len, unsigned MainLoopStep, unsigned ResidualLoopStep, StringRef BBNamePrefix)
Insert the control flow and loop counters for a memcpy/memset loop expansion.
static void createMemMoveLoopUnknownSize(Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr, Value *CopyLen, Align SrcAlign, Align DstAlign, bool SrcIsVolatile, bool DstIsVolatile, const TargetTransformInfo &TTI)
This class represents any memcpy intrinsic i.e.
uint32_t getElementSizeInBytes() const
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed, or null if the block is not well formed.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate for its type.
A parsed version of the target data layout string in and methods for querying it.
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Common base class shared among various IRBuilders.
Value * CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name="")
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
UnreachableInst * CreateUnreachable()
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Class to represent integer types.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
This class wraps the llvm.memcpy intrinsic.
Value * getLength() const
Value * getRawDest() const
MaybeAlign getDestAlign() const
This class wraps the llvm.memmove intrinsic.
This class wraps the llvm.memset and llvm.memset.inline intrinsics.
This class wraps the llvm.experimental.memset.pattern intrinsic.
Common base class for all memory transfer intrinsics.
Value * getRawSource() const
Return the arguments to the instruction.
MaybeAlign getSourceAlign() const
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
This class represents an analyzed expression in the program.
The main scalar evolution driver.
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
const ParentTy * getParent() const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
LLVM_ABI void createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr, ConstantInt *CopyLen, Align SrcAlign, Align DestAlign, bool SrcIsVolatile, bool DstIsVolatile, bool CanOverlap, const TargetTransformInfo &TTI, std::optional< uint32_t > AtomicCpySize=std::nullopt)
Emit a loop implementing the semantics of an llvm.memcpy whose size is a compile time constant.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI void expandMemSetPatternAsLoop(MemSetPatternInst *MemSet)
Expand MemSetPattern as a loop. MemSet is not deleted.
LLVM_ABI bool expandMemMoveAsLoop(MemMoveInst *MemMove, const TargetTransformInfo &TTI)
Expand MemMove as a loop.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void createMemCpyLoopUnknownSize(Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr, Value *CopyLen, Align SrcAlign, Align DestAlign, bool SrcIsVolatile, bool DstIsVolatile, bool CanOverlap, const TargetTransformInfo &TTI, std::optional< unsigned > AtomicSize=std::nullopt)
Emit a loop implementing the semantics of llvm.memcpy where the size is not a compile-time constant.
LLVM_ABI void expandAtomicMemCpyAsLoop(AnyMemCpyInst *AtomicMemCpy, const TargetTransformInfo &TTI, ScalarEvolution *SE)
Expand AtomicMemCpy as a loop. AtomicMemCpy is not deleted.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
LLVM_ABI void expandMemCpyAsLoop(MemCpyInst *MemCpy, const TargetTransformInfo &TTI, ScalarEvolution *SE=nullptr)
Expand MemCpy as a loop. MemCpy is not deleted.
LLVM_ABI void expandMemSetAsLoop(MemSetInst *MemSet)
Expand MemSet as a loop. MemSet is not deleted.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.