#define DEBUG_TYPE "lower-mem-intrinsics"
void llvm::createMemCpyLoopKnownSize(
    Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr,
    ConstantInt *CopyLen, Align SrcAlign, Align DstAlign, bool SrcIsVolatile,
    bool DstIsVolatile, bool CanOverlap, const TargetTransformInfo &TTI,
    std::optional<uint32_t> AtomicElementSize) {
  BasicBlock *PreLoopBB = InsertBefore->getParent();
  BasicBlock *PostLoopBB = nullptr;
  Function *ParentFunc = PreLoopBB->getParent();
  LLVMContext &Ctx = PreLoopBB->getContext();
  const DataLayout &DL = ParentFunc->getDataLayout();
  MDBuilder MDB(Ctx);
  MDNode *NewScope = MDB.createAnonymousAliasScope(
      MDB.createAnonymousAliasScopeDomain("MemCopyDomain"), "MemCopyAliasScope");

  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *TypeOfCopyLen = CopyLen->getType();
  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(
      Ctx, CopyLen, SrcAS, DstAS, SrcAlign, DstAlign, AtomicElementSize);
  assert((!AtomicElementSize || !LoopOpType->isVectorTy()) &&
         "Atomic memcpy lowering is not supported for vector operand type");

  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  assert((!AtomicElementSize || LoopOpSize % *AtomicElementSize == 0) &&
         "Atomic memcpy lowering is not supported for selected operand size");

  uint64_t LoopEndCount = CopyLen->getZExtValue() / LoopOpSize;

  if (LoopEndCount != 0) {
    // Split the block and insert the load/store loop.
    PostLoopBB = PreLoopBB->splitBasicBlock(InsertBefore, "memcpy-split");
    BasicBlock *LoopBB =
        BasicBlock::Create(Ctx, "load-store-loop", ParentFunc, PostLoopBB);
    PreLoopBB->getTerminator()->setSuccessor(0, LoopBB);
    IRBuilder<> LoopBuilder(LoopBB);

    Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));
    Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));

    PHINode *LoopIndex = LoopBuilder.CreatePHI(TypeOfCopyLen, 2, "loop-index");
    LoopIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0U), PreLoopBB);

    Value *SrcGEP =
        LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
    LoadInst *Load = LoopBuilder.CreateAlignedLoad(LoopOpType, SrcGEP,
                                                   PartSrcAlign, SrcIsVolatile);
    if (!CanOverlap) {
      // Set alias scope for loads.
      Load->setMetadata(LLVMContext::MD_alias_scope,
                        MDNode::get(Ctx, NewScope));
    }
    Value *DstGEP =
        LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
    StoreInst *Store = LoopBuilder.CreateAlignedStore(
        Load, DstGEP, PartDstAlign, DstIsVolatile);
    if (!CanOverlap) {
      // Indicate that stores don't overlap the loads.
      Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
    }
    if (AtomicElementSize) {
      Load->setAtomic(AtomicOrdering::Unordered);
      Store->setAtomic(AtomicOrdering::Unordered);
    }
    Value *NewIndex =
        LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(TypeOfCopyLen, 1U));
    LoopIndex->addIncoming(NewIndex, LoopBB);

    Constant *LoopEndCI = ConstantInt::get(TypeOfCopyLen, LoopEndCount);
    LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpULT(NewIndex, LoopEndCI),
                             LoopBB, PostLoopBB);
  }

  uint64_t BytesCopied = LoopEndCount * LoopOpSize;
  uint64_t RemainingBytes = CopyLen->getZExtValue() - BytesCopied;
  if (RemainingBytes) {
    IRBuilder<> RBuilder(PostLoopBB ? PostLoopBB->getFirstNonPHI()
                                    : InsertBefore);

    SmallVector<Type *, 5> RemainingOps;
    TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
                                          SrcAS, DstAS, SrcAlign, DstAlign,
                                          AtomicElementSize);

    for (auto *OpTy : RemainingOps) {
      Align PartSrcAlign(commonAlignment(SrcAlign, BytesCopied));
      Align PartDstAlign(commonAlignment(DstAlign, BytesCopied));

      unsigned OperandSize = DL.getTypeStoreSize(OpTy);
      assert(
          (!AtomicElementSize || OperandSize % *AtomicElementSize == 0) &&
          "Atomic memcpy lowering is not supported for selected operand size");

      uint64_t GepIndex = BytesCopied / OperandSize;
      assert(GepIndex * OperandSize == BytesCopied &&
             "Division should have no Remainder!");

      Value *SrcGEP = RBuilder.CreateInBoundsGEP(
          OpTy, SrcAddr, ConstantInt::get(TypeOfCopyLen, GepIndex));
      LoadInst *Load =
          RBuilder.CreateAlignedLoad(OpTy, SrcGEP, PartSrcAlign, SrcIsVolatile);
      if (!CanOverlap)
        Load->setMetadata(LLVMContext::MD_alias_scope,
                          MDNode::get(Ctx, NewScope));
      Value *DstGEP = RBuilder.CreateInBoundsGEP(
          OpTy, DstAddr, ConstantInt::get(TypeOfCopyLen, GepIndex));
      StoreInst *Store = RBuilder.CreateAlignedStore(Load, DstGEP, PartDstAlign,
                                                     DstIsVolatile);
      if (!CanOverlap)
        Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
      if (AtomicElementSize) {
        Load->setAtomic(AtomicOrdering::Unordered);
        Store->setAtomic(AtomicOrdering::Unordered);
      }
      BytesCopied += OperandSize;
    }
  }
  assert(BytesCopied == CopyLen->getZExtValue() &&
         "Bytes copied should match size in the call!");
}
// \returns \p Len udiv \p OpSize, checking for optimization opportunities.
static Value *getRuntimeLoopCount(const DataLayout &DL, IRBuilderBase &B,
                                  Value *Len, Value *OpSize,
                                  unsigned OpSizeVal) {
  // For powers of 2, we can lshr by log2(OpSizeVal) instead of using udiv.
  if (isPowerOf2_32(OpSizeVal))
    return B.CreateLShr(Len, Log2_32(OpSizeVal));
  return B.CreateUDiv(Len, OpSize);
}

// \returns \p Len urem \p OpSize, checking for optimization opportunities.
static Value *getRuntimeLoopRemainder(const DataLayout &DL, IRBuilderBase &B,
                                      Value *Len, Value *OpSize,
                                      unsigned OpSizeVal) {
  // For powers of 2, we can mask with (OpSizeVal - 1) instead of using urem.
  if (isPowerOf2_32(OpSizeVal))
    return B.CreateAnd(Len, OpSizeVal - 1);
  return B.CreateURem(Len, OpSize);
}
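// Illustration (hypothetical, not part of this file): the identities the two
// helpers above rely on for power-of-two operand sizes, in plain C++.
// E.g. Len = 23, OpSize = 8: count = 23 >> 3 = 2, remainder = 23 & 7 = 7.
static void checkStrengthReduction(uint64_t Len, uint32_t OpSizeVal) {
  assert(isPowerOf2_32(OpSizeVal) && "identities only hold for powers of 2");
  assert(Len / OpSizeVal == Len >> Log2_32(OpSizeVal)); // udiv -> lshr
  assert(Len % OpSizeVal == (Len & (OpSizeVal - 1)));   // urem -> and
}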
void llvm::createMemCpyLoopUnknownSize(
    Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr, Value *CopyLen,
    Align SrcAlign, Align DstAlign, bool SrcIsVolatile, bool DstIsVolatile,
    bool CanOverlap, const TargetTransformInfo &TTI,
    std::optional<uint32_t> AtomicElementSize) {
  BasicBlock *PreLoopBB = InsertBefore->getParent();
  BasicBlock *PostLoopBB =
      PreLoopBB->splitBasicBlock(InsertBefore, "post-loop-memcpy-expansion");

  Function *ParentFunc = PreLoopBB->getParent();
  const DataLayout &DL = ParentFunc->getDataLayout();
  LLVMContext &Ctx = PreLoopBB->getContext();
  MDBuilder MDB(Ctx);
  MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain("MemCopyDomain");
  StringRef Name = "MemCopyAliasScope";
  MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);

  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(
      Ctx, CopyLen, SrcAS, DstAS, SrcAlign, DstAlign, AtomicElementSize);
  assert((!AtomicElementSize || !LoopOpType->isVectorTy()) &&
         "Atomic memcpy lowering is not supported for vector operand type");
  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  assert((!AtomicElementSize || LoopOpSize % *AtomicElementSize == 0) &&
         "Atomic memcpy lowering is not supported for selected operand size");

  IRBuilder<> PLBuilder(PreLoopBB->getTerminator());

  // Calculate the loop trip count, and remaining bytes to copy after the loop.
  Type *CopyLenType = CopyLen->getType();
  IntegerType *ILengthType = dyn_cast<IntegerType>(CopyLenType);
  assert(ILengthType &&
         "expected size argument to memcpy to be an integer type!");
  Type *Int8Type = Type::getInt8Ty(Ctx);
  bool LoopOpIsInt8 = LoopOpType == Int8Type;
  ConstantInt *CILoopOpSize = ConstantInt::get(ILengthType, LoopOpSize);
  Value *RuntimeLoopCount = LoopOpIsInt8
                                ? CopyLen
                                : getRuntimeLoopCount(DL, PLBuilder, CopyLen,
                                                      CILoopOpSize, LoopOpSize);

  BasicBlock *LoopBB =
      BasicBlock::Create(Ctx, "loop-memcpy-expansion", ParentFunc, PostLoopBB);
  IRBuilder<> LoopBuilder(LoopBB);

  Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));
  Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));

  PHINode *LoopIndex = LoopBuilder.CreatePHI(CopyLenType, 2, "loop-index");
  LoopIndex->addIncoming(ConstantInt::get(CopyLenType, 0U), PreLoopBB);

  Value *SrcGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
  LoadInst *Load = LoopBuilder.CreateAlignedLoad(LoopOpType, SrcGEP,
                                                 PartSrcAlign, SrcIsVolatile);
  if (!CanOverlap) {
    // Set alias scope for loads.
    Load->setMetadata(LLVMContext::MD_alias_scope, MDNode::get(Ctx, NewScope));
  }
  Value *DstGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
  StoreInst *Store =
      LoopBuilder.CreateAlignedStore(Load, DstGEP, PartDstAlign, DstIsVolatile);
  if (!CanOverlap) {
    // Indicate that stores don't overlap the loads.
    Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
  }
  if (AtomicElementSize) {
    Load->setAtomic(AtomicOrdering::Unordered);
    Store->setAtomic(AtomicOrdering::Unordered);
  }
  Value *NewIndex =
      LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(CopyLenType, 1U));
  LoopIndex->addIncoming(NewIndex, LoopBB);

  bool requiresResidual =
      !LoopOpIsInt8 && !(AtomicElementSize && LoopOpSize == AtomicElementSize);
  if (requiresResidual) {
    Type *ResLoopOpType = AtomicElementSize
                              ? Type::getIntNTy(Ctx, *AtomicElementSize * 8)
                              : Int8Type;
    unsigned ResLoopOpSize = DL.getTypeStoreSize(ResLoopOpType);
    assert(ResLoopOpSize == (AtomicElementSize ? *AtomicElementSize : 1) &&
           "Store size is expected to match type size");

    Align ResSrcAlign(commonAlignment(PartSrcAlign, ResLoopOpSize));
    Align ResDstAlign(commonAlignment(PartDstAlign, ResLoopOpSize));

    Value *RuntimeResidual = getRuntimeLoopRemainder(DL, PLBuilder, CopyLen,
                                                     CILoopOpSize, LoopOpSize);
    Value *RuntimeBytesCopied = PLBuilder.CreateSub(CopyLen, RuntimeResidual);

    // Loop body for the residual copy, and the residual loop header.
    BasicBlock *ResLoopBB = BasicBlock::Create(
        Ctx, "loop-memcpy-residual", PreLoopBB->getParent(), PostLoopBB);
    BasicBlock *ResHeaderBB = BasicBlock::Create(
        Ctx, "loop-memcpy-residual-header", PreLoopBB->getParent(), nullptr);

    // Update the pre-loop basic block to branch to the correct place.
    ConstantInt *Zero = ConstantInt::get(ILengthType, 0U);
    PLBuilder.CreateCondBr(PLBuilder.CreateICmpNE(RuntimeLoopCount, Zero),
                           LoopBB, ResHeaderBB);
    PreLoopBB->getTerminator()->eraseFromParent();

    LoopBuilder.CreateCondBr(
        LoopBuilder.CreateICmpULT(NewIndex, RuntimeLoopCount), LoopBB,
        ResHeaderBB);

    // Determine if we need to branch to the residual loop or bypass it.
    IRBuilder<> RHBuilder(ResHeaderBB);
    RHBuilder.CreateCondBr(RHBuilder.CreateICmpNE(RuntimeResidual, Zero),
                           ResLoopBB, PostLoopBB);

    // Copy the residual with one load/store pair per element.
    IRBuilder<> ResBuilder(ResLoopBB);
    PHINode *ResidualIndex =
        ResBuilder.CreatePHI(CopyLenType, 2, "residual-loop-index");
    ResidualIndex->addIncoming(Zero, ResHeaderBB);

    Value *FullOffset = ResBuilder.CreateAdd(RuntimeBytesCopied, ResidualIndex);
    Value *SrcGEP =
        ResBuilder.CreateInBoundsGEP(ResLoopOpType, SrcAddr, FullOffset);
    LoadInst *Load = ResBuilder.CreateAlignedLoad(ResLoopOpType, SrcGEP,
                                                  ResSrcAlign, SrcIsVolatile);
    if (!CanOverlap) {
      Load->setMetadata(LLVMContext::MD_alias_scope,
                        MDNode::get(Ctx, NewScope));
    }
    Value *DstGEP =
        ResBuilder.CreateInBoundsGEP(ResLoopOpType, DstAddr, FullOffset);
    StoreInst *Store =
        ResBuilder.CreateAlignedStore(Load, DstGEP, ResDstAlign, DstIsVolatile);
    if (!CanOverlap) {
      Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
    }
    if (AtomicElementSize) {
      Load->setAtomic(AtomicOrdering::Unordered);
      Store->setAtomic(AtomicOrdering::Unordered);
    }
    Value *ResNewIndex = ResBuilder.CreateAdd(
        ResidualIndex, ConstantInt::get(CopyLenType, ResLoopOpSize));
    ResidualIndex->addIncoming(ResNewIndex, ResLoopBB);

    ResBuilder.CreateCondBr(
        ResBuilder.CreateICmpULT(ResNewIndex, RuntimeResidual), ResLoopBB,
        PostLoopBB);
  } else {
    // The loop operand type is a byte, so no residual loop is needed; just
    // patch up control flow for the pre-loop block and the memcpy loop.
    ConstantInt *Zero = ConstantInt::get(ILengthType, 0U);
    PLBuilder.CreateCondBr(PLBuilder.CreateICmpNE(RuntimeLoopCount, Zero),
                           LoopBB, PostLoopBB);
    PreLoopBB->getTerminator()->eraseFromParent();
    LoopBuilder.CreateCondBr(
        LoopBuilder.CreateICmpULT(NewIndex, RuntimeLoopCount), LoopBB,
        PostLoopBB);
  }
}
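// Illustration (hypothetical, not part of this file): the runtime split the
// expansion above computes. For CopyLen = 19 and a 4-byte loop type:
// count = 4, residual = 3, and the residual loop starts at offset 16.
static void sketchUnknownSizeSplit(uint64_t CopyLen, uint64_t LoopOpSize,
                                   uint64_t &LoopCount, uint64_t &Residual,
                                   uint64_t &ResidualOffset) {
  LoopCount = CopyLen / LoopOpSize;    // RuntimeLoopCount
  Residual = CopyLen % LoopOpSize;     // RuntimeResidual
  ResidualOffset = CopyLen - Residual; // RuntimeBytesCopied
}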
// If Addr1 and Addr2 are pointers to different address spaces, create an
// addrspacecast to obtain a pair of pointers in the same address space. The
// caller needs to ensure that addrspacecasting is possible; this is a no-op
// if the pointers already share an address space.
static std::pair<Value *, Value *>
tryInsertCastToCommonAddrSpace(IRBuilderBase &B, Value *Addr1, Value *Addr2,
                               const TargetTransformInfo &TTI) {
  Value *ResAddr1 = Addr1;
  Value *ResAddr2 = Addr2;

  unsigned AS1 = cast<PointerType>(Addr1->getType())->getAddressSpace();
  unsigned AS2 = cast<PointerType>(Addr2->getType())->getAddressSpace();
  if (AS1 != AS2) {
    if (TTI.isValidAddrSpaceCast(AS2, AS1))
      ResAddr2 = B.CreateAddrSpaceCast(Addr2, Addr1->getType());
    else if (TTI.isValidAddrSpaceCast(AS1, AS2))
      ResAddr1 = B.CreateAddrSpaceCast(Addr1, Addr2->getType());
    else
      llvm_unreachable("Can only lower memmove between address spaces if they "
                       "support addrspacecast");
  }
  return {ResAddr1, ResAddr2};
}
static void createMemMoveLoopUnknownSize(Instruction *InsertBefore,
                                         Value *SrcAddr, Value *DstAddr,
                                         Value *CopyLen, Align SrcAlign,
                                         Align DstAlign, bool SrcIsVolatile,
                                         bool DstIsVolatile,
                                         const TargetTransformInfo &TTI) {
  Type *TypeOfCopyLen = CopyLen->getType();
  BasicBlock *OrigBB = InsertBefore->getParent();
  Function *F = OrigBB->getParent();
  const DataLayout &DL = F->getDataLayout();
  LLVMContext &Ctx = OrigBB->getContext();
  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(Ctx, CopyLen, SrcAS, DstAS,
                                                   SrcAlign, DstAlign);
  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  Type *Int8Type = Type::getInt8Ty(Ctx);
  bool LoopOpIsInt8 = LoopOpType == Int8Type;

  // If the memory accesses are wider than one byte, residual loops with
  // i8 accesses are required to move the remaining bytes.
  bool RequiresResidual = !LoopOpIsInt8;

  Type *ResidualLoopOpType = Int8Type;
  unsigned ResidualLoopOpSize = DL.getTypeStoreSize(ResidualLoopOpType);

  // Calculate the loop trip count and remaining bytes to copy after the loop.
  IntegerType *ILengthType = cast<IntegerType>(TypeOfCopyLen);
  ConstantInt *CILoopOpSize = ConstantInt::get(ILengthType, LoopOpSize);
  ConstantInt *Zero = ConstantInt::get(ILengthType, 0);
  ConstantInt *One = ConstantInt::get(ILengthType, 1);

  IRBuilder<> PLBuilder(InsertBefore);

  Value *RuntimeLoopCount = CopyLen;
  Value *RuntimeLoopRemainder = nullptr;
  Value *RuntimeBytesCopiedMainLoop = CopyLen;
  Value *SkipResidualCondition = nullptr;
  if (RequiresResidual) {
    RuntimeLoopCount =
        getRuntimeLoopCount(DL, PLBuilder, CopyLen, CILoopOpSize, LoopOpSize);
    RuntimeLoopRemainder = getRuntimeLoopRemainder(DL, PLBuilder, CopyLen,
                                                   CILoopOpSize, LoopOpSize);
    RuntimeBytesCopiedMainLoop =
        PLBuilder.CreateSub(CopyLen, RuntimeLoopRemainder);
    SkipResidualCondition =
        PLBuilder.CreateICmpEQ(RuntimeLoopRemainder, Zero, "skip_residual");
  }
  Value *SkipMainCondition =
      PLBuilder.CreateICmpEQ(RuntimeLoopCount, Zero, "skip_main");

  // Create a comparison of src and dst, based on which we jump to either
  // the forward-copy part of the function (if src >= dst) or the backwards
  // copy part (if src < dst). If the pointers are in different address
  // spaces, they first need to be converted to a compatible one.
  auto [CmpSrcAddr, CmpDstAddr] =
      tryInsertCastToCommonAddrSpace(PLBuilder, SrcAddr, DstAddr, TTI);
  Value *PtrCompare =
      PLBuilder.CreateICmpULT(CmpSrcAddr, CmpDstAddr, "compare_src_dst");
  Instruction *ThenTerm, *ElseTerm;
  SplitBlockAndInsertIfThenElse(PtrCompare, InsertBefore->getIterator(),
                                &ThenTerm, &ElseTerm);

  BasicBlock *CopyBackwardsBB = ThenTerm->getParent();
  BasicBlock *CopyForwardBB = ElseTerm->getParent();
  BasicBlock *ExitBB = InsertBefore->getParent();
  CopyBackwardsBB->setName("memmove_copy_backwards");
  CopyForwardBB->setName("memmove_copy_forward");
  ExitBB->setName("memmove_done");

  Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));
  Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));
  Align ResidualSrcAlign(commonAlignment(PartSrcAlign, ResidualLoopOpSize));
  Align ResidualDstAlign(commonAlignment(PartDstAlign, ResidualLoopOpSize));

  // Copying backwards: the residual (highest addresses) is copied first.
  {
    BasicBlock *MainLoopBB = BasicBlock::Create(
        F->getContext(), "memmove_bwd_main_loop", F, CopyForwardBB);
    BasicBlock *PredBB = CopyBackwardsBB;

    if (RequiresResidual) {
      // Backwards residual loop.
      BasicBlock *ResidualLoopBB = BasicBlock::Create(
          F->getContext(), "memmove_bwd_residual_loop", F, MainLoopBB);
      IRBuilder<> ResidualLoopBuilder(ResidualLoopBB);
      PHINode *ResidualLoopPhi = ResidualLoopBuilder.CreatePHI(ILengthType, 0);
      Value *ResidualIndex = ResidualLoopBuilder.CreateSub(
          ResidualLoopPhi, One, "bwd_residual_index");
      Value *LoadGEP = ResidualLoopBuilder.CreateInBoundsGEP(
          ResidualLoopOpType, SrcAddr, ResidualIndex);
      Value *Element = ResidualLoopBuilder.CreateAlignedLoad(
          ResidualLoopOpType, LoadGEP, ResidualSrcAlign, SrcIsVolatile,
          "element");
      Value *StoreGEP = ResidualLoopBuilder.CreateInBoundsGEP(
          ResidualLoopOpType, DstAddr, ResidualIndex);
      ResidualLoopBuilder.CreateAlignedStore(Element, StoreGEP,
                                             ResidualDstAlign, DstIsVolatile);

      // After the residual loop, go to an intermediate block.
      BasicBlock *IntermediateBB = BasicBlock::Create(
          F->getContext(), "memmove_bwd_middle", F, MainLoopBB);
      ResidualLoopBuilder.CreateCondBr(
          ResidualLoopBuilder.CreateICmpEQ(ResidualIndex,
                                           RuntimeBytesCopiedMainLoop),
          IntermediateBB, ResidualLoopBB);

      ResidualLoopPhi->addIncoming(ResidualIndex, ResidualLoopBB);
      ResidualLoopPhi->addIncoming(CopyLen, CopyBackwardsBB);
      // ...
      PredBB = IntermediateBB;
    }

    // Backwards main loop.
    IRBuilder<> MainLoopBuilder(MainLoopBB);
    PHINode *MainLoopPhi = MainLoopBuilder.CreatePHI(ILengthType, 0);
    Value *MainIndex =
        MainLoopBuilder.CreateSub(MainLoopPhi, One, "bwd_main_index");
    Value *LoadGEP =
        MainLoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, MainIndex);
    Value *Element = MainLoopBuilder.CreateAlignedLoad(
        LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile, "element");
    Value *StoreGEP =
        MainLoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, MainIndex);
    MainLoopBuilder.CreateAlignedStore(Element, StoreGEP, PartDstAlign,
                                       DstIsVolatile);
    MainLoopBuilder.CreateCondBr(MainLoopBuilder.CreateICmpEQ(MainIndex, Zero),
                                 ExitBB, MainLoopBB);
    MainLoopPhi->addIncoming(MainIndex, MainLoopBB);
    MainLoopPhi->addIncoming(RuntimeLoopCount, PredBB);
    // ...
  }

  // Copying forward: the main loop runs first, then the residual.
  {
    BasicBlock *MainLoopBB = BasicBlock::Create(
        F->getContext(), "memmove_fwd_main_loop", F, ExitBB);
    IRBuilder<> MainLoopBuilder(MainLoopBB);
    PHINode *MainLoopPhi =
        MainLoopBuilder.CreatePHI(ILengthType, 0, "fwd_main_index");
    Value *LoadGEP =
        MainLoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, MainLoopPhi);
    Value *Element = MainLoopBuilder.CreateAlignedLoad(
        LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile, "element");
    Value *StoreGEP =
        MainLoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, MainLoopPhi);
    MainLoopBuilder.CreateAlignedStore(Element, StoreGEP, PartDstAlign,
                                       DstIsVolatile);
    Value *MainIndex = MainLoopBuilder.CreateAdd(MainLoopPhi, One);
    MainLoopPhi->addIncoming(MainIndex, MainLoopBB);
    MainLoopPhi->addIncoming(Zero, CopyForwardBB);

    BasicBlock *SuccessorBB = ExitBB;
    if (RequiresResidual)
      SuccessorBB = BasicBlock::Create(F->getContext(), "memmove_fwd_middle",
                                       F, ExitBB);

    // Leaving or staying in the main loop.
    MainLoopBuilder.CreateCondBr(
        MainLoopBuilder.CreateICmpEQ(MainIndex, RuntimeLoopCount), SuccessorBB,
        MainLoopBB);
    // ...
    if (RequiresResidual) {
      BasicBlock *IntermediateBB = SuccessorBB;
      IRBuilder<> IntermediateBuilder(IntermediateBB);
      BasicBlock *ResidualLoopBB = BasicBlock::Create(
          F->getContext(), "memmove_fwd_residual_loop", F, ExitBB);
      IntermediateBuilder.CreateCondBr(SkipResidualCondition, ExitBB,
                                       ResidualLoopBB);

      // Forward residual loop.
      IRBuilder<> ResidualLoopBuilder(ResidualLoopBB);
      PHINode *ResidualLoopPhi =
          ResidualLoopBuilder.CreatePHI(ILengthType, 0, "fwd_residual_index");
      Value *LoadGEP = ResidualLoopBuilder.CreateInBoundsGEP(
          ResidualLoopOpType, SrcAddr, ResidualLoopPhi);
      Value *Element = ResidualLoopBuilder.CreateAlignedLoad(
          ResidualLoopOpType, LoadGEP, ResidualSrcAlign, SrcIsVolatile,
          "element");
      Value *StoreGEP = ResidualLoopBuilder.CreateInBoundsGEP(
          ResidualLoopOpType, DstAddr, ResidualLoopPhi);
      ResidualLoopBuilder.CreateAlignedStore(Element, StoreGEP,
                                             ResidualDstAlign, DstIsVolatile);
      Value *ResidualIndex =
          ResidualLoopBuilder.CreateAdd(ResidualLoopPhi, One);
      ResidualLoopBuilder.CreateCondBr(
          ResidualLoopBuilder.CreateICmpEQ(ResidualIndex, CopyLen), ExitBB,
          ResidualLoopBB);
      ResidualLoopPhi->addIncoming(ResidualIndex, ResidualLoopBB);
      ResidualLoopPhi->addIncoming(RuntimeBytesCopiedMainLoop, IntermediateBB);
    }
  }
}
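// Illustration (hypothetical, not part of this file): at the C level, the
// control structure emitted above corresponds roughly to the following
// byte-wise memmove; the IR version additionally splits each direction into
// a wide main loop plus an i8 residual loop.
static void sketchMemMove(unsigned char *Dst, const unsigned char *Src,
                          size_t Len) {
  if (Src < Dst) {
    // Dst may overlap the tail of Src: copy backwards, highest byte first.
    for (size_t I = Len; I != 0; --I)
      Dst[I - 1] = Src[I - 1];
  } else {
    // Src is ahead of (or equal to) Dst: copying forwards is safe.
    for (size_t I = 0; I != Len; ++I)
      Dst[I] = Src[I];
  }
}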
static void createMemMoveLoopKnownSize(Instruction *InsertBefore,
                                       Value *SrcAddr, Value *DstAddr,
                                       ConstantInt *CopyLen, Align SrcAlign,
                                       Align DstAlign, bool SrcIsVolatile,
                                       bool DstIsVolatile,
                                       const TargetTransformInfo &TTI) {
  Type *TypeOfCopyLen = CopyLen->getType();
  BasicBlock *OrigBB = InsertBefore->getParent();
  Function *F = OrigBB->getParent();
  const DataLayout &DL = F->getDataLayout();
  LLVMContext &Ctx = OrigBB->getContext();
  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(Ctx, CopyLen, SrcAS, DstAS,
                                                   SrcAlign, DstAlign);
  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);

  // Calculate the loop trip count and remaining bytes to copy after the loop.
  uint64_t LoopEndCount = CopyLen->getZExtValue() / LoopOpSize;
  uint64_t BytesCopiedInLoop = LoopEndCount * LoopOpSize;
  uint64_t RemainingBytes = CopyLen->getZExtValue() - BytesCopiedInLoop;

  IntegerType *ILengthType = cast<IntegerType>(TypeOfCopyLen);
  ConstantInt *Zero = ConstantInt::get(ILengthType, 0);
  ConstantInt *One = ConstantInt::get(ILengthType, 1);
  ConstantInt *TripCount = ConstantInt::get(ILengthType, LoopEndCount);

  IRBuilder<> PLBuilder(InsertBefore);

  auto [CmpSrcAddr, CmpDstAddr] =
      tryInsertCastToCommonAddrSpace(PLBuilder, SrcAddr, DstAddr, TTI);
  Value *PtrCompare =
      PLBuilder.CreateICmpULT(CmpSrcAddr, CmpDstAddr, "compare_src_dst");
  Instruction *ThenTerm, *ElseTerm;
  SplitBlockAndInsertIfThenElse(PtrCompare, InsertBefore->getIterator(),
                                &ThenTerm, &ElseTerm);

  BasicBlock *CopyBackwardsBB = ThenTerm->getParent();
  BasicBlock *CopyForwardBB = ElseTerm->getParent();
  BasicBlock *ExitBB = InsertBefore->getParent();
  ExitBB->setName("memmove_done");

  Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));
  Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));

  // Helper to emit one load/store pair of a given type in the residual;
  // used by both the forward and the backward branch.
  auto GenerateResidualLdStPair = [&](Type *OpTy, IRBuilderBase &Builder,
                                      uint64_t &BytesCopied) {
    Align ResSrcAlign(commonAlignment(PartSrcAlign, BytesCopied));
    Align ResDstAlign(commonAlignment(PartDstAlign, BytesCopied));

    unsigned OperandSize = DL.getTypeStoreSize(OpTy);

    uint64_t GepIndex = BytesCopied / OperandSize;
    assert(GepIndex * OperandSize == BytesCopied &&
           "Division should have no Remainder!");

    Value *SrcGEP = Builder.CreateInBoundsGEP(
        OpTy, SrcAddr, ConstantInt::get(TypeOfCopyLen, GepIndex));
    LoadInst *Load =
        Builder.CreateAlignedLoad(OpTy, SrcGEP, ResSrcAlign, SrcIsVolatile);
    Value *DstGEP = Builder.CreateInBoundsGEP(
        OpTy, DstAddr, ConstantInt::get(TypeOfCopyLen, GepIndex));
    Builder.CreateAlignedStore(Load, DstGEP, ResDstAlign, DstIsVolatile);
    BytesCopied += OperandSize;
  };

  // Copying backwards: emit the residual (highest addresses) first.
  if (RemainingBytes != 0) {
    CopyBackwardsBB->setName("memmove_bwd_residual");
    uint64_t BytesCopied = BytesCopiedInLoop;

    IRBuilder<> BwdResBuilder(CopyBackwardsBB->getFirstNonPHI());
    SmallVector<Type *, 5> RemainingOps;
    TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
                                          SrcAS, DstAS, PartSrcAlign,
                                          PartDstAlign);
    for (auto *OpTy : RemainingOps) {
      // Reverse the order of the emitted operations.
      BwdResBuilder.SetInsertPoint(CopyBackwardsBB->getFirstNonPHI());
      GenerateResidualLdStPair(OpTy, BwdResBuilder, BytesCopied);
    }
  }
  if (LoopEndCount != 0) {
    BasicBlock *LoopBB = CopyBackwardsBB;
    BasicBlock *PredBB = OrigBB;
    if (RemainingBytes != 0) {
      // If residual code was emitted, the loop needs its own basic block.
      LoopBB = CopyBackwardsBB->splitBasicBlock(
          CopyBackwardsBB->getTerminator(), "memmove_bwd_loop");
      PredBB = CopyBackwardsBB;
    } else {
      CopyBackwardsBB->setName("memmove_bwd_loop");
    }
    // ...
    Value *Element = LoopBuilder.CreateAlignedLoad(
        LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile, "element");
    // ...
  }

  // Copying forward: the main loop runs first, then the residual.
  BasicBlock *FwdResidualBB = ExitBB;
  if (LoopEndCount != 0) {
    CopyForwardBB->setName("memmove_fwd_loop");
    BasicBlock *LoopBB = CopyForwardBB;
    BasicBlock *SuccBB = ExitBB;
    if (RemainingBytes != 0) {
      // If residual code is needed, it gets its own basic block.
      SuccBB = CopyForwardBB->splitBasicBlock(CopyForwardBB->getTerminator(),
                                              "memmove_fwd_residual");
      FwdResidualBB = SuccBB;
    }
    // ...
    Value *Element = LoopBuilder.CreateAlignedLoad(
        LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile, "element");
    // ...
  }

  if (RemainingBytes != 0) {
    uint64_t BytesCopied = BytesCopiedInLoop;

    IRBuilder<> FwdResBuilder(FwdResidualBB->getTerminator());
    SmallVector<Type *, 5> RemainingOps;
    TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
                                          SrcAS, DstAS, PartSrcAlign,
                                          PartDstAlign);
    for (auto *OpTy : RemainingOps)
      GenerateResidualLdStPair(OpTy, FwdResBuilder, BytesCopied);
  }
}
static void createMemSetLoop(Instruction *InsertBefore, Value *DstAddr,
                             Value *CopyLen, Value *SetValue, Align DstAlign,
                             bool IsVolatile) {
  // ...
  Builder.CreateCondBr(
      Builder.CreateICmpEQ(ConstantInt::get(TypeOfCopyLen, 0), CopyLen), NewBB,
      LoopBB);

  unsigned PartSize = DL.getTypeStoreSize(SetValue->getType());
  Align PartAlign(commonAlignment(DstAlign, PartSize));
  // ...
  LoopIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0), OrigBB);

  Value *DstGEP =
      LoopBuilder.CreateInBoundsGEP(SetValue->getType(), DstAddr, LoopIndex);
  LoopBuilder.CreateAlignedStore(SetValue, DstGEP, PartAlign, IsVolatile);

  Value *NewIndex =
      LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(TypeOfCopyLen, 1));
  // ...
}
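// Illustration (hypothetical caller, not part of this file): expanding a
// memset intrinsic in a pass. expandMemSetAsLoop builds the loop above but
// does not delete the intrinsic, so the caller erases it afterwards.
static void lowerMemSetExample(MemSetInst *MemSet) {
  expandMemSetAsLoop(MemSet); // emit the store loop before the intrinsic
  MemSet->eraseFromParent();  // the intrinsic itself is not deleted for us
}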
template <typename T>
static bool canOverlap(MemTransferBase<T> *Memcpy, ScalarEvolution *SE) {
  if (SE) {
    const SCEV *SrcSCEV = SE->getSCEV(Memcpy->getRawSource());
    const SCEV *DestSCEV = SE->getSCEV(Memcpy->getRawDest());
    if (SE->isKnownPredicateAt(CmpInst::ICMP_NE, SrcSCEV, DestSCEV, Memcpy))
      return false;
  }
  return true;
}
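// Illustration (hypothetical caller, mirroring what expandMemCpyAsLoop in
// this file does): pick the known-size lowering when the length is a
// compile-time ConstantInt, else the runtime-loop lowering; canOverlap()
// decides whether !alias.scope / !noalias metadata may be attached.
static void lowerMemCpySketch(MemCpyInst *Memcpy,
                              const TargetTransformInfo &TTI,
                              ScalarEvolution *SE) {
  bool CanOverlap = canOverlap(Memcpy, SE);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Memcpy->getLength()))
    createMemCpyLoopKnownSize(
        /*InsertBefore=*/Memcpy, Memcpy->getRawSource(), Memcpy->getRawDest(),
        CI, Memcpy->getSourceAlign().valueOrOne(),
        Memcpy->getDestAlign().valueOrOne(), Memcpy->isVolatile(),
        Memcpy->isVolatile(), CanOverlap, TTI);
  else
    createMemCpyLoopUnknownSize(
        /*InsertBefore=*/Memcpy, Memcpy->getRawSource(), Memcpy->getRawDest(),
        Memcpy->getLength(), Memcpy->getSourceAlign().valueOrOne(),
        Memcpy->getDestAlign().valueOrOne(), Memcpy->isVolatile(),
        Memcpy->isVolatile(), CanOverlap, TTI);
}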
bool llvm::expandMemMoveAsLoop(MemMoveInst *Memmove,
                               const TargetTransformInfo &TTI) {
  // ...
  bool SrcIsVolatile = Memmove->isVolatile();
  bool DstIsVolatile = SrcIsVolatile;
  // ...
  if (SrcAS != DstAS) {
    if (!TTI.addrspacesMayAlias(SrcAS, DstAS)) {
      // The memory ranges cannot overlap, so expand as memcpy instead.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(CopyLen))
        createMemCpyLoopKnownSize(/*InsertBefore=*/Memmove, SrcAddr, DstAddr,
                                  CI, SrcAlign, DstAlign, SrcIsVolatile,
                                  DstIsVolatile, /*CanOverlap=*/false, TTI);
      else
        createMemCpyLoopUnknownSize(/*InsertBefore=*/Memmove, SrcAddr, DstAddr,
                                    CopyLen, SrcAlign, DstAlign, SrcIsVolatile,
                                    DstIsVolatile, /*CanOverlap=*/false, TTI);
      return true;
    }
    if (!(TTI.isValidAddrSpaceCast(DstAS, SrcAS) ||
          TTI.isValidAddrSpaceCast(SrcAS, DstAS))) {
      LLVM_DEBUG(
          dbgs() << "Do not know how to expand memmove between different "
                    "address spaces\n");
      return false;
    }
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(CopyLen))
    createMemMoveLoopKnownSize(/*InsertBefore=*/Memmove, SrcAddr, DstAddr, CI,
                               SrcAlign, DstAlign, SrcIsVolatile, DstIsVolatile,
                               TTI);
  else
    createMemMoveLoopUnknownSize(/*InsertBefore=*/Memmove, SrcAddr, DstAddr,
                                 CopyLen, SrcAlign, DstAlign, SrcIsVolatile,
                                 DstIsVolatile, TTI);
  return true;
}
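// Illustration (hypothetical caller, not part of this file): a pass should
// erase the memmove only if the expansion succeeded, since
// expandMemMoveAsLoop returns false when it cannot handle the address spaces.
static void lowerMemMoveExample(MemMoveInst *Memmove,
                                const TargetTransformInfo &TTI) {
  if (expandMemMoveAsLoop(Memmove, TTI))
    Memmove->eraseFromParent(); // the intrinsic is not deleted by the helper
}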