LowerMemIntrinsics.cpp
//===- LowerMemIntrinsics.cpp ----------------------------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <optional>

#define DEBUG_TYPE "lower-mem-intrinsics"

using namespace llvm;

void llvm::createMemCpyLoopKnownSize(
    Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr,
    ConstantInt *CopyLen, Align SrcAlign, Align DstAlign, bool SrcIsVolatile,
    bool DstIsVolatile, bool CanOverlap, const TargetTransformInfo &TTI,
    std::optional<uint32_t> AtomicElementSize) {
  // No need to expand zero length copies.
  if (CopyLen->isZero())
    return;
  BasicBlock *PreLoopBB = InsertBefore->getParent();
  BasicBlock *PostLoopBB = nullptr;
  Function *ParentFunc = PreLoopBB->getParent();
  LLVMContext &Ctx = PreLoopBB->getContext();
  const DataLayout &DL = ParentFunc->getParent()->getDataLayout();
  MDBuilder MDB(Ctx);
  MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain("MemCopyDomain");
  StringRef Name = "MemCopyAliasScope";
  MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);

  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *TypeOfCopyLen = CopyLen->getType();
  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(
      Ctx, CopyLen, SrcAS, DstAS, SrcAlign.value(), DstAlign.value(),
      AtomicElementSize);
  assert((!AtomicElementSize || !LoopOpType->isVectorTy()) &&
         "Atomic memcpy lowering is not supported for vector operand type");

  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  assert((!AtomicElementSize || LoopOpSize % *AtomicElementSize == 0) &&
         "Atomic memcpy lowering is not supported for selected operand size");

  uint64_t LoopEndCount = CopyLen->getZExtValue() / LoopOpSize;
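  // Worked example (illustrative numbers, not from the original source): a
  // 15-byte copy lowered with a 4-byte loop operand type gives
  // LoopEndCount == 3 (12 bytes copied by the loop), leaving 3 bytes for the
  // residual code emitted after the loop below.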

  if (LoopEndCount != 0) {
    // Split the basic block at InsertBefore; the copy loop is emitted between
    // the two halves.
    PostLoopBB = PreLoopBB->splitBasicBlock(InsertBefore, "memcpy-split");
    BasicBlock *LoopBB =
        BasicBlock::Create(Ctx, "load-store-loop", ParentFunc, PostLoopBB);
    PreLoopBB->getTerminator()->setSuccessor(0, LoopBB);

    IRBuilder<> PLBuilder(PreLoopBB->getTerminator());

    // Cast the Src and Dst pointers to pointers to the loop operand type (if
    // needed).
    PointerType *SrcOpType = PointerType::get(LoopOpType, SrcAS);
    PointerType *DstOpType = PointerType::get(LoopOpType, DstAS);
    if (SrcAddr->getType() != SrcOpType) {
      SrcAddr = PLBuilder.CreateBitCast(SrcAddr, SrcOpType);
    }
    if (DstAddr->getType() != DstOpType) {
      DstAddr = PLBuilder.CreateBitCast(DstAddr, DstOpType);
    }

    Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));
    Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));

    IRBuilder<> LoopBuilder(LoopBB);
    PHINode *LoopIndex = LoopBuilder.CreatePHI(TypeOfCopyLen, 2, "loop-index");
    LoopIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0U), PreLoopBB);
    // Loop Body
    Value *SrcGEP =
        LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
    LoadInst *Load = LoopBuilder.CreateAlignedLoad(LoopOpType, SrcGEP,
                                                   PartSrcAlign, SrcIsVolatile);
    if (!CanOverlap) {
      // Set alias scope for loads.
      Load->setMetadata(LLVMContext::MD_alias_scope,
                        MDNode::get(Ctx, NewScope));
    }
    Value *DstGEP =
        LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
    StoreInst *Store = LoopBuilder.CreateAlignedStore(
        Load, DstGEP, PartDstAlign, DstIsVolatile);
    if (!CanOverlap) {
      // Indicate that stores don't overlap loads.
      Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
    }
    if (AtomicElementSize) {
      Load->setAtomic(AtomicOrdering::Unordered);
      Store->setAtomic(AtomicOrdering::Unordered);
    }
    Value *NewIndex =
        LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(TypeOfCopyLen, 1U));
    LoopIndex->addIncoming(NewIndex, LoopBB);

    // Create the loop branch condition.
    Constant *LoopEndCI = ConstantInt::get(TypeOfCopyLen, LoopEndCount);
    LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpULT(NewIndex, LoopEndCI),
                             LoopBB, PostLoopBB);
  }

  uint64_t BytesCopied = LoopEndCount * LoopOpSize;
  uint64_t RemainingBytes = CopyLen->getZExtValue() - BytesCopied;
  if (RemainingBytes) {
    IRBuilder<> RBuilder(PostLoopBB ? PostLoopBB->getFirstNonPHI()
                                    : InsertBefore);

    SmallVector<Type *, 5> RemainingOps;
    TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
                                          SrcAS, DstAS, SrcAlign.value(),
                                          DstAlign.value(), AtomicElementSize);

    for (auto *OpTy : RemainingOps) {
      Align PartSrcAlign(commonAlignment(SrcAlign, BytesCopied));
      Align PartDstAlign(commonAlignment(DstAlign, BytesCopied));

      // Calculate the new index
      unsigned OperandSize = DL.getTypeStoreSize(OpTy);
      assert(
          (!AtomicElementSize || OperandSize % *AtomicElementSize == 0) &&
          "Atomic memcpy lowering is not supported for selected operand size");

      uint64_t GepIndex = BytesCopied / OperandSize;
      assert(GepIndex * OperandSize == BytesCopied &&
             "Division should have no Remainder!");
      // Cast source to operand type and load
      PointerType *SrcPtrType = PointerType::get(OpTy, SrcAS);
      Value *CastedSrc = SrcAddr->getType() == SrcPtrType
                             ? SrcAddr
                             : RBuilder.CreateBitCast(SrcAddr, SrcPtrType);
      Value *SrcGEP = RBuilder.CreateInBoundsGEP(
          OpTy, CastedSrc, ConstantInt::get(TypeOfCopyLen, GepIndex));
      LoadInst *Load =
          RBuilder.CreateAlignedLoad(OpTy, SrcGEP, PartSrcAlign, SrcIsVolatile);
      if (!CanOverlap) {
        // Set alias scope for loads.
        Load->setMetadata(LLVMContext::MD_alias_scope,
                          MDNode::get(Ctx, NewScope));
      }
      // Cast destination to operand type and store.
      PointerType *DstPtrType = PointerType::get(OpTy, DstAS);
      Value *CastedDst = DstAddr->getType() == DstPtrType
                             ? DstAddr
                             : RBuilder.CreateBitCast(DstAddr, DstPtrType);
      Value *DstGEP = RBuilder.CreateInBoundsGEP(
          OpTy, CastedDst, ConstantInt::get(TypeOfCopyLen, GepIndex));
      StoreInst *Store = RBuilder.CreateAlignedStore(Load, DstGEP, PartDstAlign,
                                                     DstIsVolatile);
      if (!CanOverlap) {
        // Indicate that stores don't overlap loads.
        Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
      }
      if (AtomicElementSize) {
        Load->setAtomic(AtomicOrdering::Unordered);
        Store->setAtomic(AtomicOrdering::Unordered);
      }
      BytesCopied += OperandSize;
    }
  }
  assert(BytesCopied == CopyLen->getZExtValue() &&
         "Bytes copied should match size in the call!");
}
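
// As an illustrative sketch (not emitted verbatim): for a 16-byte copy that
// TTI lowers with an i32 loop operand type and an i64 length type, the loop
// above has the shape
//
//   load-store-loop:
//     %loop-index = phi i64 [ 0, %pre ], [ %new-index, %load-store-loop ]
//     %sg = getelementptr inbounds i32, ptr %src, i64 %loop-index
//     %v  = load i32, ptr %sg, align 4
//     %dg = getelementptr inbounds i32, ptr %dst, i64 %loop-index
//     store i32 %v, ptr %dg, align 4
//     %new-index = add i64 %loop-index, 1
//     %c = icmp ult i64 %new-index, 4
//     br i1 %c, label %load-store-loop, label %memcpy-split
//
// with any CopyLen % LoopOpSize tail handled by the unrolled residual
// loads and stores emitted after the loop.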

void llvm::createMemCpyLoopUnknownSize(
    Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr, Value *CopyLen,
    Align SrcAlign, Align DstAlign, bool SrcIsVolatile, bool DstIsVolatile,
    bool CanOverlap, const TargetTransformInfo &TTI,
    std::optional<uint32_t> AtomicElementSize) {
  BasicBlock *PreLoopBB = InsertBefore->getParent();
  BasicBlock *PostLoopBB =
      PreLoopBB->splitBasicBlock(InsertBefore, "post-loop-memcpy-expansion");

  Function *ParentFunc = PreLoopBB->getParent();
  const DataLayout &DL = ParentFunc->getParent()->getDataLayout();
  LLVMContext &Ctx = PreLoopBB->getContext();
  MDBuilder MDB(Ctx);
  MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain("MemCopyDomain");
  StringRef Name = "MemCopyAliasScope";
  MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);

  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(
      Ctx, CopyLen, SrcAS, DstAS, SrcAlign.value(), DstAlign.value(),
      AtomicElementSize);
  assert((!AtomicElementSize || !LoopOpType->isVectorTy()) &&
         "Atomic memcpy lowering is not supported for vector operand type");
  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  assert((!AtomicElementSize || LoopOpSize % *AtomicElementSize == 0) &&
         "Atomic memcpy lowering is not supported for selected operand size");

  IRBuilder<> PLBuilder(PreLoopBB->getTerminator());

  PointerType *SrcOpType = PointerType::get(LoopOpType, SrcAS);
  PointerType *DstOpType = PointerType::get(LoopOpType, DstAS);
  if (SrcAddr->getType() != SrcOpType) {
    SrcAddr = PLBuilder.CreateBitCast(SrcAddr, SrcOpType);
  }
  if (DstAddr->getType() != DstOpType) {
    DstAddr = PLBuilder.CreateBitCast(DstAddr, DstOpType);
  }

  // Calculate the loop trip count, and remaining bytes to copy after the loop.
  Type *CopyLenType = CopyLen->getType();
  IntegerType *ILengthType = dyn_cast<IntegerType>(CopyLenType);
  assert(ILengthType &&
         "expected size argument to memcpy to be an integer type!");
  Type *Int8Type = Type::getInt8Ty(Ctx);
  bool LoopOpIsInt8 = LoopOpType == Int8Type;
  ConstantInt *CILoopOpSize = ConstantInt::get(ILengthType, LoopOpSize);
  Value *RuntimeLoopCount = LoopOpIsInt8 ?
                            CopyLen :
                            PLBuilder.CreateUDiv(CopyLen, CILoopOpSize);
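  // Worked example (illustrative, not from the source): with LoopOpSize == 4
  // and a runtime CopyLen of 15, RuntimeLoopCount is 3 (15 udiv 4) and the
  // residual computed further below is 3 bytes (15 urem 4).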
  BasicBlock *LoopBB =
      BasicBlock::Create(Ctx, "loop-memcpy-expansion", ParentFunc, PostLoopBB);
  IRBuilder<> LoopBuilder(LoopBB);

  Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));
  Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));

  PHINode *LoopIndex = LoopBuilder.CreatePHI(CopyLenType, 2, "loop-index");
  LoopIndex->addIncoming(ConstantInt::get(CopyLenType, 0U), PreLoopBB);

  Value *SrcGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
  LoadInst *Load = LoopBuilder.CreateAlignedLoad(LoopOpType, SrcGEP,
                                                 PartSrcAlign, SrcIsVolatile);
  if (!CanOverlap) {
    // Set alias scope for loads.
    Load->setMetadata(LLVMContext::MD_alias_scope, MDNode::get(Ctx, NewScope));
  }
  Value *DstGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
  StoreInst *Store =
      LoopBuilder.CreateAlignedStore(Load, DstGEP, PartDstAlign, DstIsVolatile);
  if (!CanOverlap) {
    // Indicate that stores don't overlap loads.
    Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
  }
  if (AtomicElementSize) {
    Load->setAtomic(AtomicOrdering::Unordered);
    Store->setAtomic(AtomicOrdering::Unordered);
  }
  Value *NewIndex =
      LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(CopyLenType, 1U));
  LoopIndex->addIncoming(NewIndex, LoopBB);

  bool requiresResidual =
      !LoopOpIsInt8 && !(AtomicElementSize && LoopOpSize == AtomicElementSize);
  if (requiresResidual) {
    Type *ResLoopOpType = AtomicElementSize
                              ? Type::getIntNTy(Ctx, *AtomicElementSize * 8)
                              : Int8Type;
    unsigned ResLoopOpSize = DL.getTypeStoreSize(ResLoopOpType);
    assert(ResLoopOpSize == (AtomicElementSize ? *AtomicElementSize : 1) &&
           "Store size is expected to match type size");

    // Compute the number of residual bytes and the number of bytes copied by
    // the main loop.
    Value *RuntimeResidual = PLBuilder.CreateURem(CopyLen, CILoopOpSize);
    Value *RuntimeBytesCopied = PLBuilder.CreateSub(CopyLen, RuntimeResidual);

    // Loop body for the residual copy.
    BasicBlock *ResLoopBB = BasicBlock::Create(Ctx, "loop-memcpy-residual",
                                               PreLoopBB->getParent(),
                                               PostLoopBB);
    // Residual loop header.
    BasicBlock *ResHeaderBB = BasicBlock::Create(
        Ctx, "loop-memcpy-residual-header", PreLoopBB->getParent(), nullptr);

    // Need to update the pre-loop basic block to branch to the correct place:
    // branch to the main loop if the count is non-zero, to the residual loop
    // if the copy size is smaller than one iteration of the main loop but
    // non-zero, and finally to after the residual loop if the memcpy size is
    // zero.
    ConstantInt *Zero = ConstantInt::get(ILengthType, 0U);
    PLBuilder.CreateCondBr(PLBuilder.CreateICmpNE(RuntimeLoopCount, Zero),
                           LoopBB, ResHeaderBB);
    PreLoopBB->getTerminator()->eraseFromParent();

    LoopBuilder.CreateCondBr(
        LoopBuilder.CreateICmpULT(NewIndex, RuntimeLoopCount), LoopBB,
        ResHeaderBB);

    // Determine if we need to branch to the residual loop or bypass it.
    IRBuilder<> RHBuilder(ResHeaderBB);
    RHBuilder.CreateCondBr(RHBuilder.CreateICmpNE(RuntimeResidual, Zero),
                           ResLoopBB, PostLoopBB);

    // Copy the residual with a single byte (or atomic element) load/store
    // loop.
    IRBuilder<> ResBuilder(ResLoopBB);
    PHINode *ResidualIndex =
        ResBuilder.CreatePHI(CopyLenType, 2, "residual-loop-index");
    ResidualIndex->addIncoming(Zero, ResHeaderBB);

    Value *SrcAsResLoopOpType = ResBuilder.CreateBitCast(
        SrcAddr, PointerType::get(ResLoopOpType, SrcAS));
    Value *DstAsResLoopOpType = ResBuilder.CreateBitCast(
        DstAddr, PointerType::get(ResLoopOpType, DstAS));
    Value *FullOffset = ResBuilder.CreateAdd(RuntimeBytesCopied, ResidualIndex);
    Value *SrcGEP = ResBuilder.CreateInBoundsGEP(
        ResLoopOpType, SrcAsResLoopOpType, FullOffset);
    LoadInst *Load = ResBuilder.CreateAlignedLoad(ResLoopOpType, SrcGEP,
                                                  PartSrcAlign, SrcIsVolatile);
    if (!CanOverlap) {
      // Set alias scope for loads.
      Load->setMetadata(LLVMContext::MD_alias_scope,
                        MDNode::get(Ctx, NewScope));
    }
    Value *DstGEP = ResBuilder.CreateInBoundsGEP(
        ResLoopOpType, DstAsResLoopOpType, FullOffset);
    StoreInst *Store = ResBuilder.CreateAlignedStore(Load, DstGEP, PartDstAlign,
                                                     DstIsVolatile);
    if (!CanOverlap) {
      // Indicate that stores don't overlap loads.
      Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
    }
    if (AtomicElementSize) {
      Load->setAtomic(AtomicOrdering::Unordered);
      Store->setAtomic(AtomicOrdering::Unordered);
    }
    Value *ResNewIndex = ResBuilder.CreateAdd(
        ResidualIndex, ConstantInt::get(CopyLenType, ResLoopOpSize));
    ResidualIndex->addIncoming(ResNewIndex, ResLoopBB);

    // Create the loop branch condition.
    ResBuilder.CreateCondBr(
        ResBuilder.CreateICmpULT(ResNewIndex, RuntimeResidual), ResLoopBB,
        PostLoopBB);
  } else {
    // In this case the loop operand type was a byte, and there is no need for a
    // residual loop to copy the remaining memory after the main loop.
    // We do however need to patch up the control flow by creating the
    // terminators for the preloop block and the memcpy loop.
    ConstantInt *Zero = ConstantInt::get(ILengthType, 0U);
    PLBuilder.CreateCondBr(PLBuilder.CreateICmpNE(RuntimeLoopCount, Zero),
                           LoopBB, PostLoopBB);
    PreLoopBB->getTerminator()->eraseFromParent();
    LoopBuilder.CreateCondBr(
        LoopBuilder.CreateICmpULT(NewIndex, RuntimeLoopCount), LoopBB,
        PostLoopBB);
  }
}
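
// For reference, when a residual loop is required the expansion above yields
// this CFG shape (a sketch using the block names created above):
//
//   pre-loop:
//     ; RuntimeLoopCount = CopyLen udiv LoopOpSize (or CopyLen for i8 ops)
//     br RuntimeLoopCount != 0 ? loop-memcpy-expansion
//                              : loop-memcpy-residual-header
//   loop-memcpy-expansion:            ; copies LoopOpSize bytes per iteration
//     br NewIndex ult RuntimeLoopCount ? loop-memcpy-expansion
//                                      : loop-memcpy-residual-header
//   loop-memcpy-residual-header:
//     br RuntimeResidual != 0 ? loop-memcpy-residual
//                             : post-loop-memcpy-expansion
//   loop-memcpy-residual:             ; copies one residual element per iter.
//     br ResNewIndex ult RuntimeResidual ? loop-memcpy-residual
//                                        : post-loop-memcpy-expansion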

// Lower memmove to IR. memmove is required to correctly copy overlapping memory
// regions; therefore, it has to check the relative positions of the source and
// destination pointers and choose the copy direction accordingly.
//
// The code below is an IR rendition of this C function:
//
// void* memmove(void* dst, const void* src, size_t n) {
//   unsigned char* d = dst;
//   const unsigned char* s = src;
//   if (s < d) {
//     // copy backwards
//     while (n--) {
//       d[n] = s[n];
//     }
//   } else {
//     // copy forward
//     for (size_t i = 0; i < n; ++i) {
//       d[i] = s[i];
//     }
//   }
//   return dst;
// }
static void createMemMoveLoop(Instruction *InsertBefore, Value *SrcAddr,
                              Value *DstAddr, Value *CopyLen, Align SrcAlign,
                              Align DstAlign, bool SrcIsVolatile,
                              bool DstIsVolatile,
                              const TargetTransformInfo &TTI) {
  Type *TypeOfCopyLen = CopyLen->getType();
  BasicBlock *OrigBB = InsertBefore->getParent();
  Function *F = OrigBB->getParent();
  const DataLayout &DL = F->getParent()->getDataLayout();
  // TODO: Use different element type if possible?
  Type *EltTy = Type::getInt8Ty(F->getContext());

  // Create a comparison of src and dst, based on which we jump to either
  // the forward-copy part of the function (if src >= dst) or the backwards-copy
  // part (if src < dst).
  // SplitBlockAndInsertIfThenElse conveniently creates the basic if-then-else
  // structure. Its block terminators (unconditional branches) are replaced by
  // the appropriate conditional branches when the loop is built.
  ICmpInst *PtrCompare = new ICmpInst(InsertBefore, ICmpInst::ICMP_ULT,
                                      SrcAddr, DstAddr, "compare_src_dst");
  Instruction *ThenTerm, *ElseTerm;
  SplitBlockAndInsertIfThenElse(PtrCompare, InsertBefore, &ThenTerm,
                                &ElseTerm);

  // Each part of the function consists of two blocks:
  //   copy_backwards:        used to skip the loop when n == 0
  //   copy_backwards_loop:   the actual backwards loop BB
  //   copy_forward:          used to skip the loop when n == 0
  //   copy_forward_loop:     the actual forward loop BB
  BasicBlock *CopyBackwardsBB = ThenTerm->getParent();
  CopyBackwardsBB->setName("copy_backwards");
  BasicBlock *CopyForwardBB = ElseTerm->getParent();
  CopyForwardBB->setName("copy_forward");
  BasicBlock *ExitBB = InsertBefore->getParent();
  ExitBB->setName("memmove_done");

  unsigned PartSize = DL.getTypeStoreSize(EltTy);
  Align PartSrcAlign(commonAlignment(SrcAlign, PartSize));
  Align PartDstAlign(commonAlignment(DstAlign, PartSize));

  // Initial comparison of n == 0 that lets us skip the loops altogether. Shared
  // between both backwards and forward copy clauses.
  ICmpInst *CompareN =
      new ICmpInst(OrigBB->getTerminator(), ICmpInst::ICMP_EQ, CopyLen,
                   ConstantInt::get(TypeOfCopyLen, 0), "compare_n_to_0");

  // Copying backwards.
  BasicBlock *LoopBB =
      BasicBlock::Create(F->getContext(), "copy_backwards_loop", F, CopyForwardBB);
  IRBuilder<> LoopBuilder(LoopBB);

  PHINode *LoopPhi = LoopBuilder.CreatePHI(TypeOfCopyLen, 0);
  Value *IndexPtr = LoopBuilder.CreateSub(
      LoopPhi, ConstantInt::get(TypeOfCopyLen, 1), "index_ptr");
  Value *Element = LoopBuilder.CreateAlignedLoad(
      EltTy, LoopBuilder.CreateInBoundsGEP(EltTy, SrcAddr, IndexPtr),
      PartSrcAlign, "element");
  LoopBuilder.CreateAlignedStore(
      Element, LoopBuilder.CreateInBoundsGEP(EltTy, DstAddr, IndexPtr),
      PartDstAlign);
  LoopBuilder.CreateCondBr(
      LoopBuilder.CreateICmpEQ(IndexPtr, ConstantInt::get(TypeOfCopyLen, 0)),
      ExitBB, LoopBB);
  LoopPhi->addIncoming(IndexPtr, LoopBB);
  LoopPhi->addIncoming(CopyLen, CopyBackwardsBB);
  BranchInst::Create(ExitBB, LoopBB, CompareN, ThenTerm);
  ThenTerm->eraseFromParent();

  // Copying forward.
  BasicBlock *FwdLoopBB =
      BasicBlock::Create(F->getContext(), "copy_forward_loop", F, ExitBB);
  IRBuilder<> FwdLoopBuilder(FwdLoopBB);
  PHINode *FwdCopyPhi = FwdLoopBuilder.CreatePHI(TypeOfCopyLen, 0, "index_ptr");
  Value *SrcGEP = FwdLoopBuilder.CreateInBoundsGEP(EltTy, SrcAddr, FwdCopyPhi);
  Value *FwdElement =
      FwdLoopBuilder.CreateAlignedLoad(EltTy, SrcGEP, PartSrcAlign, "element");
  Value *DstGEP = FwdLoopBuilder.CreateInBoundsGEP(EltTy, DstAddr, FwdCopyPhi);
  FwdLoopBuilder.CreateAlignedStore(FwdElement, DstGEP, PartDstAlign);
  Value *FwdIndexPtr = FwdLoopBuilder.CreateAdd(
      FwdCopyPhi, ConstantInt::get(TypeOfCopyLen, 1), "index_increment");
  FwdLoopBuilder.CreateCondBr(FwdLoopBuilder.CreateICmpEQ(FwdIndexPtr, CopyLen),
                              ExitBB, FwdLoopBB);
  FwdCopyPhi->addIncoming(FwdIndexPtr, FwdLoopBB);
  FwdCopyPhi->addIncoming(ConstantInt::get(TypeOfCopyLen, 0), CopyForwardBB);

  BranchInst::Create(ExitBB, FwdLoopBB, CompareN, ElseTerm);
  ElseTerm->eraseFromParent();
}
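
// The expansion above produces the following CFG shape (a sketch; the block
// names are the ones set in the code):
//
//                 compare_src_dst (src < dst?)
//                  /                        \
//         copy_backwards                copy_forward
//          (skip if n == 0)             (skip if n == 0)
//                 |                          |
//       copy_backwards_loop          copy_forward_loop
//                  \                        /
//                   +---- memmove_done ----+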

static void createMemSetLoop(Instruction *InsertBefore, Value *DstAddr,
                             Value *CopyLen, Value *SetValue, Align DstAlign,
                             bool IsVolatile) {
  Type *TypeOfCopyLen = CopyLen->getType();
  BasicBlock *OrigBB = InsertBefore->getParent();
  Function *F = OrigBB->getParent();
  const DataLayout &DL = F->getParent()->getDataLayout();
  BasicBlock *NewBB =
      OrigBB->splitBasicBlock(InsertBefore, "split");
  BasicBlock *LoopBB =
      BasicBlock::Create(F->getContext(), "loadstoreloop", F, NewBB);

  IRBuilder<> Builder(OrigBB->getTerminator());

  // Cast pointer to the type of value getting stored
  unsigned dstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();
  DstAddr = Builder.CreateBitCast(DstAddr,
                                  PointerType::get(SetValue->getType(), dstAS));

  Builder.CreateCondBr(
      Builder.CreateICmpEQ(ConstantInt::get(TypeOfCopyLen, 0), CopyLen), NewBB,
      LoopBB);
  OrigBB->getTerminator()->eraseFromParent();

  unsigned PartSize = DL.getTypeStoreSize(SetValue->getType());
  Align PartAlign(commonAlignment(DstAlign, PartSize));

  IRBuilder<> LoopBuilder(LoopBB);
  PHINode *LoopIndex = LoopBuilder.CreatePHI(TypeOfCopyLen, 0);
  LoopIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0), OrigBB);

  LoopBuilder.CreateAlignedStore(
      SetValue,
      LoopBuilder.CreateInBoundsGEP(SetValue->getType(), DstAddr, LoopIndex),
      PartAlign, IsVolatile);

  Value *NewIndex =
      LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(TypeOfCopyLen, 1));
  LoopIndex->addIncoming(NewIndex, LoopBB);

  LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpULT(NewIndex, CopyLen), LoopBB,
                           NewBB);
}
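
// Illustrative sketch of the emitted loop (assuming SetValue is an i8 and
// CopyLen is an i64 %n; not emitted verbatim):
//
//   loadstoreloop:
//     %i = phi i64 [ 0, %entry ], [ %i.next, %loadstoreloop ]
//     %gep = getelementptr inbounds i8, ptr %dst, i64 %i
//     store i8 %val, ptr %gep, align 1
//     %i.next = add i64 %i, 1
//     %c = icmp ult i64 %i.next, %n
//     br i1 %c, label %loadstoreloop, label %split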

/// Returns false only when ScalarEvolution proves that the source and
/// destination pointers of \p Memcpy are never equal; otherwise conservatively
/// returns true (the accesses may overlap).
template <typename T>
static bool canOverlap(MemTransferBase<T> *Memcpy, ScalarEvolution *SE) {
  if (SE) {
    auto *SrcSCEV = SE->getSCEV(Memcpy->getRawSource());
    auto *DestSCEV = SE->getSCEV(Memcpy->getRawDest());
    if (SE->isKnownPredicateAt(CmpInst::ICMP_NE, SrcSCEV, DestSCEV, Memcpy))
      return false;
  }
  return true;
}

void llvm::expandMemCpyAsLoop(MemCpyInst *Memcpy,
                              const TargetTransformInfo &TTI,
                              ScalarEvolution *SE) {
  bool CanOverlap = canOverlap(Memcpy, SE);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Memcpy->getLength())) {
    createMemCpyLoopKnownSize(
        /* InsertBefore */ Memcpy,
        /* SrcAddr */ Memcpy->getRawSource(),
        /* DstAddr */ Memcpy->getRawDest(),
        /* CopyLen */ CI,
        /* SrcAlign */ Memcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ Memcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ Memcpy->isVolatile(),
        /* DstIsVolatile */ Memcpy->isVolatile(),
        /* CanOverlap */ CanOverlap,
        /* TargetTransformInfo */ TTI);
  } else {
    createMemCpyLoopUnknownSize(
        /* InsertBefore */ Memcpy,
        /* SrcAddr */ Memcpy->getRawSource(),
        /* DstAddr */ Memcpy->getRawDest(),
        /* CopyLen */ Memcpy->getLength(),
        /* SrcAlign */ Memcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ Memcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ Memcpy->isVolatile(),
        /* DstIsVolatile */ Memcpy->isVolatile(),
        /* CanOverlap */ CanOverlap,
        /* TargetTransformInfo */ TTI);
  }
}
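
// A minimal usage sketch (hypothetical caller code, not part of this file):
// the intrinsic is expanded in place but not deleted, so callers typically
// erase it afterwards.
//
//   if (auto *MC = dyn_cast<MemCpyInst>(&I)) {
//     expandMemCpyAsLoop(MC, TTI, /*SE=*/nullptr);
//     MC->eraseFromParent();
//   }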

bool llvm::expandMemMoveAsLoop(MemMoveInst *Memmove,
                               const TargetTransformInfo &TTI) {
  Value *CopyLen = Memmove->getLength();
  Value *SrcAddr = Memmove->getRawSource();
  Value *DstAddr = Memmove->getRawDest();
  Align SrcAlign = Memmove->getSourceAlign().valueOrOne();
  Align DstAlign = Memmove->getDestAlign().valueOrOne();
  bool SrcIsVolatile = Memmove->isVolatile();
  bool DstIsVolatile = SrcIsVolatile;
  IRBuilder<> CastBuilder(Memmove);

  unsigned SrcAS = SrcAddr->getType()->getPointerAddressSpace();
  unsigned DstAS = DstAddr->getType()->getPointerAddressSpace();
  if (SrcAS != DstAS) {
    if (!TTI.addrspacesMayAlias(SrcAS, DstAS)) {
      // We may not be able to emit a pointer comparison, but we don't have
      // to. Expand as memcpy.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(CopyLen)) {
        createMemCpyLoopKnownSize(/*InsertBefore=*/Memmove, SrcAddr, DstAddr,
                                  CI, SrcAlign, DstAlign, SrcIsVolatile,
                                  DstIsVolatile,
                                  /*CanOverlap=*/false, TTI);
      } else {
        createMemCpyLoopUnknownSize(/*InsertBefore=*/Memmove, SrcAddr, DstAddr,
                                    CopyLen, SrcAlign, DstAlign, SrcIsVolatile,
                                    DstIsVolatile,
                                    /*CanOverlap=*/false, TTI);
      }

      return true;
    }

    if (TTI.isValidAddrSpaceCast(DstAS, SrcAS))
      DstAddr = CastBuilder.CreateAddrSpaceCast(DstAddr, SrcAddr->getType());
    else if (TTI.isValidAddrSpaceCast(SrcAS, DstAS))
      SrcAddr = CastBuilder.CreateAddrSpaceCast(SrcAddr, DstAddr->getType());
    else {
      // We don't know generically if it's legal to introduce an
      // addrspacecast. We need to know either if it's legal to insert an
      // addrspacecast, or if the address spaces cannot alias.
      LLVM_DEBUG(
          dbgs() << "Do not know how to expand memmove between different "
                    "address spaces\n");
      return false;
    }
  }

  createMemMoveLoop(
      /*InsertBefore=*/Memmove, SrcAddr, DstAddr, CopyLen, SrcAlign, DstAlign,
      SrcIsVolatile, DstIsVolatile, TTI);
  return true;
}

void llvm::expandMemSetAsLoop(MemSetInst *Memset) {
  createMemSetLoop(/* InsertBefore */ Memset,
                   /* DstAddr */ Memset->getRawDest(),
                   /* CopyLen */ Memset->getLength(),
                   /* SetValue */ Memset->getValue(),
                   /* Alignment */ Memset->getDestAlign().valueOrOne(),
                   Memset->isVolatile());
}

void llvm::expandAtomicMemCpyAsLoop(AtomicMemCpyInst *AtomicMemcpy,
                                    const TargetTransformInfo &TTI,
                                    ScalarEvolution *SE) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(AtomicMemcpy->getLength())) {
    createMemCpyLoopKnownSize(
        /* InsertBefore */ AtomicMemcpy,
        /* SrcAddr */ AtomicMemcpy->getRawSource(),
        /* DstAddr */ AtomicMemcpy->getRawDest(),
        /* CopyLen */ CI,
        /* SrcAlign */ AtomicMemcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ AtomicMemcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ AtomicMemcpy->isVolatile(),
        /* DstIsVolatile */ AtomicMemcpy->isVolatile(),
        /* CanOverlap */ false, // SrcAddr & DstAddr may not overlap by spec.
        /* TargetTransformInfo */ TTI,
        /* AtomicCpySize */ AtomicMemcpy->getElementSizeInBytes());
  } else {
    createMemCpyLoopUnknownSize(
        /* InsertBefore */ AtomicMemcpy,
        /* SrcAddr */ AtomicMemcpy->getRawSource(),
        /* DstAddr */ AtomicMemcpy->getRawDest(),
        /* CopyLen */ AtomicMemcpy->getLength(),
        /* SrcAlign */ AtomicMemcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ AtomicMemcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ AtomicMemcpy->isVolatile(),
        /* DstIsVolatile */ AtomicMemcpy->isVolatile(),
        /* CanOverlap */ false, // SrcAddr & DstAddr may not overlap by spec.
        /* TargetTransformInfo */ TTI,
        /* AtomicCpySize */ AtomicMemcpy->getElementSizeInBytes());
  }
}
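
// As with expandMemCpyAsLoop, the intrinsic is expanded in place but not
// deleted; a hypothetical caller would erase it after expansion:
//
//   if (auto *AMC = dyn_cast<AtomicMemCpyInst>(&I)) {
//     expandAtomicMemCpyAsLoop(AMC, TTI, SE);
//     AMC->eraseFromParent();
//   }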