LowerMemIntrinsics.cpp
//===- LowerMemIntrinsics.cpp ----------------------------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <optional>

#define DEBUG_TYPE "lower-mem-intrinsics"

using namespace llvm;

void llvm::createMemCpyLoopKnownSize(
    Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr,
    ConstantInt *CopyLen, Align SrcAlign, Align DstAlign, bool SrcIsVolatile,
    bool DstIsVolatile, bool CanOverlap, const TargetTransformInfo &TTI,
    std::optional<uint32_t> AtomicElementSize) {
  // No need to expand zero length copies.
  if (CopyLen->isZero())
    return;

  BasicBlock *PreLoopBB = InsertBefore->getParent();
  BasicBlock *PostLoopBB = nullptr;
  Function *ParentFunc = PreLoopBB->getParent();
  LLVMContext &Ctx = PreLoopBB->getContext();
  const DataLayout &DL = ParentFunc->getDataLayout();
  MDBuilder MDB(Ctx);
  MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain("MemCopyDomain");
  StringRef Name = "MemCopyAliasScope";
  MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);

  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *TypeOfCopyLen = CopyLen->getType();
  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(
      Ctx, CopyLen, SrcAS, DstAS, SrcAlign, DstAlign, AtomicElementSize);
  assert((!AtomicElementSize || !LoopOpType->isVectorTy()) &&
         "Atomic memcpy lowering is not supported for vector operand type");

  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  assert((!AtomicElementSize || LoopOpSize % *AtomicElementSize == 0) &&
         "Atomic memcpy lowering is not supported for selected operand size");

  uint64_t LoopEndCount = CopyLen->getZExtValue() / LoopOpSize;

  if (LoopEndCount != 0) {
    // Split the block before InsertBefore and insert the load/store loop.
    PostLoopBB = PreLoopBB->splitBasicBlock(InsertBefore, "memcpy-split");
    BasicBlock *LoopBB =
        BasicBlock::Create(Ctx, "load-store-loop", ParentFunc, PostLoopBB);
    PreLoopBB->getTerminator()->setSuccessor(0, LoopBB);

    IRBuilder<> PLBuilder(PreLoopBB->getTerminator());

    Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));
    Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));

    IRBuilder<> LoopBuilder(LoopBB);
    PHINode *LoopIndex = LoopBuilder.CreatePHI(TypeOfCopyLen, 2, "loop-index");
    LoopIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0U), PreLoopBB);
    // Loop Body
    Value *SrcGEP =
        LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
    LoadInst *Load = LoopBuilder.CreateAlignedLoad(LoopOpType, SrcGEP,
                                                   PartSrcAlign, SrcIsVolatile);
    if (!CanOverlap) {
      // Set alias scope for loads.
      Load->setMetadata(LLVMContext::MD_alias_scope,
                        MDNode::get(Ctx, NewScope));
    }
    Value *DstGEP =
        LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
    StoreInst *Store = LoopBuilder.CreateAlignedStore(
        Load, DstGEP, PartDstAlign, DstIsVolatile);
    if (!CanOverlap) {
      // Indicate that stores don't overlap loads.
      Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
    }
    if (AtomicElementSize) {
      Load->setAtomic(AtomicOrdering::Unordered);
      Store->setAtomic(AtomicOrdering::Unordered);
    }
    Value *NewIndex =
        LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(TypeOfCopyLen, 1U));
    LoopIndex->addIncoming(NewIndex, LoopBB);

    // Create the loop branch condition.
    Constant *LoopEndCI = ConstantInt::get(TypeOfCopyLen, LoopEndCount);
    LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpULT(NewIndex, LoopEndCI),
                             LoopBB, PostLoopBB);
  }

  uint64_t BytesCopied = LoopEndCount * LoopOpSize;
  uint64_t RemainingBytes = CopyLen->getZExtValue() - BytesCopied;
  if (RemainingBytes) {
    IRBuilder<> RBuilder(PostLoopBB ? PostLoopBB->getFirstNonPHI()
                                    : InsertBefore);

    SmallVector<Type *, 5> RemainingOps;
    TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
                                          SrcAS, DstAS, SrcAlign, DstAlign,
                                          AtomicElementSize);

    for (auto *OpTy : RemainingOps) {
      Align PartSrcAlign(commonAlignment(SrcAlign, BytesCopied));
      Align PartDstAlign(commonAlignment(DstAlign, BytesCopied));

      // Calculate the new index
      unsigned OperandSize = DL.getTypeStoreSize(OpTy);
      assert(
          (!AtomicElementSize || OperandSize % *AtomicElementSize == 0) &&
          "Atomic memcpy lowering is not supported for selected operand size");

      uint64_t GepIndex = BytesCopied / OperandSize;
      assert(GepIndex * OperandSize == BytesCopied &&
             "Division should have no Remainder!");

      Value *SrcGEP = RBuilder.CreateInBoundsGEP(
          OpTy, SrcAddr, ConstantInt::get(TypeOfCopyLen, GepIndex));
      LoadInst *Load =
          RBuilder.CreateAlignedLoad(OpTy, SrcGEP, PartSrcAlign, SrcIsVolatile);
      if (!CanOverlap) {
        // Set alias scope for loads.
        Load->setMetadata(LLVMContext::MD_alias_scope,
                          MDNode::get(Ctx, NewScope));
      }
      Value *DstGEP = RBuilder.CreateInBoundsGEP(
          OpTy, DstAddr, ConstantInt::get(TypeOfCopyLen, GepIndex));
      StoreInst *Store = RBuilder.CreateAlignedStore(Load, DstGEP, PartDstAlign,
                                                     DstIsVolatile);
      if (!CanOverlap) {
        // Indicate that stores don't overlap loads.
        Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
      }
      if (AtomicElementSize) {
        Load->setAtomic(AtomicOrdering::Unordered);
        Store->setAtomic(AtomicOrdering::Unordered);
      }
      BytesCopied += OperandSize;
    }
  }
  assert(BytesCopied == CopyLen->getZExtValue() &&
         "Bytes copied should match size in the call!");
}
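// Illustrative C-style sketch of the expansion above for a 19-byte memcpy,
// assuming the target selects i64 as the loop operand type and an i16+i8
// residual (the actual types are target-dependent):
//
//   for (size_t i = 0; i < 2; ++i)                        // load-store-loop
//     ((uint64_t *)dst)[i] = ((const uint64_t *)src)[i];  // bytes 0..15
//   ((uint16_t *)dst)[8]  = ((const uint16_t *)src)[8];   // bytes 16..17
//   ((uint8_t  *)dst)[18] = ((const uint8_t  *)src)[18];  // byte 18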

// \returns \p Len udiv \p OpSize, checking for optimization opportunities.
static Value *getRuntimeLoopCount(const DataLayout &DL, IRBuilderBase &B,
                                  Value *Len, Value *OpSize,
                                  unsigned OpSizeVal) {
  // For powers of 2, we can lshr by log2 instead of using udiv.
  if (isPowerOf2_32(OpSizeVal))
    return B.CreateLShr(Len, Log2_32(OpSizeVal));
  return B.CreateUDiv(Len, OpSize);
}

// \returns \p Len urem \p OpSize, checking for optimization opportunities.
static Value *getRuntimeLoopRemainder(const DataLayout &DL, IRBuilderBase &B,
                                      Value *Len, Value *OpSize,
                                      unsigned OpSizeVal) {
  // For powers of 2, we can and by (OpSizeVal - 1) instead of using urem.
  if (isPowerOf2_32(OpSizeVal))
    return B.CreateAnd(Len, OpSizeVal - 1);
  return B.CreateURem(Len, OpSize);
}
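// For example, with OpSizeVal == 16 these helpers emit "lshr %Len, 4" and
// "and %Len, 15" respectively; for a non-power-of-two operand size such as 12
// they fall back to an explicit udiv/urem of the length.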

void llvm::createMemCpyLoopUnknownSize(
    Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr, Value *CopyLen,
    Align SrcAlign, Align DstAlign, bool SrcIsVolatile, bool DstIsVolatile,
    bool CanOverlap, const TargetTransformInfo &TTI,
    std::optional<uint32_t> AtomicElementSize) {
  BasicBlock *PreLoopBB = InsertBefore->getParent();
  BasicBlock *PostLoopBB =
      PreLoopBB->splitBasicBlock(InsertBefore, "post-loop-memcpy-expansion");

  Function *ParentFunc = PreLoopBB->getParent();
  const DataLayout &DL = ParentFunc->getDataLayout();
  LLVMContext &Ctx = PreLoopBB->getContext();
  MDBuilder MDB(Ctx);
  MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain("MemCopyDomain");
  StringRef Name = "MemCopyAliasScope";
  MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);

  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(
      Ctx, CopyLen, SrcAS, DstAS, SrcAlign, DstAlign, AtomicElementSize);
  assert((!AtomicElementSize || !LoopOpType->isVectorTy()) &&
         "Atomic memcpy lowering is not supported for vector operand type");
  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  assert((!AtomicElementSize || LoopOpSize % *AtomicElementSize == 0) &&
         "Atomic memcpy lowering is not supported for selected operand size");

  IRBuilder<> PLBuilder(PreLoopBB->getTerminator());

  // Calculate the loop trip count, and remaining bytes to copy after the loop.
  Type *CopyLenType = CopyLen->getType();
  IntegerType *ILengthType = dyn_cast<IntegerType>(CopyLenType);
  assert(ILengthType &&
         "expected size argument to memcpy to be an integer type!");
  Type *Int8Type = Type::getInt8Ty(Ctx);
  bool LoopOpIsInt8 = LoopOpType == Int8Type;
  ConstantInt *CILoopOpSize = ConstantInt::get(ILengthType, LoopOpSize);
  Value *RuntimeLoopCount = LoopOpIsInt8
                                ? CopyLen
                                : getRuntimeLoopCount(DL, PLBuilder, CopyLen,
                                                      CILoopOpSize, LoopOpSize);

  BasicBlock *LoopBB =
      BasicBlock::Create(Ctx, "loop-memcpy-expansion", ParentFunc, PostLoopBB);
  IRBuilder<> LoopBuilder(LoopBB);

  Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));
  Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));

  PHINode *LoopIndex = LoopBuilder.CreatePHI(CopyLenType, 2, "loop-index");
  LoopIndex->addIncoming(ConstantInt::get(CopyLenType, 0U), PreLoopBB);

  Value *SrcGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopIndex);
  LoadInst *Load = LoopBuilder.CreateAlignedLoad(LoopOpType, SrcGEP,
                                                 PartSrcAlign, SrcIsVolatile);
  if (!CanOverlap) {
    // Set alias scope for loads.
    Load->setMetadata(LLVMContext::MD_alias_scope, MDNode::get(Ctx, NewScope));
  }
  Value *DstGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopIndex);
  StoreInst *Store =
      LoopBuilder.CreateAlignedStore(Load, DstGEP, PartDstAlign, DstIsVolatile);
  if (!CanOverlap) {
    // Indicate that stores don't overlap loads.
    Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
  }
  if (AtomicElementSize) {
    Load->setAtomic(AtomicOrdering::Unordered);
    Store->setAtomic(AtomicOrdering::Unordered);
  }
  Value *NewIndex =
      LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(CopyLenType, 1U));
  LoopIndex->addIncoming(NewIndex, LoopBB);

  bool requiresResidual =
      !LoopOpIsInt8 && !(AtomicElementSize && LoopOpSize == AtomicElementSize);
  if (requiresResidual) {
    Type *ResLoopOpType = AtomicElementSize
                              ? Type::getIntNTy(Ctx, *AtomicElementSize * 8)
                              : Int8Type;
    unsigned ResLoopOpSize = DL.getTypeStoreSize(ResLoopOpType);
    assert((ResLoopOpSize == AtomicElementSize ? *AtomicElementSize : 1) &&
           "Store size is expected to match type size");

    Align ResSrcAlign(commonAlignment(PartSrcAlign, ResLoopOpSize));
    Align ResDstAlign(commonAlignment(PartDstAlign, ResLoopOpSize));

    Value *RuntimeResidual = getRuntimeLoopRemainder(DL, PLBuilder, CopyLen,
                                                     CILoopOpSize, LoopOpSize);
    Value *RuntimeBytesCopied = PLBuilder.CreateSub(CopyLen, RuntimeResidual);

    // Loop body for the residual copy.
    BasicBlock *ResLoopBB = BasicBlock::Create(Ctx, "loop-memcpy-residual",
                                               PreLoopBB->getParent(),
                                               PostLoopBB);
    // Residual loop header.
    BasicBlock *ResHeaderBB = BasicBlock::Create(
        Ctx, "loop-memcpy-residual-header", PreLoopBB->getParent(), nullptr);

    // Need to update the pre-loop basic block to branch to the correct place:
    // branch to the main loop if the count is non-zero, branch to the residual
    // loop if the copy size is smaller than one iteration of the main loop but
    // non-zero, and finally branch to after the residual loop if the memcpy
    // size is zero.
    ConstantInt *Zero = ConstantInt::get(ILengthType, 0U);
    PLBuilder.CreateCondBr(PLBuilder.CreateICmpNE(RuntimeLoopCount, Zero),
                           LoopBB, ResHeaderBB);
    PreLoopBB->getTerminator()->eraseFromParent();

    LoopBuilder.CreateCondBr(
        LoopBuilder.CreateICmpULT(NewIndex, RuntimeLoopCount), LoopBB,
        ResHeaderBB);

    // Determine if we need to branch to the residual loop or bypass it.
    IRBuilder<> RHBuilder(ResHeaderBB);
    RHBuilder.CreateCondBr(RHBuilder.CreateICmpNE(RuntimeResidual, Zero),
                           ResLoopBB, PostLoopBB);

    // Copy the residual with single byte load/store loop.
    IRBuilder<> ResBuilder(ResLoopBB);
    PHINode *ResidualIndex =
        ResBuilder.CreatePHI(CopyLenType, 2, "residual-loop-index");
    ResidualIndex->addIncoming(Zero, ResHeaderBB);

    Value *FullOffset = ResBuilder.CreateAdd(RuntimeBytesCopied, ResidualIndex);
    Value *SrcGEP =
        ResBuilder.CreateInBoundsGEP(ResLoopOpType, SrcAddr, FullOffset);
    LoadInst *Load = ResBuilder.CreateAlignedLoad(ResLoopOpType, SrcGEP,
                                                  ResSrcAlign, SrcIsVolatile);
    if (!CanOverlap) {
      // Set alias scope for loads.
      Load->setMetadata(LLVMContext::MD_alias_scope,
                        MDNode::get(Ctx, NewScope));
    }
    Value *DstGEP =
        ResBuilder.CreateInBoundsGEP(ResLoopOpType, DstAddr, FullOffset);
    StoreInst *Store =
        ResBuilder.CreateAlignedStore(Load, DstGEP, ResDstAlign, DstIsVolatile);
    if (!CanOverlap) {
      // Indicate that stores don't overlap loads.
      Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
    }
    if (AtomicElementSize) {
      Load->setAtomic(AtomicOrdering::Unordered);
      Store->setAtomic(AtomicOrdering::Unordered);
    }
    Value *ResNewIndex = ResBuilder.CreateAdd(
        ResidualIndex, ConstantInt::get(CopyLenType, ResLoopOpSize));
    ResidualIndex->addIncoming(ResNewIndex, ResLoopBB);

    // Create the loop branch condition.
    ResBuilder.CreateCondBr(
        ResBuilder.CreateICmpULT(ResNewIndex, RuntimeResidual), ResLoopBB,
        PostLoopBB);
  } else {
    // In this case the loop operand type was a byte, and there is no need for a
    // residual loop to copy the remaining memory after the main loop.
    // We do however need to patch up the control flow by creating the
    // terminators for the preloop block and the memcpy loop.
    ConstantInt *Zero = ConstantInt::get(ILengthType, 0U);
    PLBuilder.CreateCondBr(PLBuilder.CreateICmpNE(RuntimeLoopCount, Zero),
                           LoopBB, PostLoopBB);
    PreLoopBB->getTerminator()->eraseFromParent();
    LoopBuilder.CreateCondBr(
        LoopBuilder.CreateICmpULT(NewIndex, RuntimeLoopCount), LoopBB,
        PostLoopBB);
  }
}
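// Illustrative C-style sketch of the structure emitted above for a runtime
// length n when the loop operand is wider than one byte, so a residual loop is
// required (wide_copy/residual_copy stand in for the emitted load/store pairs):
//
//   count = n / LoopOpSize;  rem = n % LoopOpSize;  copied = n - rem;
//   if (count != 0)                              // pre-loop branch
//     for (i = 0; i < count; ++i)                // loop-memcpy-expansion
//       wide_copy(dst, src, i);
//   if (rem != 0)                                // loop-memcpy-residual-header
//     for (j = 0; j < rem; j += ResLoopOpSize)   // loop-memcpy-residual
//       residual_copy(dst, src, copied + j);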

// If \p Addr1 and \p Addr2 are pointers to different address spaces, create an
// addrspacecast to obtain a pair of pointers in the same address space. The
// caller needs to ensure that addrspacecasting is possible.
// No-op if the pointers are in the same address space.
static std::pair<Value *, Value *>
tryInsertCastToCommonAddrSpace(IRBuilderBase &B, Value *Addr1, Value *Addr2,
                               const TargetTransformInfo &TTI) {
  Value *ResAddr1 = Addr1;
  Value *ResAddr2 = Addr2;

  unsigned AS1 = cast<PointerType>(Addr1->getType())->getAddressSpace();
  unsigned AS2 = cast<PointerType>(Addr2->getType())->getAddressSpace();
  if (AS1 != AS2) {
    if (TTI.isValidAddrSpaceCast(AS2, AS1))
      ResAddr2 = B.CreateAddrSpaceCast(Addr2, Addr1->getType());
    else if (TTI.isValidAddrSpaceCast(AS1, AS2))
      ResAddr1 = B.CreateAddrSpaceCast(Addr1, Addr2->getType());
    else
      llvm_unreachable("Can only lower memmove between address spaces if they "
                       "support addrspacecast");
  }
  return {ResAddr1, ResAddr2};
}
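// Illustrative, target-dependent example: if Addr1 is a generic pointer and
// Addr2 lives in a named address space whose cast to the generic space the
// target reports as valid, Addr2 is rewritten roughly as
//   %addr2.cast = addrspacecast ptr addrspace(N) %addr2 to ptr
// so that the later pointer comparison sees two operands of the same type.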

// Lower memmove to IR. memmove is required to correctly copy overlapping memory
// regions; therefore, it has to check the relative positions of the source and
// destination pointers and choose the copy direction accordingly.
//
// The code below is an IR rendition of this C function:
//
// void* memmove(void* dst, const void* src, size_t n) {
//   unsigned char* d = dst;
//   const unsigned char* s = src;
//   if (s < d) {
//     // copy backwards
//     while (n--) {
//       d[n] = s[n];
//     }
//   } else {
//     // copy forward
//     for (size_t i = 0; i < n; ++i) {
//       d[i] = s[i];
//     }
//   }
//   return dst;
// }
//
// If the TargetTransformInfo specifies a wider MemcpyLoopLoweringType, it is
// used for the memory accesses in the loops. Then, additional loops with
// byte-wise accesses are added for the remaining bytes.
static void createMemMoveLoopUnknownSize(Instruction *InsertBefore,
                                         Value *SrcAddr, Value *DstAddr,
                                         Value *CopyLen, Align SrcAlign,
                                         Align DstAlign, bool SrcIsVolatile,
                                         bool DstIsVolatile,
                                         const TargetTransformInfo &TTI) {
  Type *TypeOfCopyLen = CopyLen->getType();
  BasicBlock *OrigBB = InsertBefore->getParent();
  Function *F = OrigBB->getParent();
  const DataLayout &DL = F->getDataLayout();
  LLVMContext &Ctx = OrigBB->getContext();
  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(Ctx, CopyLen, SrcAS, DstAS,
                                                   SrcAlign, DstAlign);
  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  Type *Int8Type = Type::getInt8Ty(Ctx);
  bool LoopOpIsInt8 = LoopOpType == Int8Type;

  // If the memory accesses are wider than one byte, residual loops with
  // i8-accesses are required to move remaining bytes.
  bool RequiresResidual = !LoopOpIsInt8;

  Type *ResidualLoopOpType = Int8Type;
  unsigned ResidualLoopOpSize = DL.getTypeStoreSize(ResidualLoopOpType);

  // Calculate the loop trip count and remaining bytes to copy after the loop.
  IntegerType *ILengthType = cast<IntegerType>(TypeOfCopyLen);
  ConstantInt *CILoopOpSize = ConstantInt::get(ILengthType, LoopOpSize);
  ConstantInt *Zero = ConstantInt::get(ILengthType, 0);
  ConstantInt *One = ConstantInt::get(ILengthType, 1);

  IRBuilder<> PLBuilder(InsertBefore);

  Value *RuntimeLoopCount = CopyLen;
  Value *RuntimeLoopRemainder = nullptr;
  Value *RuntimeBytesCopiedMainLoop = CopyLen;
  Value *SkipResidualCondition = nullptr;
  if (RequiresResidual) {
    RuntimeLoopCount =
        getRuntimeLoopCount(DL, PLBuilder, CopyLen, CILoopOpSize, LoopOpSize);
    RuntimeLoopRemainder = getRuntimeLoopRemainder(DL, PLBuilder, CopyLen,
                                                   CILoopOpSize, LoopOpSize);
    RuntimeBytesCopiedMainLoop =
        PLBuilder.CreateSub(CopyLen, RuntimeLoopRemainder);
    SkipResidualCondition =
        PLBuilder.CreateICmpEQ(RuntimeLoopRemainder, Zero, "skip_residual");
  }
  Value *SkipMainCondition =
      PLBuilder.CreateICmpEQ(RuntimeLoopCount, Zero, "skip_main");

  // Create a comparison of src and dst, based on which we jump to either
  // the forward-copy part of the function (if src >= dst) or the backwards-copy
  // part (if src < dst).
  // SplitBlockAndInsertIfThenElse conveniently creates the basic if-then-else
  // structure. Its block terminators (unconditional branches) are replaced by
  // the appropriate conditional branches when the loop is built.
  // If the pointers are in different address spaces, they need to be converted
  // to a compatible one. Cases where memory ranges in the different address
  // spaces cannot overlap are lowered as memcpy and not handled here.
  auto [CmpSrcAddr, CmpDstAddr] =
      tryInsertCastToCommonAddrSpace(PLBuilder, SrcAddr, DstAddr, TTI);
  Value *PtrCompare =
      PLBuilder.CreateICmpULT(CmpSrcAddr, CmpDstAddr, "compare_src_dst");
  Instruction *ThenTerm, *ElseTerm;
  SplitBlockAndInsertIfThenElse(PtrCompare, InsertBefore->getIterator(),
                                &ThenTerm, &ElseTerm);

  // If the LoopOpSize is greater than 1, each part of the function consists of
  // four blocks:
  //   memmove_copy_backwards:
  //       skip the residual loop when 0 iterations are required
  //   memmove_bwd_residual_loop:
  //       copy the last few bytes individually so that the remaining length is
  //       a multiple of the LoopOpSize
  //   memmove_bwd_middle: skip the main loop when 0 iterations are required
  //   memmove_bwd_main_loop: the actual backwards loop BB with wide accesses
  //   memmove_copy_forward: skip the main loop when 0 iterations are required
  //   memmove_fwd_main_loop: the actual forward loop BB with wide accesses
  //   memmove_fwd_middle: skip the residual loop when 0 iterations are required
  //   memmove_fwd_residual_loop: copy the last few bytes individually
  //
  // The main and residual loop are switched between copying forward and
  // backward so that the residual loop always operates on the end of the moved
  // range. This is based on the assumption that buffers whose start is aligned
  // with the LoopOpSize are more common than buffers whose end is.
  //
  // If the LoopOpSize is 1, each part of the function consists of two blocks:
  //   memmove_copy_backwards: skip the loop when 0 iterations are required
  //   memmove_bwd_main_loop: the actual backwards loop BB
  //   memmove_copy_forward: skip the loop when 0 iterations are required
  //   memmove_fwd_main_loop: the actual forward loop BB
  BasicBlock *CopyBackwardsBB = ThenTerm->getParent();
  CopyBackwardsBB->setName("memmove_copy_backwards");
  BasicBlock *CopyForwardBB = ElseTerm->getParent();
  CopyForwardBB->setName("memmove_copy_forward");
  BasicBlock *ExitBB = InsertBefore->getParent();
  ExitBB->setName("memmove_done");

  Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));
  Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));

  // Accesses in the residual loops do not share the same alignment as those in
  // the main loops.
  Align ResidualSrcAlign(commonAlignment(PartSrcAlign, ResidualLoopOpSize));
  Align ResidualDstAlign(commonAlignment(PartDstAlign, ResidualLoopOpSize));

  // Copying backwards.
  {
    BasicBlock *MainLoopBB = BasicBlock::Create(
        F->getContext(), "memmove_bwd_main_loop", F, CopyForwardBB);

    // The predecessor of the memmove_bwd_main_loop. Updated in the
    // following if a residual loop is emitted first.
    BasicBlock *PredBB = CopyBackwardsBB;

    if (RequiresResidual) {
      // backwards residual loop
      BasicBlock *ResidualLoopBB = BasicBlock::Create(
          F->getContext(), "memmove_bwd_residual_loop", F, MainLoopBB);
      IRBuilder<> ResidualLoopBuilder(ResidualLoopBB);
      PHINode *ResidualLoopPhi = ResidualLoopBuilder.CreatePHI(ILengthType, 0);
      Value *ResidualIndex = ResidualLoopBuilder.CreateSub(
          ResidualLoopPhi, One, "bwd_residual_index");
      Value *LoadGEP = ResidualLoopBuilder.CreateInBoundsGEP(
          ResidualLoopOpType, SrcAddr, ResidualIndex);
      Value *Element = ResidualLoopBuilder.CreateAlignedLoad(
          ResidualLoopOpType, LoadGEP, ResidualSrcAlign, SrcIsVolatile,
          "element");
      Value *StoreGEP = ResidualLoopBuilder.CreateInBoundsGEP(
          ResidualLoopOpType, DstAddr, ResidualIndex);
      ResidualLoopBuilder.CreateAlignedStore(Element, StoreGEP,
                                             ResidualDstAlign, DstIsVolatile);

      // After the residual loop, go to an intermediate block.
      BasicBlock *IntermediateBB = BasicBlock::Create(
          F->getContext(), "memmove_bwd_middle", F, MainLoopBB);
      // Later code expects a terminator in the PredBB.
      IRBuilder<> IntermediateBuilder(IntermediateBB);
      IntermediateBuilder.CreateUnreachable();
      ResidualLoopBuilder.CreateCondBr(
          ResidualLoopBuilder.CreateICmpEQ(ResidualIndex,
                                           RuntimeBytesCopiedMainLoop),
          IntermediateBB, ResidualLoopBB);

      ResidualLoopPhi->addIncoming(ResidualIndex, ResidualLoopBB);
      ResidualLoopPhi->addIncoming(CopyLen, CopyBackwardsBB);

      // How to get to the residual:
      BranchInst::Create(IntermediateBB, ResidualLoopBB, SkipResidualCondition,
                         ThenTerm->getIterator());
      ThenTerm->eraseFromParent();

      PredBB = IntermediateBB;
    }

    // main loop
    IRBuilder<> MainLoopBuilder(MainLoopBB);
    PHINode *MainLoopPhi = MainLoopBuilder.CreatePHI(ILengthType, 0);
    Value *MainIndex =
        MainLoopBuilder.CreateSub(MainLoopPhi, One, "bwd_main_index");
    Value *LoadGEP =
        MainLoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, MainIndex);
    Value *Element = MainLoopBuilder.CreateAlignedLoad(
        LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile, "element");
    Value *StoreGEP =
        MainLoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, MainIndex);
    MainLoopBuilder.CreateAlignedStore(Element, StoreGEP, PartDstAlign,
                                       DstIsVolatile);
    MainLoopBuilder.CreateCondBr(MainLoopBuilder.CreateICmpEQ(MainIndex, Zero),
                                 ExitBB, MainLoopBB);
    MainLoopPhi->addIncoming(MainIndex, MainLoopBB);
    MainLoopPhi->addIncoming(RuntimeLoopCount, PredBB);

    // How to get to the main loop:
    Instruction *PredBBTerm = PredBB->getTerminator();
    BranchInst::Create(ExitBB, MainLoopBB, SkipMainCondition,
                       PredBBTerm->getIterator());
    PredBBTerm->eraseFromParent();
  }

  // Copying forward.
  // main loop
  {
    BasicBlock *MainLoopBB =
        BasicBlock::Create(F->getContext(), "memmove_fwd_main_loop", F, ExitBB);
    IRBuilder<> MainLoopBuilder(MainLoopBB);
    PHINode *MainLoopPhi =
        MainLoopBuilder.CreatePHI(ILengthType, 0, "fwd_main_index");
    Value *LoadGEP =
        MainLoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, MainLoopPhi);
    Value *Element = MainLoopBuilder.CreateAlignedLoad(
        LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile, "element");
    Value *StoreGEP =
        MainLoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, MainLoopPhi);
    MainLoopBuilder.CreateAlignedStore(Element, StoreGEP, PartDstAlign,
                                       DstIsVolatile);
    Value *MainIndex = MainLoopBuilder.CreateAdd(MainLoopPhi, One);
    MainLoopPhi->addIncoming(MainIndex, MainLoopBB);
    MainLoopPhi->addIncoming(Zero, CopyForwardBB);

    Instruction *CopyFwdBBTerm = CopyForwardBB->getTerminator();
    BasicBlock *SuccessorBB = ExitBB;
    if (RequiresResidual)
      SuccessorBB =
          BasicBlock::Create(F->getContext(), "memmove_fwd_middle", F, ExitBB);

    // leaving or staying in the main loop
    MainLoopBuilder.CreateCondBr(
        MainLoopBuilder.CreateICmpEQ(MainIndex, RuntimeLoopCount), SuccessorBB,
        MainLoopBB);

    // getting in or skipping the main loop
    BranchInst::Create(SuccessorBB, MainLoopBB, SkipMainCondition,
                       CopyFwdBBTerm->getIterator());
    CopyFwdBBTerm->eraseFromParent();

    if (RequiresResidual) {
      BasicBlock *IntermediateBB = SuccessorBB;
      IRBuilder<> IntermediateBuilder(IntermediateBB);
      BasicBlock *ResidualLoopBB = BasicBlock::Create(
          F->getContext(), "memmove_fwd_residual_loop", F, ExitBB);
      IntermediateBuilder.CreateCondBr(SkipResidualCondition, ExitBB,
                                       ResidualLoopBB);

      // Residual loop
      IRBuilder<> ResidualLoopBuilder(ResidualLoopBB);
      PHINode *ResidualLoopPhi =
          ResidualLoopBuilder.CreatePHI(ILengthType, 0, "fwd_residual_index");
      Value *LoadGEP = ResidualLoopBuilder.CreateInBoundsGEP(
          ResidualLoopOpType, SrcAddr, ResidualLoopPhi);
      Value *Element = ResidualLoopBuilder.CreateAlignedLoad(
          ResidualLoopOpType, LoadGEP, ResidualSrcAlign, SrcIsVolatile,
          "element");
      Value *StoreGEP = ResidualLoopBuilder.CreateInBoundsGEP(
          ResidualLoopOpType, DstAddr, ResidualLoopPhi);
      ResidualLoopBuilder.CreateAlignedStore(Element, StoreGEP,
                                             ResidualDstAlign, DstIsVolatile);
      Value *ResidualIndex =
          ResidualLoopBuilder.CreateAdd(ResidualLoopPhi, One);
      ResidualLoopBuilder.CreateCondBr(
          ResidualLoopBuilder.CreateICmpEQ(ResidualIndex, CopyLen), ExitBB,
          ResidualLoopBB);
      ResidualLoopPhi->addIncoming(ResidualIndex, ResidualLoopBB);
      ResidualLoopPhi->addIncoming(RuntimeBytesCopiedMainLoop, IntermediateBB);
    }
  }
}
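// Illustrative C-style sketch of the overall structure emitted above for a
// runtime length n with a loop operand wider than one byte (count, rem and
// copied are the runtime values computed in the pre-loop block; wide_copy
// stands in for the emitted wide load/store pair; d and s are byte pointers):
//
//   if (src < dst) {                        // copy backwards
//     if (rem != 0)                         // memmove_bwd_residual_loop
//       for (j = n; j != copied; ) { --j; d[j] = s[j]; }
//     if (count != 0)                       // memmove_bwd_main_loop
//       for (i = count; i != 0; ) { --i; wide_copy(d, s, i); }
//   } else {                                // copy forward
//     if (count != 0)                       // memmove_fwd_main_loop
//       for (i = 0; i != count; ++i) wide_copy(d, s, i);
//     if (rem != 0)                         // memmove_fwd_residual_loop
//       for (j = copied; j != n; ++j) d[j] = s[j];
//   }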

// Similar to createMemMoveLoopUnknownSize, only the trip counts are computed at
// compile time, obsolete loops and branches are omitted, and the residual code
// is straight-line code instead of a loop.
static void createMemMoveLoopKnownSize(Instruction *InsertBefore,
                                       Value *SrcAddr, Value *DstAddr,
                                       ConstantInt *CopyLen, Align SrcAlign,
                                       Align DstAlign, bool SrcIsVolatile,
                                       bool DstIsVolatile,
                                       const TargetTransformInfo &TTI) {
  // No need to expand zero length moves.
  if (CopyLen->isZero())
    return;

  Type *TypeOfCopyLen = CopyLen->getType();
  BasicBlock *OrigBB = InsertBefore->getParent();
  Function *F = OrigBB->getParent();
  const DataLayout &DL = F->getDataLayout();
  LLVMContext &Ctx = OrigBB->getContext();
  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(Ctx, CopyLen, SrcAS, DstAS,
                                                   SrcAlign, DstAlign);
  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);

  // Calculate the loop trip count and remaining bytes to copy after the loop.
  uint64_t LoopEndCount = CopyLen->getZExtValue() / LoopOpSize;
  uint64_t BytesCopiedInLoop = LoopEndCount * LoopOpSize;
  uint64_t RemainingBytes = CopyLen->getZExtValue() - BytesCopiedInLoop;

  IntegerType *ILengthType = cast<IntegerType>(TypeOfCopyLen);
  ConstantInt *Zero = ConstantInt::get(ILengthType, 0);
  ConstantInt *One = ConstantInt::get(ILengthType, 1);
  ConstantInt *TripCount = ConstantInt::get(ILengthType, LoopEndCount);

  IRBuilder<> PLBuilder(InsertBefore);

  auto [CmpSrcAddr, CmpDstAddr] =
      tryInsertCastToCommonAddrSpace(PLBuilder, SrcAddr, DstAddr, TTI);
  Value *PtrCompare =
      PLBuilder.CreateICmpULT(CmpSrcAddr, CmpDstAddr, "compare_src_dst");
  Instruction *ThenTerm, *ElseTerm;
  SplitBlockAndInsertIfThenElse(PtrCompare, InsertBefore->getIterator(),
                                &ThenTerm, &ElseTerm);

  BasicBlock *CopyBackwardsBB = ThenTerm->getParent();
  BasicBlock *CopyForwardBB = ElseTerm->getParent();
  BasicBlock *ExitBB = InsertBefore->getParent();
  ExitBB->setName("memmove_done");

  Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));
  Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));

  // Helper function to generate a load/store pair of a given type in the
  // residual. Used in the forward and backward branches.
  auto GenerateResidualLdStPair = [&](Type *OpTy, IRBuilderBase &Builder,
                                      uint64_t &BytesCopied) {
    Align ResSrcAlign(commonAlignment(SrcAlign, BytesCopied));
    Align ResDstAlign(commonAlignment(DstAlign, BytesCopied));

    // Calculate the new index
    unsigned OperandSize = DL.getTypeStoreSize(OpTy);

    uint64_t GepIndex = BytesCopied / OperandSize;
    assert(GepIndex * OperandSize == BytesCopied &&
           "Division should have no Remainder!");

    Value *SrcGEP = Builder.CreateInBoundsGEP(
        OpTy, SrcAddr, ConstantInt::get(TypeOfCopyLen, GepIndex));
    LoadInst *Load =
        Builder.CreateAlignedLoad(OpTy, SrcGEP, ResSrcAlign, SrcIsVolatile);
    Value *DstGEP = Builder.CreateInBoundsGEP(
        OpTy, DstAddr, ConstantInt::get(TypeOfCopyLen, GepIndex));
    Builder.CreateAlignedStore(Load, DstGEP, ResDstAlign, DstIsVolatile);
    BytesCopied += OperandSize;
  };

  // Copying backwards.
  if (RemainingBytes != 0) {
    CopyBackwardsBB->setName("memmove_bwd_residual");
    uint64_t BytesCopied = BytesCopiedInLoop;

    // Residual code is required to move the remaining bytes. We need the same
    // instructions as in the forward case, only in reverse. So we generate code
    // the same way, except that we change the IRBuilder insert point for each
    // load/store pair so that each one is inserted before the previous one
    // instead of after it.
    IRBuilder<> BwdResBuilder(CopyBackwardsBB->getFirstNonPHI());
    SmallVector<Type *, 5> RemainingOps;
    TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
                                          SrcAS, DstAS, PartSrcAlign,
                                          PartDstAlign);
    for (auto *OpTy : RemainingOps) {
      // reverse the order of the emitted operations
      BwdResBuilder.SetInsertPoint(CopyBackwardsBB->getFirstNonPHI());
      GenerateResidualLdStPair(OpTy, BwdResBuilder, BytesCopied);
    }
  }
  if (LoopEndCount != 0) {
    BasicBlock *LoopBB = CopyBackwardsBB;
    BasicBlock *PredBB = OrigBB;
    if (RemainingBytes != 0) {
      // if we introduce residual code, it needs its separate BB
      LoopBB = CopyBackwardsBB->splitBasicBlock(
          CopyBackwardsBB->getTerminator(), "memmove_bwd_loop");
      PredBB = CopyBackwardsBB;
    } else {
      CopyBackwardsBB->setName("memmove_bwd_loop");
    }
    IRBuilder<> LoopBuilder(LoopBB->getTerminator());
    PHINode *LoopPhi = LoopBuilder.CreatePHI(ILengthType, 0);
    Value *Index = LoopBuilder.CreateSub(LoopPhi, One, "bwd_index");
    Value *LoadGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, Index);
    Value *Element = LoopBuilder.CreateAlignedLoad(
        LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile, "element");
    Value *StoreGEP = LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, Index);
    LoopBuilder.CreateAlignedStore(Element, StoreGEP, PartDstAlign,
                                   DstIsVolatile);

    // Replace the unconditional branch introduced by
    // SplitBlockAndInsertIfThenElse to turn LoopBB into a loop.
    Instruction *UncondTerm = LoopBB->getTerminator();
    LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpEQ(Index, Zero), ExitBB,
                             LoopBB);
    UncondTerm->eraseFromParent();

    LoopPhi->addIncoming(Index, LoopBB);
    LoopPhi->addIncoming(TripCount, PredBB);
  }

  // Copying forward.
  BasicBlock *FwdResidualBB = CopyForwardBB;
  if (LoopEndCount != 0) {
    CopyForwardBB->setName("memmove_fwd_loop");
    BasicBlock *LoopBB = CopyForwardBB;
    BasicBlock *SuccBB = ExitBB;
    if (RemainingBytes != 0) {
      // if we introduce residual code, it needs its separate BB
      SuccBB = CopyForwardBB->splitBasicBlock(CopyForwardBB->getTerminator(),
                                              "memmove_fwd_residual");
      FwdResidualBB = SuccBB;
    }
    IRBuilder<> LoopBuilder(LoopBB->getTerminator());
    PHINode *LoopPhi = LoopBuilder.CreatePHI(ILengthType, 0, "fwd_index");
    Value *LoadGEP =
        LoopBuilder.CreateInBoundsGEP(LoopOpType, SrcAddr, LoopPhi);
    Value *Element = LoopBuilder.CreateAlignedLoad(
        LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile, "element");
    Value *StoreGEP =
        LoopBuilder.CreateInBoundsGEP(LoopOpType, DstAddr, LoopPhi);
    LoopBuilder.CreateAlignedStore(Element, StoreGEP, PartDstAlign,
                                   DstIsVolatile);
    Value *Index = LoopBuilder.CreateAdd(LoopPhi, One);
    LoopPhi->addIncoming(Index, LoopBB);
    LoopPhi->addIncoming(Zero, OrigBB);

    // Replace the unconditional branch to turn LoopBB into a loop.
    Instruction *UncondTerm = LoopBB->getTerminator();
    LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpEQ(Index, TripCount), SuccBB,
                             LoopBB);
    UncondTerm->eraseFromParent();
  }

  if (RemainingBytes != 0) {
    uint64_t BytesCopied = BytesCopiedInLoop;

    // Residual code is required to move the remaining bytes. In the forward
    // case, we emit it in the normal order.
    IRBuilder<> FwdResBuilder(FwdResidualBB->getTerminator());
    SmallVector<Type *, 5> RemainingOps;
    TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
                                          SrcAS, DstAS, PartSrcAlign,
                                          PartDstAlign);
    for (auto *OpTy : RemainingOps)
      GenerateResidualLdStPair(OpTy, FwdResBuilder, BytesCopied);
  }
}
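// Illustrative example of the straight-line residual produced above: moving 19
// bytes with an i64 loop type and an i16+i8 residual (the types are
// target-dependent) behaves roughly like this C-style sketch; the backwards
// path runs the residual first and emits it in reverse order, so the residual
// always covers the end of the moved range:
//
//   if (src < dst) {                                   // memmove_bwd_residual
//     d[18] = s[18];
//     *(uint16_t *)(d + 16) = *(const uint16_t *)(s + 16);
//     for (size_t i = 2; i-- > 0;)                     // memmove_bwd_loop
//       ((uint64_t *)d)[i] = ((const uint64_t *)s)[i];
//   } else {
//     for (size_t i = 0; i < 2; ++i)                   // memmove_fwd_loop
//       ((uint64_t *)d)[i] = ((const uint64_t *)s)[i];
//     *(uint16_t *)(d + 16) = *(const uint16_t *)(s + 16);
//     d[18] = s[18];                                   // memmove_fwd_residual
//   }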

static void createMemSetLoop(Instruction *InsertBefore, Value *DstAddr,
                             Value *CopyLen, Value *SetValue, Align DstAlign,
                             bool IsVolatile) {
  Type *TypeOfCopyLen = CopyLen->getType();
  BasicBlock *OrigBB = InsertBefore->getParent();
  Function *F = OrigBB->getParent();
  const DataLayout &DL = F->getDataLayout();
  BasicBlock *NewBB = OrigBB->splitBasicBlock(InsertBefore, "split");
  BasicBlock *LoopBB =
      BasicBlock::Create(F->getContext(), "loadstoreloop", F, NewBB);

  IRBuilder<> Builder(OrigBB->getTerminator());

  Builder.CreateCondBr(
      Builder.CreateICmpEQ(ConstantInt::get(TypeOfCopyLen, 0), CopyLen), NewBB,
      LoopBB);
  OrigBB->getTerminator()->eraseFromParent();

  unsigned PartSize = DL.getTypeStoreSize(SetValue->getType());
  Align PartAlign(commonAlignment(DstAlign, PartSize));

  IRBuilder<> LoopBuilder(LoopBB);
  PHINode *LoopIndex = LoopBuilder.CreatePHI(TypeOfCopyLen, 0);
  LoopIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0), OrigBB);

  LoopBuilder.CreateAlignedStore(
      SetValue,
      LoopBuilder.CreateInBoundsGEP(SetValue->getType(), DstAddr, LoopIndex),
      PartAlign, IsVolatile);

  Value *NewIndex =
      LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(TypeOfCopyLen, 1));
  LoopIndex->addIncoming(NewIndex, LoopBB);

  LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpULT(NewIndex, CopyLen), LoopBB,
                           NewBB);
}
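// C-style sketch of the structure emitted above (illustrative, assuming
// SetValue is a single byte; the element width follows SetValue's type):
//
//   if (n != 0)
//     for (size_t i = 0; i < n; ++i)        // loadstoreloop
//       ((unsigned char *)dst)[i] = value;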

template <typename T>
static bool canOverlap(MemTransferBase<T> *Memcpy, ScalarEvolution *SE) {
  if (SE) {
    const SCEV *SrcSCEV = SE->getSCEV(Memcpy->getRawSource());
    const SCEV *DestSCEV = SE->getSCEV(Memcpy->getRawDest());
    if (SE->isKnownPredicateAt(CmpInst::ICMP_NE, SrcSCEV, DestSCEV, Memcpy))
      return false;
  }
  return true;
}
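// When ScalarEvolution proves the source and destination pointers distinct,
// canOverlap returns false and the memcpy expansions above tag their loads and
// stores with alias-scope/noalias metadata, so later passes may assume the
// stores do not clobber the loads.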

void llvm::expandMemCpyAsLoop(MemCpyInst *Memcpy,
                              const TargetTransformInfo &TTI,
                              ScalarEvolution *SE) {
  bool CanOverlap = canOverlap(Memcpy, SE);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Memcpy->getLength())) {
    createMemCpyLoopKnownSize(
        /* InsertBefore */ Memcpy,
        /* SrcAddr */ Memcpy->getRawSource(),
        /* DstAddr */ Memcpy->getRawDest(),
        /* CopyLen */ CI,
        /* SrcAlign */ Memcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ Memcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ Memcpy->isVolatile(),
        /* DstIsVolatile */ Memcpy->isVolatile(),
        /* CanOverlap */ CanOverlap,
        /* TargetTransformInfo */ TTI);
  } else {
    createMemCpyLoopUnknownSize(
        /* InsertBefore */ Memcpy,
        /* SrcAddr */ Memcpy->getRawSource(),
        /* DstAddr */ Memcpy->getRawDest(),
        /* CopyLen */ Memcpy->getLength(),
        /* SrcAlign */ Memcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ Memcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ Memcpy->isVolatile(),
        /* DstIsVolatile */ Memcpy->isVolatile(),
        /* CanOverlap */ CanOverlap,
        /* TargetTransformInfo */ TTI);
  }
}
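// Typical use from a lowering pass (illustrative sketch; the calling pass is
// not part of this file). The expansion leaves the original intrinsic in
// place, so the caller erases it afterwards:
//
//   if (auto *Memcpy = dyn_cast<MemCpyInst>(&Inst)) {
//     expandMemCpyAsLoop(Memcpy, TTI, SE);
//     Memcpy->eraseFromParent();
//   }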

bool llvm::expandMemMoveAsLoop(MemMoveInst *Memmove,
                               const TargetTransformInfo &TTI) {
  Value *CopyLen = Memmove->getLength();
  Value *SrcAddr = Memmove->getRawSource();
  Value *DstAddr = Memmove->getRawDest();
  Align SrcAlign = Memmove->getSourceAlign().valueOrOne();
  Align DstAlign = Memmove->getDestAlign().valueOrOne();
  bool SrcIsVolatile = Memmove->isVolatile();
  bool DstIsVolatile = SrcIsVolatile;
  IRBuilder<> CastBuilder(Memmove);

  unsigned SrcAS = SrcAddr->getType()->getPointerAddressSpace();
  unsigned DstAS = DstAddr->getType()->getPointerAddressSpace();
  if (SrcAS != DstAS) {
    if (!TTI.addrspacesMayAlias(SrcAS, DstAS)) {
      // We may not be able to emit a pointer comparison, but we don't have
      // to. Expand as memcpy.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(CopyLen)) {
        createMemCpyLoopKnownSize(/*InsertBefore=*/Memmove, SrcAddr, DstAddr,
                                  CI, SrcAlign, DstAlign, SrcIsVolatile,
                                  DstIsVolatile,
                                  /*CanOverlap=*/false, TTI);
      } else {
        createMemCpyLoopUnknownSize(/*InsertBefore=*/Memmove, SrcAddr, DstAddr,
                                    CopyLen, SrcAlign, DstAlign, SrcIsVolatile,
                                    DstIsVolatile,
                                    /*CanOverlap=*/false, TTI);
      }

      return true;
    }

    if (!(TTI.isValidAddrSpaceCast(DstAS, SrcAS) ||
          TTI.isValidAddrSpaceCast(SrcAS, DstAS))) {
      // We don't know generically if it's legal to introduce an
      // addrspacecast. We need to know either if it's legal to insert an
      // addrspacecast, or if the address spaces cannot alias.
      LLVM_DEBUG(
          dbgs() << "Do not know how to expand memmove between different "
                    "address spaces\n");
      return false;
    }
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(CopyLen)) {
    createMemMoveLoopKnownSize(
        /*InsertBefore=*/Memmove, SrcAddr, DstAddr, CI, SrcAlign, DstAlign,
        SrcIsVolatile, DstIsVolatile, TTI);
  } else {
    createMemMoveLoopUnknownSize(
        /*InsertBefore=*/Memmove, SrcAddr, DstAddr, CopyLen, SrcAlign, DstAlign,
        SrcIsVolatile, DstIsVolatile, TTI);
  }
  return true;
}

void llvm::expandMemSetAsLoop(MemSetInst *Memset) {
  createMemSetLoop(/* InsertBefore */ Memset,
                   /* DstAddr */ Memset->getRawDest(),
                   /* CopyLen */ Memset->getLength(),
                   /* SetValue */ Memset->getValue(),
                   /* Alignment */ Memset->getDestAlign().valueOrOne(),
                   Memset->isVolatile());
}

void llvm::expandAtomicMemCpyAsLoop(AtomicMemCpyInst *AtomicMemcpy,
                                    const TargetTransformInfo &TTI,
                                    ScalarEvolution *SE) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(AtomicMemcpy->getLength())) {
    createMemCpyLoopKnownSize(
        /* InsertBefore */ AtomicMemcpy,
        /* SrcAddr */ AtomicMemcpy->getRawSource(),
        /* DstAddr */ AtomicMemcpy->getRawDest(),
        /* CopyLen */ CI,
        /* SrcAlign */ AtomicMemcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ AtomicMemcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ AtomicMemcpy->isVolatile(),
        /* DstIsVolatile */ AtomicMemcpy->isVolatile(),
        /* CanOverlap */ false, // SrcAddr & DstAddr may not overlap by spec.
        /* TargetTransformInfo */ TTI,
        /* AtomicCpySize */ AtomicMemcpy->getElementSizeInBytes());
  } else {
    createMemCpyLoopUnknownSize(
        /* InsertBefore */ AtomicMemcpy,
        /* SrcAddr */ AtomicMemcpy->getRawSource(),
        /* DstAddr */ AtomicMemcpy->getRawDest(),
        /* CopyLen */ AtomicMemcpy->getLength(),
        /* SrcAlign */ AtomicMemcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ AtomicMemcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ AtomicMemcpy->isVolatile(),
        /* DstIsVolatile */ AtomicMemcpy->isVolatile(),
        /* CanOverlap */ false, // SrcAddr & DstAddr may not overlap by spec.
        /* TargetTransformInfo */ TTI,
        /* AtomicCpySize */ AtomicMemcpy->getElementSizeInBytes());
  }
}