//===- LowerMemIntrinsics.cpp ----------------------------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <optional>

#define DEBUG_TYPE "lower-mem-intrinsics"

using namespace llvm;

// Emit a loop implementing the semantics of an llvm.memcpy whose size is a
// compile-time constant.
void llvm::createMemCpyLoopKnownSize(
    Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr,
    ConstantInt *CopyLen, Align SrcAlign, Align DstAlign, bool SrcIsVolatile,
    bool DstIsVolatile, bool CanOverlap, const TargetTransformInfo &TTI,
    std::optional<uint32_t> AtomicElementSize) {
  // No need to expand zero length copies.
  if (CopyLen->isZero())
    return;

  BasicBlock *PreLoopBB = InsertBefore->getParent();
  BasicBlock *PostLoopBB = nullptr;
  Function *ParentFunc = PreLoopBB->getParent();
  LLVMContext &Ctx = PreLoopBB->getContext();
  const DataLayout &DL = ParentFunc->getDataLayout();
  MDBuilder MDB(Ctx);
  MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain("MemCopyDomain");
  StringRef Name = "MemCopyAliasScope";
  MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);

  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *TypeOfCopyLen = CopyLen->getType();
  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(
      Ctx, CopyLen, SrcAS, DstAS, SrcAlign, DstAlign, AtomicElementSize);
  assert((!AtomicElementSize || !LoopOpType->isVectorTy()) &&
         "Atomic memcpy lowering is not supported for vector operand type");

  Type *Int8Type = Type::getInt8Ty(Ctx);
  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  assert((!AtomicElementSize || LoopOpSize % *AtomicElementSize == 0) &&
         "Atomic memcpy lowering is not supported for selected operand size");

  uint64_t LoopEndCount = alignDown(CopyLen->getZExtValue(), LoopOpSize);

  if (LoopEndCount != 0) {
    // Split
    PostLoopBB = PreLoopBB->splitBasicBlock(InsertBefore, "memcpy-split");
    BasicBlock *LoopBB =
        BasicBlock::Create(Ctx, "load-store-loop", ParentFunc, PostLoopBB);
    PreLoopBB->getTerminator()->setSuccessor(0, LoopBB);

    IRBuilder<> PLBuilder(PreLoopBB->getTerminator());

    Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));
    Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));

    IRBuilder<> LoopBuilder(LoopBB);
    PHINode *LoopIndex = LoopBuilder.CreatePHI(TypeOfCopyLen, 2, "loop-index");
    LoopIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0U), PreLoopBB);
    // Loop Body

    // If we used LoopOpType as GEP element type, we would iterate over the
    // buffers in TypeStoreSize strides while copying TypeAllocSize bytes,
    // i.e., we would miss bytes if TypeStoreSize != TypeAllocSize. Therefore,
    // use byte offsets computed from the TypeStoreSize.
    Value *SrcGEP = LoopBuilder.CreateInBoundsGEP(Int8Type, SrcAddr, LoopIndex);
    LoadInst *Load = LoopBuilder.CreateAlignedLoad(LoopOpType, SrcGEP,
                                                   PartSrcAlign, SrcIsVolatile);
    if (!CanOverlap) {
      // Set alias scope for loads.
      Load->setMetadata(LLVMContext::MD_alias_scope,
                        MDNode::get(Ctx, NewScope));
    }
    Value *DstGEP = LoopBuilder.CreateInBoundsGEP(Int8Type, DstAddr, LoopIndex);
    StoreInst *Store = LoopBuilder.CreateAlignedStore(Load, DstGEP,
                                                      PartDstAlign,
                                                      DstIsVolatile);
    if (!CanOverlap) {
      // Indicate that stores don't overlap loads.
      Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
    }
    if (AtomicElementSize) {
      Load->setAtomic(AtomicOrdering::Unordered);
      Store->setAtomic(AtomicOrdering::Unordered);
    }
    Value *NewIndex = LoopBuilder.CreateAdd(
        LoopIndex, ConstantInt::get(TypeOfCopyLen, LoopOpSize));
    LoopIndex->addIncoming(NewIndex, LoopBB);

    // Create the loop branch condition.
    Constant *LoopEndCI = ConstantInt::get(TypeOfCopyLen, LoopEndCount);
    LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpULT(NewIndex, LoopEndCI),
                             LoopBB, PostLoopBB);
  }

  uint64_t BytesCopied = LoopEndCount;
  uint64_t RemainingBytes = CopyLen->getZExtValue() - BytesCopied;
  if (RemainingBytes) {
    IRBuilder<> RBuilder(PostLoopBB ? PostLoopBB->getFirstNonPHI()
                                    : InsertBefore);

    SmallVector<Type *, 5> RemainingOps;
    TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
                                          SrcAS, DstAS, SrcAlign, DstAlign,
                                          AtomicElementSize);

    for (auto *OpTy : RemainingOps) {
      Align PartSrcAlign(commonAlignment(SrcAlign, BytesCopied));
      Align PartDstAlign(commonAlignment(DstAlign, BytesCopied));

      unsigned OperandSize = DL.getTypeStoreSize(OpTy);
      assert(
          (!AtomicElementSize || OperandSize % *AtomicElementSize == 0) &&
          "Atomic memcpy lowering is not supported for selected operand size");

      Value *SrcGEP = RBuilder.CreateInBoundsGEP(
          Int8Type, SrcAddr, ConstantInt::get(TypeOfCopyLen, BytesCopied));
      LoadInst *Load =
          RBuilder.CreateAlignedLoad(OpTy, SrcGEP, PartSrcAlign, SrcIsVolatile);
      if (!CanOverlap) {
        // Set alias scope for loads.
        Load->setMetadata(LLVMContext::MD_alias_scope,
                          MDNode::get(Ctx, NewScope));
      }
      Value *DstGEP = RBuilder.CreateInBoundsGEP(
          Int8Type, DstAddr, ConstantInt::get(TypeOfCopyLen, BytesCopied));
      StoreInst *Store = RBuilder.CreateAlignedStore(Load, DstGEP, PartDstAlign,
                                                     DstIsVolatile);
      if (!CanOverlap) {
        // Indicate that stores don't overlap loads.
        Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
      }
      if (AtomicElementSize) {
        Load->setAtomic(AtomicOrdering::Unordered);
        Store->setAtomic(AtomicOrdering::Unordered);
      }
      BytesCopied += OperandSize;
    }
  }
  assert(BytesCopied == CopyLen->getZExtValue() &&
         "Bytes copied should match size in the call!");
}
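
// Illustration of the expansion above: for CopyLen == 17 with an 8-byte
// LoopOpType, LoopEndCount is alignDown(17, 8) == 16, so the load/store loop
// runs two iterations (bytes [0, 16)), and the remaining single byte is
// copied by the straight-line residual code using whatever operand types
// TTI.getMemcpyLoopResidualLoweringType selects.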

// \returns \p Len urem \p OpSize, checking for optimization opportunities.
static Value *getRuntimeLoopRemainder(const DataLayout &DL, IRBuilderBase &B,
                                      Value *Len, Value *OpSize,
                                      unsigned OpSizeVal) {
  // For powers of 2, we can AND with (OpSizeVal - 1) instead of using urem.
  if (isPowerOf2_32(OpSizeVal))
    return B.CreateAnd(Len, OpSizeVal - 1);
  return B.CreateURem(Len, OpSize);
}

// \returns (\p Len udiv \p OpSize) mul \p OpSize, checking for optimization
// opportunities.
// If RTLoopRemainder is provided, it must be the result of
// getRuntimeLoopRemainder() with the same arguments.
static Value *getRuntimeLoopBytes(const DataLayout &DL, IRBuilderBase &B,
                                  Value *Len, Value *OpSize, unsigned OpSizeVal,
                                  Value *RTLoopRemainder = nullptr) {
  if (!RTLoopRemainder)
    RTLoopRemainder = getRuntimeLoopRemainder(DL, B, Len, OpSize, OpSizeVal);
  return B.CreateSub(Len, RTLoopRemainder);
}
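
// Worked example for the two helpers above: with Len == 23 and OpSize == 8
// (a power of two), the remainder is emitted as `and Len, 7` == 7 and the
// loop byte count as `sub Len, 7` == 16; for a non-power-of-two OpSize a
// urem/sub pair is emitted instead.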

// Emit a loop implementing the semantics of llvm.memcpy where the size is not
// a compile-time constant.
void llvm::createMemCpyLoopUnknownSize(
    Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr, Value *CopyLen,
    Align SrcAlign, Align DstAlign, bool SrcIsVolatile, bool DstIsVolatile,
    bool CanOverlap, const TargetTransformInfo &TTI,
    std::optional<uint32_t> AtomicElementSize) {
  BasicBlock *PreLoopBB = InsertBefore->getParent();
  BasicBlock *PostLoopBB =
      PreLoopBB->splitBasicBlock(InsertBefore, "post-loop-memcpy-expansion");

  Function *ParentFunc = PreLoopBB->getParent();
  const DataLayout &DL = ParentFunc->getDataLayout();
  LLVMContext &Ctx = PreLoopBB->getContext();
  MDBuilder MDB(Ctx);
  MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain("MemCopyDomain");
  StringRef Name = "MemCopyAliasScope";
  MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);

  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(
      Ctx, CopyLen, SrcAS, DstAS, SrcAlign, DstAlign, AtomicElementSize);
  assert((!AtomicElementSize || !LoopOpType->isVectorTy()) &&
         "Atomic memcpy lowering is not supported for vector operand type");
  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  assert((!AtomicElementSize || LoopOpSize % *AtomicElementSize == 0) &&
         "Atomic memcpy lowering is not supported for selected operand size");

  IRBuilder<> PLBuilder(PreLoopBB->getTerminator());

  // Calculate the loop trip count, and remaining bytes to copy after the loop.
  Type *CopyLenType = CopyLen->getType();
  IntegerType *ILengthType = dyn_cast<IntegerType>(CopyLenType);
  assert(ILengthType &&
         "expected size argument to memcpy to be an integer type!");
  Type *Int8Type = Type::getInt8Ty(Ctx);
  bool LoopOpIsInt8 = LoopOpType == Int8Type;
  ConstantInt *CILoopOpSize = ConstantInt::get(ILengthType, LoopOpSize);

  Value *RuntimeLoopBytes = CopyLen;
  Value *RuntimeResidualBytes = nullptr;
  if (!LoopOpIsInt8) {
    RuntimeResidualBytes = getRuntimeLoopRemainder(DL, PLBuilder, CopyLen,
                                                   CILoopOpSize, LoopOpSize);
    RuntimeLoopBytes = getRuntimeLoopBytes(DL, PLBuilder, CopyLen, CILoopOpSize,
                                           LoopOpSize, RuntimeResidualBytes);
  }

  BasicBlock *LoopBB =
      BasicBlock::Create(Ctx, "loop-memcpy-expansion", ParentFunc, PostLoopBB);
  IRBuilder<> LoopBuilder(LoopBB);

  Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));
  Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));

  PHINode *LoopIndex = LoopBuilder.CreatePHI(CopyLenType, 2, "loop-index");
  LoopIndex->addIncoming(ConstantInt::get(CopyLenType, 0U), PreLoopBB);

  // If we used LoopOpType as GEP element type, we would iterate over the
  // buffers in TypeStoreSize strides while copying TypeAllocSize bytes, i.e.,
  // we would miss bytes if TypeStoreSize != TypeAllocSize. Therefore, use byte
  // offsets computed from the TypeStoreSize.
  Value *SrcGEP = LoopBuilder.CreateInBoundsGEP(Int8Type, SrcAddr, LoopIndex);
  LoadInst *Load = LoopBuilder.CreateAlignedLoad(LoopOpType, SrcGEP,
                                                 PartSrcAlign, SrcIsVolatile);
  if (!CanOverlap) {
    // Set alias scope for loads.
    Load->setMetadata(LLVMContext::MD_alias_scope, MDNode::get(Ctx, NewScope));
  }
  Value *DstGEP = LoopBuilder.CreateInBoundsGEP(Int8Type, DstAddr, LoopIndex);
  StoreInst *Store =
      LoopBuilder.CreateAlignedStore(Load, DstGEP, PartDstAlign, DstIsVolatile);
  if (!CanOverlap) {
    // Indicate that stores don't overlap loads.
    Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
  }
  if (AtomicElementSize) {
    Load->setAtomic(AtomicOrdering::Unordered);
    Store->setAtomic(AtomicOrdering::Unordered);
  }
  Value *NewIndex = LoopBuilder.CreateAdd(
      LoopIndex, ConstantInt::get(CopyLenType, LoopOpSize));
  LoopIndex->addIncoming(NewIndex, LoopBB);

  bool RequiresResidual =
      !LoopOpIsInt8 && !(AtomicElementSize && LoopOpSize == AtomicElementSize);
  if (RequiresResidual) {
    Type *ResLoopOpType = AtomicElementSize
                              ? Type::getIntNTy(Ctx, *AtomicElementSize * 8)
                              : Int8Type;
    unsigned ResLoopOpSize = DL.getTypeStoreSize(ResLoopOpType);
    assert((ResLoopOpSize == AtomicElementSize ? *AtomicElementSize : 1) &&
           "Store size is expected to match type size");

    Align ResSrcAlign(commonAlignment(PartSrcAlign, ResLoopOpSize));
    Align ResDstAlign(commonAlignment(PartDstAlign, ResLoopOpSize));

    // Loop body for the residual copy.
    BasicBlock *ResLoopBB = BasicBlock::Create(
        Ctx, "loop-memcpy-residual", PreLoopBB->getParent(), PostLoopBB);
    // Residual loop header.
    BasicBlock *ResHeaderBB = BasicBlock::Create(
        Ctx, "loop-memcpy-residual-header", PreLoopBB->getParent(), nullptr);

    // Need to update the pre-loop basic block to branch to the correct place:
    // branch to the main loop if the count is non-zero, branch to the residual
    // loop if the copy size is smaller than one iteration of the main loop but
    // non-zero, and finally branch to after the residual loop if the memcpy
    // size is zero.
    ConstantInt *Zero = ConstantInt::get(ILengthType, 0U);
    PLBuilder.CreateCondBr(PLBuilder.CreateICmpNE(RuntimeLoopBytes, Zero),
                           LoopBB, ResHeaderBB);
    PreLoopBB->getTerminator()->eraseFromParent();

    LoopBuilder.CreateCondBr(
        LoopBuilder.CreateICmpULT(NewIndex, RuntimeLoopBytes), LoopBB,
        ResHeaderBB);

    // Determine if we need to branch to the residual loop or bypass it.
    IRBuilder<> RHBuilder(ResHeaderBB);
    RHBuilder.CreateCondBr(RHBuilder.CreateICmpNE(RuntimeResidualBytes, Zero),
                           ResLoopBB, PostLoopBB);

    // Copy the residual with single byte load/store loop.
    IRBuilder<> ResBuilder(ResLoopBB);
    PHINode *ResidualIndex =
        ResBuilder.CreatePHI(CopyLenType, 2, "residual-loop-index");
    ResidualIndex->addIncoming(Zero, ResHeaderBB);

    Value *FullOffset = ResBuilder.CreateAdd(RuntimeLoopBytes, ResidualIndex);
    Value *SrcGEP = ResBuilder.CreateInBoundsGEP(Int8Type, SrcAddr, FullOffset);
    LoadInst *Load = ResBuilder.CreateAlignedLoad(ResLoopOpType, SrcGEP,
                                                  ResSrcAlign, SrcIsVolatile);
    if (!CanOverlap) {
      // Set alias scope for loads.
      Load->setMetadata(LLVMContext::MD_alias_scope,
                        MDNode::get(Ctx, NewScope));
    }
    Value *DstGEP = ResBuilder.CreateInBoundsGEP(Int8Type, DstAddr, FullOffset);
    StoreInst *Store =
        ResBuilder.CreateAlignedStore(Load, DstGEP, ResDstAlign, DstIsVolatile);
    if (!CanOverlap) {
      // Indicate that stores don't overlap loads.
      Store->setMetadata(LLVMContext::MD_noalias, MDNode::get(Ctx, NewScope));
    }
    if (AtomicElementSize) {
      Load->setAtomic(AtomicOrdering::Unordered);
      Store->setAtomic(AtomicOrdering::Unordered);
    }
    Value *ResNewIndex = ResBuilder.CreateAdd(
        ResidualIndex, ConstantInt::get(CopyLenType, ResLoopOpSize));
    ResidualIndex->addIncoming(ResNewIndex, ResLoopBB);

    // Create the loop branch condition.
    ResBuilder.CreateCondBr(
        ResBuilder.CreateICmpULT(ResNewIndex, RuntimeResidualBytes), ResLoopBB,
        PostLoopBB);
  } else {
    // In this case the loop operand type was a byte, and there is no need for
    // a residual loop to copy the remaining memory after the main loop.
    // We do however need to patch up the control flow by creating the
    // terminators for the preloop block and the memcpy loop.
    ConstantInt *Zero = ConstantInt::get(ILengthType, 0U);
    PLBuilder.CreateCondBr(PLBuilder.CreateICmpNE(RuntimeLoopBytes, Zero),
                           LoopBB, PostLoopBB);
    PreLoopBB->getTerminator()->eraseFromParent();
    LoopBuilder.CreateCondBr(
        LoopBuilder.CreateICmpULT(NewIndex, RuntimeLoopBytes), LoopBB,
        PostLoopBB);
  }
}
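
// Illustration of the runtime expansion above: for an 8-byte LoopOpType and a
// runtime length n, the pre-loop code computes RuntimeLoopBytes = n & ~7 and
// RuntimeResidualBytes = n & 7; the main loop then advances in 8-byte steps,
// while the residual loop copies the trailing 0-7 bytes one element at a time
// (i8, or the atomic element type for element-atomic copies).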

// If \p Addr1 and \p Addr2 are pointers to different address spaces, create an
// addrspacecast to obtain a pair of pointers in the same address space. The
// caller needs to ensure that addrspacecasting is possible.
// No-op if the pointers are in the same address space.
static std::pair<Value *, Value *>
tryInsertCastToCommonAddrSpace(IRBuilderBase &B, Value *Addr1, Value *Addr2,
                               const TargetTransformInfo &TTI) {
  Value *ResAddr1 = Addr1;
  Value *ResAddr2 = Addr2;

  unsigned AS1 = cast<PointerType>(Addr1->getType())->getAddressSpace();
  unsigned AS2 = cast<PointerType>(Addr2->getType())->getAddressSpace();
  if (AS1 != AS2) {
    if (TTI.isValidAddrSpaceCast(AS2, AS1))
      ResAddr2 = B.CreateAddrSpaceCast(Addr2, Addr1->getType());
    else if (TTI.isValidAddrSpaceCast(AS1, AS2))
      ResAddr1 = B.CreateAddrSpaceCast(Addr1, Addr2->getType());
    else
      llvm_unreachable("Can only lower memmove between address spaces if they "
                       "support addrspacecast");
  }
  return {ResAddr1, ResAddr2};
}

// Lower memmove to IR. memmove is required to correctly copy overlapping
// memory regions; therefore, it has to check the relative positions of the
// source and destination pointers and choose the copy direction accordingly.
//
// The code below is an IR rendition of this C function:
//
// void* memmove(void* dst, const void* src, size_t n) {
//   unsigned char* d = dst;
//   const unsigned char* s = src;
//   if (s < d) {
//     // copy backwards
//     while (n--) {
//       d[n] = s[n];
//     }
//   } else {
//     // copy forward
//     for (size_t i = 0; i < n; ++i) {
//       d[i] = s[i];
//     }
//   }
//   return dst;
// }
//
// If the TargetTransformInfo specifies a wider MemcpyLoopLoweringType, it is
// used for the memory accesses in the loops. Then, additional loops with
// byte-wise accesses are added for the remaining bytes.
static void createMemMoveLoopUnknownSize(Instruction *InsertBefore,
                                         Value *SrcAddr, Value *DstAddr,
                                         Value *CopyLen, Align SrcAlign,
                                         Align DstAlign, bool SrcIsVolatile,
                                         bool DstIsVolatile,
                                         const TargetTransformInfo &TTI) {
  Type *TypeOfCopyLen = CopyLen->getType();
  BasicBlock *OrigBB = InsertBefore->getParent();
  Function *F = OrigBB->getParent();
  const DataLayout &DL = F->getDataLayout();
  LLVMContext &Ctx = OrigBB->getContext();
  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(Ctx, CopyLen, SrcAS, DstAS,
                                                   SrcAlign, DstAlign);
  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  Type *Int8Type = Type::getInt8Ty(Ctx);
  bool LoopOpIsInt8 = LoopOpType == Int8Type;

  // If the memory accesses are wider than one byte, residual loops with
  // i8-accesses are required to move the remaining bytes.
  bool RequiresResidual = !LoopOpIsInt8;

  Type *ResidualLoopOpType = Int8Type;
  unsigned ResidualLoopOpSize = DL.getTypeStoreSize(ResidualLoopOpType);

  // Calculate the loop trip count and remaining bytes to copy after the loop.
  IntegerType *ILengthType = cast<IntegerType>(TypeOfCopyLen);
  ConstantInt *CILoopOpSize = ConstantInt::get(ILengthType, LoopOpSize);
  ConstantInt *CIResidualLoopOpSize =
      ConstantInt::get(ILengthType, ResidualLoopOpSize);
  ConstantInt *Zero = ConstantInt::get(ILengthType, 0);

  IRBuilder<> PLBuilder(InsertBefore);

  Value *RuntimeLoopBytes = CopyLen;
  Value *RuntimeLoopRemainder = nullptr;
  Value *SkipResidualCondition = nullptr;
  if (RequiresResidual) {
    RuntimeLoopRemainder = getRuntimeLoopRemainder(DL, PLBuilder, CopyLen,
                                                   CILoopOpSize, LoopOpSize);
    RuntimeLoopBytes = getRuntimeLoopBytes(DL, PLBuilder, CopyLen, CILoopOpSize,
                                           LoopOpSize, RuntimeLoopRemainder);
    SkipResidualCondition =
        PLBuilder.CreateICmpEQ(RuntimeLoopRemainder, Zero, "skip_residual");
  }
  Value *SkipMainCondition =
      PLBuilder.CreateICmpEQ(RuntimeLoopBytes, Zero, "skip_main");

  // Create a comparison of src and dst, based on which we jump to either
  // the forward-copy part of the function (if src >= dst) or the
  // backwards-copy part (if src < dst).
  // SplitBlockAndInsertIfThenElse conveniently creates the basic if-then-else
  // structure. Its block terminators (unconditional branches) are replaced by
  // the appropriate conditional branches when the loop is built.
  // If the pointers are in different address spaces, they need to be converted
  // to a compatible one. Cases where memory ranges in the different address
  // spaces cannot overlap are lowered as memcpy and not handled here.
  auto [CmpSrcAddr, CmpDstAddr] =
      tryInsertCastToCommonAddrSpace(PLBuilder, SrcAddr, DstAddr, TTI);
  Value *PtrCompare =
      PLBuilder.CreateICmpULT(CmpSrcAddr, CmpDstAddr, "compare_src_dst");
  Instruction *ThenTerm, *ElseTerm;
  SplitBlockAndInsertIfThenElse(PtrCompare, InsertBefore->getIterator(),
                                &ThenTerm, &ElseTerm);

  // If the LoopOpSize is greater than 1, each part of the function consists of
  // four blocks:
  //   memmove_copy_backwards:
  //       skip the residual loop when 0 iterations are required
  //   memmove_bwd_residual_loop:
  //       copy the last few bytes individually so that the remaining length is
  //       a multiple of the LoopOpSize
  //   memmove_bwd_middle: skip the main loop when 0 iterations are required
  //   memmove_bwd_main_loop: the actual backwards loop BB with wide accesses
  //   memmove_copy_forward: skip the main loop when 0 iterations are required
  //   memmove_fwd_main_loop: the actual forward loop BB with wide accesses
  //   memmove_fwd_middle: skip the residual loop when 0 iterations are required
  //   memmove_fwd_residual_loop: copy the last few bytes individually
  //
  // The main and residual loop are switched between copying forward and
  // backward so that the residual loop always operates on the end of the moved
  // range. This is based on the assumption that buffers whose start is aligned
  // with the LoopOpSize are more common than buffers whose end is.
  //
  // If the LoopOpSize is 1, each part of the function consists of two blocks:
  //   memmove_copy_backwards: skip the loop when 0 iterations are required
  //   memmove_bwd_main_loop: the actual backwards loop BB
  //   memmove_copy_forward: skip the loop when 0 iterations are required
  //   memmove_fwd_main_loop: the actual forward loop BB
  BasicBlock *CopyBackwardsBB = ThenTerm->getParent();
  CopyBackwardsBB->setName("memmove_copy_backwards");
  BasicBlock *CopyForwardBB = ElseTerm->getParent();
  CopyForwardBB->setName("memmove_copy_forward");
  BasicBlock *ExitBB = InsertBefore->getParent();
  ExitBB->setName("memmove_done");

  Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));
  Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));

  // Accesses in the residual loops do not share the same alignment as those in
  // the main loops.
  Align ResidualSrcAlign(commonAlignment(PartSrcAlign, ResidualLoopOpSize));
  Align ResidualDstAlign(commonAlignment(PartDstAlign, ResidualLoopOpSize));

  // Copying backwards.
  {
    BasicBlock *MainLoopBB = BasicBlock::Create(
        F->getContext(), "memmove_bwd_main_loop", F, CopyForwardBB);

    // The predecessor of the memmove_bwd_main_loop. Updated in the
    // following if a residual loop is emitted first.
    BasicBlock *PredBB = CopyBackwardsBB;

    if (RequiresResidual) {
      // backwards residual loop
      BasicBlock *ResidualLoopBB = BasicBlock::Create(
          F->getContext(), "memmove_bwd_residual_loop", F, MainLoopBB);
      IRBuilder<> ResidualLoopBuilder(ResidualLoopBB);
      PHINode *ResidualLoopPhi = ResidualLoopBuilder.CreatePHI(ILengthType, 0);
      Value *ResidualIndex = ResidualLoopBuilder.CreateSub(
          ResidualLoopPhi, CIResidualLoopOpSize, "bwd_residual_index");
      // If we used LoopOpType as GEP element type, we would iterate over the
      // buffers in TypeStoreSize strides while copying TypeAllocSize bytes,
      // i.e., we would miss bytes if TypeStoreSize != TypeAllocSize.
      // Therefore, use byte offsets computed from the TypeStoreSize.
      Value *LoadGEP = ResidualLoopBuilder.CreateInBoundsGEP(Int8Type, SrcAddr,
                                                             ResidualIndex);
      Value *Element = ResidualLoopBuilder.CreateAlignedLoad(
          ResidualLoopOpType, LoadGEP, ResidualSrcAlign, SrcIsVolatile,
          "element");
      Value *StoreGEP = ResidualLoopBuilder.CreateInBoundsGEP(Int8Type, DstAddr,
                                                              ResidualIndex);
      ResidualLoopBuilder.CreateAlignedStore(Element, StoreGEP,
                                             ResidualDstAlign, DstIsVolatile);

      // After the residual loop, go to an intermediate block.
      BasicBlock *IntermediateBB = BasicBlock::Create(
          F->getContext(), "memmove_bwd_middle", F, MainLoopBB);
      // Later code expects a terminator in the PredBB.
      IRBuilder<> IntermediateBuilder(IntermediateBB);
      IntermediateBuilder.CreateUnreachable();
      ResidualLoopBuilder.CreateCondBr(
          ResidualLoopBuilder.CreateICmpEQ(ResidualIndex, RuntimeLoopBytes),
          IntermediateBB, ResidualLoopBB);

      ResidualLoopPhi->addIncoming(ResidualIndex, ResidualLoopBB);
      ResidualLoopPhi->addIncoming(CopyLen, CopyBackwardsBB);

      // How to get to the residual:
      BranchInst::Create(IntermediateBB, ResidualLoopBB, SkipResidualCondition,
                         ThenTerm->getIterator());
      ThenTerm->eraseFromParent();

      PredBB = IntermediateBB;
    }

    // main loop
    IRBuilder<> MainLoopBuilder(MainLoopBB);
    PHINode *MainLoopPhi = MainLoopBuilder.CreatePHI(ILengthType, 0);
    Value *MainIndex =
        MainLoopBuilder.CreateSub(MainLoopPhi, CILoopOpSize, "bwd_main_index");
    Value *LoadGEP =
        MainLoopBuilder.CreateInBoundsGEP(Int8Type, SrcAddr, MainIndex);
    Value *Element = MainLoopBuilder.CreateAlignedLoad(
        LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile, "element");
    Value *StoreGEP =
        MainLoopBuilder.CreateInBoundsGEP(Int8Type, DstAddr, MainIndex);
    MainLoopBuilder.CreateAlignedStore(Element, StoreGEP, PartDstAlign,
                                       DstIsVolatile);
    MainLoopBuilder.CreateCondBr(MainLoopBuilder.CreateICmpEQ(MainIndex, Zero),
                                 ExitBB, MainLoopBB);
    MainLoopPhi->addIncoming(MainIndex, MainLoopBB);
    MainLoopPhi->addIncoming(RuntimeLoopBytes, PredBB);

    // How to get to the main loop:
    Instruction *PredBBTerm = PredBB->getTerminator();
    BranchInst::Create(ExitBB, MainLoopBB, SkipMainCondition,
                       PredBBTerm->getIterator());
    PredBBTerm->eraseFromParent();
  }

  // Copying forward.
  // main loop
  {
    BasicBlock *MainLoopBB =
        BasicBlock::Create(F->getContext(), "memmove_fwd_main_loop", F, ExitBB);
    IRBuilder<> MainLoopBuilder(MainLoopBB);
    PHINode *MainLoopPhi =
        MainLoopBuilder.CreatePHI(ILengthType, 0, "fwd_main_index");
    Value *LoadGEP =
        MainLoopBuilder.CreateInBoundsGEP(Int8Type, SrcAddr, MainLoopPhi);
    Value *Element = MainLoopBuilder.CreateAlignedLoad(
        LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile, "element");
    Value *StoreGEP =
        MainLoopBuilder.CreateInBoundsGEP(Int8Type, DstAddr, MainLoopPhi);
    MainLoopBuilder.CreateAlignedStore(Element, StoreGEP, PartDstAlign,
                                       DstIsVolatile);
    Value *MainIndex = MainLoopBuilder.CreateAdd(MainLoopPhi, CILoopOpSize);
    MainLoopPhi->addIncoming(MainIndex, MainLoopBB);
    MainLoopPhi->addIncoming(Zero, CopyForwardBB);

    Instruction *CopyFwdBBTerm = CopyForwardBB->getTerminator();
    BasicBlock *SuccessorBB = ExitBB;
    if (RequiresResidual)
      SuccessorBB =
          BasicBlock::Create(F->getContext(), "memmove_fwd_middle", F, ExitBB);

    // leaving or staying in the main loop
    MainLoopBuilder.CreateCondBr(
        MainLoopBuilder.CreateICmpEQ(MainIndex, RuntimeLoopBytes), SuccessorBB,
        MainLoopBB);

    // getting in or skipping the main loop
    BranchInst::Create(SuccessorBB, MainLoopBB, SkipMainCondition,
                       CopyFwdBBTerm->getIterator());
    CopyFwdBBTerm->eraseFromParent();

    if (RequiresResidual) {
      BasicBlock *IntermediateBB = SuccessorBB;
      IRBuilder<> IntermediateBuilder(IntermediateBB);
      BasicBlock *ResidualLoopBB = BasicBlock::Create(
          F->getContext(), "memmove_fwd_residual_loop", F, ExitBB);
      IntermediateBuilder.CreateCondBr(SkipResidualCondition, ExitBB,
                                       ResidualLoopBB);

      // Residual loop
      IRBuilder<> ResidualLoopBuilder(ResidualLoopBB);
      PHINode *ResidualLoopPhi =
          ResidualLoopBuilder.CreatePHI(ILengthType, 0, "fwd_residual_index");
      Value *LoadGEP = ResidualLoopBuilder.CreateInBoundsGEP(Int8Type, SrcAddr,
                                                             ResidualLoopPhi);
      Value *Element = ResidualLoopBuilder.CreateAlignedLoad(
          ResidualLoopOpType, LoadGEP, ResidualSrcAlign, SrcIsVolatile,
          "element");
      Value *StoreGEP = ResidualLoopBuilder.CreateInBoundsGEP(Int8Type, DstAddr,
                                                              ResidualLoopPhi);
      ResidualLoopBuilder.CreateAlignedStore(Element, StoreGEP,
                                             ResidualDstAlign, DstIsVolatile);
      Value *ResidualIndex =
          ResidualLoopBuilder.CreateAdd(ResidualLoopPhi, CIResidualLoopOpSize);
      ResidualLoopBuilder.CreateCondBr(
          ResidualLoopBuilder.CreateICmpEQ(ResidualIndex, CopyLen), ExitBB,
          ResidualLoopBB);
      ResidualLoopPhi->addIncoming(ResidualIndex, ResidualLoopBB);
      ResidualLoopPhi->addIncoming(RuntimeLoopBytes, IntermediateBB);
    }
  }
}

// Similar to createMemMoveLoopUnknownSize, only the trip counts are computed
// at compile time, obsolete loops and branches are omitted, and the residual
// code is straight-line code instead of a loop.
static void createMemMoveLoopKnownSize(Instruction *InsertBefore,
                                       Value *SrcAddr, Value *DstAddr,
                                       ConstantInt *CopyLen, Align SrcAlign,
                                       Align DstAlign, bool SrcIsVolatile,
                                       bool DstIsVolatile,
                                       const TargetTransformInfo &TTI) {
  // No need to expand zero length moves.
  if (CopyLen->isZero())
    return;

  Type *TypeOfCopyLen = CopyLen->getType();
  BasicBlock *OrigBB = InsertBefore->getParent();
  Function *F = OrigBB->getParent();
  const DataLayout &DL = F->getDataLayout();
  LLVMContext &Ctx = OrigBB->getContext();
  unsigned SrcAS = cast<PointerType>(SrcAddr->getType())->getAddressSpace();
  unsigned DstAS = cast<PointerType>(DstAddr->getType())->getAddressSpace();

  Type *LoopOpType = TTI.getMemcpyLoopLoweringType(Ctx, CopyLen, SrcAS, DstAS,
                                                   SrcAlign, DstAlign);
  unsigned LoopOpSize = DL.getTypeStoreSize(LoopOpType);
  Type *Int8Type = Type::getInt8Ty(Ctx);

  // Calculate the loop trip count and remaining bytes to copy after the loop.
  uint64_t BytesCopiedInLoop = alignDown(CopyLen->getZExtValue(), LoopOpSize);
  uint64_t RemainingBytes = CopyLen->getZExtValue() - BytesCopiedInLoop;

  IntegerType *ILengthType = cast<IntegerType>(TypeOfCopyLen);
  ConstantInt *Zero = ConstantInt::get(ILengthType, 0);
  ConstantInt *LoopBound = ConstantInt::get(ILengthType, BytesCopiedInLoop);
  ConstantInt *CILoopOpSize = ConstantInt::get(ILengthType, LoopOpSize);

  IRBuilder<> PLBuilder(InsertBefore);

  auto [CmpSrcAddr, CmpDstAddr] =
      tryInsertCastToCommonAddrSpace(PLBuilder, SrcAddr, DstAddr, TTI);
  Value *PtrCompare =
      PLBuilder.CreateICmpULT(CmpSrcAddr, CmpDstAddr, "compare_src_dst");
  Instruction *ThenTerm, *ElseTerm;
  SplitBlockAndInsertIfThenElse(PtrCompare, InsertBefore->getIterator(),
                                &ThenTerm, &ElseTerm);

  BasicBlock *CopyBackwardsBB = ThenTerm->getParent();
  BasicBlock *CopyForwardBB = ElseTerm->getParent();
  BasicBlock *ExitBB = InsertBefore->getParent();
  ExitBB->setName("memmove_done");

  Align PartSrcAlign(commonAlignment(SrcAlign, LoopOpSize));
  Align PartDstAlign(commonAlignment(DstAlign, LoopOpSize));

  // Helper function to generate a load/store pair of a given type in the
  // residual. Used in the forward and backward branches.
  auto GenerateResidualLdStPair = [&](Type *OpTy, IRBuilderBase &Builder,
                                      uint64_t &BytesCopied) {
    Align ResSrcAlign(commonAlignment(SrcAlign, BytesCopied));
    Align ResDstAlign(commonAlignment(DstAlign, BytesCopied));

    unsigned OperandSize = DL.getTypeStoreSize(OpTy);

    // If we used LoopOpType as GEP element type, we would iterate over the
    // buffers in TypeStoreSize strides while copying TypeAllocSize bytes,
    // i.e., we would miss bytes if TypeStoreSize != TypeAllocSize. Therefore,
    // use byte offsets computed from the TypeStoreSize.
    Value *SrcGEP = Builder.CreateInBoundsGEP(
        Int8Type, SrcAddr, ConstantInt::get(TypeOfCopyLen, BytesCopied));
    LoadInst *Load =
        Builder.CreateAlignedLoad(OpTy, SrcGEP, ResSrcAlign, SrcIsVolatile);
    Value *DstGEP = Builder.CreateInBoundsGEP(
        Int8Type, DstAddr, ConstantInt::get(TypeOfCopyLen, BytesCopied));
    Builder.CreateAlignedStore(Load, DstGEP, ResDstAlign, DstIsVolatile);
    BytesCopied += OperandSize;
  };

  // Copying backwards.
  if (RemainingBytes != 0) {
    CopyBackwardsBB->setName("memmove_bwd_residual");
    uint64_t BytesCopied = BytesCopiedInLoop;

    // Residual code is required to move the remaining bytes. We need the same
    // instructions as in the forward case, only in reverse. So we generate
    // code the same way, except that we change the IRBuilder insert point for
    // each load/store pair so that each one is inserted before the previous
    // one instead of after it.
    IRBuilder<> BwdResBuilder(CopyBackwardsBB->getFirstNonPHI());
    SmallVector<Type *, 5> RemainingOps;
    TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
                                          SrcAS, DstAS, PartSrcAlign,
                                          PartDstAlign);
    for (auto *OpTy : RemainingOps) {
      // reverse the order of the emitted operations
      BwdResBuilder.SetInsertPoint(CopyBackwardsBB->getFirstNonPHI());
      GenerateResidualLdStPair(OpTy, BwdResBuilder, BytesCopied);
    }
  }
  if (BytesCopiedInLoop != 0) {
    BasicBlock *LoopBB = CopyBackwardsBB;
    BasicBlock *PredBB = OrigBB;
    if (RemainingBytes != 0) {
      // if we introduce residual code, it needs its separate BB
      LoopBB = CopyBackwardsBB->splitBasicBlock(
          CopyBackwardsBB->getTerminator(), "memmove_bwd_loop");
      PredBB = CopyBackwardsBB;
    } else {
      CopyBackwardsBB->setName("memmove_bwd_loop");
    }
    IRBuilder<> LoopBuilder(LoopBB->getTerminator());
    PHINode *LoopPhi = LoopBuilder.CreatePHI(ILengthType, 0);
    Value *Index = LoopBuilder.CreateSub(LoopPhi, CILoopOpSize, "bwd_index");
    Value *LoadGEP = LoopBuilder.CreateInBoundsGEP(Int8Type, SrcAddr, Index);
    Value *Element = LoopBuilder.CreateAlignedLoad(
        LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile, "element");
    Value *StoreGEP = LoopBuilder.CreateInBoundsGEP(Int8Type, DstAddr, Index);
    LoopBuilder.CreateAlignedStore(Element, StoreGEP, PartDstAlign,
                                   DstIsVolatile);

    // Replace the unconditional branch introduced by
    // SplitBlockAndInsertIfThenElse to turn LoopBB into a loop.
    Instruction *UncondTerm = LoopBB->getTerminator();
    LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpEQ(Index, Zero), ExitBB,
                             LoopBB);
    UncondTerm->eraseFromParent();

    LoopPhi->addIncoming(Index, LoopBB);
    LoopPhi->addIncoming(LoopBound, PredBB);
  }

  // Copying forward.
  BasicBlock *FwdResidualBB = CopyForwardBB;
  if (BytesCopiedInLoop != 0) {
    CopyForwardBB->setName("memmove_fwd_loop");
    BasicBlock *LoopBB = CopyForwardBB;
    BasicBlock *SuccBB = ExitBB;
    if (RemainingBytes != 0) {
      // if we introduce residual code, it needs its separate BB
      SuccBB = CopyForwardBB->splitBasicBlock(CopyForwardBB->getTerminator(),
                                              "memmove_fwd_residual");
      FwdResidualBB = SuccBB;
    }
    IRBuilder<> LoopBuilder(LoopBB->getTerminator());
    PHINode *LoopPhi = LoopBuilder.CreatePHI(ILengthType, 0, "fwd_index");
    Value *LoadGEP = LoopBuilder.CreateInBoundsGEP(Int8Type, SrcAddr, LoopPhi);
    Value *Element = LoopBuilder.CreateAlignedLoad(
        LoopOpType, LoadGEP, PartSrcAlign, SrcIsVolatile, "element");
    Value *StoreGEP = LoopBuilder.CreateInBoundsGEP(Int8Type, DstAddr, LoopPhi);
    LoopBuilder.CreateAlignedStore(Element, StoreGEP, PartDstAlign,
                                   DstIsVolatile);
    Value *Index = LoopBuilder.CreateAdd(LoopPhi, CILoopOpSize);
    LoopPhi->addIncoming(Index, LoopBB);
    LoopPhi->addIncoming(Zero, OrigBB);

    // Replace the unconditional branch to turn LoopBB into a loop.
    Instruction *UncondTerm = LoopBB->getTerminator();
    LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpEQ(Index, LoopBound), SuccBB,
                             LoopBB);
    UncondTerm->eraseFromParent();
  }

  if (RemainingBytes != 0) {
    uint64_t BytesCopied = BytesCopiedInLoop;

    // Residual code is required to move the remaining bytes. In the forward
    // case, we emit it in the normal order.
    IRBuilder<> FwdResBuilder(FwdResidualBB->getTerminator());
    SmallVector<Type *, 5> RemainingOps;
    TTI.getMemcpyLoopResidualLoweringType(RemainingOps, Ctx, RemainingBytes,
                                          SrcAS, DstAS, PartSrcAlign,
                                          PartDstAlign);
    for (auto *OpTy : RemainingOps)
      GenerateResidualLdStPair(OpTy, FwdResBuilder, BytesCopied);
  }
}

static void createMemSetLoop(Instruction *InsertBefore, Value *DstAddr,
                             Value *CopyLen, Value *SetValue, Align DstAlign,
                             bool IsVolatile) {
  Type *TypeOfCopyLen = CopyLen->getType();
  BasicBlock *OrigBB = InsertBefore->getParent();
  Function *F = OrigBB->getParent();
  const DataLayout &DL = F->getDataLayout();
  BasicBlock *NewBB = OrigBB->splitBasicBlock(InsertBefore, "split");
  BasicBlock *LoopBB =
      BasicBlock::Create(F->getContext(), "loadstoreloop", F, NewBB);

  IRBuilder<> Builder(OrigBB->getTerminator());

  Builder.CreateCondBr(
      Builder.CreateICmpEQ(ConstantInt::get(TypeOfCopyLen, 0), CopyLen), NewBB,
      LoopBB);
  OrigBB->getTerminator()->eraseFromParent();

  unsigned PartSize = DL.getTypeStoreSize(SetValue->getType());
  Align PartAlign(commonAlignment(DstAlign, PartSize));

  IRBuilder<> LoopBuilder(LoopBB);
  PHINode *LoopIndex = LoopBuilder.CreatePHI(TypeOfCopyLen, 0);
  LoopIndex->addIncoming(ConstantInt::get(TypeOfCopyLen, 0), OrigBB);

  LoopBuilder.CreateAlignedStore(
      SetValue,
      LoopBuilder.CreateInBoundsGEP(SetValue->getType(), DstAddr, LoopIndex),
      PartAlign, IsVolatile);

  Value *NewIndex =
      LoopBuilder.CreateAdd(LoopIndex, ConstantInt::get(TypeOfCopyLen, 1));
  LoopIndex->addIncoming(NewIndex, LoopBB);

  LoopBuilder.CreateCondBr(LoopBuilder.CreateICmpULT(NewIndex, CopyLen), LoopBB,
                           NewBB);
}

// Returns true unless ScalarEvolution can prove that the raw source and
// destination pointers of \p Memcpy are distinct.
template <typename T>
static bool canOverlap(MemTransferBase<T> *Memcpy, ScalarEvolution *SE) {
  if (SE) {
    const SCEV *SrcSCEV = SE->getSCEV(Memcpy->getRawSource());
    const SCEV *DestSCEV = SE->getSCEV(Memcpy->getRawDest());
    if (SE->isKnownPredicateAt(CmpInst::ICMP_NE, SrcSCEV, DestSCEV, Memcpy))
      return false;
  }
  return true;
}

// Expand \p Memcpy as a loop. The memcpy instruction itself is not deleted.
void llvm::expandMemCpyAsLoop(MemCpyInst *Memcpy,
                              const TargetTransformInfo &TTI,
                              ScalarEvolution *SE) {
  bool CanOverlap = canOverlap(Memcpy, SE);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Memcpy->getLength())) {
    createMemCpyLoopKnownSize(
        /* InsertBefore */ Memcpy,
        /* SrcAddr */ Memcpy->getRawSource(),
        /* DstAddr */ Memcpy->getRawDest(),
        /* CopyLen */ CI,
        /* SrcAlign */ Memcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ Memcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ Memcpy->isVolatile(),
        /* DstIsVolatile */ Memcpy->isVolatile(),
        /* CanOverlap */ CanOverlap,
        /* TargetTransformInfo */ TTI);
  } else {
    createMemCpyLoopUnknownSize(
        /* InsertBefore */ Memcpy,
        /* SrcAddr */ Memcpy->getRawSource(),
        /* DstAddr */ Memcpy->getRawDest(),
        /* CopyLen */ Memcpy->getLength(),
        /* SrcAlign */ Memcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ Memcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ Memcpy->isVolatile(),
        /* DstIsVolatile */ Memcpy->isVolatile(),
        /* CanOverlap */ CanOverlap,
        /* TargetTransformInfo */ TTI);
  }
}

// Expand \p Memmove as a loop. Returns true on success; returns false if the
// expansion is not possible, e.g., a memmove between address spaces that may
// alias but do not support addrspacecast.
bool llvm::expandMemMoveAsLoop(MemMoveInst *Memmove,
                               const TargetTransformInfo &TTI) {
  Value *CopyLen = Memmove->getLength();
  Value *SrcAddr = Memmove->getRawSource();
  Value *DstAddr = Memmove->getRawDest();
  Align SrcAlign = Memmove->getSourceAlign().valueOrOne();
  Align DstAlign = Memmove->getDestAlign().valueOrOne();
  bool SrcIsVolatile = Memmove->isVolatile();
  bool DstIsVolatile = SrcIsVolatile;
  IRBuilder<> CastBuilder(Memmove);

  unsigned SrcAS = SrcAddr->getType()->getPointerAddressSpace();
  unsigned DstAS = DstAddr->getType()->getPointerAddressSpace();
  if (SrcAS != DstAS) {
    if (!TTI.addrspacesMayAlias(SrcAS, DstAS)) {
      // We may not be able to emit a pointer comparison, but we don't have
      // to. Expand as memcpy.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(CopyLen)) {
        createMemCpyLoopKnownSize(/*InsertBefore=*/Memmove, SrcAddr, DstAddr,
                                  CI, SrcAlign, DstAlign, SrcIsVolatile,
                                  DstIsVolatile,
                                  /*CanOverlap=*/false, TTI);
      } else {
        createMemCpyLoopUnknownSize(/*InsertBefore=*/Memmove, SrcAddr, DstAddr,
                                    CopyLen, SrcAlign, DstAlign, SrcIsVolatile,
                                    DstIsVolatile,
                                    /*CanOverlap=*/false, TTI);
      }

      return true;
    }

    if (!(TTI.isValidAddrSpaceCast(DstAS, SrcAS) ||
          TTI.isValidAddrSpaceCast(SrcAS, DstAS))) {
      // We don't know generically if it's legal to introduce an
      // addrspacecast. We need to know either if it's legal to insert an
      // addrspacecast, or if the address spaces cannot alias.
      LLVM_DEBUG(
          dbgs() << "Do not know how to expand memmove between different "
                    "address spaces\n");
      return false;
    }
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(CopyLen)) {
    createMemMoveLoopKnownSize(
        /*InsertBefore=*/Memmove, SrcAddr, DstAddr, CI, SrcAlign, DstAlign,
        SrcIsVolatile, DstIsVolatile, TTI);
  } else {
    createMemMoveLoopUnknownSize(
        /*InsertBefore=*/Memmove, SrcAddr, DstAddr, CopyLen, SrcAlign, DstAlign,
        SrcIsVolatile, DstIsVolatile, TTI);
  }
  return true;
}

// Expand \p Memset as a loop. The memset instruction itself is not deleted.
void llvm::expandMemSetAsLoop(MemSetInst *Memset) {
  createMemSetLoop(/* InsertBefore */ Memset,
                   /* DstAddr */ Memset->getRawDest(),
                   /* CopyLen */ Memset->getLength(),
                   /* SetValue */ Memset->getValue(),
                   /* Alignment */ Memset->getDestAlign().valueOrOne(),
                   Memset->isVolatile());
}

// Expand \p Memset (an llvm.experimental.memset.pattern) as a loop. The
// intrinsic itself is not deleted.
void llvm::expandMemSetPatternAsLoop(MemSetPatternInst *Memset) {
  createMemSetLoop(/* InsertBefore=*/Memset,
                   /* DstAddr=*/Memset->getRawDest(),
                   /* CopyLen=*/Memset->getLength(),
                   /* SetValue=*/Memset->getValue(),
                   /* Alignment=*/Memset->getDestAlign().valueOrOne(),
                   Memset->isVolatile());
}

// Expand \p AtomicMemcpy as a loop of unordered atomic element accesses. The
// intrinsic itself is not deleted.
void llvm::expandAtomicMemCpyAsLoop(AtomicMemCpyInst *AtomicMemcpy,
                                    const TargetTransformInfo &TTI,
                                    ScalarEvolution *SE) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(AtomicMemcpy->getLength())) {
    createMemCpyLoopKnownSize(
        /* InsertBefore */ AtomicMemcpy,
        /* SrcAddr */ AtomicMemcpy->getRawSource(),
        /* DstAddr */ AtomicMemcpy->getRawDest(),
        /* CopyLen */ CI,
        /* SrcAlign */ AtomicMemcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ AtomicMemcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ AtomicMemcpy->isVolatile(),
        /* DstIsVolatile */ AtomicMemcpy->isVolatile(),
        /* CanOverlap */ false, // SrcAddr & DstAddr may not overlap by spec.
        /* TargetTransformInfo */ TTI,
        /* AtomicCpySize */ AtomicMemcpy->getElementSizeInBytes());
  } else {
    createMemCpyLoopUnknownSize(
        /* InsertBefore */ AtomicMemcpy,
        /* SrcAddr */ AtomicMemcpy->getRawSource(),
        /* DstAddr */ AtomicMemcpy->getRawDest(),
        /* CopyLen */ AtomicMemcpy->getLength(),
        /* SrcAlign */ AtomicMemcpy->getSourceAlign().valueOrOne(),
        /* DestAlign */ AtomicMemcpy->getDestAlign().valueOrOne(),
        /* SrcIsVolatile */ AtomicMemcpy->isVolatile(),
        /* DstIsVolatile */ AtomicMemcpy->isVolatile(),
        /* CanOverlap */ false, // SrcAddr & DstAddr may not overlap by spec.
        /* TargetTransformInfo */ TTI,
        /* AtomicCpySize */ AtomicMemcpy->getElementSizeInBytes());
  }
}
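
// Usage sketch (illustrative only, not part of this file): a pass that decides
// to expand a memcpy intrinsic in place might do something like the following,
// assuming it already has the function's TargetTransformInfo (TTI) and,
// optionally, ScalarEvolution (SE); the surrounding variable names are
// hypothetical.
//
//   if (auto *Memcpy = dyn_cast<MemCpyInst>(&Inst)) {
//     expandMemCpyAsLoop(Memcpy, TTI, SE); // emits the loops before Memcpy
//     Memcpy->eraseFromParent();           // the intrinsic is not deleted
//   }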