LLVM 23.0.0git
ExpandMemCmp.cpp
Go to the documentation of this file.
1//===--- ExpandMemCmp.cpp - Expand memcmp() to load/stores ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass tries to expand memcmp() calls into optimally-sized loads and
10// compares for the target.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/Statistic.h"
23#include "llvm/IR/Dominators.h"
24#include "llvm/IR/IRBuilder.h"
30#include <optional>
31
32using namespace llvm;
33using namespace llvm::PatternMatch;
34
35#define DEBUG_TYPE "expand-memcmp"
36
// Pass statistics (visible with -stats): how many memcmp calls were seen,
// how many were skipped (non-constant size / over budget), and how many were
// actually inlined.
37 STATISTIC(NumMemCmpCalls, "Number of memcmp calls");
38 STATISTIC(NumMemCmpNotConstant, "Number of memcmp calls without constant size");
39 STATISTIC(NumMemCmpGreaterThanMax,
40 "Number of memcmp calls with size greater than max size");
41 STATISTIC(NumMemCmpInlined, "Number of inlined memcmp calls");
42 
// Command-line overrides for the target-provided expansion parameters.
// NOTE(review): the three cl::opt declaration heads (e.g. "static
// cl::opt<unsigned> MemCmpEqZeroNumLoadsPerBlock(") appear to be missing from
// this copy of the file -- only their argument lists survive. Confirm against
// the upstream source before editing.
44 "memcmp-num-loads-per-block", cl::Hidden, cl::init(1),
45 cl::desc("The number of loads per basic block for inline expansion of "
46 "memcmp that is only being compared against zero."));
47 
49 "max-loads-per-memcmp", cl::Hidden,
50 cl::desc("Set maximum number of loads used in expanded memcmp"));
51 
53 "max-loads-per-memcmp-opt-size", cl::Hidden,
54 cl::desc("Set maximum number of loads used in expanded memcmp for -Os/Oz"));
55
56namespace {
57
58
59// This class provides helper functions to expand a memcmp library call into an
60// inline expansion.
61 class MemCmpExpansion {
// Block that computes the ordered (-1/+1) memcmp result on an early mismatch.
// PhiSrc1/PhiSrc2 collect the differing loaded values from whichever
// load/compare block branched here.
62 struct ResultBlock {
63 BasicBlock *BB = nullptr;
64 PHINode *PhiSrc1 = nullptr;
65 PHINode *PhiSrc2 = nullptr;
66 
67 ResultBlock() = default;
68 };
69 
// The memcmp call being expanded, its constant size in bytes, and the CFG
// bookkeeping for the blocks the expansion emits.
70 CallInst *const CI = nullptr;
71 ResultBlock ResBlock;
72 const uint64_t Size;
// Widest load (in bytes) the decomposition uses.
73 unsigned MaxLoadSize = 0;
// Number of loads wider than one byte (used to size the result-block phis).
74 uint64_t NumLoadsNonOneByte = 0;
75 const uint64_t NumLoadsPerBlockForZeroCmp;
76 std::vector<BasicBlock *> LoadCmpBlocks;
77 BasicBlock *EndBlock = nullptr;
// i32 phi in EndBlock that carries the final memcmp result.
78 PHINode *PhiRes = nullptr;
// True when the call is only compared (in)equal to zero, which permits the
// cheaper xor/or lowering.
79 const bool IsUsedForZeroCmp;
80 const DataLayout &DL;
81 DomTreeUpdater *DTU = nullptr;
82 IRBuilder<> Builder;
83 // Represents the decomposition in blocks of the expansion. For example,
84 // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and
85 // 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {1, 32}.
86 struct LoadEntry {
87 LoadEntry(unsigned LoadSize, uint64_t Offset)
88 : LoadSize(LoadSize), Offset(Offset) {
89 }
90 
91 // The size of the load for this block, in bytes.
92 unsigned LoadSize;
93 // The offset of this load from the base pointer, in bytes.
94 uint64_t Offset;
95 };
96 using LoadEntryVector = SmallVector<LoadEntry, 8>;
97 LoadEntryVector LoadSequence;
98 
// CFG construction and per-block emission helpers; see the out-of-line
// definitions for details.
99 void createLoadCmpBlocks();
100 void createResultBlock();
101 void setupResultBlockPHINodes();
102 void setupEndBlockPHINodes();
103 Value *getCompareLoadPairs(unsigned BlockIndex, unsigned &LoadIndex);
104 void emitLoadCompareBlock(unsigned BlockIndex);
105 void emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
106 unsigned &LoadIndex);
107 void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned OffsetBytes);
108 void emitMemCmpResultBlock();
109 Value *getMemCmpExpansionZeroCase();
110 Value *getMemCmpEqZeroOneBlock();
111 Value *getMemCmpOneBlock();
// A pair of values loaded (or constant-folded) from the two memcmp sources.
112 struct LoadPair {
113 Value *Lhs = nullptr;
114 Value *Rhs = nullptr;
115 };
116 LoadPair getLoadPair(Type *LoadSizeType, Type *BSwapSizeType,
117 Type *CmpSizeType, unsigned OffsetBytes);
118 
// Strategies for decomposing Size bytes into a sequence of loads. Both return
// an empty vector when the decomposition would exceed MaxNumLoads.
119 static LoadEntryVector
120 computeGreedyLoadSequence(uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
121 unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte);
122 static LoadEntryVector
123 computeOverlappingLoadSequence(uint64_t Size, unsigned MaxLoadSize,
124 unsigned MaxNumLoads,
125 unsigned &NumLoadsNonOneByte);
126 
// Merges trailing contiguous loads into a single allowed "tail expansion".
127 static void optimiseLoadSequence(
128 LoadEntryVector &LoadSequence,
129 const TargetTransformInfo::MemCmpExpansionOptions &Options,
130 bool IsUsedForZeroCmp);
131 
132 public:
133 MemCmpExpansion(CallInst *CI, uint64_t Size,
134 const TargetTransformInfo::MemCmpExpansionOptions &Options,
135 const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
136 DomTreeUpdater *DTU);
137 
138 unsigned getNumBlocks();
139 uint64_t getNumLoads() const { return LoadSequence.size(); }
140 
// Emits the expansion and returns the replacement value, or nullptr if the
// call (and its single user) were already replaced in-place.
141 Value *getMemCmpExpansion();
142 };
143
// Greedy decomposition: walk the target's allowed load sizes from widest to
// narrowest, taking as many loads of each size as fit in the remaining bytes.
// Returns an empty sequence if the total load count would exceed MaxNumLoads.
// NOTE(review): the second line of the signature (presumably
// "uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes," per the in-class
// declaration) is missing from this copy of the file -- confirm upstream.
144 MemCmpExpansion::LoadEntryVector MemCmpExpansion::computeGreedyLoadSequence(
146 const unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte) {
147 NumLoadsNonOneByte = 0;
148 LoadEntryVector LoadSequence;
149 uint64_t Offset = 0;
150 while (Size && !LoadSizes.empty()) {
151 const unsigned LoadSize = LoadSizes.front();
152 const uint64_t NumLoadsForThisSize = Size / LoadSize;
153 if (LoadSequence.size() + NumLoadsForThisSize > MaxNumLoads) {
154 // Do not expand if the total number of loads is larger than what the
155 // target allows. Note that it's important that we exit before completing
156 // the expansion to avoid using a ton of memory to store the expansion for
157 // large sizes.
158 return {};
159 }
160 if (NumLoadsForThisSize > 0) {
161 for (uint64_t I = 0; I < NumLoadsForThisSize; ++I) {
162 LoadSequence.push_back({LoadSize, Offset});
163 Offset += LoadSize;
164 }
// Counted once per size class, not per load: wide loads share the result phi.
165 if (LoadSize > 1)
166 ++NumLoadsNonOneByte;
167 Size = Size % LoadSize;
168 }
169 LoadSizes = LoadSizes.drop_front();
170 }
171 return LoadSequence;
172 }
173
174MemCmpExpansion::LoadEntryVector
175MemCmpExpansion::computeOverlappingLoadSequence(uint64_t Size,
176 const unsigned MaxLoadSize,
177 const unsigned MaxNumLoads,
178 unsigned &NumLoadsNonOneByte) {
179 // These are already handled by the greedy approach.
180 if (Size < 2 || MaxLoadSize < 2)
181 return {};
182
183 // We try to do as many non-overlapping loads as possible starting from the
184 // beginning.
185 const uint64_t NumNonOverlappingLoads = Size / MaxLoadSize;
186 assert(NumNonOverlappingLoads && "there must be at least one load");
187 // There remain 0 to (MaxLoadSize - 1) bytes to load, this will be done with
188 // an overlapping load.
189 Size = Size - NumNonOverlappingLoads * MaxLoadSize;
190 // Bail if we do not need an overloapping store, this is already handled by
191 // the greedy approach.
192 if (Size == 0)
193 return {};
194 // Bail if the number of loads (non-overlapping + potential overlapping one)
195 // is larger than the max allowed.
196 if ((NumNonOverlappingLoads + 1) > MaxNumLoads)
197 return {};
198
199 // Add non-overlapping loads.
200 LoadEntryVector LoadSequence;
201 uint64_t Offset = 0;
202 for (uint64_t I = 0; I < NumNonOverlappingLoads; ++I) {
203 LoadSequence.push_back({MaxLoadSize, Offset});
204 Offset += MaxLoadSize;
205 }
206
207 // Add the last overlapping load.
208 assert(Size > 0 && Size < MaxLoadSize && "broken invariant");
209 LoadSequence.push_back({MaxLoadSize, Offset - (MaxLoadSize - Size)});
210 NumLoadsNonOneByte = 1;
211 return LoadSequence;
212}
213
214void MemCmpExpansion::optimiseLoadSequence(
215 LoadEntryVector &LoadSequence,
216 const TargetTransformInfo::MemCmpExpansionOptions &Options,
217 bool IsUsedForZeroCmp) {
218 // This part of code attempts to optimize the LoadSequence by merging allowed
219 // subsequences into single loads of allowed sizes from
220 // `MemCmpExpansionOptions::AllowedTailExpansions`. If it is for zero
221 // comparison or if no allowed tail expansions are specified, we exit early.
222 if (IsUsedForZeroCmp || Options.AllowedTailExpansions.empty())
223 return;
224
225 while (LoadSequence.size() >= 2) {
226 auto Last = LoadSequence[LoadSequence.size() - 1];
227 auto PreLast = LoadSequence[LoadSequence.size() - 2];
228
229 // Exit the loop if the two sequences are not contiguous
230 if (PreLast.Offset + PreLast.LoadSize != Last.Offset)
231 break;
232
233 auto LoadSize = Last.LoadSize + PreLast.LoadSize;
234 if (find(Options.AllowedTailExpansions, LoadSize) ==
235 Options.AllowedTailExpansions.end())
236 break;
237
238 // Remove the last two sequences and replace with the combined sequence
239 LoadSequence.pop_back();
240 LoadSequence.pop_back();
241 LoadSequence.emplace_back(PreLast.Offset, LoadSize);
242 }
243}
244
245// Initialize the basic block structure required for expansion of memcmp call
246// with given maximum load size and memcmp size parameter.
247// This structure includes:
248// 1. A list of load compare blocks - LoadCmpBlocks.
249// 2. An EndBlock, split from original instruction point, which is the block to
250// return from.
251// 3. ResultBlock, block to branch to for early exit when a
252// LoadCmpBlock finds a difference.
253 MemCmpExpansion::MemCmpExpansion(
254 CallInst *const CI, uint64_t Size,
255 const TargetTransformInfo::MemCmpExpansionOptions &Options,
256 const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
257 DomTreeUpdater *DTU)
258 : CI(CI), Size(Size), NumLoadsPerBlockForZeroCmp(Options.NumLoadsPerBlock),
259 IsUsedForZeroCmp(IsUsedForZeroCmp), DL(TheDataLayout), DTU(DTU),
260 Builder(CI) {
261 assert(Size > 0 && "zero blocks");
262 // Scale the max size down if the target can load more bytes than we need.
// Options.LoadSizes is expected widest-first; drop entries wider than Size.
263 llvm::ArrayRef<unsigned> LoadSizes(Options.LoadSizes);
264 while (!LoadSizes.empty() && LoadSizes.front() > Size) {
265 LoadSizes = LoadSizes.drop_front();
266 }
267 assert(!LoadSizes.empty() && "cannot load Size bytes");
268 MaxLoadSize = LoadSizes.front();
269 // Compute the decomposition.
// Start from the greedy decomposition; it may be empty if over budget.
270 unsigned GreedyNumLoadsNonOneByte = 0;
271 LoadSequence = computeGreedyLoadSequence(Size, LoadSizes, Options.MaxNumLoads,
272 GreedyNumLoadsNonOneByte);
273 NumLoadsNonOneByte = GreedyNumLoadsNonOneByte;
274 assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
275 // If we allow overlapping loads and the load sequence is not already optimal,
276 // use overlapping loads.
// "Not already optimal" == empty (over budget) or more than two loads; a
// successful overlapping sequence replaces the greedy one only when shorter.
277 if (Options.AllowOverlappingLoads &&
278 (LoadSequence.empty() || LoadSequence.size() > 2)) {
279 unsigned OverlappingNumLoadsNonOneByte = 0;
280 auto OverlappingLoads = computeOverlappingLoadSequence(
281 Size, MaxLoadSize, Options.MaxNumLoads, OverlappingNumLoadsNonOneByte);
282 if (!OverlappingLoads.empty() &&
283 (LoadSequence.empty() ||
284 OverlappingLoads.size() < LoadSequence.size())) {
285 LoadSequence = OverlappingLoads;
286 NumLoadsNonOneByte = OverlappingNumLoadsNonOneByte;
287 }
288 }
289 assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
// Finally, try to merge a mergeable tail (see optimiseLoadSequence).
290 optimiseLoadSequence(LoadSequence, Options, IsUsedForZeroCmp);
291 }
292
293unsigned MemCmpExpansion::getNumBlocks() {
294 if (IsUsedForZeroCmp)
295 return getNumLoads() / NumLoadsPerBlockForZeroCmp +
296 (getNumLoads() % NumLoadsPerBlockForZeroCmp != 0 ? 1 : 0);
297 return getNumLoads();
298}
299
300void MemCmpExpansion::createLoadCmpBlocks() {
301 for (unsigned i = 0; i < getNumBlocks(); i++) {
302 BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb",
303 EndBlock->getParent(), EndBlock);
304 LoadCmpBlocks.push_back(BB);
305 }
306}
307
308void MemCmpExpansion::createResultBlock() {
309 ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block",
310 EndBlock->getParent(), EndBlock);
311}
312
// Loads one value of type LoadSizeType from each memcmp operand at
// `OffsetBytes`, constant-folding when the source is a constant. Optionally
// zero-extends to BSwapSizeType and byte-swaps (for little-endian ordered
// compares), then zero-extends to CmpSizeType if needed. Either type argument
// may be null to skip that step.
313 MemCmpExpansion::LoadPair MemCmpExpansion::getLoadPair(Type *LoadSizeType,
314 Type *BSwapSizeType,
315 Type *CmpSizeType,
316 unsigned OffsetBytes) {
317 // Get the memory source at offset `OffsetBytes`.
318 Value *LhsSource = CI->getArgOperand(0);
319 Value *RhsSource = CI->getArgOperand(1);
320 Align LhsAlign = LhsSource->getPointerAlignment(DL);
321 Align RhsAlign = RhsSource->getPointerAlignment(DL);
322 if (OffsetBytes > 0) {
// Advance both sources by OffsetBytes and recompute the provable alignment
// of the adjusted pointers.
323 auto *ByteType = Type::getInt8Ty(CI->getContext());
324 LhsSource = Builder.CreateConstGEP1_64(ByteType, LhsSource, OffsetBytes);
325 RhsSource = Builder.CreateConstGEP1_64(ByteType, RhsSource, OffsetBytes);
326 LhsAlign = commonAlignment(LhsAlign, OffsetBytes);
327 RhsAlign = commonAlignment(RhsAlign, OffsetBytes);
328 }
329 
330 // Create a constant or a load from the source.
331 Value *Lhs = nullptr;
332 if (auto *C = dyn_cast<Constant>(LhsSource))
333 Lhs = ConstantFoldLoadFromConstPtr(C, LoadSizeType, DL);
334 if (!Lhs)
335 Lhs = Builder.CreateAlignedLoad(LoadSizeType, LhsSource, LhsAlign);
336 
337 Value *Rhs = nullptr;
338 if (auto *C = dyn_cast<Constant>(RhsSource))
339 Rhs = ConstantFoldLoadFromConstPtr(C, LoadSizeType, DL);
340 if (!Rhs)
341 Rhs = Builder.CreateAlignedLoad(LoadSizeType, RhsSource, RhsAlign);
342 
343 // Zero extend if Byte Swap intrinsic has different type
344 if (BSwapSizeType && LoadSizeType != BSwapSizeType) {
345 Lhs = Builder.CreateZExt(Lhs, BSwapSizeType);
346 Rhs = Builder.CreateZExt(Rhs, BSwapSizeType);
347 }
348 
349 // Swap bytes if required.
// NOTE(review): the line declaring `Bswap` (presumably obtaining the
// llvm.bswap intrinsic declaration whose argument list continues on the next
// line) is missing from this copy of the file -- confirm upstream.
350 if (BSwapSizeType) {
352 CI->getModule(), Intrinsic::bswap, BSwapSizeType);
353 Lhs = Builder.CreateCall(Bswap, Lhs);
354 Rhs = Builder.CreateCall(Bswap, Rhs);
355 }
356 
357 // Zero extend if required.
358 if (CmpSizeType != nullptr && CmpSizeType != Lhs->getType()) {
359 Lhs = Builder.CreateZExt(Lhs, CmpSizeType);
360 Rhs = Builder.CreateZExt(Rhs, CmpSizeType);
361 }
362 return {Lhs, Rhs};
363 }
364
365// This function creates the IR instructions for loading and comparing 1 byte.
366// It loads 1 byte from each source of the memcmp parameters with the given
367// GEPIndex. It then subtracts the two loaded values and adds this result to the
368// final phi node for selecting the memcmp result.
369void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
370 unsigned OffsetBytes) {
371 BasicBlock *BB = LoadCmpBlocks[BlockIndex];
372 Builder.SetInsertPoint(BB);
373 const LoadPair Loads =
374 getLoadPair(Type::getInt8Ty(CI->getContext()), nullptr,
375 Type::getInt32Ty(CI->getContext()), OffsetBytes);
376 Value *Diff = Builder.CreateSub(Loads.Lhs, Loads.Rhs);
377
378 PhiRes->addIncoming(Diff, BB);
379
380 if (BlockIndex < (LoadCmpBlocks.size() - 1)) {
381 // Early exit branch if difference found to EndBlock. Otherwise, continue to
382 // next LoadCmpBlock,
383 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_NE, Diff,
384 ConstantInt::get(Diff->getType(), 0));
385 Builder.CreateCondBr(Cmp, EndBlock, LoadCmpBlocks[BlockIndex + 1]);
386 if (DTU)
387 DTU->applyUpdates(
388 {{DominatorTree::Insert, BB, EndBlock},
389 {DominatorTree::Insert, BB, LoadCmpBlocks[BlockIndex + 1]}});
390 } else {
391 // The last block has an unconditional branch to EndBlock.
392 Builder.CreateBr(EndBlock);
393 if (DTU)
394 DTU->applyUpdates({{DominatorTree::Insert, BB, EndBlock}});
395 }
396}
397
398/// Generate an equality comparison for one or more pairs of loaded values.
399/// This is used in the case where the memcmp() call is compared equal or not
400/// equal to zero.
// Emits up to NumLoadsPerBlockForZeroCmp load pairs starting at LoadSequence
// entry `LoadIndex` (advanced in place) and returns a single i1 that is true
// iff any pair differs. With one load this is a direct icmp ne; with several,
// the pairwise xors are widened to the max load type and OR-reduced.
401 Value *MemCmpExpansion::getCompareLoadPairs(unsigned BlockIndex,
402 unsigned &LoadIndex) {
403 assert(LoadIndex < getNumLoads() &&
404 "getCompareLoadPairs() called with no remaining loads");
405 std::vector<Value *> XorList, OrList;
406 Value *Diff = nullptr;
407 
// The final block may hold fewer than a full group of loads.
408 const unsigned NumLoads =
409 std::min(getNumLoads() - LoadIndex, NumLoadsPerBlockForZeroCmp);
410 
411 // For a single-block expansion, start inserting before the memcmp call.
412 if (LoadCmpBlocks.empty())
413 Builder.SetInsertPoint(CI);
414 else
415 Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
416 
417 Value *Cmp = nullptr;
418 // If we have multiple loads per block, we need to generate a composite
419 // comparison using xor+or. The type for the combinations is the largest load
420 // type.
421 IntegerType *const MaxLoadType =
422 NumLoads == 1 ? nullptr
423 : IntegerType::get(CI->getContext(), MaxLoadSize * 8);
424 
425 for (unsigned i = 0; i < NumLoads; ++i, ++LoadIndex) {
426 const LoadEntry &CurLoadEntry = LoadSequence[LoadIndex];
427 const LoadPair Loads = getLoadPair(
428 IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8), nullptr,
429 MaxLoadType, CurLoadEntry.Offset);
430 
431 if (NumLoads != 1) {
432 // If we have multiple loads per block, we need to generate a composite
433 // comparison using xor+or.
434 Diff = Builder.CreateXor(Loads.Lhs, Loads.Rhs);
435 Diff = Builder.CreateZExt(Diff, MaxLoadType);
436 XorList.push_back(Diff);
437 } else {
438 // If there's only one load per block, we just compare the loaded values.
439 Cmp = Builder.CreateICmpNE(Loads.Lhs, Loads.Rhs);
440 }
441 }
442 
// One level of a balanced OR-reduction tree: OR adjacent pairs, carrying an
// odd leftover element through unchanged.
443 auto pairWiseOr = [&](std::vector<Value *> &InList) -> std::vector<Value *> {
444 std::vector<Value *> OutList;
445 for (unsigned i = 0; i < InList.size() - 1; i = i + 2) {
446 Value *Or = Builder.CreateOr(InList[i], InList[i + 1]);
447 OutList.push_back(Or);
448 }
449 if (InList.size() % 2 != 0)
450 OutList.push_back(InList.back());
451 return OutList;
452 };
453 
454 if (!Cmp) {
455 // Pairwise OR the XOR results.
456 OrList = pairWiseOr(XorList);
457 
458 // Pairwise OR the OR results until one result left.
459 while (OrList.size() != 1) {
460 OrList = pairWiseOr(OrList);
461 }
462 
463 assert(Diff && "Failed to find comparison diff");
464 Cmp = Builder.CreateICmpNE(OrList[0], ConstantInt::get(Diff->getType(), 0));
465 }
466 
467 return Cmp;
468 }
469
// Fills LoadCmpBlocks[BlockIndex] with a grouped (zero-equality) compare of
// several load pairs and the branch to res_block on mismatch, or to the next
// block (EndBlock for the last one) on match.
470 void MemCmpExpansion::emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
471 unsigned &LoadIndex) {
472 Value *Cmp = getCompareLoadPairs(BlockIndex, LoadIndex);
473 
474 BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
475 ? EndBlock
476 : LoadCmpBlocks[BlockIndex + 1];
477 // Early exit branch if difference found to ResultBlock. Otherwise,
478 // continue to next LoadCmpBlock or EndBlock.
// NOTE(review): the line following the CreateCondBr (presumably a
// branch-weights/profile annotation call whose argument list continues with
// "CI->getFunction());") is missing from this copy -- confirm upstream.
// Also, "CondBrInst" is not a standard LLVM class name; verify this token
// against the real source (CreateCondBr conventionally yields a BranchInst).
479 BasicBlock *BB = Builder.GetInsertBlock();
480 CondBrInst *CmpBr = Builder.CreateCondBr(Cmp, ResBlock.BB, NextBB);
482 CI->getFunction());
483 if (DTU)
484 DTU->applyUpdates({{DominatorTree::Insert, BB, ResBlock.BB},
485 {DominatorTree::Insert, BB, NextBB}});
486 
487 // Add a phi edge for the last LoadCmpBlock to Endblock with a value of 0
488 // since early exit to ResultBlock was not taken (no difference was found in
489 // any of the bytes).
490 if (BlockIndex == LoadCmpBlocks.size() - 1) {
491 Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
492 PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
493 }
494 }
495
496// This function creates the IR intructions for loading and comparing using the
497// given LoadSize. It loads the number of bytes specified by LoadSize from each
498// source of the memcmp parameters. It then does a subtract to see if there was
499// a difference in the loaded values. If a difference is found, it branches
500// with an early exit to the ResultBlock for calculating which source was
501// larger. Otherwise, it falls through to the either the next LoadCmpBlock or
502// the EndBlock if this is the last LoadCmpBlock. Loading 1 byte is handled with
503// a special case through emitLoadCompareByteBlock. The special handling can
504// simply subtract the loaded values and add it to the result phi node.
505 void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) {
506 // There is one load per block in this case, BlockIndex == LoadIndex.
507 const LoadEntry &CurLoadEntry = LoadSequence[BlockIndex];
508 
// One-byte loads take the cheaper subtract-based path.
509 if (CurLoadEntry.LoadSize == 1) {
510 MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex, CurLoadEntry.Offset);
511 return;
512 }
513 
// NOTE(review): the middle line of the BSwapSizeType conditional (presumably
// "? IntegerType::get(CI->getContext()," continuing onto the PowerOf2Ceil
// line) is missing from this copy of the file -- confirm upstream.
514 Type *LoadSizeType =
515 IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8);
516 Type *BSwapSizeType =
517 DL.isLittleEndian()
519 PowerOf2Ceil(CurLoadEntry.LoadSize * 8))
520 : nullptr;
521 Type *MaxLoadType = IntegerType::get(
522 CI->getContext(),
523 std::max(MaxLoadSize, (unsigned)PowerOf2Ceil(CurLoadEntry.LoadSize)) * 8);
524 assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type");
525 
526 Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
527 
528 const LoadPair Loads = getLoadPair(LoadSizeType, BSwapSizeType, MaxLoadType,
529 CurLoadEntry.Offset);
530 
531 // Add the loaded values to the phi nodes for calculating memcmp result only
532 // if result is not used in a zero equality.
533 if (!IsUsedForZeroCmp) {
534 ResBlock.PhiSrc1->addIncoming(Loads.Lhs, LoadCmpBlocks[BlockIndex]);
535 ResBlock.PhiSrc2->addIncoming(Loads.Rhs, LoadCmpBlocks[BlockIndex]);
536 }
537 
// NOTE(review): the line following CreateCondBr (argument list continuing
// with "CI->getFunction());", presumably a profile-annotation call) is
// missing from this copy -- confirm upstream. "CondBrInst" is likewise an
// unusual token; verify against the real source.
538 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Loads.Lhs, Loads.Rhs);
539 BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
540 ? EndBlock
541 : LoadCmpBlocks[BlockIndex + 1];
542 // Early exit branch if difference found to ResultBlock. Otherwise, continue
543 // to next LoadCmpBlock or EndBlock.
544 BasicBlock *BB = Builder.GetInsertBlock();
545 CondBrInst *CmpBr = Builder.CreateCondBr(Cmp, NextBB, ResBlock.BB);
547 CI->getFunction());
548 if (DTU)
549 DTU->applyUpdates({{DominatorTree::Insert, BB, NextBB},
550 {DominatorTree::Insert, BB, ResBlock.BB}});
551 
552 // Add a phi edge for the last LoadCmpBlock to Endblock with a value of 0
553 // since early exit to ResultBlock was not taken (no difference was found in
554 // any of the bytes).
555 if (BlockIndex == LoadCmpBlocks.size() - 1) {
556 Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
557 PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
558 }
559 }
560
561// This function populates the ResultBlock with a sequence to calculate the
562// memcmp result. It compares the two loaded source values and returns -1 if
563// src1 < src2 and 1 if src1 > src2.
564 void MemCmpExpansion::emitMemCmpResultBlock() {
565 // Special case: if memcmp result is used in a zero equality, result does not
566 // need to be calculated and can simply return 1.
567 if (IsUsedForZeroCmp) {
568 BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
569 Builder.SetInsertPoint(ResBlock.BB, InsertPt);
570 Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1);
571 PhiRes->addIncoming(Res, ResBlock.BB);
572 Builder.CreateBr(EndBlock);
573 if (DTU)
574 DTU->applyUpdates({{DominatorTree::Insert, ResBlock.BB, EndBlock}});
575 return;
576 }
// General case: the phis carry the (byte-swapped, widened) differing values;
// an unsigned compare selects -1 when src1 < src2, else +1.
577 BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
578 Builder.SetInsertPoint(ResBlock.BB, InsertPt);
579 
580 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1,
581 ResBlock.PhiSrc2);
582 
// NOTE(review): the line between CreateSelect and "DEBUG_TYPE,
// CI->getFunction());" is missing from this copy (presumably a
// profile/annotation helper call) -- confirm upstream.
583 Value *Res =
584 Builder.CreateSelect(Cmp, Constant::getAllOnesValue(Builder.getInt32Ty()),
585 ConstantInt::get(Builder.getInt32Ty(), 1));
587 DEBUG_TYPE, CI->getFunction());
588 
589 PhiRes->addIncoming(Res, ResBlock.BB);
590 Builder.CreateBr(EndBlock);
591 if (DTU)
592 DTU->applyUpdates({{DominatorTree::Insert, ResBlock.BB, EndBlock}});
593 }
594
595void MemCmpExpansion::setupResultBlockPHINodes() {
596 Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
597 Builder.SetInsertPoint(ResBlock.BB);
598 // Note: this assumes one load per block.
599 ResBlock.PhiSrc1 =
600 Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src1");
601 ResBlock.PhiSrc2 =
602 Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src2");
603}
604
605void MemCmpExpansion::setupEndBlockPHINodes() {
606 Builder.SetInsertPoint(EndBlock, EndBlock->begin());
607 PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res");
608}
609
610Value *MemCmpExpansion::getMemCmpExpansionZeroCase() {
611 unsigned LoadIndex = 0;
612 // This loop populates each of the LoadCmpBlocks with the IR sequence to
613 // handle multiple loads per block.
614 for (unsigned I = 0; I < getNumBlocks(); ++I) {
615 emitLoadCompareBlockMultipleLoads(I, LoadIndex);
616 }
617
618 emitMemCmpResultBlock();
619 return PhiRes;
620}
621
622/// A memcmp expansion that compares equality with 0 and only has one block of
623/// load and compare can bypass the compare, branch, and phi IR that is required
624/// in the general case.
625Value *MemCmpExpansion::getMemCmpEqZeroOneBlock() {
626 unsigned LoadIndex = 0;
627 Value *Cmp = getCompareLoadPairs(0, LoadIndex);
628 assert(LoadIndex == getNumLoads() && "some entries were not consumed");
629 return Builder.CreateZExt(Cmp, Type::getInt32Ty(CI->getContext()));
630}
631
632/// A memcmp expansion that only has one block of load and compare can bypass
633/// the compare, branch, and phi IR that is required in the general case.
634/// This function also analyses users of memcmp, and if there is only one user
635/// from which we can conclude that only 2 out of 3 memcmp outcomes really
636/// matter, then it generates more efficient code with only one comparison.
637 Value *MemCmpExpansion::getMemCmpOneBlock() {
// Little-endian targets must byte-swap multi-byte loads so an unsigned
// integer compare matches memcmp's big-endian byte order.
638 bool NeedsBSwap = DL.isLittleEndian() && Size != 1;
639 Type *LoadSizeType = IntegerType::get(CI->getContext(), Size * 8);
640 Type *BSwapSizeType =
641 NeedsBSwap ? IntegerType::get(CI->getContext(), PowerOf2Ceil(Size * 8))
642 : nullptr;
// NOTE(review): the first line of the MaxLoadType initializer (presumably
// "IntegerType::get(CI->getContext()," continuing onto the std::max line) is
// missing from this copy of the file -- confirm upstream.
643 Type *MaxLoadType =
645 std::max(MaxLoadSize, (unsigned)PowerOf2Ceil(Size)) * 8);
646 
647 // The i8 and i16 cases don't need compares. We zext the loaded values and
648 // subtract them to get the suitable negative, zero, or positive i32 result.
649 if (Size == 1 || Size == 2) {
650 const LoadPair Loads = getLoadPair(LoadSizeType, BSwapSizeType,
651 Builder.getInt32Ty(), /*Offset*/ 0);
652 return Builder.CreateSub(Loads.Lhs, Loads.Rhs);
653 }
654 
655 const LoadPair Loads = getLoadPair(LoadSizeType, BSwapSizeType, MaxLoadType,
656 /*Offset*/ 0);
657 
658 // If a user of memcmp cares only about two outcomes, for example:
659 // bool result = memcmp(a, b, NBYTES) > 0;
660 // We can generate more optimal code with a smaller number of operations
661 if (CI->hasOneUser()) {
662 auto *UI = cast<Instruction>(*CI->user_begin());
663 CmpPredicate Pred = ICmpInst::Predicate::BAD_ICMP_PREDICATE;
664 bool NeedsZExt = false;
665 // This is a special case because instead of checking if the result is less
666 // than zero:
667 // bool result = memcmp(a, b, NBYTES) < 0;
668 // Compiler is clever enough to generate the following code:
669 // bool result = memcmp(a, b, NBYTES) >> 31;
670 if (match(UI,
671 m_LShr(m_Value(),
672 m_SpecificInt(CI->getType()->getIntegerBitWidth() - 1)))) {
673 Pred = ICmpInst::ICMP_SLT;
674 NeedsZExt = true;
675 } else if (match(UI, m_SpecificICmp(ICmpInst::ICMP_SGT, m_Specific(CI),
676 m_AllOnes()))) {
677 // Adjust predicate as if it compared with 0.
678 Pred = ICmpInst::ICMP_SGE;
679 } else if (match(UI, m_SpecificICmp(ICmpInst::ICMP_SLT, m_Specific(CI),
680 m_One()))) {
681 // Adjust predicate as if it compared with 0.
682 Pred = ICmpInst::ICMP_SLE;
683 } else {
684 // In case of a successful match this call will set `Pred` variable
685 match(UI, m_ICmp(Pred, m_Specific(CI), m_Zero()));
686 }
687 // Generate new code and remove the original memcmp call and the user
// NOTE(review): the line declaring `Cmp` (presumably an icmp built from the
// unsigned form of Pred, with arguments continuing on the next line) is
// missing from this copy of the file -- confirm upstream.
688 if (ICmpInst::isSigned(Pred)) {
690 Loads.Lhs, Loads.Rhs);
691 auto *Result = NeedsZExt ? Builder.CreateZExt(Cmp, UI->getType()) : Cmp;
// Returning nullptr tells the caller the call was already replaced here.
692 UI->replaceAllUsesWith(Result);
693 UI->eraseFromParent();
694 CI->eraseFromParent();
695 return nullptr;
696 }
697 }
698 
699 // The result of memcmp is negative, zero, or positive.
700 return Builder.CreateIntrinsic(Builder.getInt32Ty(), Intrinsic::ucmp,
701 {Loads.Lhs, Loads.Rhs});
702 }
703
704// This function expands the memcmp call into an inline expansion and returns
705// the memcmp result. Returns nullptr if the memcmp is already replaced.
706 Value *MemCmpExpansion::getMemCmpExpansion() {
707 // Create the basic block framework for a multi-block expansion.
708 if (getNumBlocks() != 1) {
// Split the original block at the call so everything after it lives in
// "endblock", then build the load/compare chain between the two halves.
709 BasicBlock *StartBlock = CI->getParent();
710 EndBlock = SplitBlock(StartBlock, CI, DTU, /*LI=*/nullptr,
711 /*MSSAU=*/nullptr, "endblock");
712 setupEndBlockPHINodes();
713 createResultBlock();
714 
715 // If return value of memcmp is not used in a zero equality, we need to
716 // calculate which source was larger. The calculation requires the
717 // two loaded source values of each load compare block.
718 // These will be saved in the phi nodes created by setupResultBlockPHINodes.
719 if (!IsUsedForZeroCmp) setupResultBlockPHINodes();
720 
721 // Create the number of required load compare basic blocks.
722 createLoadCmpBlocks();
723 
724 // Update the terminator added by SplitBlock to branch to the first
725 // LoadCmpBlock.
726 StartBlock->getTerminator()->setSuccessor(0, LoadCmpBlocks[0]);
727 if (DTU)
728 DTU->applyUpdates({{DominatorTree::Insert, StartBlock, LoadCmpBlocks[0]},
729 {DominatorTree::Delete, StartBlock, EndBlock}});
730 }
731 
// NOTE(review): a line is missing from this copy here (original line 732,
// presumably setting the builder's current debug location from CI) --
// confirm upstream.
733 
// Dispatch to the specialized emitters; the single-block forms avoid the
// branch/phi scaffolding entirely.
734 if (IsUsedForZeroCmp)
735 return getNumBlocks() == 1 ? getMemCmpEqZeroOneBlock()
736 : getMemCmpExpansionZeroCase();
737 
738 if (getNumBlocks() == 1)
739 return getMemCmpOneBlock();
740 
741 for (unsigned I = 0; I < getNumBlocks(); ++I) {
742 emitLoadCompareBlock(I);
743 }
744 
745 emitMemCmpResultBlock();
746 return PhiRes;
747 }
748
749// This function checks to see if an expansion of memcmp can be generated.
750// It checks for constant compare size that is less than the max inline size.
751// If an expansion cannot occur, returns false to leave as a library call.
752// Otherwise, the library call is replaced with a new IR instruction sequence.
753/// We want to transform:
754/// %call = call signext i32 @memcmp(i8* %0, i8* %1, i64 15)
755/// To:
756/// loadbb:
757/// %0 = bitcast i32* %buffer2 to i8*
758/// %1 = bitcast i32* %buffer1 to i8*
759/// %2 = bitcast i8* %1 to i64*
760/// %3 = bitcast i8* %0 to i64*
761/// %4 = load i64, i64* %2
762/// %5 = load i64, i64* %3
763/// %6 = call i64 @llvm.bswap.i64(i64 %4)
764/// %7 = call i64 @llvm.bswap.i64(i64 %5)
765/// %8 = sub i64 %6, %7
766/// %9 = icmp ne i64 %8, 0
767/// br i1 %9, label %res_block, label %loadbb1
768/// res_block: ; preds = %loadbb2,
769/// %loadbb1, %loadbb
770/// %phi.src1 = phi i64 [ %6, %loadbb ], [ %22, %loadbb1 ], [ %36, %loadbb2 ]
771/// %phi.src2 = phi i64 [ %7, %loadbb ], [ %23, %loadbb1 ], [ %37, %loadbb2 ]
772/// %10 = icmp ult i64 %phi.src1, %phi.src2
773/// %11 = select i1 %10, i32 -1, i32 1
774/// br label %endblock
775/// loadbb1: ; preds = %loadbb
776/// %12 = bitcast i32* %buffer2 to i8*
777/// %13 = bitcast i32* %buffer1 to i8*
778/// %14 = bitcast i8* %13 to i32*
779/// %15 = bitcast i8* %12 to i32*
780/// %16 = getelementptr i32, i32* %14, i32 2
781/// %17 = getelementptr i32, i32* %15, i32 2
782/// %18 = load i32, i32* %16
783/// %19 = load i32, i32* %17
784/// %20 = call i32 @llvm.bswap.i32(i32 %18)
785/// %21 = call i32 @llvm.bswap.i32(i32 %19)
786/// %22 = zext i32 %20 to i64
787/// %23 = zext i32 %21 to i64
788/// %24 = sub i64 %22, %23
789/// %25 = icmp ne i64 %24, 0
790/// br i1 %25, label %res_block, label %loadbb2
791/// loadbb2: ; preds = %loadbb1
792/// %26 = bitcast i32* %buffer2 to i8*
793/// %27 = bitcast i32* %buffer1 to i8*
794/// %28 = bitcast i8* %27 to i16*
795/// %29 = bitcast i8* %26 to i16*
796/// %30 = getelementptr i16, i16* %28, i16 6
797/// %31 = getelementptr i16, i16* %29, i16 6
798/// %32 = load i16, i16* %30
799/// %33 = load i16, i16* %31
800/// %34 = call i16 @llvm.bswap.i16(i16 %32)
801/// %35 = call i16 @llvm.bswap.i16(i16 %33)
802/// %36 = zext i16 %34 to i64
803/// %37 = zext i16 %35 to i64
804/// %38 = sub i64 %36, %37
805/// %39 = icmp ne i64 %38, 0
806/// br i1 %39, label %res_block, label %loadbb3
807/// loadbb3: ; preds = %loadbb2
808/// %40 = bitcast i32* %buffer2 to i8*
809/// %41 = bitcast i32* %buffer1 to i8*
810/// %42 = getelementptr i8, i8* %41, i8 14
811/// %43 = getelementptr i8, i8* %40, i8 14
812/// %44 = load i8, i8* %42
813/// %45 = load i8, i8* %43
814/// %46 = zext i8 %44 to i32
815/// %47 = zext i8 %45 to i32
816/// %48 = sub i32 %46, %47
817/// br label %endblock
818/// endblock: ; preds = %res_block,
819/// %loadbb3
820/// %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ]
821/// ret i32 %phi.res
822 static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
823 const DataLayout *DL, ProfileSummaryInfo *PSI,
824 BlockFrequencyInfo *BFI, DomTreeUpdater *DTU,
825 const bool IsBCmp) {
826 NumMemCmpCalls++;
827 
828 // Early exit from expansion if -Oz.
829 if (CI->getFunction()->hasMinSize())
830 return false;
831 
832 // Early exit from expansion if size is not a constant.
833 ConstantInt *SizeCast = dyn_cast<ConstantInt>(CI->getArgOperand(2));
834 if (!SizeCast) {
835 NumMemCmpNotConstant++;
836 return false;
837 }
838 const uint64_t SizeVal = SizeCast->getZExtValue();
839 
// memcmp(_, _, 0) folds to 0 elsewhere; nothing useful to expand here.
840 if (SizeVal == 0) {
841 return false;
842 }
843 // TTI call to check if target would like to expand memcmp. Also, get the
844 // available load sizes.
// NOTE(review): the initializer of IsUsedForZeroCmp is cut off in this copy
// (original line 846, presumably a zero-equality-usage check OR'ed with
// IsBCmp, since bcmp only reports equality) -- confirm upstream.
845 const bool IsUsedForZeroCmp =
847 bool OptForSize = llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
848 auto Options = TTI->enableMemCmpExpansion(OptForSize,
849 IsUsedForZeroCmp);
850 if (!Options) return false;
851 
// Let command-line flags override the target's knobs when explicitly given.
// NOTE(review): the body of this first override (original line 853,
// presumably assigning MemCmpEqZeroNumLoadsPerBlock into
// Options.NumLoadsPerBlock) is missing from this copy -- confirm upstream.
852 if (MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences())
854 
855 if (OptForSize &&
856 MaxLoadsPerMemcmpOptSize.getNumOccurrences())
857 Options.MaxNumLoads = MaxLoadsPerMemcmpOptSize;
858 
859 if (!OptForSize && MaxLoadsPerMemcmp.getNumOccurrences())
860 Options.MaxNumLoads = MaxLoadsPerMemcmp;
861 
862 MemCmpExpansion Expansion(CI, SizeVal, Options, IsUsedForZeroCmp, *DL, DTU);
863 
864 // Don't expand if this will require more loads than desired by the target.
865 if (Expansion.getNumLoads() == 0) {
866 NumMemCmpGreaterThanMax++;
867 return false;
868 }
869 
870 NumMemCmpInlined++;
871 
// A null result means the expansion already replaced the call (see
// getMemCmpOneBlock's single-user fast path).
872 if (Value *Res = Expansion.getMemCmpExpansion()) {
873 // Replace call with result of expansion and erase call.
874 CI->replaceAllUsesWith(Res);
875 CI->eraseFromParent();
876 }
877 
878 return true;
879 }
880
// Scans one basic block for an expandable memcmp/bcmp call.
// Returns true if a change was made (at most one call is expanded per
// invocation, since expansion restructures the CFG).
static bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
                       const TargetTransformInfo *TTI, const DataLayout &DL,
                       ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI,
                       DomTreeUpdater *DTU);

// Pass driver: repeatedly applies runOnBlock over the function until no more
// expansions are possible, then reports which analyses are preserved.
static PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
                                 const TargetTransformInfo *TTI,
                                 ProfileSummaryInfo *PSI,
                                 BlockFrequencyInfo *BFI, DominatorTree *DT);
891
892bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
893 const TargetTransformInfo *TTI, const DataLayout &DL,
894 ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI,
895 DomTreeUpdater *DTU) {
896 for (Instruction &I : BB) {
897 CallInst *CI = dyn_cast<CallInst>(&I);
898 if (!CI) {
899 continue;
900 }
901 LibFunc Func;
902 if (TLI->getLibFunc(*CI, Func) &&
903 (Func == LibFunc_memcmp || Func == LibFunc_bcmp) &&
904 expandMemCmp(CI, TTI, &DL, PSI, BFI, DTU, Func == LibFunc_bcmp)) {
905 return true;
906 }
907 }
908 return false;
909}
910
911PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
912 const TargetTransformInfo *TTI,
913 ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI,
914 DominatorTree *DT) {
915 std::optional<DomTreeUpdater> DTU;
916 if (DT)
917 DTU.emplace(DT, DomTreeUpdater::UpdateStrategy::Lazy);
918
919 const DataLayout& DL = F.getDataLayout();
920 bool MadeChanges = false;
921 for (auto BBIt = F.begin(); BBIt != F.end();) {
922 if (runOnBlock(*BBIt, TLI, TTI, DL, PSI, BFI, DTU ? &*DTU : nullptr)) {
923 MadeChanges = true;
924 // If changes were made, restart the function from the beginning, since
925 // the structure of the function was changed.
926 BBIt = F.begin();
927 } else {
928 ++BBIt;
929 }
930 }
931 if (MadeChanges)
932 for (BasicBlock &BB : F)
934 if (!MadeChanges)
935 return PreservedAnalyses::all();
936 PreservedAnalyses PA;
937 PA.preserve<DominatorTreeAnalysis>();
938 return PA;
939}
940
941} // namespace
942
945 // Don't expand memcmp in sanitized functions — sanitizers intercept memcmp
946 // calls to check for memory errors, and expanding would bypass that.
947 if (F.hasFnAttribute(Attribute::SanitizeAddress) ||
948 F.hasFnAttribute(Attribute::SanitizeMemory) ||
949 F.hasFnAttribute(Attribute::SanitizeThread) ||
950 F.hasFnAttribute(Attribute::SanitizeHWAddress))
951 return PreservedAnalyses::all();
952
953 const auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
954 const auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
955 auto *PSI = FAM.getResult<ModuleAnalysisManagerFunctionProxy>(F)
956 .getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
957 BlockFrequencyInfo *BFI = (PSI && PSI->hasProfileSummary())
958 ? &FAM.getResult<BlockFrequencyAnalysis>(F)
959 : nullptr;
960 auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
961
962 return runImpl(F, &TLI, &TTI, PSI, BFI, DT);
963}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
DXIL Intrinsic Expansion
static bool runImpl(Function &F, const TargetLowering &TLI, const LibcallLoweringInfo &Libcalls, AssumptionCache *AC)
static cl::opt< unsigned > MaxLoadsPerMemcmpOptSize("max-loads-per-memcmp-opt-size", cl::Hidden, cl::desc("Set maximum number of loads used in expanded memcmp for -Os/Oz"))
static cl::opt< unsigned > MaxLoadsPerMemcmp("max-loads-per-memcmp", cl::Hidden, cl::desc("Set maximum number of loads used in expanded memcmp"))
static cl::opt< unsigned > MemCmpEqZeroNumLoadsPerBlock("memcmp-num-loads-per-block", cl::Hidden, cl::init(1), cl::desc("The number of loads per basic block for inline expansion of " "memcmp that is only being compared against zero."))
#define DEBUG_TYPE
static LVOptions Options
Definition LVOptions.cpp:25
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
FunctionAnalysisManager FAM
This file contains the declarations for profiling metadata utility functions.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This pass exposes codegen information to IR-level passes.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition ArrayRef.h:195
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Value * getArgOperand(unsigned i) const
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM)
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition Function.h:711
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Value * CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0, const Twine &Name="")
Definition IRBuilder.h:2011
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition IRBuilder.h:1894
CondBrInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition IRBuilder.h:1223
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:579
BasicBlock * GetInsertBlock() const
Definition IRBuilder.h:201
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
Definition IRBuilder.h:247
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2335
UncondBrInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Definition IRBuilder.h:1217
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition IRBuilder.h:2496
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1446
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition IRBuilder.h:2077
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2510
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1625
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2441
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1599
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void setSuccessor(unsigned Idx, BasicBlock *BB)
Update the specified successor to point at the provided block.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
LLVM_ABI MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const
LLVM_ABI unsigned getIntegerBitWidth() const
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
user_iterator user_begin()
Definition Value.h:402
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:162
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
LLVM_ABI Align getPointerAlignment(const DataLayout &DL) const
Returns an alignment of the pointer value.
Definition Value.cpp:964
const ParentTy * getParent() const
Definition ilist_node.h:34
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
initializer< Ty > init(const Ty &Val)
NodeAddr< FuncNode * > Func
Definition RDFGraph.h:393
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1765
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
LLVM_ABI void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, StringRef PassName, const Function *F=nullptr)
Like setExplicitlyUnknownBranchWeights(...), but only sets unknown branch weights in the new instruct...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
LLVM_ABI bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetLibraryInfo *TLI=nullptr)
Scan the specified basic block and try to simplify any instructions in it and recursively delete dead...
Definition Local.cpp:723
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
Definition MathExtras.h:385
TargetTransformInfo TTI
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
@ Or
Bitwise or logical OR of integers.
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.