LLVM 18.0.0git
InstCombineLoadStoreAlloca.cpp
1//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visit functions for load, store and alloca.
10//
11//===----------------------------------------------------------------------===//
12
13#include "InstCombineInternal.h"
14#include "llvm/ADT/MapVector.h"
16#include "llvm/ADT/Statistic.h"
18#include "llvm/Analysis/Loads.h"
19#include "llvm/IR/DataLayout.h"
22#include "llvm/IR/LLVMContext.h"
26using namespace llvm;
27using namespace PatternMatch;
28
29#define DEBUG_TYPE "instcombine"
30
31STATISTIC(NumDeadStore, "Number of dead stores eliminated");
32STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
33
35 "instcombine-max-copied-from-constant-users", cl::init(300),
36 cl::desc("Maximum users to visit in copy from constant transform"),
38
39namespace llvm {
41 "enable-infer-alignment-pass", cl::init(true), cl::Hidden, cl::ZeroOrMore,
42 cl::desc("Enable the InferAlignment pass, disabling alignment inference in "
43 "InstCombine"));
44}
45
46/// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
47/// pointer to an alloca. Ignore any reads of the pointer, return false if we
48/// see any stores or other unknown uses. If we see pointer arithmetic, keep
49/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
50/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to
51/// the alloca, and if the source pointer is a pointer to a constant memory
52/// location, we can optimize this.
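// Editor's illustrative sketch (not part of the original source): the shape of
// IR this walk is intended to recognize looks roughly like the following,
// where @g and the sizes are hypothetical:
//
//   @g = private unnamed_addr constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
//   ...
//   %buf = alloca [4 x i32], align 4
//   call void @llvm.memcpy.p0.p0.i64(ptr %buf, ptr @g, i64 16, i1 false)
//   %p = getelementptr inbounds [4 x i32], ptr %buf, i64 0, i64 2
//   %v = load i32, ptr %p
//
// Every use of %buf is either a read or the un-offset destination of the
// memcpy from constant memory, so reads of %buf can safely be redirected to @g.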
53static bool
54isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V,
55 MemTransferInst *&TheCopy,
56 SmallVectorImpl<Instruction *> &ToDelete) {
57 // We track lifetime intrinsics as we encounter them. If we decide to go
58 // ahead and replace the value with the memory location, this lets the caller
59 // quickly eliminate the markers.
60
61 using ValueAndIsOffset = PointerIntPair<Value *, 1, bool>;
62 SmallVector<ValueAndIsOffset, 32> Worklist;
63 SmallPtrSet<ValueAndIsOffset, 32> Visited;
64 Worklist.emplace_back(V, false);
65 while (!Worklist.empty()) {
66 ValueAndIsOffset Elem = Worklist.pop_back_val();
67 if (!Visited.insert(Elem).second)
68 continue;
69 if (Visited.size() > MaxCopiedFromConstantUsers)
70 return false;
71
72 const auto [Value, IsOffset] = Elem;
73 for (auto &U : Value->uses()) {
74 auto *I = cast<Instruction>(U.getUser());
75
76 if (auto *LI = dyn_cast<LoadInst>(I)) {
77 // Ignore simple (non-volatile, non-atomic) loads; they are always ok.
78 if (!LI->isSimple()) return false;
79 continue;
80 }
81
82 if (isa<PHINode, SelectInst>(I)) {
83 // We set IsOffset=true, to forbid the memcpy from occurring after the
84 // phi: If one of the phi operands is not based on the alloca, we
85 // would incorrectly omit a write.
86 Worklist.emplace_back(I, true);
87 continue;
88 }
89 if (isa<BitCastInst, AddrSpaceCastInst>(I)) {
90 // If uses of the bitcast are ok, we are ok.
91 Worklist.emplace_back(I, IsOffset);
92 continue;
93 }
94 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
95 // A GEP with all-zero indices does not offset the pointer;
96 // any other GEP does.
97 Worklist.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
98 continue;
99 }
100
101 if (auto *Call = dyn_cast<CallBase>(I)) {
102 // If this is the function being called then we treat it like a load and
103 // ignore it.
104 if (Call->isCallee(&U))
105 continue;
106
107 unsigned DataOpNo = Call->getDataOperandNo(&U);
108 bool IsArgOperand = Call->isArgOperand(&U);
109
110 // Inalloca arguments are clobbered by the call.
111 if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
112 return false;
113
114 // If this call site doesn't modify the memory, then we know it is just
115 // a load (but one that potentially returns the value itself), so we can
116 // ignore it if we know that the value isn't captured.
117 bool NoCapture = Call->doesNotCapture(DataOpNo);
118 if ((Call->onlyReadsMemory() && (Call->use_empty() || NoCapture)) ||
119 (Call->onlyReadsMemory(DataOpNo) && NoCapture))
120 continue;
121
122 // If this is being passed as a byval argument, the caller is making a
123 // copy, so it is only a read of the alloca.
124 if (IsArgOperand && Call->isByValArgument(DataOpNo))
125 continue;
126 }
127
128 // Lifetime intrinsics can be handled by the caller.
129 if (I->isLifetimeStartOrEnd()) {
130 assert(I->use_empty() && "Lifetime markers have no result to use!");
131 ToDelete.push_back(I);
132 continue;
133 }
134
135 // If this isn't our memcpy/memmove, reject it as something we can't
136 // handle.
137 MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
138 if (!MI)
139 return false;
140
141 // If the transfer is volatile, reject it.
142 if (MI->isVolatile())
143 return false;
144
145 // If the transfer is using the alloca as the source of the transfer,
146 // then ignore it, since it is only a read of the alloca.
147 if (U.getOperandNo() == 1)
148 continue;
149
150 // If we already have seen a copy, reject the second one.
151 if (TheCopy) return false;
152
153 // If the pointer has been offset from the start of the alloca, we can't
154 // safely handle this.
155 if (IsOffset) return false;
156
157 // If the memintrinsic isn't using the alloca as the dest, reject it.
158 if (U.getOperandNo() != 0) return false;
159
160 // If the source of the memcpy/move is not constant, reject it.
161 if (isModSet(AA->getModRefInfoMask(MI->getSource())))
162 return false;
163
164 // Otherwise, the transform is safe. Remember the copy instruction.
165 TheCopy = MI;
166 }
167 }
168 return true;
169}
170
171/// isOnlyCopiedFromConstantMemory - Return the single memcpy/memmove copying
172/// from constant memory if the specified alloca is only modified by such a
173/// copy; otherwise return nullptr. If we can prove this, we can replace any
174/// uses of the alloca with uses of the source memory location directly.
175static MemTransferInst *
176isOnlyCopiedFromConstantMemory(AAResults *AA,
177 AllocaInst *AI,
178 SmallVectorImpl<Instruction *> &ToDelete) {
179 MemTransferInst *TheCopy = nullptr;
180 if (isOnlyCopiedFromConstantMemory(AA, AI, TheCopy, ToDelete))
181 return TheCopy;
182 return nullptr;
183}
184
185/// Returns true if V is dereferenceable for size of alloca.
186static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
187 const DataLayout &DL) {
188 if (AI->isArrayAllocation())
189 return false;
190 uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
191 if (!AllocaSize)
192 return false;
193 return isDereferenceableAndAlignedPointer(V, AI->getAlign(),
194                                            APInt(64, AllocaSize), DL);
195}
196
197static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC,
198                                            AllocaInst &AI, DominatorTree &DT) {
199 // Check for array size of 1 (scalar allocation).
200 if (!AI.isArrayAllocation()) {
201 // i32 1 is the canonical array size for scalar allocations.
202 if (AI.getArraySize()->getType()->isIntegerTy(32))
203 return nullptr;
204
205 // Canonicalize it.
206 return IC.replaceOperand(AI, 0, IC.Builder.getInt32(1));
207 }
208
209 // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
210 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
211 if (C->getValue().getActiveBits() <= 64) {
212 Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
213 AllocaInst *New = IC.Builder.CreateAlloca(NewTy, AI.getAddressSpace(),
214 nullptr, AI.getName());
215 New->setAlignment(AI.getAlign());
216
217 replaceAllDbgUsesWith(AI, *New, *New, DT);
218
219 // Scan to the end of the allocation instructions, to skip over a block of
220 // allocas if possible...also skip interleaved debug info
221 //
222 BasicBlock::iterator It(New);
223 while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
224 ++It;
225
226 // Now that It points to the first non-allocation-inst in the block,
227 // insert our getelementptr instruction...
228 //
229 Type *IdxTy = IC.getDataLayout().getIndexType(AI.getType());
230 Value *NullIdx = Constant::getNullValue(IdxTy);
231 Value *Idx[2] = {NullIdx, NullIdx};
232 Instruction *GEP = GetElementPtrInst::CreateInBounds(
233     NewTy, New, Idx, New->getName() + ".sub");
234 IC.InsertNewInstBefore(GEP, It);
235
236 // Now make everything use the getelementptr instead of the original
237 // allocation.
238 return IC.replaceInstUsesWith(AI, GEP);
239 }
240 }
241
242 if (isa<UndefValue>(AI.getArraySize()))
243 return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
244
245 // Ensure that the alloca array size argument has type equal to the offset
246 // size of the alloca() pointer, which, in the typical case, is intptr_t,
247 // so that any casting is exposed early.
248 Type *PtrIdxTy = IC.getDataLayout().getIndexType(AI.getType());
249 if (AI.getArraySize()->getType() != PtrIdxTy) {
250 Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), PtrIdxTy, false);
251 return IC.replaceOperand(AI, 0, V);
252 }
253
254 return nullptr;
255}
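// Editor's note: a minimal sketch of the array-size canonicalization above,
// assuming a 64-bit index type (illustrative, not from the original source):
//
//   %a = alloca i32, i32 4, align 4
//   -->
//   %a1 = alloca [4 x i32], align 4
//   %a1.sub = getelementptr inbounds [4 x i32], ptr %a1, i64 0, i64 0
//
// All uses of %a are then rewritten to use %a1.sub instead.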
256
257namespace {
258// If I and V are pointers in different address spaces, it is not allowed to
259// use replaceAllUsesWith since I and V have different types. A
260// non-target-specific transformation should not use addrspacecast on V since
261// the two address spaces may be disjoint depending on the target.
262//
263// This class chases down uses of the old pointer until reaching the load
264// instructions, then replaces the old pointer in the load instructions with
265// the new pointer. If during the chasing it sees bitcast or GEP, it will
266// create new bitcast or GEP with the new pointer and use them in the load
267// instruction.
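// Editor's illustrative sketch (hypothetical address-space numbers, not part
// of the original source): if an alloca in addrspace(5) is found to be a copy
// of a constant global in addrspace(4), a use chain such as
//
//   %p = getelementptr [4 x i32], ptr addrspace(5) %alloca, i64 0, i64 %i
//   %v = load i32, ptr addrspace(5) %p
//
// is rewritten, use by use, into
//
//   %p.new = getelementptr [4 x i32], ptr addrspace(4) @global, i64 0, i64 %i
//   %v = load i32, ptr addrspace(4) %p.new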
268class PointerReplacer {
269public:
270 PointerReplacer(InstCombinerImpl &IC, Instruction &Root, unsigned SrcAS)
271 : IC(IC), Root(Root), FromAS(SrcAS) {}
272
273 bool collectUsers();
274 void replacePointer(Value *V);
275
276private:
277 bool collectUsersRecursive(Instruction &I);
278 void replace(Instruction *I);
279 Value *getReplacement(Value *I);
280 bool isAvailable(Instruction *I) const {
281 return I == &Root || Worklist.contains(I);
282 }
283
284 bool isEqualOrValidAddrSpaceCast(const Instruction *I,
285 unsigned FromAS) const {
286 const auto *ASC = dyn_cast<AddrSpaceCastInst>(I);
287 if (!ASC)
288 return false;
289 unsigned ToAS = ASC->getDestAddressSpace();
290 return (FromAS == ToAS) || IC.isValidAddrSpaceCast(FromAS, ToAS);
291 }
292
293 SmallPtrSet<Instruction *, 32> ValuesToRevisit;
294 SmallSetVector<Instruction *, 4> Worklist;
295 MapVector<Value *, Value *> WorkMap;
296 InstCombinerImpl &IC;
297 Instruction &Root;
298 unsigned FromAS;
299};
300} // end anonymous namespace
301
302bool PointerReplacer::collectUsers() {
303 if (!collectUsersRecursive(Root))
304 return false;
305
306 // Ensure that all outstanding (indirect) users of I
307 // are inserted into the Worklist. Return false
308 // otherwise.
309 for (auto *Inst : ValuesToRevisit)
310 if (!Worklist.contains(Inst))
311 return false;
312 return true;
313}
314
315bool PointerReplacer::collectUsersRecursive(Instruction &I) {
316 for (auto *U : I.users()) {
317 auto *Inst = cast<Instruction>(&*U);
318 if (auto *Load = dyn_cast<LoadInst>(Inst)) {
319 if (Load->isVolatile())
320 return false;
321 Worklist.insert(Load);
322 } else if (auto *PHI = dyn_cast<PHINode>(Inst)) {
323 // All incoming values must be instructions for replaceability.
324 if (any_of(PHI->incoming_values(),
325 [](Value *V) { return !isa<Instruction>(V); }))
326 return false;
327
328 // If at least one incoming value of the PHI is not in Worklist,
329 // store the PHI for revisiting and skip this iteration of the
330 // loop.
331 if (any_of(PHI->incoming_values(), [this](Value *V) {
332 return !isAvailable(cast<Instruction>(V));
333 })) {
334 ValuesToRevisit.insert(Inst);
335 continue;
336 }
337
338 Worklist.insert(PHI);
339 if (!collectUsersRecursive(*PHI))
340 return false;
341 } else if (auto *SI = dyn_cast<SelectInst>(Inst)) {
342 if (!isa<Instruction>(SI->getTrueValue()) ||
343 !isa<Instruction>(SI->getFalseValue()))
344 return false;
345
346 if (!isAvailable(cast<Instruction>(SI->getTrueValue())) ||
347 !isAvailable(cast<Instruction>(SI->getFalseValue()))) {
348 ValuesToRevisit.insert(Inst);
349 continue;
350 }
351 Worklist.insert(SI);
352 if (!collectUsersRecursive(*SI))
353 return false;
354 } else if (isa<GetElementPtrInst, BitCastInst>(Inst)) {
355 Worklist.insert(Inst);
356 if (!collectUsersRecursive(*Inst))
357 return false;
358 } else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
359 if (MI->isVolatile())
360 return false;
361 Worklist.insert(Inst);
362 } else if (isEqualOrValidAddrSpaceCast(Inst, FromAS)) {
363 Worklist.insert(Inst);
364 } else if (Inst->isLifetimeStartOrEnd()) {
365 continue;
366 } else {
367 LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *U << '\n');
368 return false;
369 }
370 }
371
372 return true;
373}
374
375Value *PointerReplacer::getReplacement(Value *V) { return WorkMap.lookup(V); }
376
377void PointerReplacer::replace(Instruction *I) {
378 if (getReplacement(I))
379 return;
380
381 if (auto *LT = dyn_cast<LoadInst>(I)) {
382 auto *V = getReplacement(LT->getPointerOperand());
383 assert(V && "Operand not replaced");
384 auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
385 LT->getAlign(), LT->getOrdering(),
386 LT->getSyncScopeID());
387 NewI->takeName(LT);
388 copyMetadataForLoad(*NewI, *LT);
389
390 IC.InsertNewInstWith(NewI, LT->getIterator());
391 IC.replaceInstUsesWith(*LT, NewI);
392 WorkMap[LT] = NewI;
393 } else if (auto *PHI = dyn_cast<PHINode>(I)) {
394 Type *NewTy = getReplacement(PHI->getIncomingValue(0))->getType();
395 auto *NewPHI = PHINode::Create(NewTy, PHI->getNumIncomingValues(),
396 PHI->getName(), PHI);
397 for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I)
398 NewPHI->addIncoming(getReplacement(PHI->getIncomingValue(I)),
399 PHI->getIncomingBlock(I));
400 WorkMap[PHI] = NewPHI;
401 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
402 auto *V = getReplacement(GEP->getPointerOperand());
403 assert(V && "Operand not replaced");
404 SmallVector<Value *, 8> Indices;
405 Indices.append(GEP->idx_begin(), GEP->idx_end());
406 auto *NewI =
407 GetElementPtrInst::Create(GEP->getSourceElementType(), V, Indices);
408 IC.InsertNewInstWith(NewI, GEP->getIterator());
409 NewI->takeName(GEP);
410 WorkMap[GEP] = NewI;
411 } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
412 auto *V = getReplacement(BC->getOperand(0));
413 assert(V && "Operand not replaced");
414 auto *NewT = PointerType::get(BC->getType()->getContext(),
415 V->getType()->getPointerAddressSpace());
416 auto *NewI = new BitCastInst(V, NewT);
417 IC.InsertNewInstWith(NewI, BC->getIterator());
418 NewI->takeName(BC);
419 WorkMap[BC] = NewI;
420 } else if (auto *SI = dyn_cast<SelectInst>(I)) {
421 auto *NewSI = SelectInst::Create(
422 SI->getCondition(), getReplacement(SI->getTrueValue()),
423 getReplacement(SI->getFalseValue()), SI->getName(), nullptr, SI);
424 IC.InsertNewInstWith(NewSI, SI->getIterator());
425 NewSI->takeName(SI);
426 WorkMap[SI] = NewSI;
427 } else if (auto *MemCpy = dyn_cast<MemTransferInst>(I)) {
428 auto *SrcV = getReplacement(MemCpy->getRawSource());
429 // The pointer may appear in the destination of a copy, but we don't want to
430 // replace it.
431 if (!SrcV) {
432 assert(getReplacement(MemCpy->getRawDest()) &&
433 "destination not in replace list");
434 return;
435 }
436
437 IC.Builder.SetInsertPoint(MemCpy);
438 auto *NewI = IC.Builder.CreateMemTransferInst(
439 MemCpy->getIntrinsicID(), MemCpy->getRawDest(), MemCpy->getDestAlign(),
440 SrcV, MemCpy->getSourceAlign(), MemCpy->getLength(),
441 MemCpy->isVolatile());
442 AAMDNodes AAMD = MemCpy->getAAMetadata();
443 if (AAMD)
444 NewI->setAAMetadata(AAMD);
445
446 IC.eraseInstFromFunction(*MemCpy);
447 WorkMap[MemCpy] = NewI;
448 } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(I)) {
449 auto *V = getReplacement(ASC->getPointerOperand());
450 assert(V && "Operand not replaced");
451 assert(isEqualOrValidAddrSpaceCast(
452 ASC, V->getType()->getPointerAddressSpace()) &&
453 "Invalid address space cast!");
454 auto *NewV = V;
455 if (V->getType()->getPointerAddressSpace() !=
456 ASC->getType()->getPointerAddressSpace()) {
457 auto *NewI = new AddrSpaceCastInst(V, ASC->getType(), "");
458 NewI->takeName(ASC);
459 IC.InsertNewInstWith(NewI, ASC->getIterator());
460 NewV = NewI;
461 }
462 IC.replaceInstUsesWith(*ASC, NewV);
463 IC.eraseInstFromFunction(*ASC);
464 } else {
465 llvm_unreachable("should never reach here");
466 }
467}
468
469void PointerReplacer::replacePointer(Value *V) {
470#ifndef NDEBUG
471 auto *PT = cast<PointerType>(Root.getType());
472 auto *NT = cast<PointerType>(V->getType());
473 assert(PT != NT && "Invalid usage");
474#endif
475 WorkMap[&Root] = V;
476
477 for (Instruction *Workitem : Worklist)
478 replace(Workitem);
479}
480
481Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
482 if (auto *I = simplifyAllocaArraySize(*this, AI, DT))
483 return I;
484
485 if (AI.getAllocatedType()->isSized()) {
486 // Move all alloca's of zero byte objects to the entry block and merge them
487 // together. Note that we only do this for alloca's, because malloc should
488 // allocate and return a unique pointer, even for a zero byte allocation.
489 if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinValue() == 0) {
490 // For a zero sized alloca there is no point in doing an array allocation.
491 // This is helpful if the array size is a complicated expression not used
492 // elsewhere.
493 if (AI.isArrayAllocation())
494 return replaceOperand(AI, 0,
495                              ConstantInt::get(AI.getArraySize()->getType(), 1));
496
497 // Get the first instruction in the entry block.
498 BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
499 Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
500 if (FirstInst != &AI) {
501 // If the entry block doesn't start with a zero-size alloca then move
502 // this one to the start of the entry block. There is no problem with
503 // dominance as the array size was forced to a constant earlier already.
504 AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
505 if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
506         DL.getTypeAllocSize(EntryAI->getAllocatedType())
507             .getKnownMinValue() != 0) {
508 AI.moveBefore(FirstInst);
509 return &AI;
510 }
511
512 // Replace this zero-sized alloca with the one at the start of the entry
513 // block after ensuring that the address will be aligned enough for both
514 // types.
515 const Align MaxAlign = std::max(EntryAI->getAlign(), AI.getAlign());
516 EntryAI->setAlignment(MaxAlign);
517 return replaceInstUsesWith(AI, EntryAI);
518 }
519 }
520 }
521
522 // Check to see if this allocation is only modified by a memcpy/memmove from
523 // a memory location whose alignment is equal to or exceeds that of the
524 // allocation. If this is the case, we can change all users to use the
525 // constant memory location instead. This is commonly produced by the CFE by
526 // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
527 // is only subsequently read.
528 SmallVector<Instruction *, 4> ToDelete;
529 if (MemTransferInst *Copy = isOnlyCopiedFromConstantMemory(AA, &AI, ToDelete)) {
530 Value *TheSrc = Copy->getSource();
531 Align AllocaAlign = AI.getAlign();
532 Align SourceAlign = getOrEnforceKnownAlignment(
533 TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
534 if (AllocaAlign <= SourceAlign &&
535 isDereferenceableForAllocaSize(TheSrc, &AI, DL) &&
536 !isa<Instruction>(TheSrc)) {
537 // FIXME: Can we sink instructions without violating dominance when TheSrc
538 // is an instruction instead of a constant or argument?
539 LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
540 LLVM_DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
541 unsigned SrcAddrSpace = TheSrc->getType()->getPointerAddressSpace();
542 if (AI.getAddressSpace() == SrcAddrSpace) {
543 for (Instruction *Delete : ToDelete)
544 eraseInstFromFunction(*Delete);
545
546 Instruction *NewI = replaceInstUsesWith(AI, TheSrc);
547 eraseInstFromFunction(*Copy);
548 ++NumGlobalCopies;
549 return NewI;
550 }
551
552 PointerReplacer PtrReplacer(*this, AI, SrcAddrSpace);
553 if (PtrReplacer.collectUsers()) {
554 for (Instruction *Delete : ToDelete)
555 eraseInstFromFunction(*Delete);
556
557 PtrReplacer.replacePointer(TheSrc);
558 ++NumGlobalCopies;
559 }
560 }
561 }
562
563 // At last, use the generic allocation site handler to aggressively remove
564 // unused allocas.
565 return visitAllocSite(AI);
566}
567
568// Are we allowed to form an atomic load or store of this type?
569static bool isSupportedAtomicType(Type *Ty) {
570 return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
571}
572
573/// Helper to combine a load to a new type.
574///
575/// This just does the work of combining a load to a new type. It handles
576/// metadata, etc., and returns the new instruction. The \c NewTy should be the
577/// loaded *value* type. This will convert it to a pointer, cast the operand to
578/// that pointer type, load it, etc.
579///
580/// Note that this will create all of the instructions with whatever insert
581/// point the \c InstCombinerImpl currently is using.
582LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
583                                                 const Twine &Suffix) {
584 assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
585 "can't fold an atomic load to requested type");
586
587 LoadInst *NewLoad =
588 Builder.CreateAlignedLoad(NewTy, LI.getPointerOperand(), LI.getAlign(),
589 LI.isVolatile(), LI.getName() + Suffix);
590 NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
591 copyMetadataForLoad(*NewLoad, LI);
592 return NewLoad;
593}
594
595/// Combine a store to a new type.
596///
597/// Returns the newly created store instruction.
598static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
599                                         Value *V) {
600 assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
601 "can't fold an atomic store of requested type");
602
603 Value *Ptr = SI.getPointerOperand();
604 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
605 SI.getAllMetadata(MD);
606
607 StoreInst *NewStore =
608 IC.Builder.CreateAlignedStore(V, Ptr, SI.getAlign(), SI.isVolatile());
609 NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
610 for (const auto &MDPair : MD) {
611 unsigned ID = MDPair.first;
612 MDNode *N = MDPair.second;
613 // Note, essentially every kind of metadata should be preserved here! This
614 // routine is supposed to clone a store instruction changing *only its
615 // type*. The only metadata it makes sense to drop is metadata which is
616 // invalidated when the pointer type changes. This should essentially
617 // never be the case in LLVM, but we explicitly switch over only known
618 // metadata to be conservatively correct. If you are adding metadata to
619 // LLVM which pertains to stores, you almost certainly want to add it
620 // here.
621 switch (ID) {
622 case LLVMContext::MD_dbg:
623 case LLVMContext::MD_DIAssignID:
624 case LLVMContext::MD_tbaa:
625 case LLVMContext::MD_prof:
626 case LLVMContext::MD_fpmath:
627 case LLVMContext::MD_tbaa_struct:
628 case LLVMContext::MD_alias_scope:
629 case LLVMContext::MD_noalias:
630 case LLVMContext::MD_nontemporal:
631 case LLVMContext::MD_mem_parallel_loop_access:
632 case LLVMContext::MD_access_group:
633 // All of these directly apply.
634 NewStore->setMetadata(ID, N);
635 break;
636 case LLVMContext::MD_invariant_load:
637 case LLVMContext::MD_nonnull:
638 case LLVMContext::MD_noundef:
639 case LLVMContext::MD_range:
640 case LLVMContext::MD_align:
641 case LLVMContext::MD_dereferenceable:
642 case LLVMContext::MD_dereferenceable_or_null:
643 // These don't apply for stores.
644 break;
645 }
646 }
647
648 return NewStore;
649}
650
651/// Combine loads to match the type of their uses' value after looking
652/// through intervening bitcasts.
653///
654/// The core idea here is that if the result of a load is used in an operation,
655/// we should load the type most conducive to that operation. For example, when
656/// loading an integer and converting that immediately to a pointer, we should
657/// instead directly load a pointer.
658///
659/// However, this routine must never change the width of a load or the number of
660/// loads as that would introduce a semantic change. This combine is expected to
661/// be a semantic no-op which just allows loads to more closely model the types
662/// of their consuming operations.
663///
664/// Currently, we also refuse to change the precise type used for an atomic load
665/// or a volatile load. This is debatable, and might be reasonable to change
666/// later. However, it is risky in case some backend or other part of LLVM is
667/// relying on the exact type loaded to select appropriate atomic operations.
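// Editor's note: a minimal sketch of the kind of fold performed below
// (illustrative only; value names are hypothetical):
//
//   %x = load i32, ptr %p
//   %f = bitcast i32 %x to float
//   -->
//   %f = load float, ptr %p
//
// Pointer<->integer no-op casts are deliberately left alone, since rewriting
// them would amount to type punning between pointers and integers.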
668static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
669                                               LoadInst &Load) {
670 // FIXME: We could probably with some care handle both volatile and ordered
671 // atomic loads here but it isn't clear that this is important.
672 if (!Load.isUnordered())
673 return nullptr;
674
675 if (Load.use_empty())
676 return nullptr;
677
678 // swifterror values can't be bitcasted.
679 if (Load.getPointerOperand()->isSwiftError())
680 return nullptr;
681
682 // Fold away bit casts of the loaded value by loading the desired type.
683 // Note that we should not do this for pointer<->integer casts,
684 // because that would result in type punning.
685 if (Load.hasOneUse()) {
686 // Don't transform when the type is x86_amx; this keeps the pass that
687 // lowers the x86_amx type happy.
688 Type *LoadTy = Load.getType();
689 if (auto *BC = dyn_cast<BitCastInst>(Load.user_back())) {
690 assert(!LoadTy->isX86_AMXTy() && "Load from x86_amx* should not happen!");
691 if (BC->getType()->isX86_AMXTy())
692 return nullptr;
693 }
694
695 if (auto *CastUser = dyn_cast<CastInst>(Load.user_back())) {
696 Type *DestTy = CastUser->getDestTy();
697 if (CastUser->isNoopCast(IC.getDataLayout()) &&
698 LoadTy->isPtrOrPtrVectorTy() == DestTy->isPtrOrPtrVectorTy() &&
699 (!Load.isAtomic() || isSupportedAtomicType(DestTy))) {
700 LoadInst *NewLoad = IC.combineLoadToNewType(Load, DestTy);
701 CastUser->replaceAllUsesWith(NewLoad);
702 IC.eraseInstFromFunction(*CastUser);
703 return &Load;
704 }
705 }
706 }
707
708 // FIXME: We should also canonicalize loads of vectors when their elements are
709 // cast to other types.
710 return nullptr;
711}
712
713static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
714 // FIXME: We could probably with some care handle both volatile and atomic
715 // loads here but it isn't clear that this is important.
716 if (!LI.isSimple())
717 return nullptr;
718
719 Type *T = LI.getType();
720 if (!T->isAggregateType())
721 return nullptr;
722
723 StringRef Name = LI.getName();
724
725 if (auto *ST = dyn_cast<StructType>(T)) {
726 // If the struct has only one element, we unpack.
727 auto NumElements = ST->getNumElements();
728 if (NumElements == 1) {
729 LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
730 ".unpack");
731 NewLoad->setAAMetadata(LI.getAAMetadata());
732 return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
733     PoisonValue::get(T), NewLoad, 0, Name));
734 }
735
736 // We don't want to break loads with padding here as we'd lose
737 // the knowledge that padding exists for the rest of the pipeline.
738 const DataLayout &DL = IC.getDataLayout();
739 auto *SL = DL.getStructLayout(ST);
740
741 // Don't unpack for structure with scalable vector.
742 if (SL->getSizeInBits().isScalable())
743 return nullptr;
744
745 if (SL->hasPadding())
746 return nullptr;
747
748 const auto Align = LI.getAlign();
749 auto *Addr = LI.getPointerOperand();
750 auto *IdxType = Type::getInt32Ty(T->getContext());
751 auto *Zero = ConstantInt::get(IdxType, 0);
752
753 Value *V = PoisonValue::get(T);
754 for (unsigned i = 0; i < NumElements; i++) {
755 Value *Indices[2] = {
756 Zero,
757 ConstantInt::get(IdxType, i),
758 };
759 auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, ArrayRef(Indices),
760 Name + ".elt");
761 auto *L = IC.Builder.CreateAlignedLoad(
762 ST->getElementType(i), Ptr,
763 commonAlignment(Align, SL->getElementOffset(i)), Name + ".unpack");
764 // Propagate AA metadata. It'll still be valid on the narrowed load.
765 L->setAAMetadata(LI.getAAMetadata());
766 V = IC.Builder.CreateInsertValue(V, L, i);
767 }
768
769 V->setName(Name);
770 return IC.replaceInstUsesWith(LI, V);
771 }
772
773 if (auto *AT = dyn_cast<ArrayType>(T)) {
774 auto *ET = AT->getElementType();
775 auto NumElements = AT->getNumElements();
776 if (NumElements == 1) {
777 LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
778 NewLoad->setAAMetadata(LI.getAAMetadata());
779 return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
780     PoisonValue::get(T), NewLoad, 0, Name));
781 }
782
783 // Bail out if the array is too large. Ideally we would like to optimize
784 // arrays of arbitrary size but this has a terrible impact on compile time.
785 // The threshold here is chosen arbitrarily, maybe needs a little bit of
786 // tuning.
787 if (NumElements > IC.MaxArraySizeForCombine)
788 return nullptr;
789
790 const DataLayout &DL = IC.getDataLayout();
791 TypeSize EltSize = DL.getTypeAllocSize(ET);
792 const auto Align = LI.getAlign();
793
794 auto *Addr = LI.getPointerOperand();
795 auto *IdxType = Type::getInt64Ty(T->getContext());
796 auto *Zero = ConstantInt::get(IdxType, 0);
797
798 Value *V = PoisonValue::get(T);
799 TypeSize Offset = TypeSize::get(0, ET->isScalableTy());
800 for (uint64_t i = 0; i < NumElements; i++) {
801 Value *Indices[2] = {
802 Zero,
803 ConstantInt::get(IdxType, i),
804 };
805 auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, ArrayRef(Indices),
806 Name + ".elt");
807 auto EltAlign = commonAlignment(Align, Offset.getKnownMinValue());
808 auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
809 EltAlign, Name + ".unpack");
810 L->setAAMetadata(LI.getAAMetadata());
811 V = IC.Builder.CreateInsertValue(V, L, i);
812 Offset += EltSize;
813 }
814
815 V->setName(Name);
816 return IC.replaceInstUsesWith(LI, V);
817 }
818
819 return nullptr;
820}
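// Editor's illustrative sketch of the aggregate-load unpacking above, for a
// padding-free two-element struct (names approximate, not from the original
// source):
//
//   %s = load { i32, i32 }, ptr %p, align 4
//   -->
//   %s.elt = getelementptr inbounds { i32, i32 }, ptr %p, i32 0, i32 0
//   %s.unpack = load i32, ptr %s.elt, align 4
//   %0 = insertvalue { i32, i32 } poison, i32 %s.unpack, 0
//   %s.elt1 = getelementptr inbounds { i32, i32 }, ptr %p, i32 0, i32 1
//   %s.unpack2 = load i32, ptr %s.elt1, align 4
//   %s3 = insertvalue { i32, i32 } %0, i32 %s.unpack2, 1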
821
822// If we can determine that all possible objects pointed to by the provided
823// pointer value are, not only dereferenceable, but also definitively less than
824// or equal to the provided maximum size, then return true. Otherwise, return
825// false (constant global values and allocas are examples where this succeeds).
826//
827// FIXME: This should probably live in ValueTracking (or similar).
828static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
829                                     const DataLayout &DL) {
830 SmallPtrSet<Value *, 4> Visited;
831 SmallVector<Value *, 4> Worklist(1, V);
832
833 do {
834 Value *P = Worklist.pop_back_val();
835 P = P->stripPointerCasts();
836
837 if (!Visited.insert(P).second)
838 continue;
839
840 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
841 Worklist.push_back(SI->getTrueValue());
842 Worklist.push_back(SI->getFalseValue());
843 continue;
844 }
845
846 if (PHINode *PN = dyn_cast<PHINode>(P)) {
847 append_range(Worklist, PN->incoming_values());
848 continue;
849 }
850
851 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
852 if (GA->isInterposable())
853 return false;
854 Worklist.push_back(GA->getAliasee());
855 continue;
856 }
857
858 // If we know how big this object is, and it is less than MaxSize, continue
859 // searching. Otherwise, return false.
860 if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
861 if (!AI->getAllocatedType()->isSized())
862 return false;
863
864 ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
865 if (!CS)
866 return false;
867
868 TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());
869 if (TS.isScalable())
870 return false;
871 // Make sure that, even if the multiplication below would wrap as a
872 // uint64_t, we still do the right thing.
873 if ((CS->getValue().zext(128) * APInt(128, TS.getFixedValue()))
874 .ugt(MaxSize))
875 return false;
876 continue;
877 }
878
879 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
880 if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
881 return false;
882
883 uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
884 if (InitSize > MaxSize)
885 return false;
886 continue;
887 }
888
889 return false;
890 } while (!Worklist.empty());
891
892 return true;
893}
894
895// If we're indexing into an object of a known size, and the outer index is
896// not a constant, but having any value but zero would lead to undefined
897// behavior, replace it with zero.
898//
899// For example, if we have:
900// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
901// ...
902// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
903// ... = load i32* %arrayidx, align 4
904// Then we know that we can replace %x in the GEP with i64 0.
905//
906// FIXME: We could fold any GEP index to zero that would cause UB if it were
907// not zero. Currently, we only handle the first such index. Also, we could
908// search through non-zero constant indices if we kept track of the
909// offsets those indices implied.
910static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,
911                                     GetElementPtrInst *GEPI, Instruction *MemI,
912 unsigned &Idx) {
913 if (GEPI->getNumOperands() < 2)
914 return false;
915
916 // Find the first non-zero index of a GEP. If all indices are zero, return
917 // one past the last index.
918 auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
919 unsigned I = 1;
920 for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
921 Value *V = GEPI->getOperand(I);
922 if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
923 if (CI->isZero())
924 continue;
925
926 break;
927 }
928
929 return I;
930 };
931
932 // Skip through initial 'zero' indices, and find the corresponding pointer
933 // type. See if the next index is not a constant.
934 Idx = FirstNZIdx(GEPI);
935 if (Idx == GEPI->getNumOperands())
936 return false;
937 if (isa<Constant>(GEPI->getOperand(Idx)))
938 return false;
939
940 SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
941 Type *SourceElementType = GEPI->getSourceElementType();
942 // Size information about scalable vectors is not available, so we cannot
943 // deduce whether indexing at n is undefined behaviour or not. Bail out.
944 if (SourceElementType->isScalableTy())
945 return false;
946
947 Type *AllocTy = GetElementPtrInst::getIndexedType(SourceElementType, Ops);
948 if (!AllocTy || !AllocTy->isSized())
949 return false;
950 const DataLayout &DL = IC.getDataLayout();
951 uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedValue();
952
953 // If there are more indices after the one we might replace with a zero, make
954 // sure they're all non-negative. If any of them are negative, the overall
955 // address being computed might be before the base address determined by the
956 // first non-zero index.
957 auto IsAllNonNegative = [&]() {
958 for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
959 KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
960 if (Known.isNonNegative())
961 continue;
962 return false;
963 }
964
965 return true;
966 };
967
968 // FIXME: If the GEP is not inbounds, and there are extra indices after the
969 // one we'll replace, those could cause the address computation to wrap
970 // (rendering the IsAllNonNegative() check below insufficient). We can do
971 // better, ignoring zero indices (and other indices we can prove small
972 // enough not to wrap).
973 if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
974 return false;
975
976 // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
977 // also known to be dereferenceable.
978 return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
979 IsAllNonNegative();
980}
981
982// If we're indexing into an object with a variable index for the memory
983// access, but the object has only one element, we can assume that the index
984// will always be zero. If we replace the GEP, return it.
985static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,
986                                          Instruction &MemI) {
987 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
988 unsigned Idx;
989 if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
990 Instruction *NewGEPI = GEPI->clone();
991 NewGEPI->setOperand(Idx,
992 ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
993 IC.InsertNewInstBefore(NewGEPI, GEPI->getIterator());
994 return NewGEPI;
995 }
996 }
997
998 return nullptr;
999}
1000
1001static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
1002 if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
1003 return false;
1004
1005 auto *Ptr = SI.getPointerOperand();
1006 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
1007 Ptr = GEPI->getOperand(0);
1008 return (isa<ConstantPointerNull>(Ptr) &&
1009 !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
1010}
1011
1012static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
1013 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
1014 const Value *GEPI0 = GEPI->getOperand(0);
1015 if (isa<ConstantPointerNull>(GEPI0) &&
1016 !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
1017 return true;
1018 }
1019 if (isa<UndefValue>(Op) ||
1020 (isa<ConstantPointerNull>(Op) &&
1021      !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
1022   return true;
1023 return false;
1024}
1025
1026Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {
1027 Value *Op = LI.getOperand(0);
1028 if (Value *Res = simplifyLoadInst(&LI, Op, SQ.getWithInstruction(&LI)))
1029   return replaceInstUsesWith(LI, Res);
1030
1031 // Try to canonicalize the loaded type.
1032 if (Instruction *Res = combineLoadToOperationType(*this, LI))
1033 return Res;
1034
1035 if (!EnableInferAlignmentPass) {
1036   // Attempt to improve the alignment.
1037 Align KnownAlign = getOrEnforceKnownAlignment(
1038 Op, DL.getPrefTypeAlign(LI.getType()), DL, &LI, &AC, &DT);
1039 if (KnownAlign > LI.getAlign())
1040 LI.setAlignment(KnownAlign);
1041 }
1042
1043 // Replace GEP indices if possible.
1044 if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI))
1045 return replaceOperand(LI, 0, NewGEPI);
1046
1047 if (Instruction *Res = unpackLoadToAggregate(*this, LI))
1048 return Res;
1049
1050 // Do really simple store-to-load forwarding and load CSE, to catch cases
1051 // where there are several consecutive memory accesses to the same location,
1052 // separated by a few arithmetic operations.
1053 bool IsLoadCSE = false;
1054 if (Value *AvailableVal = FindAvailableLoadedValue(&LI, *AA, &IsLoadCSE)) {
1055 if (IsLoadCSE)
1056 combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);
1057
1058 return replaceInstUsesWith(
1059 LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
1060 LI.getName() + ".cast"));
1061 }
1062
1063 // None of the following transforms are legal for volatile/ordered atomic
1064 // loads. Most of them do apply for unordered atomics.
1065 if (!LI.isUnordered()) return nullptr;
1066
1067 // load(gep null, ...) -> unreachable
1068 // load null/undef -> unreachable
1069 // TODO: Consider a target hook for valid address spaces for this xforms.
1070 if (canSimplifyNullLoadOrGEP(LI, Op)) {
1071   CreateNonTerminatorUnreachable(&LI);
1072   return replaceInstUsesWith(LI, PoisonValue::get(LI.getType()));
1073 }
1074
1075 if (Op->hasOneUse()) {
1076 // Change select and PHI nodes to select values instead of addresses: this
1077 // helps alias analysis out a lot, allows many other simplifications, and
1078 // exposes redundancy in the code.
1079 //
1080 // Note that we cannot do the transformation unless we know that the
1081 // introduced loads cannot trap! Something like this is valid as long as
1082 // the condition is always false: load (select bool %C, int* null, int* %G),
1083 // but it would not be valid if we transformed it to load from null
1084 // unconditionally.
1085 //
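    // Editor's note: roughly (illustrative only), when both pointers are known
    // to be unconditionally loadable:
    //   %p = select i1 %c, ptr %a, ptr %b
    //   %v = load i32, ptr %p
    //   -->
    //   %a.val = load i32, ptr %a
    //   %b.val = load i32, ptr %b
    //   %v = select i1 %c, i32 %a.val, i32 %b.val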
1086 if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
1087 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
1088 Align Alignment = LI.getAlign();
1089 if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
1090 Alignment, DL, SI) &&
1091 isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
1092 Alignment, DL, SI)) {
1093 LoadInst *V1 =
1094 Builder.CreateLoad(LI.getType(), SI->getOperand(1),
1095 SI->getOperand(1)->getName() + ".val");
1096 LoadInst *V2 =
1097 Builder.CreateLoad(LI.getType(), SI->getOperand(2),
1098 SI->getOperand(2)->getName() + ".val");
1099 assert(LI.isUnordered() && "implied by above");
1100 V1->setAlignment(Alignment);
1101 V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
1102 V2->setAlignment(Alignment);
1103 V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
1104 return SelectInst::Create(SI->getCondition(), V1, V2);
1105 }
1106
1107 // load (select (cond, null, P)) -> load P
1108 if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
1109 !NullPointerIsDefined(SI->getFunction(),
1110 LI.getPointerAddressSpace()))
1111 return replaceOperand(LI, 0, SI->getOperand(2));
1112
1113 // load (select (cond, P, null)) -> load P
1114 if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
1115 !NullPointerIsDefined(SI->getFunction(),
1116 LI.getPointerAddressSpace()))
1117 return replaceOperand(LI, 0, SI->getOperand(1));
1118 }
1119 }
1120 return nullptr;
1121}
1122
1123/// Look for extractelement/insertvalue sequence that acts like a bitcast.
1124///
1125/// \returns underlying value that was "cast", or nullptr otherwise.
1126///
1127/// For example, if we have:
1128///
1129/// %E0 = extractelement <2 x double> %U, i32 0
1130/// %V0 = insertvalue [2 x double] undef, double %E0, 0
1131/// %E1 = extractelement <2 x double> %U, i32 1
1132/// %V1 = insertvalue [2 x double] %V0, double %E1, 1
1133///
1134/// and the layout of a <2 x double> is isomorphic to a [2 x double],
1135/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
1136/// Note that %U may contain non-undef values where %V1 has undef.
1137static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
1138 Value *U = nullptr;
1139 while (auto *IV = dyn_cast<InsertValueInst>(V)) {
1140 auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
1141 if (!E)
1142 return nullptr;
1143 auto *W = E->getVectorOperand();
1144 if (!U)
1145 U = W;
1146 else if (U != W)
1147 return nullptr;
1148 auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
1149 if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
1150 return nullptr;
1151 V = IV->getAggregateOperand();
1152 }
1153 if (!match(V, m_Undef()) || !U)
1154 return nullptr;
1155
1156 auto *UT = cast<VectorType>(U->getType());
1157 auto *VT = V->getType();
1158 // Check that types UT and VT are bitwise isomorphic.
1159 const auto &DL = IC.getDataLayout();
1160 if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
1161 return nullptr;
1162 }
1163 if (auto *AT = dyn_cast<ArrayType>(VT)) {
1164 if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
1165 return nullptr;
1166 } else {
1167 auto *ST = cast<StructType>(VT);
1168 if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
1169 return nullptr;
1170 for (const auto *EltT : ST->elements()) {
1171 if (EltT != UT->getElementType())
1172 return nullptr;
1173 }
1174 }
1175 return U;
1176}
1177
1178/// Combine stores to match the type of value being stored.
1179///
1180/// The core idea here is that the memory does not have any intrinsic type and
1181/// where we can we should match the type of a store to the type of value being
1182/// stored.
1183///
1184/// However, this routine must never change the width of a store or the number of
1185/// stores as that would introduce a semantic change. This combine is expected to
1186/// be a semantic no-op which just allows stores to more closely model the types
1187/// of their incoming values.
1188///
1189/// Currently, we also refuse to change the precise type used for an atomic or
1190/// volatile store. This is debatable, and might be reasonable to change later.
1191/// However, it is risky in case some backend or other part of LLVM is relying
1192/// on the exact type stored to select appropriate atomic operations.
1193///
1194/// \returns true if the store was successfully combined away. This indicates
1195/// the caller must erase the store instruction. We have to let the caller erase
1196/// the store instruction as otherwise there is no way to signal whether it was
1197/// combined or not: IC.EraseInstFromFunction returns a null pointer.
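// Editor's note: a minimal sketch of the fold performed below (illustrative
// only; value names are hypothetical):
//
//   %f = bitcast i32 %x to float
//   store float %f, ptr %p
//   -->
//   store i32 %x, ptr %p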
1198static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
1199 // FIXME: We could probably with some care handle both volatile and ordered
1200 // atomic stores here but it isn't clear that this is important.
1201 if (!SI.isUnordered())
1202 return false;
1203
1204 // swifterror values can't be bitcasted.
1205 if (SI.getPointerOperand()->isSwiftError())
1206 return false;
1207
1208 Value *V = SI.getValueOperand();
1209
1210 // Fold away bit casts of the stored value by storing the original type.
1211 if (auto *BC = dyn_cast<BitCastInst>(V)) {
1212 assert(!BC->getType()->isX86_AMXTy() &&
1213 "store to x86_amx* should not happen!");
1214 V = BC->getOperand(0);
1215 // Don't transform when the type is x86_amx, it makes the pass that lower
1216 // x86_amx type happy.
1217 if (V->getType()->isX86_AMXTy())
1218 return false;
1219 if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
1220 combineStoreToNewValue(IC, SI, V);
1221 return true;
1222 }
1223 }
1224
1225 if (Value *U = likeBitCastFromVector(IC, V))
1226 if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
1227 combineStoreToNewValue(IC, SI, U);
1228 return true;
1229 }
1230
1231 // FIXME: We should also canonicalize stores of vectors when their elements
1232 // are cast to other types.
1233 return false;
1234}
1235
1236static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
1237 // FIXME: We could probably with some care handle both volatile and atomic
1238 // stores here but it isn't clear that this is important.
1239 if (!SI.isSimple())
1240 return false;
1241
1242 Value *V = SI.getValueOperand();
1243 Type *T = V->getType();
1244
1245 if (!T->isAggregateType())
1246 return false;
1247
1248 if (auto *ST = dyn_cast<StructType>(T)) {
1249 // If the struct has only one element, we unpack.
1250 unsigned Count = ST->getNumElements();
1251 if (Count == 1) {
1252 V = IC.Builder.CreateExtractValue(V, 0);
1253 combineStoreToNewValue(IC, SI, V);
1254 return true;
1255 }
1256
1257 // We don't want to break loads with padding here as we'd lose
1258 // the knowledge that padding exists for the rest of the pipeline.
1259 const DataLayout &DL = IC.getDataLayout();
1260 auto *SL = DL.getStructLayout(ST);
1261
1262 // Don't unpack for structure with scalable vector.
1263 if (SL->getSizeInBits().isScalable())
1264 return false;
1265
1266 if (SL->hasPadding())
1267 return false;
1268
1269 const auto Align = SI.getAlign();
1270
1271 SmallString<16> EltName = V->getName();
1272 EltName += ".elt";
1273 auto *Addr = SI.getPointerOperand();
1274 SmallString<16> AddrName = Addr->getName();
1275 AddrName += ".repack";
1276
1277 auto *IdxType = Type::getInt32Ty(ST->getContext());
1278 auto *Zero = ConstantInt::get(IdxType, 0);
1279 for (unsigned i = 0; i < Count; i++) {
1280 Value *Indices[2] = {
1281 Zero,
1282 ConstantInt::get(IdxType, i),
1283 };
1284 auto *Ptr =
1285 IC.Builder.CreateInBoundsGEP(ST, Addr, ArrayRef(Indices), AddrName);
1286 auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
1287 auto EltAlign = commonAlignment(Align, SL->getElementOffset(i));
1288 llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
1289 NS->setAAMetadata(SI.getAAMetadata());
1290 }
1291
1292 return true;
1293 }
1294
1295 if (auto *AT = dyn_cast<ArrayType>(T)) {
1296 // If the array has only one element, we unpack.
1297 auto NumElements = AT->getNumElements();
1298 if (NumElements == 1) {
1299 V = IC.Builder.CreateExtractValue(V, 0);
1300 combineStoreToNewValue(IC, SI, V);
1301 return true;
1302 }
1303
1304 // Bail out if the array is too large. Ideally we would like to optimize
1305 // arrays of arbitrary size but this has a terrible impact on compile time.
1306 // The threshold here is chosen arbitrarily, maybe needs a little bit of
1307 // tuning.
1308 if (NumElements > IC.MaxArraySizeForCombine)
1309 return false;
1310
1311 const DataLayout &DL = IC.getDataLayout();
1312 TypeSize EltSize = DL.getTypeAllocSize(AT->getElementType());
1313 const auto Align = SI.getAlign();
1314
1315 SmallString<16> EltName = V->getName();
1316 EltName += ".elt";
1317 auto *Addr = SI.getPointerOperand();
1318 SmallString<16> AddrName = Addr->getName();
1319 AddrName += ".repack";
1320
1321 auto *IdxType = Type::getInt64Ty(T->getContext());
1322 auto *Zero = ConstantInt::get(IdxType, 0);
1323
1324 TypeSize Offset = TypeSize::get(0, AT->getElementType()->isScalableTy());
1325 for (uint64_t i = 0; i < NumElements; i++) {
1326 Value *Indices[2] = {
1327 Zero,
1328 ConstantInt::get(IdxType, i),
1329 };
1330 auto *Ptr =
1331 IC.Builder.CreateInBoundsGEP(AT, Addr, ArrayRef(Indices), AddrName);
1332 auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
1333 auto EltAlign = commonAlignment(Align, Offset.getKnownMinValue());
1334 Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
1335 NS->setAAMetadata(SI.getAAMetadata());
1336 Offset += EltSize;
1337 }
1338
1339 return true;
1340 }
1341
1342 return false;
1343}
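// Editor's illustrative sketch of the aggregate-store unpacking above, for a
// padding-free two-element struct (names approximate, not from the original
// source):
//
//   store { i32, i32 } %agg, ptr %p, align 4
//   -->
//   %agg.elt = extractvalue { i32, i32 } %agg, 0
//   %p.repack = getelementptr inbounds { i32, i32 }, ptr %p, i32 0, i32 0
//   store i32 %agg.elt, ptr %p.repack, align 4
//   %agg.elt1 = extractvalue { i32, i32 } %agg, 1
//   %p.repack1 = getelementptr inbounds { i32, i32 }, ptr %p, i32 0, i32 1
//   store i32 %agg.elt1, ptr %p.repack1, align 4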
1344
1345/// equivalentAddressValues - Test if A and B will obviously have the same
1346/// value. This includes recognizing that %t0 and %t1 will have the same
1347/// value in code like this:
1348/// %t0 = getelementptr \@a, 0, 3
1349/// store i32 0, i32* %t0
1350/// %t1 = getelementptr \@a, 0, 3
1351/// %t2 = load i32* %t1
1352///
1353static bool equivalentAddressValues(Value *A, Value *B) {
1354 // Test if the values are trivially equivalent.
1355 if (A == B) return true;
1356
1357 // Test if the values come from identical arithmetic instructions.
1358 // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
1359 // it's only used to compare two uses within the same basic block, which
1360 // means that they'll always either have the same value or one of them
1361 // will have an undefined value.
1362 if (isa<BinaryOperator>(A) ||
1363 isa<CastInst>(A) ||
1364 isa<PHINode>(A) ||
1365 isa<GetElementPtrInst>(A))
1366 if (Instruction *BI = dyn_cast<Instruction>(B))
1367 if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
1368 return true;
1369
1370 // Otherwise they may not be equivalent.
1371 return false;
1372}
1373
1374Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
1375 Value *Val = SI.getOperand(0);
1376 Value *Ptr = SI.getOperand(1);
1377
1378 // Try to canonicalize the stored type.
1379 if (combineStoreToValueType(*this, SI))
1380 return eraseInstFromFunction(SI);
1381
1382 if (!EnableInferAlignmentPass) {
1383   // Attempt to improve the alignment.
1384 const Align KnownAlign = getOrEnforceKnownAlignment(
1385 Ptr, DL.getPrefTypeAlign(Val->getType()), DL, &SI, &AC, &DT);
1386 if (KnownAlign > SI.getAlign())
1387 SI.setAlignment(KnownAlign);
1388 }
1389
1390 // Try to canonicalize the stored type.
1391 if (unpackStoreToAggregate(*this, SI))
1392 return eraseInstFromFunction(SI);
1393
1394 // Replace GEP indices if possible.
1395 if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI))
1396 return replaceOperand(SI, 1, NewGEPI);
1397
1398 // Don't hack volatile/ordered stores.
1399 // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
1400 if (!SI.isUnordered()) return nullptr;
1401
1402 // If the RHS is an alloca with a single use, zapify the store, making the
1403 // alloca dead.
1404 if (Ptr->hasOneUse()) {
1405 if (isa<AllocaInst>(Ptr))
1406 return eraseInstFromFunction(SI);
1407 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1408 if (isa<AllocaInst>(GEP->getOperand(0))) {
1409 if (GEP->getOperand(0)->hasOneUse())
1410 return eraseInstFromFunction(SI);
1411 }
1412 }
1413 }
1414
1415 // If we have a store to a location which is known constant, we can conclude
1416 // that the store must be storing the constant value (else the memory
1417 // wouldn't be constant), and this must be a noop.
1418 if (!isModSet(AA->getModRefInfoMask(Ptr)))
1419   return eraseInstFromFunction(SI);
1420
1421 // Do really simple DSE, to catch cases where there are several consecutive
1422 // stores to the same location, separated by a few arithmetic operations. This
1423 // situation often occurs with bitfield accesses.
1424 BasicBlock::iterator BBI(SI);
1425 for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
1426 --ScanInsts) {
1427 --BBI;
1428 // Don't count debug info directives, lest they affect codegen,
1429 // and we skip pointer-to-pointer bitcasts, which are NOPs.
1430 if (BBI->isDebugOrPseudoInst()) {
1431 ScanInsts++;
1432 continue;
1433 }
1434
1435 if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
1436 // Prev store isn't volatile, and stores to the same location?
1437 if (PrevSI->isUnordered() &&
1438 equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1)) &&
1439 PrevSI->getValueOperand()->getType() ==
1440 SI.getValueOperand()->getType()) {
1441 ++NumDeadStore;
1442 // Manually add back the original store to the worklist now, so it will
1443 // be processed after the operands of the removed store, as this may
1444 // expose additional DSE opportunities.
1445 Worklist.push(&SI);
1446 eraseInstFromFunction(*PrevSI);
1447 return nullptr;
1448 }
1449 break;
1450 }
1451
1452 // If this is a load, we have to stop. However, if the loaded value is from
1453 // the pointer we're loading and is producing the pointer we're storing,
1454 // then *this* store is dead (X = load P; store X -> P).
1455 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
1456 if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
1457 assert(SI.isUnordered() && "can't eliminate ordering operation");
1458 return eraseInstFromFunction(SI);
1459 }
1460
1461 // Otherwise, this is a load from some other location. Stores before it
1462 // may not be dead.
1463 break;
1464 }
1465
1466 // Don't skip over loads, throws or things that can modify memory.
1467 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
1468 break;
1469 }
1470
1471 // store X, null -> turns into 'unreachable' in SimplifyCFG
1472 // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
1473 if (canSimplifyNullStoreOrGEP(SI)) {
1474 if (!isa<PoisonValue>(Val))
1475 return replaceOperand(SI, 0, PoisonValue::get(Val->getType()));
1476 return nullptr; // Do not modify these!
1477 }
1478
1479 // This is a non-terminator unreachable marker. Don't remove it.
1480 if (isa<UndefValue>(Ptr)) {
1481 // Remove guaranteed-to-transfer instructions before the marker.
1482 if (removeInstructionsBeforeUnreachable(SI))
1483   return &SI;
1484
1485 // Remove all instructions after the marker and handle dead blocks this
1486 // implies.
1487 SmallVector<BasicBlock *> Worklist;
1488 handleUnreachableFrom(SI.getNextNode(), Worklist);
1489 handlePotentiallyDeadBlocks(Worklist);
1490 return nullptr;
1491 }
1492
1493 // store undef, Ptr -> noop
1494 // FIXME: This is technically incorrect because it might overwrite a poison
1495 // value. Change to PoisonValue once #52930 is resolved.
1496 if (isa<UndefValue>(Val))
1497 return eraseInstFromFunction(SI);
1498
1499 return nullptr;
1500}
1501
1502/// Try to transform:
1503/// if () { *P = v1; } else { *P = v2 }
1504/// or:
1505/// *P = v1; if () { *P = v2; }
1506/// into a phi node with a store in the successor.
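// Editor's rough sketch of the diamond case handled below (illustrative only;
// block and value names are hypothetical):
//
//   then:                               else:
//     store i32 %v1, ptr %p               store i32 %v2, ptr %p
//     br label %merge                     br label %merge
//   merge:
//     ...
//   -->
//   merge:
//     %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
//     store i32 %storemerge, ptr %p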
1507bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
1508 if (!SI.isUnordered())
1509 return false; // This code has not been audited for volatile/ordered case.
1510
1511 // Check if the successor block has exactly 2 incoming edges.
1512 BasicBlock *StoreBB = SI.getParent();
1513 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
1514 if (!DestBB->hasNPredecessors(2))
1515 return false;
1516
1517 // Capture the other block (the block that doesn't contain our store).
1518 pred_iterator PredIter = pred_begin(DestBB);
1519 if (*PredIter == StoreBB)
1520 ++PredIter;
1521 BasicBlock *OtherBB = *PredIter;
1522
1523 // Bail out if the relevant blocks aren't all distinct. This can happen,
1524 // for example, if SI is in an infinite loop.
1525 if (StoreBB == DestBB || OtherBB == DestBB)
1526 return false;
1527
1528 // Verify that the other block ends in a branch and is not otherwise empty.
1529 BasicBlock::iterator BBI(OtherBB->getTerminator());
1530 BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
1531 if (!OtherBr || BBI == OtherBB->begin())
1532 return false;
1533
1534 auto OtherStoreIsMergeable = [&](StoreInst *OtherStore) -> bool {
1535 if (!OtherStore ||
1536 OtherStore->getPointerOperand() != SI.getPointerOperand())
1537 return false;
1538
1539 auto *SIVTy = SI.getValueOperand()->getType();
1540 auto *OSVTy = OtherStore->getValueOperand()->getType();
1541 return CastInst::isBitOrNoopPointerCastable(OSVTy, SIVTy, DL) &&
1542 SI.hasSameSpecialState(OtherStore);
1543 };
1544
1545 // If the other block ends in an unconditional branch, check for the 'if then
1546 // else' case. There is an instruction before the branch.
1547 StoreInst *OtherStore = nullptr;
1548 if (OtherBr->isUnconditional()) {
1549 --BBI;
1550 // Skip over debugging info and pseudo probes.
1551 while (BBI->isDebugOrPseudoInst()) {
1552 if (BBI==OtherBB->begin())
1553 return false;
1554 --BBI;
1555 }
1556 // If this isn't a store, isn't a store to the same location, or is not the
1557 // right kind of store, bail out.
1558 OtherStore = dyn_cast<StoreInst>(BBI);
1559 if (!OtherStoreIsMergeable(OtherStore))
1560 return false;
1561 } else {
1562 // Otherwise, the other block ended with a conditional branch. If one of the
1563 // destinations is StoreBB, then we have the if/then case.
1564 if (OtherBr->getSuccessor(0) != StoreBB &&
1565 OtherBr->getSuccessor(1) != StoreBB)
1566 return false;
1567
1568 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
1569 // if/then triangle. See if there is a store to the same ptr as SI that
1570 // lives in OtherBB.
1571 for (;; --BBI) {
1572 // Check to see if we find the matching store.
1573 OtherStore = dyn_cast<StoreInst>(BBI);
1574 if (OtherStoreIsMergeable(OtherStore))
1575 break;
1576
1577 // If we find something that may be using or overwriting the stored
1578 // value, or if we run out of instructions, we can't do the transform.
1579 if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
1580 BBI->mayWriteToMemory() || BBI == OtherBB->begin())
1581 return false;
1582 }
1583
1584 // In order to eliminate the store in OtherBr, we have to make sure nothing
1585 // reads or overwrites the stored value in StoreBB.
1586 for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
1587 // FIXME: This should really be AA driven.
1588 if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
1589 return false;
1590 }
1591 }
1592
1593 // Insert a PHI node now if we need it.
1594 Value *MergedVal = OtherStore->getValueOperand();
1595 // The debug locations of the original instructions might differ. Merge them.
1596 DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
1597 OtherStore->getDebugLoc());
1598 if (MergedVal != SI.getValueOperand()) {
1599 PHINode *PN =
1600 PHINode::Create(SI.getValueOperand()->getType(), 2, "storemerge");
1601 PN->addIncoming(SI.getValueOperand(), SI.getParent());
1602 Builder.SetInsertPoint(OtherStore);
1603 PN->addIncoming(Builder.CreateBitOrPointerCast(MergedVal, PN->getType()),
1604 OtherBB);
1605 MergedVal = InsertNewInstBefore(PN, DestBB->begin());
1606 PN->setDebugLoc(MergedLoc);
1607 }
1608
1609 // Advance to a place where it is safe to insert the new store and insert it.
1610 BBI = DestBB->getFirstInsertionPt();
1611 StoreInst *NewSI =
1612 new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
1613 SI.getOrdering(), SI.getSyncScopeID());
1614 InsertNewInstBefore(NewSI, BBI);
1615 NewSI->setDebugLoc(MergedLoc);
1616 NewSI->mergeDIAssignID({&SI, OtherStore});
1617
1618 // If the two stores had AA tags, merge them.
1619 AAMDNodes AATags = SI.getAAMetadata();
1620 if (AATags)
1621 NewSI->setAAMetadata(AATags.merge(OtherStore->getAAMetadata()));
1622
1623 // Nuke the old stores.
1624 eraseInstFromFunction(SI);
1625 eraseInstFromFunction(*OtherStore);
1626 return true;
1627}