LoadStoreOpt.cpp
1//===- LoadStoreOpt.cpp ----------- Generic memory optimizations -*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the LoadStoreOpt optimization pass.
10//===----------------------------------------------------------------------===//
11
12#include "llvm/CodeGen/GlobalISel/LoadStoreOpt.h"
13#include "llvm/ADT/STLExtras.h"
14#include "llvm/ADT/SmallPtrSet.h"
15#include "llvm/ADT/Statistic.h"
37#include "llvm/Support/Debug.h"
40#include <algorithm>
41
42#define DEBUG_TYPE "loadstore-opt"
43
44using namespace llvm;
45using namespace ore;
46using namespace MIPatternMatch;
47
48STATISTIC(NumStoresMerged, "Number of stores merged");
49
50const unsigned MaxStoreSizeToForm = 128;
51
52char LoadStoreOpt::ID = 0;
53INITIALIZE_PASS_BEGIN(LoadStoreOpt, DEBUG_TYPE, "Generic memory optimizations",
54 false, false)
55INITIALIZE_PASS_END(LoadStoreOpt, DEBUG_TYPE, "Generic memory optimizations",
56 false, false)
57
58LoadStoreOpt::LoadStoreOpt(std::function<bool(const MachineFunction &)> F)
59 : MachineFunctionPass(ID), DoNotRunPass(F) {}
60
61LoadStoreOpt::LoadStoreOpt()
62 : LoadStoreOpt([](const MachineFunction &) { return false; }) {}
63
64void LoadStoreOpt::init(MachineFunction &MF) {
65 this->MF = &MF;
66 MRI = &MF.getRegInfo();
67 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
68 TLI = MF.getSubtarget().getTargetLowering();
69 LI = MF.getSubtarget().getLegalizerInfo();
70 Builder.setMF(MF);
71 IsPreLegalizer = !MF.getProperties().hasProperty(
72 MachineFunctionProperties::Property::Legalized);
73 InstsToErase.clear();
74}
75
76void LoadStoreOpt::getAnalysisUsage(AnalysisUsage &AU) const {
77 AU.addRequired<AAResultsWrapperPass>();
78 AU.setPreservesAll();
79 getSelectionDAGFallbackAnalysisUsage(AU);
80 MachineFunctionPass::getAnalysisUsage(AU);
81}
82
83BaseIndexOffset GISelAddressing::getPointerInfo(Register Ptr,
84 MachineRegisterInfo &MRI) {
85 BaseIndexOffset Info;
86 Register PtrAddRHS;
87 Register BaseReg;
88 if (!mi_match(Ptr, MRI, m_GPtrAdd(m_Reg(BaseReg), m_Reg(PtrAddRHS)))) {
89 Info.setBase(Ptr);
90 Info.setOffset(0);
91 return Info;
92 }
93 Info.setBase(BaseReg);
94 auto RHSCst = getIConstantVRegValWithLookThrough(PtrAddRHS, MRI);
95 if (RHSCst)
96 Info.setOffset(RHSCst->Value.getSExtValue());
97
98 // Just recognize a simple case for now. In future we'll need to match
99 // indexing patterns for base + index + constant.
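 // For example (illustrative), given %ptr:_(p0) = G_PTR_ADD %base, %cst with
 // %cst:_(s64) = G_CONSTANT i64 16, this returns
 // {Base = %base, Index = %cst, Offset = 16}.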
100 Info.setIndex(PtrAddRHS);
101 return Info;
102}
103
104bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
105 const MachineInstr &MI2,
106 bool &IsAlias,
107 MachineRegisterInfo &MRI) {
108 auto *LdSt1 = dyn_cast<GLoadStore>(&MI1);
109 auto *LdSt2 = dyn_cast<GLoadStore>(&MI2);
110 if (!LdSt1 || !LdSt2)
111 return false;
112
113 BaseIndexOffset BasePtr0 = getPointerInfo(LdSt1->getPointerReg(), MRI);
114 BaseIndexOffset BasePtr1 = getPointerInfo(LdSt2->getPointerReg(), MRI);
115
116 if (!BasePtr0.getBase().isValid() || !BasePtr1.getBase().isValid())
117 return false;
118
119 LocationSize Size1 = LdSt1->getMemSize();
120 LocationSize Size2 = LdSt2->getMemSize();
121
122 int64_t PtrDiff;
123 if (BasePtr0.getBase() == BasePtr1.getBase() && BasePtr0.hasValidOffset() &&
124 BasePtr1.hasValidOffset()) {
125 PtrDiff = BasePtr1.getOffset() - BasePtr0.getOffset();
126 // If the size of memory access is unknown, do not use it to do analysis.
127 // One example of unknown size memory access is to load/store scalable
128 // vector objects on the stack.
129 // BasePtr1 is PtrDiff away from BasePtr0. They alias if none of the
130 // following situations arise:
131 if (PtrDiff >= 0 && Size1.hasValue() && !Size1.isScalable()) {
132 // [----BasePtr0----]
133 // [---BasePtr1--]
134 // ========PtrDiff========>
135 IsAlias = !((int64_t)Size1.getValue() <= PtrDiff);
136 return true;
137 }
138 if (PtrDiff < 0 && Size2.hasValue() && !Size2.isScalable()) {
139 // [----BasePtr0----]
140 // [---BasePtr1--]
141 // =====(-PtrDiff)====>
142 IsAlias = !((PtrDiff + (int64_t)Size2.getValue()) <= 0);
143 return true;
144 }
145 return false;
146 }
147
148 // If both BasePtr0 and BasePtr1 are FrameIndexes, we will not be
149 // able to calculate their relative offset if at least one arises
150 // from an alloca. However, these allocas cannot overlap and we
151 // can infer there is no alias.
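 // For example, two G_FRAME_INDEX bases referring to two different allocas
 // can never overlap, even though their final frame offsets are not known at
 // this point.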
152 auto *Base0Def = getDefIgnoringCopies(BasePtr0.getBase(), MRI);
153 auto *Base1Def = getDefIgnoringCopies(BasePtr1.getBase(), MRI);
154 if (!Base0Def || !Base1Def)
155 return false; // Couldn't tell anything.
156
157
158 if (Base0Def->getOpcode() != Base1Def->getOpcode())
159 return false;
160
161 if (Base0Def->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
162 MachineFrameInfo &MFI = Base0Def->getMF()->getFrameInfo();
163 // If the frame indices differ and at least one of them is not a fixed
164 // stack object, the underlying objects cannot overlap: there is no alias.
165 if (Base0Def != Base1Def &&
166 (!MFI.isFixedObjectIndex(Base0Def->getOperand(1).getIndex()) ||
167 !MFI.isFixedObjectIndex(Base1Def->getOperand(1).getIndex()))) {
168 IsAlias = false;
169 return true;
170 }
171 }
172
173 // This implementation is a lot more primitive than the SDAG one for now.
174 // FIXME: what about constant pools?
175 if (Base0Def->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
176 auto GV0 = Base0Def->getOperand(1).getGlobal();
177 auto GV1 = Base1Def->getOperand(1).getGlobal();
178 if (GV0 != GV1) {
179 IsAlias = false;
180 return true;
181 }
182 }
183
184 // Can't tell anything about aliasing.
185 return false;
186}
187
188bool GISelAddressing::instMayAlias(const MachineInstr &MI,
189 const MachineInstr &Other,
190 MachineRegisterInfo &MRI,
191 AliasAnalysis *AA) {
192 struct MemUseCharacteristics {
193 bool IsVolatile;
194 bool IsAtomic;
195 Register BasePtr;
196 int64_t Offset;
197 LocationSize NumBytes;
198 MachineMemOperand *MMO;
199 };
200
201 auto getCharacteristics =
202 [&](const MachineInstr *MI) -> MemUseCharacteristics {
203 if (const auto *LS = dyn_cast<GLoadStore>(MI)) {
204 Register BaseReg;
205 int64_t Offset = 0;
206 // No pre/post-inc addressing modes are considered here, unlike in SDAG.
207 if (!mi_match(LS->getPointerReg(), MRI,
208 m_GPtrAdd(m_Reg(BaseReg), m_ICst(Offset)))) {
209 BaseReg = LS->getPointerReg();
210 Offset = 0;
211 }
212
213 LocationSize Size = LS->getMMO().getSize();
214 return {LS->isVolatile(), LS->isAtomic(), BaseReg,
215 Offset /*base offset*/, Size, &LS->getMMO()};
216 }
217 // FIXME: support recognizing lifetime instructions.
218 // Default.
219 return {false /*isvolatile*/,
220 /*isAtomic*/ false,
221 Register(),
222 (int64_t)0 /*offset*/,
223 LocationSize::beforeOrAfterPointer() /*size*/,
224 (MachineMemOperand *)nullptr};
225 };
226 MemUseCharacteristics MUC0 = getCharacteristics(&MI),
227 MUC1 = getCharacteristics(&Other);
228
229 // If they are to the same address, then they must be aliases.
230 if (MUC0.BasePtr.isValid() && MUC0.BasePtr == MUC1.BasePtr &&
231 MUC0.Offset == MUC1.Offset)
232 return true;
233
234 // If they are both volatile then they cannot be reordered.
235 if (MUC0.IsVolatile && MUC1.IsVolatile)
236 return true;
237
238 // Be conservative about atomics for the moment
239 // TODO: This is way overconservative for unordered atomics (see D66309)
240 if (MUC0.IsAtomic && MUC1.IsAtomic)
241 return true;
242
243 // If one operation reads from invariant memory, and the other may store, they
244 // cannot alias.
245 if (MUC0.MMO && MUC1.MMO) {
246 if ((MUC0.MMO->isInvariant() && MUC1.MMO->isStore()) ||
247 (MUC1.MMO->isInvariant() && MUC0.MMO->isStore()))
248 return false;
249 }
250
251 // If NumBytes is scalable and offset is not 0, conservatively return may
252 // alias
253 if ((MUC0.NumBytes.isScalable() && MUC0.Offset != 0) ||
254 (MUC1.NumBytes.isScalable() && MUC1.Offset != 0))
255 return true;
256
257 const bool BothNotScalable =
258 !MUC0.NumBytes.isScalable() && !MUC1.NumBytes.isScalable();
259
260 // Try to prove that there is aliasing, or that there is no aliasing. Either
261 // way, we can return now. If nothing can be proved, proceed with more tests.
262 bool IsAlias;
263 if (BothNotScalable &&
264 GISelAddressing::aliasIsKnownForLoadStore(MI, Other, IsAlias, MRI))
265 return IsAlias;
266
267 // The following all rely on MMO0 and MMO1 being valid.
268 if (!MUC0.MMO || !MUC1.MMO)
269 return true;
270
271 // FIXME: port the alignment based alias analysis from SDAG's isAlias().
272 int64_t SrcValOffset0 = MUC0.MMO->getOffset();
273 int64_t SrcValOffset1 = MUC1.MMO->getOffset();
274 LocationSize Size0 = MUC0.NumBytes;
275 LocationSize Size1 = MUC1.NumBytes;
276 if (AA && MUC0.MMO->getValue() && MUC1.MMO->getValue() && Size0.hasValue() &&
277 Size1.hasValue()) {
278 // Use alias analysis information.
279 int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
280 int64_t Overlap0 =
281 Size0.getValue().getKnownMinValue() + SrcValOffset0 - MinOffset;
282 int64_t Overlap1 =
283 Size1.getValue().getKnownMinValue() + SrcValOffset1 - MinOffset;
284 LocationSize Loc0 =
285 Size0.isScalable() ? Size0 : LocationSize::precise(Overlap0);
286 LocationSize Loc1 =
287 Size1.isScalable() ? Size1 : LocationSize::precise(Overlap1);
288
289 if (AA->isNoAlias(
290 MemoryLocation(MUC0.MMO->getValue(), Loc0, MUC0.MMO->getAAInfo()),
291 MemoryLocation(MUC1.MMO->getValue(), Loc1, MUC1.MMO->getAAInfo())))
292 return false;
293 }
294
295 // Otherwise we have to assume they alias.
296 return true;
297}
298
299/// Returns true if the instruction creates an unavoidable hazard that
300/// forces a boundary between store merge candidates.
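/// For example, calls and other instructions with unmodeled side effects, as
/// well as volatile or otherwise ordered memory references, act as such
/// boundaries.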
301static bool isInstHardMergeHazard(MachineInstr &MI) {
302 return MI.hasUnmodeledSideEffects() || MI.hasOrderedMemoryRef();
303}
304
305bool LoadStoreOpt::mergeStores(SmallVectorImpl<GStore *> &StoresToMerge) {
306 // Try to merge all the stores in the vector, splitting into separate segments
307 // as necessary.
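 // Illustrative example: given 7 consecutive s8 stores on a target where s16
 // and s32 stores are legal, the loop below first emits one s32 store for 4
 // of them, then one s16 store for 2 more, and leaves the final s8 store
 // untouched.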
308 assert(StoresToMerge.size() > 1 && "Expected multiple stores to merge");
309 LLT OrigTy = MRI->getType(StoresToMerge[0]->getValueReg());
310 LLT PtrTy = MRI->getType(StoresToMerge[0]->getPointerReg());
311 unsigned AS = PtrTy.getAddressSpace();
312 // Ensure the legal store info is computed for this address space.
313 initializeStoreMergeTargetInfo(AS);
314 const auto &LegalSizes = LegalStoreSizes[AS];
315
316#ifndef NDEBUG
317 for (auto *StoreMI : StoresToMerge)
318 assert(MRI->getType(StoreMI->getValueReg()) == OrigTy);
319#endif
320
321 const auto &DL = MF->getFunction().getDataLayout();
322 bool AnyMerged = false;
323 do {
324 unsigned NumPow2 = llvm::bit_floor(StoresToMerge.size());
325 unsigned MaxSizeBits = NumPow2 * OrigTy.getSizeInBits().getFixedValue();
326 // Compute the biggest store we can generate to handle the number of stores.
327 unsigned MergeSizeBits;
328 for (MergeSizeBits = MaxSizeBits; MergeSizeBits > 1; MergeSizeBits /= 2) {
329 LLT StoreTy = LLT::scalar(MergeSizeBits);
330 EVT StoreEVT =
331 getApproximateEVTForLLT(StoreTy, DL, MF->getFunction().getContext());
332 if (LegalSizes.size() > MergeSizeBits && LegalSizes[MergeSizeBits] &&
333 TLI->canMergeStoresTo(AS, StoreEVT, *MF) &&
334 (TLI->isTypeLegal(StoreEVT)))
335 break; // We can generate a MergeSize bits store.
336 }
337 if (MergeSizeBits <= OrigTy.getSizeInBits())
338 return AnyMerged; // No greater merge.
339
340 unsigned NumStoresToMerge = MergeSizeBits / OrigTy.getSizeInBits();
341 // Perform the actual merging.
342 SmallVector<GStore *, 8> SingleMergeStores(
343 StoresToMerge.begin(), StoresToMerge.begin() + NumStoresToMerge);
344 AnyMerged |= doSingleStoreMerge(SingleMergeStores);
345 StoresToMerge.erase(StoresToMerge.begin(),
346 StoresToMerge.begin() + NumStoresToMerge);
347 } while (StoresToMerge.size() > 1);
348 return AnyMerged;
349}
350
351bool LoadStoreOpt::isLegalOrBeforeLegalizer(const LegalityQuery &Query,
352 MachineFunction &MF) const {
353 auto Action = LI->getAction(Query).Action;
354 // If the instruction is unsupported, it can't be legalized at all.
355 if (Action == LegalizeActions::Unsupported)
356 return false;
357 return IsPreLegalizer || Action == LegalizeAction::Legal;
358}
359
360bool LoadStoreOpt::doSingleStoreMerge(SmallVectorImpl<GStore *> &Stores) {
361 assert(Stores.size() > 1);
362 // We know that all the stores are consecutive and there are no aliasing
363 // operations in the range. However, the values that are being stored may be
364 // generated anywhere before each store. To ensure we have the values
365 // available, we materialize the wide value and new store at the place of the
366 // final store in the merge sequence.
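 // Illustrative sketch (generic MIR, invented register names): two adjacent
 //   G_STORE %c0:_(s8), %p:_(p0)
 //   G_STORE %c1:_(s8), %p_plus_1:_(p0)
 // of constant values become
 //   %wide:_(s16) = G_CONSTANT ...
 //   G_STORE %wide:_(s16), %p:_(p0)
 // built at the position of the last store in the sequence.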
367 GStore *FirstStore = Stores[0];
368 const unsigned NumStores = Stores.size();
369 LLT SmallTy = MRI->getType(FirstStore->getValueReg());
370 LLT WideValueTy =
371 LLT::scalar(NumStores * SmallTy.getSizeInBits().getFixedValue());
372
373 // For each store, compute pairwise merged debug locs.
374 DebugLoc MergedLoc = Stores.front()->getDebugLoc();
375 for (auto *Store : drop_begin(Stores))
376 MergedLoc = DILocation::getMergedLocation(MergedLoc, Store->getDebugLoc());
377
378 Builder.setInstr(*Stores.back());
379 Builder.setDebugLoc(MergedLoc);
380
381 // If all of the store values are constants, then create a wide constant
382 // directly. Otherwise, we need to generate some instructions to merge the
383 // existing values together into a wider type.
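 // For example (assuming a little-endian layout), s8 constants 0x11, 0x22,
 // 0x33 and 0x44 stored at increasing addresses are combined below into the
 // single s32 constant 0x44332211.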
384 SmallVector<APInt, 8> ConstantVals;
385 for (auto *Store : Stores) {
386 auto MaybeCst =
387 getIConstantVRegValWithLookThrough(Store->getValueReg(), *MRI);
388 if (!MaybeCst) {
389 ConstantVals.clear();
390 break;
391 }
392 ConstantVals.emplace_back(MaybeCst->Value);
393 }
394
395 Register WideReg;
396 auto *WideMMO =
397 MF->getMachineMemOperand(&FirstStore->getMMO(), 0, WideValueTy);
398 if (ConstantVals.empty()) {
399 // Mimic the SDAG behaviour here and don't try to do anything for unknown
400 // values. In future, we should also support the cases of loads and
401 // extracted vector elements.
402 return false;
403 }
404
405 assert(ConstantVals.size() == NumStores);
406 // Check if our wide constant is legal.
407 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {WideValueTy}}, *MF))
408 return false;
409 APInt WideConst(WideValueTy.getSizeInBits(), 0);
410 for (unsigned Idx = 0; Idx < ConstantVals.size(); ++Idx) {
411 // Insert the smaller constant into the corresponding position in the
412 // wider one.
413 WideConst.insertBits(ConstantVals[Idx], Idx * SmallTy.getSizeInBits());
414 }
415 WideReg = Builder.buildConstant(WideValueTy, WideConst).getReg(0);
416 auto NewStore =
417 Builder.buildStore(WideReg, FirstStore->getPointerReg(), *WideMMO);
418 (void) NewStore;
419 LLVM_DEBUG(dbgs() << "Merged " << Stores.size()
420 << " stores into merged store: " << *NewStore);
421 LLVM_DEBUG(for (auto *MI : Stores) dbgs() << " " << *MI;);
422 NumStoresMerged += Stores.size();
423
423
424 MachineOptimizationRemarkEmitter MORE(*MF, nullptr);
425 MORE.emit([&]() {
426 MachineOptimizationRemark R(DEBUG_TYPE, "MergedStore",
427 FirstStore->getDebugLoc(),
428 FirstStore->getParent());
429 R << "Merged " << NV("NumMerged", Stores.size()) << " stores of "
430 << NV("OrigWidth", SmallTy.getSizeInBytes())
431 << " bytes into a single store of "
432 << NV("NewWidth", WideValueTy.getSizeInBytes()) << " bytes";
433 return R;
434 });
435
436 for (auto *MI : Stores)
437 InstsToErase.insert(MI);
438 return true;
439}
440
441bool LoadStoreOpt::processMergeCandidate(StoreMergeCandidate &C) {
442 if (C.Stores.size() < 2) {
443 C.reset();
444 return false;
445 }
446
447 LLVM_DEBUG(dbgs() << "Checking store merge candidate with " << C.Stores.size()
448 << " stores, starting with " << *C.Stores[0]);
449 // We know that the stores in the candidate are adjacent.
450 // Now we need to check if any potential aliasing instructions recorded
451 // during the search alias with load/stores added to the candidate after.
452 // For example, if we have the candidate:
453 // C.Stores = [ST1, ST2, ST3, ST4]
454 // and after seeing ST2 we saw a load LD1, which did not alias with ST1 or
455 // ST2, then we would have recorded it into the PotentialAliases structure
456 // with the associated index value of "1". Then we see ST3 and ST4 and add
457 // them to the candidate group. We know that LD1 does not alias with ST1 or
458 // ST2, since we already did that check. However we don't yet know if it
459 // may alias ST3 and ST4, so we perform those checks now.
460 SmallVector<GStore *> StoresToMerge;
461
462 auto DoesStoreAliasWithPotential = [&](unsigned Idx, GStore &CheckStore) {
463 for (auto AliasInfo : reverse(C.PotentialAliases)) {
464 MachineInstr *PotentialAliasOp = AliasInfo.first;
465 unsigned PreCheckedIdx = AliasInfo.second;
466 if (static_cast<unsigned>(Idx) < PreCheckedIdx) {
467 // Once our store index is lower than the index associated with the
468 // potential alias, we know that we've already checked for this alias
469 // and all of the earlier potential aliases too.
470 return false;
471 }
472 // Need to check this alias.
473 if (GISelAddressing::instMayAlias(CheckStore, *PotentialAliasOp, *MRI,
474 AA)) {
475 LLVM_DEBUG(dbgs() << "Potential alias " << *PotentialAliasOp
476 << " detected\n");
477 return true;
478 }
479 }
480 return false;
481 };
482 // Start from the last store in the group, and check if it aliases with any
483 // of the potential aliasing operations in the list.
484 for (int StoreIdx = C.Stores.size() - 1; StoreIdx >= 0; --StoreIdx) {
485 auto *CheckStore = C.Stores[StoreIdx];
486 if (DoesStoreAliasWithPotential(StoreIdx, *CheckStore))
487 continue;
488 StoresToMerge.emplace_back(CheckStore);
489 }
490
491 LLVM_DEBUG(dbgs() << StoresToMerge.size()
492 << " stores remaining after alias checks. Merging...\n");
493
494 // Now we've checked for aliasing hazards, merge any stores left.
495 C.reset();
496 if (StoresToMerge.size() < 2)
497 return false;
498 return mergeStores(StoresToMerge);
499}
500
501bool LoadStoreOpt::operationAliasesWithCandidate(MachineInstr &MI,
502 StoreMergeCandidate &C) {
503 if (C.Stores.empty())
504 return false;
505 return llvm::any_of(C.Stores, [&](MachineInstr *OtherMI) {
506 return instMayAlias(MI, *OtherMI, *MRI, AA);
507 });
508}
509
510void LoadStoreOpt::StoreMergeCandidate::addPotentialAlias(MachineInstr &MI) {
511 PotentialAliases.emplace_back(std::make_pair(&MI, Stores.size() - 1));
512}
513
514bool LoadStoreOpt::addStoreToCandidate(GStore &StoreMI,
515 StoreMergeCandidate &C) {
516 // Check if the given store writes to an adjacent address, and other
517 // requirements.
518 LLT ValueTy = MRI->getType(StoreMI.getValueReg());
519 LLT PtrTy = MRI->getType(StoreMI.getPointerReg());
520
521 // Only handle scalars.
522 if (!ValueTy.isScalar())
523 return false;
524
525 // Don't allow truncating stores for now.
526 if (StoreMI.getMemSizeInBits() != ValueTy.getSizeInBits())
527 return false;
528
529 // Avoid adding volatile or ordered stores to the candidate. We already have a
530 // check for this in instMayAlias(), but that only gets called later, when
531 // checking potential aliasing hazards.
532 if (!StoreMI.isSimple())
533 return false;
534
535 Register StoreAddr = StoreMI.getPointerReg();
536 auto BIO = getPointerInfo(StoreAddr, *MRI);
537 Register StoreBase = BIO.getBase();
538 if (C.Stores.empty()) {
539 C.BasePtr = StoreBase;
540 if (!BIO.hasValidOffset()) {
541 C.CurrentLowestOffset = 0;
542 } else {
543 C.CurrentLowestOffset = BIO.getOffset();
544 }
545 // This is the first store of the candidate.
546 // If the offset can't possibly allow for a lower addressed store with the
547 // same base, don't bother adding it.
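 // For example, a 4-byte store to (base + 2) is skipped, since an adjacent
 // lower store would have to start at (base - 2).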
548 if (BIO.hasValidOffset() &&
549 BIO.getOffset() < static_cast<int64_t>(ValueTy.getSizeInBytes()))
550 return false;
551 C.Stores.emplace_back(&StoreMI);
552 LLVM_DEBUG(dbgs() << "Starting a new merge candidate group with: "
553 << StoreMI);
554 return true;
555 }
556
557 // Check the store is the same size as the existing ones in the candidate.
558 if (MRI->getType(C.Stores[0]->getValueReg()).getSizeInBits() !=
559 ValueTy.getSizeInBits())
560 return false;
561
562 if (MRI->getType(C.Stores[0]->getPointerReg()).getAddressSpace() !=
563 PtrTy.getAddressSpace())
564 return false;
565
566 // There are other stores in the candidate. Check that the store address
567 // writes to the next lowest adjacent address.
568 if (C.BasePtr != StoreBase)
569 return false;
570 // If we don't have a valid constant offset, we can't guarantee that this
571 // store is adjacent to the previous one.
572 if (!BIO.hasValidOffset())
573 return false;
574 if ((C.CurrentLowestOffset -
575 static_cast<int64_t>(ValueTy.getSizeInBytes())) != BIO.getOffset())
576 return false;
577
578 // This writes to an adjacent address. Allow it.
579 C.Stores.emplace_back(&StoreMI);
580 C.CurrentLowestOffset = C.CurrentLowestOffset - ValueTy.getSizeInBytes();
581 LLVM_DEBUG(dbgs() << "Candidate added store: " << StoreMI);
582 return true;
583}
584
585bool LoadStoreOpt::mergeBlockStores(MachineBasicBlock &MBB) {
586 bool Changed = false;
587 // Walk through the block bottom-up, looking for merging candidates.
588 StoreMergeCandidate Candidate;
589 for (MachineInstr &MI : llvm::reverse(MBB)) {
590 if (InstsToErase.contains(&MI))
591 continue;
592
593 if (auto *StoreMI = dyn_cast<GStore>(&MI)) {
594 // We have a G_STORE. Add it to the candidate if it writes to an adjacent
595 // address.
596 if (!addStoreToCandidate(*StoreMI, Candidate)) {
597 // Store wasn't eligible to be added. May need to record it as a
598 // potential alias.
599 if (operationAliasesWithCandidate(*StoreMI, Candidate)) {
600 Changed |= processMergeCandidate(Candidate);
601 continue;
602 }
603 Candidate.addPotentialAlias(*StoreMI);
604 }
605 continue;
606 }
607
608 // If we don't have any stores yet, this instruction can't pose a problem.
609 if (Candidate.Stores.empty())
610 continue;
611
612 // We're dealing with some other kind of instruction.
613 if (isInstHardMergeHazard(MI)) {
614 Changed |= processMergeCandidate(Candidate);
615 Candidate.Stores.clear();
616 continue;
617 }
618
619 if (!MI.mayLoadOrStore())
620 continue;
621
622 if (operationAliasesWithCandidate(MI, Candidate)) {
623 // We have a potential alias, so process the current candidate if we can
624 // and then continue looking for a new candidate.
625 Changed |= processMergeCandidate(Candidate);
626 continue;
627 }
628
629 // Record this instruction as a potential alias for future stores that are
630 // added to the candidate.
631 Candidate.addPotentialAlias(MI);
632 }
633
634 // Process any candidate left after finishing searching the entire block.
635 Changed |= processMergeCandidate(Candidate);
636
637 // Erase instructions now that we're no longer iterating over the block.
638 for (auto *MI : InstsToErase)
639 MI->eraseFromParent();
640 InstsToErase.clear();
641 return Changed;
642}
643
644/// Check if the store \p Store is a truncstore that can be merged. That is,
645/// it's a store of a shifted value of \p SrcVal. If \p SrcVal is an empty
646/// Register then it does not need to match and SrcVal is set to the source
647/// value found.
648/// On match, returns the start byte offset of the \p SrcVal that is being
649/// stored.
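/// For example, for an s8 store of G_TRUNC(G_LSHR %src, 16) this returns 2,
/// i.e. the byte at offset 2 of \p SrcVal.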
650static std::optional<int64_t>
651getTruncStoreByteOffset(GStore &Store, Register &SrcVal,
652 MachineRegisterInfo &MRI) {
653 Register TruncVal;
654 if (!mi_match(Store.getValueReg(), MRI, m_GTrunc(m_Reg(TruncVal))))
655 return std::nullopt;
656
657 // The shift amount must be a constant multiple of the narrow type.
658 // It is translated to the offset address in the wide source value "y".
659 //
660 // x = G_LSHR y, ShiftAmtC
661 // s8 z = G_TRUNC x
662 // store z, ...
663 Register FoundSrcVal;
664 int64_t ShiftAmt;
665 if (!mi_match(TruncVal, MRI,
666 m_any_of(m_GLShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt)),
667 m_GAShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt))))) {
668 if (!SrcVal.isValid() || TruncVal == SrcVal) {
669 if (!SrcVal.isValid())
670 SrcVal = TruncVal;
671 return 0; // If it's the lowest index store.
672 }
673 return std::nullopt;
674 }
675
676 unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits();
677 if (ShiftAmt % NarrowBits != 0)
678 return std::nullopt;
679 const unsigned Offset = ShiftAmt / NarrowBits;
680
681 if (SrcVal.isValid() && FoundSrcVal != SrcVal)
682 return std::nullopt;
683
684 if (!SrcVal.isValid())
685 SrcVal = FoundSrcVal;
686 else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal))
687 return std::nullopt;
688 return Offset;
689}
690
691/// Match a pattern where a wide type scalar value is stored by several narrow
692/// stores. Fold it into a single store, or a BSWAP and a store, if the target
693/// supports it.
694///
695/// Assuming little endian target:
696/// i8 *p = ...
697/// i32 val = ...
698/// p[0] = (val >> 0) & 0xFF;
699/// p[1] = (val >> 8) & 0xFF;
700/// p[2] = (val >> 16) & 0xFF;
701/// p[3] = (val >> 24) & 0xFF;
702/// =>
703/// *((i32)p) = val;
704///
705/// i8 *p = ...
706/// i32 val = ...
707/// p[0] = (val >> 24) & 0xFF;
708/// p[1] = (val >> 16) & 0xFF;
709/// p[2] = (val >> 8) & 0xFF;
710/// p[3] = (val >> 0) & 0xFF;
711/// =>
712/// *((i32)p) = BSWAP(val);
713bool LoadStoreOpt::mergeTruncStore(GStore &StoreMI,
714 SmallPtrSetImpl<GStore *> &DeletedStores) {
715 LLT MemTy = StoreMI.getMMO().getMemoryType();
716
717 // We only handle merging simple stores of 1-4 bytes.
718 if (!MemTy.isScalar())
719 return false;
720 switch (MemTy.getSizeInBits()) {
721 case 8:
722 case 16:
723 case 32:
724 break;
725 default:
726 return false;
727 }
728 if (!StoreMI.isSimple())
729 return false;
730
731 // We do a simple search for mergeable stores prior to this one.
732 // Any potential alias hazard along the way terminates the search.
733 SmallVector<GStore *> FoundStores;
734
735 // We're looking for:
736 // 1) a (store(trunc(...)))
737 // 2) of an LSHR/ASHR of a single wide value, by the appropriate shift to get
738 // the partial value stored.
739 // 3) where the offsets form either a little or big-endian sequence.
740
741 auto &LastStore = StoreMI;
742
743 // The single base pointer that all stores must use.
744 Register BaseReg;
745 int64_t LastOffset;
746 if (!mi_match(LastStore.getPointerReg(), *MRI,
747 m_GPtrAdd(m_Reg(BaseReg), m_ICst(LastOffset)))) {
748 BaseReg = LastStore.getPointerReg();
749 LastOffset = 0;
750 }
751
752 GStore *LowestIdxStore = &LastStore;
753 int64_t LowestIdxOffset = LastOffset;
754
755 Register WideSrcVal;
756 auto LowestShiftAmt = getTruncStoreByteOffset(LastStore, WideSrcVal, *MRI);
757 if (!LowestShiftAmt)
758 return false; // Didn't match a trunc.
759 assert(WideSrcVal.isValid());
760
761 LLT WideStoreTy = MRI->getType(WideSrcVal);
762 // The wide type might not be a multiple of the memory type, e.g. s48 and s32.
763 if (WideStoreTy.getSizeInBits() % MemTy.getSizeInBits() != 0)
764 return false;
765 const unsigned NumStoresRequired =
766 WideStoreTy.getSizeInBits() / MemTy.getSizeInBits();
767
768 SmallVector<int64_t, 8> OffsetMap(NumStoresRequired, INT64_MAX);
769 OffsetMap[*LowestShiftAmt] = LastOffset;
770 FoundStores.emplace_back(&LastStore);
771
772 const int MaxInstsToCheck = 10;
773 int NumInstsChecked = 0;
774 for (auto II = ++LastStore.getReverseIterator();
775 II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck;
776 ++II) {
777 NumInstsChecked++;
778 GStore *NewStore;
779 if ((NewStore = dyn_cast<GStore>(&*II))) {
780 if (NewStore->getMMO().getMemoryType() != MemTy || !NewStore->isSimple())
781 break;
782 } else if (II->isLoadFoldBarrier() || II->mayLoad()) {
783 break;
784 } else {
785 continue; // This is a safe instruction we can look past.
786 }
787
788 Register NewBaseReg;
789 int64_t MemOffset;
790 // Check we're storing to the same base + some offset.
791 if (!mi_match(NewStore->getPointerReg(), *MRI,
792 m_GPtrAdd(m_Reg(NewBaseReg), m_ICst(MemOffset)))) {
793 NewBaseReg = NewStore->getPointerReg();
794 MemOffset = 0;
795 }
796 if (BaseReg != NewBaseReg)
797 break;
798
799 auto ShiftByteOffset = getTruncStoreByteOffset(*NewStore, WideSrcVal, *MRI);
800 if (!ShiftByteOffset)
801 break;
802 if (MemOffset < LowestIdxOffset) {
803 LowestIdxOffset = MemOffset;
804 LowestIdxStore = NewStore;
805 }
806
807 // Map the offset in the store and the offset in the combined value, and
808 // early return if it has been set before.
809 if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired ||
810 OffsetMap[*ShiftByteOffset] != INT64_MAX)
811 break;
812 OffsetMap[*ShiftByteOffset] = MemOffset;
813
814 FoundStores.emplace_back(NewStore);
815 // Reset counter since we've found a matching inst.
816 NumInstsChecked = 0;
817 if (FoundStores.size() == NumStoresRequired)
818 break;
819 }
820
821 if (FoundStores.size() != NumStoresRequired) {
822 if (FoundStores.size() == 1)
823 return false;
824 // We didn't find enough stores to merge into the size of the original
825 // source value, but we may be able to generate a smaller store if we
826 // truncate the source value.
827 WideStoreTy = LLT::scalar(FoundStores.size() * MemTy.getScalarSizeInBits());
828 }
829
830 unsigned NumStoresFound = FoundStores.size();
831
832 const auto &DL = LastStore.getMF()->getDataLayout();
833 auto &C = LastStore.getMF()->getFunction().getContext();
834 // Check that a store of the wide type is both allowed and fast on the target
835 unsigned Fast = 0;
836 bool Allowed = TLI->allowsMemoryAccess(
837 C, DL, WideStoreTy, LowestIdxStore->getMMO(), &Fast);
838 if (!Allowed || !Fast)
839 return false;
840
841 // Check if the pieces of the value are going to the expected places in memory
842 // to merge the stores.
843 unsigned NarrowBits = MemTy.getScalarSizeInBits();
844 auto checkOffsets = [&](bool MatchLittleEndian) {
845 if (MatchLittleEndian) {
846 for (unsigned i = 0; i != NumStoresFound; ++i)
847 if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset)
848 return false;
849 } else { // MatchBigEndian by reversing loop counter.
850 for (unsigned i = 0, j = NumStoresFound - 1; i != NumStoresFound;
851 ++i, --j)
852 if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset)
853 return false;
854 }
855 return true;
856 };
857
858 // Check if the offsets line up for the native data layout of this target.
859 bool NeedBswap = false;
860 bool NeedRotate = false;
861 if (!checkOffsets(DL.isLittleEndian())) {
862 // Special-case: check if byte offsets line up for the opposite endian.
863 if (NarrowBits == 8 && checkOffsets(DL.isBigEndian()))
864 NeedBswap = true;
865 else if (NumStoresFound == 2 && checkOffsets(DL.isBigEndian()))
866 NeedRotate = true;
867 else
868 return false;
869 }
870
871 if (NeedBswap &&
872 !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {WideStoreTy}}, *MF))
873 return false;
874 if (NeedRotate &&
875 !isLegalOrBeforeLegalizer(
876 {TargetOpcode::G_ROTR, {WideStoreTy, WideStoreTy}}, *MF))
877 return false;
878
879 Builder.setInstrAndDebugLoc(StoreMI);
880
881 if (WideStoreTy != MRI->getType(WideSrcVal))
882 WideSrcVal = Builder.buildTrunc(WideStoreTy, WideSrcVal).getReg(0);
883
884 if (NeedBswap) {
885 WideSrcVal = Builder.buildBSwap(WideStoreTy, WideSrcVal).getReg(0);
886 } else if (NeedRotate) {
887 assert(WideStoreTy.getSizeInBits() % 2 == 0 &&
888 "Unexpected type for rotate");
889 auto RotAmt =
890 Builder.buildConstant(WideStoreTy, WideStoreTy.getSizeInBits() / 2);
891 WideSrcVal =
892 Builder.buildRotateRight(WideStoreTy, WideSrcVal, RotAmt).getReg(0);
893 }
894
895 Builder.buildStore(WideSrcVal, LowestIdxStore->getPointerReg(),
896 LowestIdxStore->getMMO().getPointerInfo(),
897 LowestIdxStore->getMMO().getAlign());
898
899 // Erase the old stores.
900 for (auto *ST : FoundStores) {
901 ST->eraseFromParent();
902 DeletedStores.insert(ST);
903 }
904 return true;
905}
906
907bool LoadStoreOpt::mergeTruncStoresBlock(MachineBasicBlock &BB) {
908 bool Changed = false;
909 SmallVector<GStore *, 16> Stores;
910 SmallPtrSet<GStore *, 8> DeletedStores;
911 // Walk up the block so we can see the most eligible stores.
912 for (MachineInstr &MI : llvm::reverse(BB))
913 if (auto *StoreMI = dyn_cast<GStore>(&MI))
914 Stores.emplace_back(StoreMI);
915
916 for (auto *StoreMI : Stores) {
917 if (DeletedStores.count(StoreMI))
918 continue;
919 if (mergeTruncStore(*StoreMI, DeletedStores))
920 Changed = true;
921 }
922 return Changed;
923}
924
925bool LoadStoreOpt::mergeFunctionStores(MachineFunction &MF) {
926 bool Changed = false;
927 for (auto &BB : MF) {
928 Changed |= mergeBlockStores(BB);
929 Changed |= mergeTruncStoresBlock(BB);
930 }
931
932 // Erase all dead instructions left over by the merging.
933 if (Changed) {
934 for (auto &BB : MF) {
935 for (auto &I : make_early_inc_range(make_range(BB.rbegin(), BB.rend()))) {
936 if (isTriviallyDead(I, *MRI))
937 I.eraseFromParent();
938 }
939 }
940 }
941
942 return Changed;
943}
944
945void LoadStoreOpt::initializeStoreMergeTargetInfo(unsigned AddrSpace) {
946 // Query the legalizer info to record what store types are legal.
947 // We record this because we don't want to bother trying to merge stores into
948 // illegal ones, which would just result in being split again.
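 // For instance, if s8/s16/s32/s64 stores are legal in this address space but
 // s128 stores are not, bits 8, 16, 32 and 64 end up set, and mergeStores()
 // will never try to form a 128-bit store.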
949
950 if (LegalStoreSizes.count(AddrSpace)) {
951 assert(LegalStoreSizes[AddrSpace].any());
952 return; // Already cached sizes for this address space.
953 }
954
955 // Need to reserve at least MaxStoreSizeToForm + 1 bits.
956 BitVector LegalSizes(MaxStoreSizeToForm * 2);
957 const auto &LI = *MF->getSubtarget().getLegalizerInfo();
958 const auto &DL = MF->getFunction().getDataLayout();
959 Type *IRPtrTy = PointerType::get(MF->getFunction().getContext(), AddrSpace);
960 LLT PtrTy = getLLTForType(*IRPtrTy, DL);
961 // We assume that we're not going to be generating any stores wider than
962 // MaxStoreSizeToForm bits for now.
963 for (unsigned Size = 2; Size <= MaxStoreSizeToForm; Size *= 2) {
964 LLT Ty = LLT::scalar(Size);
965 SmallVector<LegalityQuery::MemDesc, 2> MemDescrs(
966 {{Ty, Ty.getSizeInBits(), AtomicOrdering::NotAtomic}});
967 SmallVector<LLT> StoreTys({Ty, PtrTy});
968 LegalityQuery Q(TargetOpcode::G_STORE, StoreTys, MemDescrs);
969 LegalizeActionStep ActionStep = LI.getAction(Q);
970 if (ActionStep.Action == LegalizeActions::Legal)
971 LegalSizes.set(Size);
972 }
973 assert(LegalSizes.any() && "Expected some store sizes to be legal!");
974 LegalStoreSizes[AddrSpace] = LegalSizes;
975}
976
977bool LoadStoreOpt::runOnMachineFunction(MachineFunction &MF) {
978 // If the ISel pipeline failed, do not bother running that pass.
979 if (MF.getProperties().hasProperty(
980 MachineFunctionProperties::Property::FailedISel))
981 return false;
982
983 LLVM_DEBUG(dbgs() << "Begin memory optimizations for: " << MF.getName()
984 << '\n');
985
986 init(MF);
987 bool Changed = false;
988 Changed |= mergeFunctionStores(MF);
989
990 LegalStoreSizes.clear();
991 return Changed;
992}