//===- InterferenceCache.cpp - Caching per-block interference ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// InterferenceCache remembers per-block interference in LiveIntervalUnions.
//
//===----------------------------------------------------------------------===//

#include "InterferenceCache.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/LiveIntervalUnion.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include <cassert>
#include <cstdint>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

// Static member used for null interference cursors.
const InterferenceCache::BlockInterference
    InterferenceCache::Cursor::NoInterference;

// Initializes PhysRegEntries (instead of a SmallVector, PhysRegEntries is a
// buffer of size NumPhysRegs to speed up alloc/clear for targets with large
// reg files). Calloced memory is used for good form, and quiets tools like
// Valgrind too, but zero initialized memory is not required by the algorithm:
// this is because PhysRegEntries works like a SparseSet and its entries are
// only valid when there is a corresponding CacheEntries assignment. There is
// also support for when pass managers are reused for targets with different
// numbers of PhysRegs: in this case PhysRegEntries is freed and reinitialized.
void InterferenceCache::reinitPhysRegEntries() {
  if (PhysRegEntriesCount == TRI->getNumRegs()) return;
  free(PhysRegEntries);
  PhysRegEntriesCount = TRI->getNumRegs();
  PhysRegEntries = static_cast<unsigned char*>(
      safe_calloc(PhysRegEntriesCount, sizeof(unsigned char)));
}

void InterferenceCache::init(MachineFunction *mf,
                             LiveIntervalUnion *liuarray,
                             SlotIndexes *indexes,
                             LiveIntervals *lis,
                             const TargetRegisterInfo *tri) {
  MF = mf;
  LIUArray = liuarray;
  TRI = tri;
  reinitPhysRegEntries();
  for (Entry &E : Entries)
    E.clear(mf, indexes, lis);
}

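// get - Find or create a cache entry for PhysReg. The PhysRegEntries byte
// array is only a hint: the slot it names is trusted only if that entry still
// caches PhysReg (SparseSet-style validation). Otherwise an unreferenced entry
// is claimed in round-robin order and reset for PhysReg.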
InterferenceCache::Entry *InterferenceCache::get(MCRegister PhysReg) {
  unsigned char E = PhysRegEntries[PhysReg.id()];
  if (E < CacheEntries && Entries[E].getPhysReg() == PhysReg) {
    if (!Entries[E].valid(LIUArray, TRI))
      Entries[E].revalidate(LIUArray, TRI);
    return &Entries[E];
  }
  // No valid entry exists, pick the next round-robin entry.
  E = RoundRobin;
  if (++RoundRobin == CacheEntries)
    RoundRobin = 0;
  for (unsigned i = 0; i != CacheEntries; ++i) {
    // Skip entries that are in use.
    if (Entries[E].hasRefs()) {
      if (++E == CacheEntries)
        E = 0;
      continue;
    }
    Entries[E].reset(PhysReg, LIUArray, TRI, MF);
    PhysRegEntries[PhysReg] = E;
    return &Entries[E];
  }
  llvm_unreachable("Ran out of interference cache entries.");
}

/// revalidate - LIU contents have changed, update tags.
void InterferenceCache::Entry::revalidate(LiveIntervalUnion *LIUArray,
                                          const TargetRegisterInfo *TRI) {
  // Invalidate all block entries.
  ++Tag;
  // Invalidate all iterators.
  PrevPos = SlotIndex();
  unsigned i = 0;
  for (MCRegUnit Unit : TRI->regunits(PhysReg))
    RegUnits[i++].VirtTag = LIUArray[Unit].getTag();
}

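// reset - Repurpose this entry to cache interference for physReg. All cached
// block info is invalidated and the per-register-unit iterators are rebuilt
// from the LiveIntervalUnion array.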
void InterferenceCache::Entry::reset(MCRegister physReg,
                                     LiveIntervalUnion *LIUArray,
                                     const TargetRegisterInfo *TRI,
                                     const MachineFunction *MF) {
  assert(!hasRefs() && "Cannot reset cache entry with references");
  // LIU's changed, invalidate cache.
  ++Tag;
  PhysReg = physReg;
  Blocks.resize(MF->getNumBlockIDs());

  // Reset iterators.
  PrevPos = SlotIndex();
  RegUnits.clear();
  for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
    RegUnits.push_back(LIUArray[Unit]);
    RegUnits.back().Fixed = &LIS->getRegUnit(Unit);
  }
}

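// valid - Return true if this entry is still up to date: it must be tracking
// exactly the register units of PhysReg, and none of the corresponding
// LiveIntervalUnions may have changed since the cached tags were recorded.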
bool InterferenceCache::Entry::valid(LiveIntervalUnion *LIUArray,
                                     const TargetRegisterInfo *TRI) {
  unsigned i = 0, e = RegUnits.size();
  for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
    if (i == e)
      return false;
    if (LIUArray[Unit].changedSince(RegUnits[i].VirtTag))
      return false;
    ++i;
  }
  return i == e;
}

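// update - Cache the interference for basic block MBBNum. If the block turns
// out to be interference-free, following blocks are precomputed as well until
// interference is found or an already-cached block is reached.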
void InterferenceCache::Entry::update(unsigned MBBNum) {
  SlotIndex Start, Stop;
  std::tie(Start, Stop) = Indexes->getMBBRange(MBBNum);

  // Use advanceTo only when possible.
  if (PrevPos != Start) {
    if (!PrevPos.isValid() || Start < PrevPos) {
      for (RegUnitInfo &RUI : RegUnits) {
        RUI.VirtI.find(Start);
        RUI.FixedI = RUI.Fixed->find(Start);
      }
    } else {
      for (RegUnitInfo &RUI : RegUnits) {
        RUI.VirtI.advanceTo(Start);
        if (RUI.FixedI != RUI.Fixed->end())
          RUI.FixedI = RUI.Fixed->advanceTo(RUI.FixedI, Start);
      }
    }
    PrevPos = Start;
  }

  MachineFunction::const_iterator MFI =
      MF->getBlockNumbered(MBBNum)->getIterator();
  BlockInterference *BI = &Blocks[MBBNum];
  ArrayRef<SlotIndex> RegMaskSlots;
  ArrayRef<const uint32_t*> RegMaskBits;
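  // Walk blocks starting at MBBNum. Each iteration records the first point of
  // interference (virtual, fixed, or regmask) for the current block; blocks
  // with no interference at all are cached and the scan moves on to the next
  // block.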
  while (true) {
    BI->Tag = Tag;
    BI->First = BI->Last = SlotIndex();

    // Check for first interference from virtregs.
    for (RegUnitInfo &RUI : RegUnits) {
      LiveIntervalUnion::SegmentIter &I = RUI.VirtI;
      if (!I.valid())
        continue;
      SlotIndex StartI = I.start();
      if (StartI >= Stop)
        continue;
      if (!BI->First.isValid() || StartI < BI->First)
        BI->First = StartI;
    }

    // Same thing for fixed interference.
    for (RegUnitInfo &RUI : RegUnits) {
      LiveInterval::const_iterator I = RUI.FixedI;
      LiveInterval::const_iterator E = RUI.Fixed->end();
      if (I == E)
        continue;
      SlotIndex StartI = I->start;
      if (StartI >= Stop)
        continue;
      if (!BI->First.isValid() || StartI < BI->First)
        BI->First = StartI;
    }

    // Also check for register mask interference.
    RegMaskSlots = LIS->getRegMaskSlotsInBlock(MBBNum);
    RegMaskBits = LIS->getRegMaskBitsInBlock(MBBNum);
    SlotIndex Limit = BI->First.isValid() ? BI->First : Stop;
    for (unsigned i = 0, e = RegMaskSlots.size();
         i != e && RegMaskSlots[i] < Limit; ++i)
      if (MachineOperand::clobbersPhysReg(RegMaskBits[i], PhysReg)) {
        // Register mask i clobbers PhysReg before the LIU interference.
        BI->First = RegMaskSlots[i];
        break;
      }

    PrevPos = Stop;
    if (BI->First.isValid())
      break;

    // No interference in this block? Go ahead and precompute the next block.
    if (++MFI == MF->end())
      return;
    MBBNum = MFI->getNumber();
    BI = &Blocks[MBBNum];
    if (BI->Tag == Tag)
      return;
    std::tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
  }

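  // The loop above only breaks out when BI->First is valid, so this block has
  // interference; now find where the last interference in [Start, Stop) ends.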
  // Check for last interference in block.
  for (RegUnitInfo &RUI : RegUnits) {
    LiveIntervalUnion::SegmentIter &I = RUI.VirtI;
    if (!I.valid() || I.start() >= Stop)
      continue;
    I.advanceTo(Stop);
    bool Backup = !I.valid() || I.start() >= Stop;
    if (Backup)
      --I;
    SlotIndex StopI = I.stop();
    if (!BI->Last.isValid() || StopI > BI->Last)
      BI->Last = StopI;
    if (Backup)
      ++I;
  }

  // Fixed interference.
  for (RegUnitInfo &RUI : RegUnits) {
    LiveInterval::iterator &I = RUI.FixedI;
    LiveRange *LR = RUI.Fixed;
    if (I == LR->end() || I->start >= Stop)
      continue;
    I = LR->advanceTo(I, Stop);
    bool Backup = I == LR->end() || I->start >= Stop;
    if (Backup)
      --I;
    SlotIndex StopI = I->end;
    if (!BI->Last.isValid() || StopI > BI->Last)
      BI->Last = StopI;
    if (Backup)
      ++I;
  }

  // Also check for register mask interference.
  SlotIndex Limit = BI->Last.isValid() ? BI->Last : Start;
  for (unsigned i = RegMaskSlots.size();
       i && RegMaskSlots[i-1].getDeadSlot() > Limit; --i)
    if (MachineOperand::clobbersPhysReg(RegMaskBits[i-1], PhysReg)) {
      // Register mask i-1 clobbers PhysReg after the LIU interference.
      // Model the regmask clobber as a dead def.
      BI->Last = RegMaskSlots[i-1].getDeadSlot();
      break;
    }
}