File: lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
Warning: line 841, column 9: Value stored to 'Imms' during its initialization is never read
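Note: this diagnostic appears to come from the Clang Static Analyzer's dead-store
check (deadcode.DeadStores): the value computed for 'Imms' by its initializer at
line 841 is overwritten at line 852 before anything reads it, so the initializing
computation is dead work. A sketch of one possible fix follows the flagged line
below.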
1 | //=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | // This file contains a pass that performs load / store related peephole |
11 | // optimizations. This pass should be run after register allocation. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #include "AArch64InstrInfo.h" |
16 | #include "AArch64Subtarget.h" |
17 | #include "MCTargetDesc/AArch64AddressingModes.h" |
18 | #include "llvm/ADT/BitVector.h" |
19 | #include "llvm/ADT/iterator_range.h" |
20 | #include "llvm/ADT/SmallVector.h" |
21 | #include "llvm/ADT/Statistic.h" |
22 | #include "llvm/ADT/StringRef.h" |
23 | #include "llvm/CodeGen/MachineBasicBlock.h" |
24 | #include "llvm/CodeGen/MachineFunction.h" |
25 | #include "llvm/CodeGen/MachineFunctionPass.h" |
26 | #include "llvm/CodeGen/MachineInstr.h" |
27 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
28 | #include "llvm/CodeGen/MachineOperand.h" |
29 | #include "llvm/IR/DebugLoc.h" |
30 | #include "llvm/MC/MCRegisterInfo.h" |
31 | #include "llvm/Pass.h" |
32 | #include "llvm/Support/CommandLine.h" |
33 | #include "llvm/Support/Debug.h" |
34 | #include "llvm/Support/ErrorHandling.h" |
35 | #include "llvm/Support/raw_ostream.h" |
36 | #include "llvm/Target/TargetRegisterInfo.h" |
37 | #include <cassert> |
38 | #include <cstdint> |
39 | #include <iterator> |
40 | #include <limits> |
41 | |
42 | using namespace llvm; |
43 | |
44 | #define DEBUG_TYPE "aarch64-ldst-opt" |
45 | |
46 | STATISTIC(NumPairCreated, "Number of load/store pair instructions generated"); |
47 | STATISTIC(NumPostFolded, "Number of post-index updates folded"); |
48 | STATISTIC(NumPreFolded, "Number of pre-index updates folded"); |
49 | STATISTIC(NumUnscaledPairCreated, |
50 | "Number of load/store from unscaled generated"); |
51 | STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted"); |
52 | STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted"); |
53 | |
54 | // The LdStLimit limits how far we search for load/store pairs. |
55 | static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit", |
56 | cl::init(20), cl::Hidden); |
57 | |
58 | // The UpdateLimit limits how far we search for update instructions when we form |
59 | // pre-/post-index instructions. |
60 | static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100), |
61 | cl::Hidden); |
62 | |
63 | #define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass" |
64 | |
65 | namespace { |
66 | |
67 | typedef struct LdStPairFlags { |
68 | // If a matching instruction is found, MergeForward is set to true if the |
69 | // merge is to remove the first instruction and replace the second with |
70 | // a pair-wise insn, and false if the reverse is true. |
71 | bool MergeForward = false; |
72 | |
73 | // SExtIdx gives the index of the result of the load pair that must be |
74 | // extended. The value of SExtIdx assumes that the paired load produces the |
75 | // value in this order: (I, returned iterator), i.e., -1 means no value has |
76 | // to be extended, 0 means I, and 1 means the returned iterator. |
77 | int SExtIdx = -1; |
78 | |
79 | LdStPairFlags() = default; |
80 | |
81 | void setMergeForward(bool V = true) { MergeForward = V; } |
82 | bool getMergeForward() const { return MergeForward; } |
83 | |
84 | void setSExtIdx(int V) { SExtIdx = V; } |
85 | int getSExtIdx() const { return SExtIdx; } |
86 | |
87 | } LdStPairFlags; |
88 | |
89 | struct AArch64LoadStoreOpt : public MachineFunctionPass { |
90 | static char ID; |
91 | |
92 | AArch64LoadStoreOpt() : MachineFunctionPass(ID) { |
93 | initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry()); |
94 | } |
95 | |
96 | AliasAnalysis *AA; |
97 | const AArch64InstrInfo *TII; |
98 | const TargetRegisterInfo *TRI; |
99 | const AArch64Subtarget *Subtarget; |
100 | |
101 | // Track which registers have been modified and used. |
102 | BitVector ModifiedRegs, UsedRegs; |
103 | |
104 | virtual void getAnalysisUsage(AnalysisUsage &AU) const override { |
105 | AU.addRequired<AAResultsWrapperPass>(); |
106 | MachineFunctionPass::getAnalysisUsage(AU); |
107 | } |
108 | |
109 | // Scan the instructions looking for a load/store that can be combined |
110 | // with the current instruction into a load/store pair. |
111 | // Return the matching instruction if one is found, else MBB->end(). |
112 | MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I, |
113 | LdStPairFlags &Flags, |
114 | unsigned Limit, |
115 | bool FindNarrowMerge); |
116 | |
117 | // Scan the instructions looking for a store that writes to the address from |
118 | // which the current load instruction reads. Return true if one is found. |
119 | bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit, |
120 | MachineBasicBlock::iterator &StoreI); |
121 | |
122 | // Merge the two narrow zero stores indicated into a single wider store instruction. |
123 | MachineBasicBlock::iterator |
124 | mergeNarrowZeroStores(MachineBasicBlock::iterator I, |
125 | MachineBasicBlock::iterator MergeMI, |
126 | const LdStPairFlags &Flags); |
127 | |
128 | // Merge the two instructions indicated into a single pair-wise instruction. |
129 | MachineBasicBlock::iterator |
130 | mergePairedInsns(MachineBasicBlock::iterator I, |
131 | MachineBasicBlock::iterator Paired, |
132 | const LdStPairFlags &Flags); |
133 | |
134 | // Promote the load that reads directly from the address stored to. |
135 | MachineBasicBlock::iterator |
136 | promoteLoadFromStore(MachineBasicBlock::iterator LoadI, |
137 | MachineBasicBlock::iterator StoreI); |
138 | |
139 | // Scan the instruction list to find a base register update that can |
140 | // be combined with the current instruction (a load or store) using |
141 | // pre or post indexed addressing with writeback. Scan forwards. |
142 | MachineBasicBlock::iterator |
143 | findMatchingUpdateInsnForward(MachineBasicBlock::iterator I, |
144 | int UnscaledOffset, unsigned Limit); |
145 | |
146 | // Scan the instruction list to find a base register update that can |
147 | // be combined with the current instruction (a load or store) using |
148 | // pre or post indexed addressing with writeback. Scan backwards. |
149 | MachineBasicBlock::iterator |
150 | findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit); |
151 | |
152 | // Find an instruction that updates the base register of the ld/st |
153 | // instruction. |
154 | bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI, |
155 | unsigned BaseReg, int Offset); |
156 | |
157 | // Merge a pre- or post-index base register update into a ld/st instruction. |
158 | MachineBasicBlock::iterator |
159 | mergeUpdateInsn(MachineBasicBlock::iterator I, |
160 | MachineBasicBlock::iterator Update, bool IsPreIdx); |
161 | |
162 | // Find and merge zero store instructions. |
163 | bool tryToMergeZeroStInst(MachineBasicBlock::iterator &MBBI); |
164 | |
165 | // Find and pair ldr/str instructions. |
166 | bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI); |
167 | |
168 | // Find and promote load instructions which read directly from a store. |
169 | bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI); |
170 | |
171 | bool optimizeBlock(MachineBasicBlock &MBB, bool EnableNarrowZeroStOpt); |
172 | |
173 | bool runOnMachineFunction(MachineFunction &Fn) override; |
174 | |
175 | MachineFunctionProperties getRequiredProperties() const override { |
176 | return MachineFunctionProperties().set( |
177 | MachineFunctionProperties::Property::NoVRegs); |
178 | } |
179 | |
180 | StringRef getPassName() const override { return AARCH64_LOAD_STORE_OPT_NAME; } |
181 | }; |
182 | |
183 | char AArch64LoadStoreOpt::ID = 0; |
184 | |
185 | } // end anonymous namespace |
186 | |
187 | INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt", |
188 | AARCH64_LOAD_STORE_OPT_NAME, false, false) |
189 | |
190 | static bool isNarrowStore(unsigned Opc) { |
191 | switch (Opc) { |
192 | default: |
193 | return false; |
194 | case AArch64::STRBBui: |
195 | case AArch64::STURBBi: |
196 | case AArch64::STRHHui: |
197 | case AArch64::STURHHi: |
198 | return true; |
199 | } |
200 | } |
201 | |
202 | // Scaling factor for unscaled load or store. |
203 | static int getMemScale(MachineInstr &MI) { |
204 | switch (MI.getOpcode()) { |
205 | default: |
206 | llvm_unreachable("Opcode has unknown scale!")::llvm::llvm_unreachable_internal("Opcode has unknown scale!" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp" , 206); |
207 | case AArch64::LDRBBui: |
208 | case AArch64::LDURBBi: |
209 | case AArch64::LDRSBWui: |
210 | case AArch64::LDURSBWi: |
211 | case AArch64::STRBBui: |
212 | case AArch64::STURBBi: |
213 | return 1; |
214 | case AArch64::LDRHHui: |
215 | case AArch64::LDURHHi: |
216 | case AArch64::LDRSHWui: |
217 | case AArch64::LDURSHWi: |
218 | case AArch64::STRHHui: |
219 | case AArch64::STURHHi: |
220 | return 2; |
221 | case AArch64::LDRSui: |
222 | case AArch64::LDURSi: |
223 | case AArch64::LDRSWui: |
224 | case AArch64::LDURSWi: |
225 | case AArch64::LDRWui: |
226 | case AArch64::LDURWi: |
227 | case AArch64::STRSui: |
228 | case AArch64::STURSi: |
229 | case AArch64::STRWui: |
230 | case AArch64::STURWi: |
231 | case AArch64::LDPSi: |
232 | case AArch64::LDPSWi: |
233 | case AArch64::LDPWi: |
234 | case AArch64::STPSi: |
235 | case AArch64::STPWi: |
236 | return 4; |
237 | case AArch64::LDRDui: |
238 | case AArch64::LDURDi: |
239 | case AArch64::LDRXui: |
240 | case AArch64::LDURXi: |
241 | case AArch64::STRDui: |
242 | case AArch64::STURDi: |
243 | case AArch64::STRXui: |
244 | case AArch64::STURXi: |
245 | case AArch64::LDPDi: |
246 | case AArch64::LDPXi: |
247 | case AArch64::STPDi: |
248 | case AArch64::STPXi: |
249 | return 8; |
250 | case AArch64::LDRQui: |
251 | case AArch64::LDURQi: |
252 | case AArch64::STRQui: |
253 | case AArch64::STURQi: |
254 | case AArch64::LDPQi: |
255 | case AArch64::STPQi: |
256 | return 16; |
257 | } |
258 | } |
259 | |
260 | static unsigned getMatchingNonSExtOpcode(unsigned Opc, |
261 | bool *IsValidLdStrOpc = nullptr) { |
262 | if (IsValidLdStrOpc) |
263 | *IsValidLdStrOpc = true; |
264 | switch (Opc) { |
265 | default: |
266 | if (IsValidLdStrOpc) |
267 | *IsValidLdStrOpc = false; |
268 | return std::numeric_limits<unsigned>::max(); |
269 | case AArch64::STRDui: |
270 | case AArch64::STURDi: |
271 | case AArch64::STRQui: |
272 | case AArch64::STURQi: |
273 | case AArch64::STRBBui: |
274 | case AArch64::STURBBi: |
275 | case AArch64::STRHHui: |
276 | case AArch64::STURHHi: |
277 | case AArch64::STRWui: |
278 | case AArch64::STURWi: |
279 | case AArch64::STRXui: |
280 | case AArch64::STURXi: |
281 | case AArch64::LDRDui: |
282 | case AArch64::LDURDi: |
283 | case AArch64::LDRQui: |
284 | case AArch64::LDURQi: |
285 | case AArch64::LDRWui: |
286 | case AArch64::LDURWi: |
287 | case AArch64::LDRXui: |
288 | case AArch64::LDURXi: |
289 | case AArch64::STRSui: |
290 | case AArch64::STURSi: |
291 | case AArch64::LDRSui: |
292 | case AArch64::LDURSi: |
293 | return Opc; |
294 | case AArch64::LDRSWui: |
295 | return AArch64::LDRWui; |
296 | case AArch64::LDURSWi: |
297 | return AArch64::LDURWi; |
298 | } |
299 | } |
300 | |
301 | static unsigned getMatchingWideOpcode(unsigned Opc) { |
302 | switch (Opc) { |
303 | default: |
304 | llvm_unreachable("Opcode has no wide equivalent!")::llvm::llvm_unreachable_internal("Opcode has no wide equivalent!" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp" , 304); |
305 | case AArch64::STRBBui: |
306 | return AArch64::STRHHui; |
307 | case AArch64::STRHHui: |
308 | return AArch64::STRWui; |
309 | case AArch64::STURBBi: |
310 | return AArch64::STURHHi; |
311 | case AArch64::STURHHi: |
312 | return AArch64::STURWi; |
313 | case AArch64::STURWi: |
314 | return AArch64::STURXi; |
315 | case AArch64::STRWui: |
316 | return AArch64::STRXui; |
317 | } |
318 | } |
319 | |
320 | static unsigned getMatchingPairOpcode(unsigned Opc) { |
321 | switch (Opc) { |
322 | default: |
323 | llvm_unreachable("Opcode has no pairwise equivalent!")::llvm::llvm_unreachable_internal("Opcode has no pairwise equivalent!" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp" , 323); |
324 | case AArch64::STRSui: |
325 | case AArch64::STURSi: |
326 | return AArch64::STPSi; |
327 | case AArch64::STRDui: |
328 | case AArch64::STURDi: |
329 | return AArch64::STPDi; |
330 | case AArch64::STRQui: |
331 | case AArch64::STURQi: |
332 | return AArch64::STPQi; |
333 | case AArch64::STRWui: |
334 | case AArch64::STURWi: |
335 | return AArch64::STPWi; |
336 | case AArch64::STRXui: |
337 | case AArch64::STURXi: |
338 | return AArch64::STPXi; |
339 | case AArch64::LDRSui: |
340 | case AArch64::LDURSi: |
341 | return AArch64::LDPSi; |
342 | case AArch64::LDRDui: |
343 | case AArch64::LDURDi: |
344 | return AArch64::LDPDi; |
345 | case AArch64::LDRQui: |
346 | case AArch64::LDURQi: |
347 | return AArch64::LDPQi; |
348 | case AArch64::LDRWui: |
349 | case AArch64::LDURWi: |
350 | return AArch64::LDPWi; |
351 | case AArch64::LDRXui: |
352 | case AArch64::LDURXi: |
353 | return AArch64::LDPXi; |
354 | case AArch64::LDRSWui: |
355 | case AArch64::LDURSWi: |
356 | return AArch64::LDPSWi; |
357 | } |
358 | } |
359 | |
360 | static unsigned isMatchingStore(MachineInstr &LoadInst, |
361 | MachineInstr &StoreInst) { |
362 | unsigned LdOpc = LoadInst.getOpcode(); |
363 | unsigned StOpc = StoreInst.getOpcode(); |
364 | switch (LdOpc) { |
365 | default: |
366 | llvm_unreachable("Unsupported load instruction!")::llvm::llvm_unreachable_internal("Unsupported load instruction!" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp" , 366); |
367 | case AArch64::LDRBBui: |
368 | return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui || |
369 | StOpc == AArch64::STRWui || StOpc == AArch64::STRXui; |
370 | case AArch64::LDURBBi: |
371 | return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi || |
372 | StOpc == AArch64::STURWi || StOpc == AArch64::STURXi; |
373 | case AArch64::LDRHHui: |
374 | return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui || |
375 | StOpc == AArch64::STRXui; |
376 | case AArch64::LDURHHi: |
377 | return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi || |
378 | StOpc == AArch64::STURXi; |
379 | case AArch64::LDRWui: |
380 | return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui; |
381 | case AArch64::LDURWi: |
382 | return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi; |
383 | case AArch64::LDRXui: |
384 | return StOpc == AArch64::STRXui; |
385 | case AArch64::LDURXi: |
386 | return StOpc == AArch64::STURXi; |
387 | } |
388 | } |
389 | |
390 | static unsigned getPreIndexedOpcode(unsigned Opc) { |
391 | switch (Opc) { |
392 | default: |
393 | llvm_unreachable("Opcode has no pre-indexed equivalent!")::llvm::llvm_unreachable_internal("Opcode has no pre-indexed equivalent!" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp" , 393); |
394 | case AArch64::STRSui: |
395 | return AArch64::STRSpre; |
396 | case AArch64::STRDui: |
397 | return AArch64::STRDpre; |
398 | case AArch64::STRQui: |
399 | return AArch64::STRQpre; |
400 | case AArch64::STRBBui: |
401 | return AArch64::STRBBpre; |
402 | case AArch64::STRHHui: |
403 | return AArch64::STRHHpre; |
404 | case AArch64::STRWui: |
405 | return AArch64::STRWpre; |
406 | case AArch64::STRXui: |
407 | return AArch64::STRXpre; |
408 | case AArch64::LDRSui: |
409 | return AArch64::LDRSpre; |
410 | case AArch64::LDRDui: |
411 | return AArch64::LDRDpre; |
412 | case AArch64::LDRQui: |
413 | return AArch64::LDRQpre; |
414 | case AArch64::LDRBBui: |
415 | return AArch64::LDRBBpre; |
416 | case AArch64::LDRHHui: |
417 | return AArch64::LDRHHpre; |
418 | case AArch64::LDRWui: |
419 | return AArch64::LDRWpre; |
420 | case AArch64::LDRXui: |
421 | return AArch64::LDRXpre; |
422 | case AArch64::LDRSWui: |
423 | return AArch64::LDRSWpre; |
424 | case AArch64::LDPSi: |
425 | return AArch64::LDPSpre; |
426 | case AArch64::LDPSWi: |
427 | return AArch64::LDPSWpre; |
428 | case AArch64::LDPDi: |
429 | return AArch64::LDPDpre; |
430 | case AArch64::LDPQi: |
431 | return AArch64::LDPQpre; |
432 | case AArch64::LDPWi: |
433 | return AArch64::LDPWpre; |
434 | case AArch64::LDPXi: |
435 | return AArch64::LDPXpre; |
436 | case AArch64::STPSi: |
437 | return AArch64::STPSpre; |
438 | case AArch64::STPDi: |
439 | return AArch64::STPDpre; |
440 | case AArch64::STPQi: |
441 | return AArch64::STPQpre; |
442 | case AArch64::STPWi: |
443 | return AArch64::STPWpre; |
444 | case AArch64::STPXi: |
445 | return AArch64::STPXpre; |
446 | } |
447 | } |
448 | |
449 | static unsigned getPostIndexedOpcode(unsigned Opc) { |
450 | switch (Opc) { |
451 | default: |
452 | llvm_unreachable("Opcode has no post-indexed wise equivalent!")::llvm::llvm_unreachable_internal("Opcode has no post-indexed wise equivalent!" , "/tmp/buildd/llvm-toolchain-snapshot-5.0~svn303373/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp" , 452); |
453 | case AArch64::STRSui: |
454 | return AArch64::STRSpost; |
455 | case AArch64::STRDui: |
456 | return AArch64::STRDpost; |
457 | case AArch64::STRQui: |
458 | return AArch64::STRQpost; |
459 | case AArch64::STRBBui: |
460 | return AArch64::STRBBpost; |
461 | case AArch64::STRHHui: |
462 | return AArch64::STRHHpost; |
463 | case AArch64::STRWui: |
464 | return AArch64::STRWpost; |
465 | case AArch64::STRXui: |
466 | return AArch64::STRXpost; |
467 | case AArch64::LDRSui: |
468 | return AArch64::LDRSpost; |
469 | case AArch64::LDRDui: |
470 | return AArch64::LDRDpost; |
471 | case AArch64::LDRQui: |
472 | return AArch64::LDRQpost; |
473 | case AArch64::LDRBBui: |
474 | return AArch64::LDRBBpost; |
475 | case AArch64::LDRHHui: |
476 | return AArch64::LDRHHpost; |
477 | case AArch64::LDRWui: |
478 | return AArch64::LDRWpost; |
479 | case AArch64::LDRXui: |
480 | return AArch64::LDRXpost; |
481 | case AArch64::LDRSWui: |
482 | return AArch64::LDRSWpost; |
483 | case AArch64::LDPSi: |
484 | return AArch64::LDPSpost; |
485 | case AArch64::LDPSWi: |
486 | return AArch64::LDPSWpost; |
487 | case AArch64::LDPDi: |
488 | return AArch64::LDPDpost; |
489 | case AArch64::LDPQi: |
490 | return AArch64::LDPQpost; |
491 | case AArch64::LDPWi: |
492 | return AArch64::LDPWpost; |
493 | case AArch64::LDPXi: |
494 | return AArch64::LDPXpost; |
495 | case AArch64::STPSi: |
496 | return AArch64::STPSpost; |
497 | case AArch64::STPDi: |
498 | return AArch64::STPDpost; |
499 | case AArch64::STPQi: |
500 | return AArch64::STPQpost; |
501 | case AArch64::STPWi: |
502 | return AArch64::STPWpost; |
503 | case AArch64::STPXi: |
504 | return AArch64::STPXpost; |
505 | } |
506 | } |
507 | |
508 | static bool isPairedLdSt(const MachineInstr &MI) { |
509 | switch (MI.getOpcode()) { |
510 | default: |
511 | return false; |
512 | case AArch64::LDPSi: |
513 | case AArch64::LDPSWi: |
514 | case AArch64::LDPDi: |
515 | case AArch64::LDPQi: |
516 | case AArch64::LDPWi: |
517 | case AArch64::LDPXi: |
518 | case AArch64::STPSi: |
519 | case AArch64::STPDi: |
520 | case AArch64::STPQi: |
521 | case AArch64::STPWi: |
522 | case AArch64::STPXi: |
523 | return true; |
524 | } |
525 | } |
526 | |
527 | static const MachineOperand &getLdStRegOp(const MachineInstr &MI, |
528 | unsigned PairedRegOp = 0) { |
529 | assert(PairedRegOp < 2 && "Unexpected register operand idx."); |
530 | unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0; |
531 | return MI.getOperand(Idx); |
532 | } |
533 | |
534 | static const MachineOperand &getLdStBaseOp(const MachineInstr &MI) { |
535 | unsigned Idx = isPairedLdSt(MI) ? 2 : 1; |
536 | return MI.getOperand(Idx); |
537 | } |
538 | |
539 | static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI) { |
540 | unsigned Idx = isPairedLdSt(MI) ? 3 : 2; |
541 | return MI.getOperand(Idx); |
542 | } |
543 | |
544 | static bool isLdOffsetInRangeOfSt(MachineInstr &LoadInst, |
545 | MachineInstr &StoreInst, |
546 | const AArch64InstrInfo *TII) { |
547 | assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st."); |
548 | int LoadSize = getMemScale(LoadInst); |
549 | int StoreSize = getMemScale(StoreInst); |
550 | int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst) |
551 | ? getLdStOffsetOp(StoreInst).getImm() |
552 | : getLdStOffsetOp(StoreInst).getImm() * StoreSize; |
553 | int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst) |
554 | ? getLdStOffsetOp(LoadInst).getImm() |
555 | : getLdStOffsetOp(LoadInst).getImm() * LoadSize; |
556 | return (UnscaledStOffset <= UnscaledLdOffset) && |
557 | (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize)); |
558 | } |
559 | |
560 | static bool isPromotableZeroStoreInst(MachineInstr &MI) { |
561 | unsigned Opc = MI.getOpcode(); |
562 | return (Opc == AArch64::STRWui || Opc == AArch64::STURWi || |
563 | isNarrowStore(Opc)) && |
564 | getLdStRegOp(MI).getReg() == AArch64::WZR; |
565 | } |
566 | |
567 | MachineBasicBlock::iterator |
568 | AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I, |
569 | MachineBasicBlock::iterator MergeMI, |
570 | const LdStPairFlags &Flags) { |
571 | assert(isPromotableZeroStoreInst(*I) && isPromotableZeroStoreInst(*MergeMI) && |
572 | "Expected promotable zero stores."); |
573 | |
574 | MachineBasicBlock::iterator NextI = I; |
575 | ++NextI; |
576 | // If NextI is the second of the two instructions to be merged, we need |
577 | // to skip one further. Either way, the merge will invalidate the iterator, |
578 | // and we don't need to scan the new instruction, as it's a pairwise |
579 | // instruction, which we're not considering for further action anyway. |
580 | if (NextI == MergeMI) |
581 | ++NextI; |
582 | |
583 | unsigned Opc = I->getOpcode(); |
584 | bool IsScaled = !TII->isUnscaledLdSt(Opc); |
585 | int OffsetStride = IsScaled ? 1 : getMemScale(*I); |
586 | |
587 | bool MergeForward = Flags.getMergeForward(); |
588 | // Insert our new paired instruction after whichever of the paired |
589 | // instructions MergeForward indicates. |
590 | MachineBasicBlock::iterator InsertionPoint = MergeForward ? MergeMI : I; |
591 | // MergeForward also determines from which instruction we copy the base |
592 | // register operand, so that the flags stay compatible with the input code. |
593 | const MachineOperand &BaseRegOp = |
594 | MergeForward ? getLdStBaseOp(*MergeMI) : getLdStBaseOp(*I); |
595 | |
596 | // Which register is Rt and which is Rt2 depends on the offset order. |
597 | MachineInstr *RtMI; |
598 | if (getLdStOffsetOp(*I).getImm() == |
599 | getLdStOffsetOp(*MergeMI).getImm() + OffsetStride) |
600 | RtMI = &*MergeMI; |
601 | else |
602 | RtMI = &*I; |
603 | |
604 | int OffsetImm = getLdStOffsetOp(*RtMI).getImm(); |
605 | // Change the scaled offset from small to large type. |
606 | if (IsScaled) { |
607 | assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge"); |
608 | OffsetImm /= 2; |
609 | } |
610 | |
611 | // Construct the new instruction. |
612 | DebugLoc DL = I->getDebugLoc(); |
613 | MachineBasicBlock *MBB = I->getParent(); |
614 | MachineInstrBuilder MIB; |
615 | MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc))) |
616 | .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR) |
617 | .add(BaseRegOp) |
618 | .addImm(OffsetImm) |
619 | .setMemRefs(I->mergeMemRefsWith(*MergeMI)); |
620 | (void)MIB; |
621 | |
622 | DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << "Creating wider store. Replacing instructions:\n " ; } } while (false); |
623 | DEBUG(I->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { I->print(dbgs()); } } while (false ); |
624 | DEBUG(dbgs() << " ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << " "; } } while (false ); |
625 | DEBUG(MergeMI->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { MergeMI->print(dbgs()); } } while ( false); |
626 | DEBUG(dbgs() << " with instruction:\n ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << " with instruction:\n " ; } } while (false); |
627 | DEBUG(((MachineInstr *)MIB)->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { ((MachineInstr *)MIB)->print(dbgs( )); } } while (false); |
628 | DEBUG(dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << "\n"; } } while (false ); |
629 | |
630 | // Erase the old instructions. |
631 | I->eraseFromParent(); |
632 | MergeMI->eraseFromParent(); |
633 | return NextI; |
634 | } |
635 | |
636 | MachineBasicBlock::iterator |
637 | AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I, |
638 | MachineBasicBlock::iterator Paired, |
639 | const LdStPairFlags &Flags) { |
640 | MachineBasicBlock::iterator NextI = I; |
641 | ++NextI; |
642 | // If NextI is the second of the two instructions to be merged, we need |
643 | // to skip one further. Either way, the merge will invalidate the iterator, |
644 | // and we don't need to scan the new instruction, as it's a pairwise |
645 | // instruction, which we're not considering for further action anyway. |
646 | if (NextI == Paired) |
647 | ++NextI; |
648 | |
649 | int SExtIdx = Flags.getSExtIdx(); |
650 | unsigned Opc = |
651 | SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode()); |
652 | bool IsUnscaled = TII->isUnscaledLdSt(Opc); |
653 | int OffsetStride = IsUnscaled ? getMemScale(*I) : 1; |
654 | |
655 | bool MergeForward = Flags.getMergeForward(); |
656 | // Insert our new paired instruction after whichever of the paired |
657 | // instructions MergeForward indicates. |
658 | MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I; |
659 | // MergeForward also determines from which instruction we copy the base |
660 | // register operand, so that the flags stay compatible with the input code. |
661 | const MachineOperand &BaseRegOp = |
662 | MergeForward ? getLdStBaseOp(*Paired) : getLdStBaseOp(*I); |
663 | |
664 | int Offset = getLdStOffsetOp(*I).getImm(); |
665 | int PairedOffset = getLdStOffsetOp(*Paired).getImm(); |
666 | bool PairedIsUnscaled = TII->isUnscaledLdSt(Paired->getOpcode()); |
667 | if (IsUnscaled != PairedIsUnscaled) { |
668 | // We're trying to pair instructions that differ in how they are scaled. If |
669 | // I is scaled then scale the offset of Paired accordingly. Otherwise, do |
670 | // the opposite (i.e., make Paired's offset unscaled). |
671 | int MemSize = getMemScale(*Paired); |
672 | if (PairedIsUnscaled) { |
673 | // If the unscaled offset isn't a multiple of the MemSize, we can't |
674 | // pair the operations together. |
675 | assert(!(PairedOffset % getMemScale(*Paired)) && |
676 | "Offset should be a multiple of the stride!"); |
677 | PairedOffset /= MemSize; |
678 | } else { |
679 | PairedOffset *= MemSize; |
680 | } |
681 | } |
682 | |
683 | // Which register is Rt and which is Rt2 depends on the offset order. |
684 | MachineInstr *RtMI, *Rt2MI; |
685 | if (Offset == PairedOffset + OffsetStride) { |
686 | RtMI = &*Paired; |
687 | Rt2MI = &*I; |
688 | // Here we swapped the assumption made for SExtIdx. |
689 | // I.e., we turn ldp I, Paired into ldp Paired, I. |
690 | // Update the index accordingly. |
691 | if (SExtIdx != -1) |
692 | SExtIdx = (SExtIdx + 1) % 2; |
693 | } else { |
694 | RtMI = &*I; |
695 | Rt2MI = &*Paired; |
696 | } |
697 | int OffsetImm = getLdStOffsetOp(*RtMI).getImm(); |
698 | // Scale the immediate offset, if necessary. |
699 | if (TII->isUnscaledLdSt(RtMI->getOpcode())) { |
700 | assert(!(OffsetImm % getMemScale(*RtMI)) && |
701 | "Unscaled offset cannot be scaled."); |
702 | OffsetImm /= getMemScale(*RtMI); |
703 | } |
704 | |
705 | // Construct the new instruction. |
706 | MachineInstrBuilder MIB; |
707 | DebugLoc DL = I->getDebugLoc(); |
708 | MachineBasicBlock *MBB = I->getParent(); |
709 | MachineOperand RegOp0 = getLdStRegOp(*RtMI); |
710 | MachineOperand RegOp1 = getLdStRegOp(*Rt2MI); |
711 | // Kill flags may become invalid when moving stores for pairing. |
712 | if (RegOp0.isUse()) { |
713 | if (!MergeForward) { |
714 | // Clear kill flags on store if moving upwards. Example: |
715 | // STRWui %w0, ... |
716 | // USE %w1 |
717 | // STRWui kill %w1 ; need to clear kill flag when moving STRWui upwards |
718 | RegOp0.setIsKill(false); |
719 | RegOp1.setIsKill(false); |
720 | } else { |
721 | // Clear kill flags of the first store's register. Example: |
722 | // STRWui %w1, ... |
723 | // USE kill %w1 ; need to clear kill flag when moving STRWui downwards |
724 | // STRW %w0 |
725 | unsigned Reg = getLdStRegOp(*I).getReg(); |
726 | for (MachineInstr &MI : make_range(std::next(I), Paired)) |
727 | MI.clearRegisterKills(Reg, TRI); |
728 | } |
729 | } |
730 | MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingPairOpcode(Opc))) |
731 | .add(RegOp0) |
732 | .add(RegOp1) |
733 | .add(BaseRegOp) |
734 | .addImm(OffsetImm) |
735 | .setMemRefs(I->mergeMemRefsWith(*Paired)); |
736 | |
737 | (void)MIB; |
738 | |
739 | DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << "Creating pair load/store. Replacing instructions:\n " ; } } while (false); |
740 | DEBUG(I->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { I->print(dbgs()); } } while (false ); |
741 | DEBUG(dbgs() << " ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << " "; } } while (false ); |
742 | DEBUG(Paired->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { Paired->print(dbgs()); } } while ( false); |
743 | DEBUG(dbgs() << " with instruction:\n ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << " with instruction:\n " ; } } while (false); |
744 | if (SExtIdx != -1) { |
745 | // Generate the sign extension for the proper result of the ldp. |
746 | // I.e., with X1, that would be: |
747 | // %W1<def> = KILL %W1, %X1<imp-def> |
748 | // %X1<def> = SBFMXri %X1<kill>, 0, 31 |
749 | MachineOperand &DstMO = MIB->getOperand(SExtIdx); |
750 | // Right now, DstMO has the extended register, since it comes from an |
751 | // extended opcode. |
752 | unsigned DstRegX = DstMO.getReg(); |
753 | // Get the W variant of that register. |
754 | unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32); |
755 | // Update the result of LDP to use the W instead of the X variant. |
756 | DstMO.setReg(DstRegW); |
757 | DEBUG(((MachineInstr *)MIB)->print(dbgs())); |
758 | DEBUG(dbgs() << "\n"); |
759 | // Make the machine verifier happy by providing a definition for |
760 | // the X register. |
761 | // Insert this definition right after the generated LDP, i.e., before |
762 | // InsertionPoint. |
763 | MachineInstrBuilder MIBKill = |
764 | BuildMI(*MBB, InsertionPoint, DL, TII->get(TargetOpcode::KILL), DstRegW) |
765 | .addReg(DstRegW) |
766 | .addReg(DstRegX, RegState::Define); |
767 | MIBKill->getOperand(2).setImplicit(); |
768 | // Create the sign extension. |
769 | MachineInstrBuilder MIBSXTW = |
770 | BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::SBFMXri), DstRegX) |
771 | .addReg(DstRegX) |
772 | .addImm(0) |
773 | .addImm(31); |
774 | (void)MIBSXTW; |
775 | DEBUG(dbgs() << " Extend operand:\n ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << " Extend operand:\n " ; } } while (false); |
776 | DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { ((MachineInstr *)MIBSXTW)->print(dbgs ()); } } while (false); |
777 | } else { |
778 | DEBUG(((MachineInstr *)MIB)->print(dbgs())); |
779 | } |
780 | DEBUG(dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << "\n"; } } while (false ); |
781 | |
782 | // Erase the old instructions. |
783 | I->eraseFromParent(); |
784 | Paired->eraseFromParent(); |
785 | |
786 | return NextI; |
787 | } |
788 | |
789 | MachineBasicBlock::iterator |
790 | AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI, |
791 | MachineBasicBlock::iterator StoreI) { |
792 | MachineBasicBlock::iterator NextI = LoadI; |
793 | ++NextI; |
794 | |
795 | int LoadSize = getMemScale(*LoadI); |
796 | int StoreSize = getMemScale(*StoreI); |
797 | unsigned LdRt = getLdStRegOp(*LoadI).getReg(); |
798 | unsigned StRt = getLdStRegOp(*StoreI).getReg(); |
799 | bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt); |
800 | |
801 | assert((IsStoreXReg || |
802 | TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) && |
803 | "Unexpected RegClass"); |
804 | |
805 | MachineInstr *BitExtMI; |
806 | if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) { |
807 | // Remove the load, if the destination register of the load is the same |
808 | // as the register holding the stored value. |
809 | if (StRt == LdRt && LoadSize == 8) { |
810 | StoreI->clearRegisterKills(StRt, TRI); |
811 | DEBUG(dbgs() << "Remove load instruction:\n ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << "Remove load instruction:\n " ; } } while (false); |
812 | DEBUG(LoadI->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { LoadI->print(dbgs()); } } while (false ); |
813 | DEBUG(dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << "\n"; } } while (false ); |
814 | LoadI->eraseFromParent(); |
815 | return NextI; |
816 | } |
817 | // Replace the load with a mov if the load and store are of the same size. |
818 | BitExtMI = |
819 | BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(), |
820 | TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt) |
821 | .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR) |
822 | .addReg(StRt) |
823 | .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); |
824 | } else { |
825 | // FIXME: Currently we disable this transformation in big-endian targets as |
826 | // performance and correctness are verified only in little-endian. |
827 | if (!Subtarget->isLittleEndian()) |
828 | return NextI; |
829 | bool IsUnscaled = TII->isUnscaledLdSt(*LoadI); |
830 | assert(IsUnscaled == TII->isUnscaledLdSt(*StoreI) && |
831 | "Unsupported ld/st match"); |
832 | assert(LoadSize <= StoreSize && "Invalid load size"); |
833 | int UnscaledLdOffset = IsUnscaled |
834 | ? getLdStOffsetOp(*LoadI).getImm() |
835 | : getLdStOffsetOp(*LoadI).getImm() * LoadSize; |
836 | int UnscaledStOffset = IsUnscaled |
837 | ? getLdStOffsetOp(*StoreI).getImm() |
838 | : getLdStOffsetOp(*StoreI).getImm() * StoreSize; |
839 | int Width = LoadSize * 8; |
840 | int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset); |
841 | int Imms = Immr + Width - 1; |
Value stored to 'Imms' during its initialization is never read
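Analyzer note: the initializer at line 841 is dead because both 'Immr' and 'Imms'
are recomputed with identical expressions at lines 851-852, before 'Imms' is first
read at line 856. A minimal sketch of one possible fix, assuming these locals have
no readers other than those visible in this listing, is to compute the
bitfield-extract parameters exactly once and delete the later duplicate
assignments:

    int Width = LoadSize * 8;
    // Start bit (Immr) and end bit (Imms) of the loaded bytes within the
    // stored value; computed once, so neither store is dead.
    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    // ...keep the "Invalid offset" assert unchanged, and drop the
    // reassignments of Immr and Imms that followed it (lines 851-852).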
842 | unsigned DestReg = IsStoreXReg |
843 | ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32, |
844 | &AArch64::GPR64RegClass) |
845 | : LdRt; |
846 | |
847 | assert((UnscaledLdOffset >= UnscaledStOffset && |
848 | (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) && |
849 | "Invalid offset"); |
850 | |
851 | Immr = 8 * (UnscaledLdOffset - UnscaledStOffset); |
852 | Imms = Immr + Width - 1; |
853 | if (UnscaledLdOffset == UnscaledStOffset) { |
854 | uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N |
855 | | ((Immr) << 6) // immr |
856 | | ((Imms) << 0) // imms |
857 | ; |
858 | |
859 | BitExtMI = |
860 | BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(), |
861 | TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri), |
862 | DestReg) |
863 | .addReg(StRt) |
864 | .addImm(AndMaskEncoded); |
865 | } else { |
866 | BitExtMI = |
867 | BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(), |
868 | TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri), |
869 | DestReg) |
870 | .addReg(StRt) |
871 | .addImm(Immr) |
872 | .addImm(Imms); |
873 | } |
874 | } |
875 | |
876 | // Clear kill flags between store and load. |
877 | for (MachineInstr &MI : make_range(StoreI->getIterator(), |
878 | BitExtMI->getIterator())) |
879 | MI.clearRegisterKills(StRt, TRI); |
880 | |
881 | DEBUG(dbgs() << "Promoting load by replacing :\n ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << "Promoting load by replacing :\n " ; } } while (false); |
882 | DEBUG(StoreI->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { StoreI->print(dbgs()); } } while ( false); |
883 | DEBUG(dbgs() << " ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << " "; } } while (false ); |
884 | DEBUG(LoadI->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { LoadI->print(dbgs()); } } while (false ); |
885 | DEBUG(dbgs() << " with instructions:\n ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << " with instructions:\n " ; } } while (false); |
886 | DEBUG(StoreI->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { StoreI->print(dbgs()); } } while ( false); |
887 | DEBUG(dbgs() << " ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << " "; } } while (false ); |
888 | DEBUG((BitExtMI)->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { (BitExtMI)->print(dbgs()); } } while (false); |
889 | DEBUG(dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << "\n"; } } while (false ); |
890 | |
891 | // Erase the old instructions. |
892 | LoadI->eraseFromParent(); |
893 | return NextI; |
894 | } |
895 | |
896 | /// trackRegDefsUses - Remember what registers the specified instruction uses |
897 | /// and modifies. |
898 | static void trackRegDefsUses(const MachineInstr &MI, BitVector &ModifiedRegs, |
899 | BitVector &UsedRegs, |
900 | const TargetRegisterInfo *TRI) { |
901 | for (const MachineOperand &MO : MI.operands()) { |
902 | if (MO.isRegMask()) |
903 | ModifiedRegs.setBitsNotInMask(MO.getRegMask()); |
904 | |
905 | if (!MO.isReg()) |
906 | continue; |
907 | unsigned Reg = MO.getReg(); |
908 | if (!Reg) |
909 | continue; |
910 | if (MO.isDef()) { |
911 | // WZR/XZR are not modified even when used as a destination register. |
912 | if (Reg != AArch64::WZR && Reg != AArch64::XZR) |
913 | for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) |
914 | ModifiedRegs.set(*AI); |
915 | } else { |
916 | assert(MO.isUse() && "Reg operand not a def and not a use?!?"); |
917 | for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) |
918 | UsedRegs.set(*AI); |
919 | } |
920 | } |
921 | } |
922 | |
923 | static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) { |
924 | // Convert the byte-offset used by unscaled into an "element" offset used |
925 | // by the scaled pair load/store instructions. |
926 | if (IsUnscaled) { |
927 | // If the byte-offset isn't a multiple of the stride, there's no point |
928 | // trying to match it. |
929 | if (Offset % OffsetStride) |
930 | return false; |
931 | Offset /= OffsetStride; |
932 | } |
933 | return Offset <= 63 && Offset >= -64; |
934 | } |
935 | |
936 | // Do alignment, specialized to power of 2 and for signed ints, |
937 | // avoiding having to do a C-style cast from uint64_t to int when |
938 | // using alignTo from include/llvm/Support/MathExtras.h. |
939 | // FIXME: Move this function to include/MathExtras.h? |
940 | static int alignTo(int Num, int PowOf2) { |
941 | return (Num + PowOf2 - 1) & ~(PowOf2 - 1); |
942 | } |
943 | |
944 | static bool mayAlias(MachineInstr &MIa, MachineInstr &MIb, |
945 | AliasAnalysis *AA) { |
946 | // One of the instructions must modify memory. |
947 | if (!MIa.mayStore() && !MIb.mayStore()) |
948 | return false; |
949 | |
950 | // Both instructions must be memory operations. |
951 | if (!MIa.mayLoadOrStore() && !MIb.mayLoadOrStore()) |
952 | return false; |
953 | |
954 | return MIa.mayAlias(AA, MIb, /*UseTBAA*/false); |
955 | } |
956 | |
957 | static bool mayAlias(MachineInstr &MIa, |
958 | SmallVectorImpl<MachineInstr *> &MemInsns, |
959 | AliasAnalysis *AA) { |
960 | for (MachineInstr *MIb : MemInsns) |
961 | if (mayAlias(MIa, *MIb, AA)) |
962 | return true; |
963 | |
964 | return false; |
965 | } |
966 | |
967 | bool AArch64LoadStoreOpt::findMatchingStore( |
968 | MachineBasicBlock::iterator I, unsigned Limit, |
969 | MachineBasicBlock::iterator &StoreI) { |
970 | MachineBasicBlock::iterator B = I->getParent()->begin(); |
971 | MachineBasicBlock::iterator MBBI = I; |
972 | MachineInstr &LoadMI = *I; |
973 | unsigned BaseReg = getLdStBaseOp(LoadMI).getReg(); |
974 | |
975 | // If the load is the first instruction in the block, there's obviously |
976 | // not any matching store. |
977 | if (MBBI == B) |
978 | return false; |
979 | |
980 | // Track which registers have been modified and used between the first insn |
981 | // and the second insn. |
982 | ModifiedRegs.reset(); |
983 | UsedRegs.reset(); |
984 | |
985 | unsigned Count = 0; |
986 | do { |
987 | --MBBI; |
988 | MachineInstr &MI = *MBBI; |
989 | |
990 | // Don't count transient instructions towards the search limit since there |
991 | // may be different numbers of them if e.g. debug information is present. |
992 | if (!MI.isTransient()) |
993 | ++Count; |
994 | |
995 | // If the load instruction reads directly from the address to which the |
996 | // store instruction writes and the stored value is not modified, we can |
997 | // promote the load. Since we do not handle stores with pre-/post-index, |
998 | // it's unnecessary to check if BaseReg is modified by the store itself. |
999 | if (MI.mayStore() && isMatchingStore(LoadMI, MI) && |
1000 | BaseReg == getLdStBaseOp(MI).getReg() && |
1001 | isLdOffsetInRangeOfSt(LoadMI, MI, TII) && |
1002 | !ModifiedRegs[getLdStRegOp(MI).getReg()]) { |
1003 | StoreI = MBBI; |
1004 | return true; |
1005 | } |
1006 | |
1007 | if (MI.isCall()) |
1008 | return false; |
1009 | |
1010 | // Update modified / uses register lists. |
1011 | trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI); |
1012 | |
1013 | // Otherwise, if the base register is modified, we have no match, so |
1014 | // return early. |
1015 | if (ModifiedRegs[BaseReg]) |
1016 | return false; |
1017 | |
1018 | // If we encounter a store aliased with the load, return early. |
1019 | if (MI.mayStore() && mayAlias(LoadMI, MI, AA)) |
1020 | return false; |
1021 | } while (MBBI != B && Count < Limit); |
1022 | return false; |
1023 | } |
1024 | |
1025 | // Returns true if FirstMI and MI are candidates for merging or pairing. |
1026 | // Otherwise, returns false. |
1027 | static bool areCandidatesToMergeOrPair(MachineInstr &FirstMI, MachineInstr &MI, |
1028 | LdStPairFlags &Flags, |
1029 | const AArch64InstrInfo *TII) { |
1030 | // If this is volatile or if pairing is suppressed, not a candidate. |
1031 | if (MI.hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI)) |
1032 | return false; |
1033 | |
1034 | // We should have already checked FirstMI for pair suppression and volatility. |
1035 | assert(!FirstMI.hasOrderedMemoryRef() && |
1036 | !TII->isLdStPairSuppressed(FirstMI) && |
1037 | "FirstMI shouldn't get here if either of these checks are true."); |
1038 | |
1039 | unsigned OpcA = FirstMI.getOpcode(); |
1040 | unsigned OpcB = MI.getOpcode(); |
1041 | |
1042 | // Opcodes match: nothing more to check. |
1043 | if (OpcA == OpcB) |
1044 | return true; |
1045 | |
1046 | // Try to match a sign-extended load/store with a zero-extended load/store. |
1047 | bool IsValidLdStrOpc, PairIsValidLdStrOpc; |
1048 | unsigned NonSExtOpc = getMatchingNonSExtOpcode(OpcA, &IsValidLdStrOpc); |
1049 | assert(IsValidLdStrOpc && |
1050 | "Given Opc should be a Load or Store with an immediate"); |
1051 | // OpcA will be the first instruction in the pair. |
1052 | if (NonSExtOpc == getMatchingNonSExtOpcode(OpcB, &PairIsValidLdStrOpc)) { |
1053 | Flags.setSExtIdx(NonSExtOpc == (unsigned)OpcA ? 1 : 0); |
1054 | return true; |
1055 | } |
1056 | |
1057 | // If the second instruction isn't even a mergable/pairable load/store, bail |
1058 | // out. |
1059 | if (!PairIsValidLdStrOpc) |
1060 | return false; |
1061 | |
1062 | // FIXME: We don't support merging narrow stores with mixed scaled/unscaled |
1063 | // offsets. |
1064 | if (isNarrowStore(OpcA) || isNarrowStore(OpcB)) |
1065 | return false; |
1066 | |
1067 | // Try to match an unscaled load/store with a scaled load/store. |
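 | // Illustrative example (assumed operands, not from the original source):
 | //   ldr  x0, [x2, #8]    ; LDRXui, scaled imm 1 => byte offset 8
 | //   ldur x1, [x2, #16]   ; LDURXi, unscaled imm => byte offset 16
 | // Both map to the same pair opcode (LDPXi), so they are candidates.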
1068 | return TII->isUnscaledLdSt(OpcA) != TII->isUnscaledLdSt(OpcB) && |
1069 | getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB); |
1070 | |
1071 | // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair? |
1072 | } |
1073 | |
1074 | /// Scan the instructions looking for a load/store that can be combined with the |
1075 | /// current instruction into a wider equivalent or a load/store pair. |
1076 | MachineBasicBlock::iterator |
1077 | AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I, |
1078 | LdStPairFlags &Flags, unsigned Limit, |
1079 | bool FindNarrowMerge) { |
1080 | MachineBasicBlock::iterator E = I->getParent()->end(); |
1081 | MachineBasicBlock::iterator MBBI = I; |
1082 | MachineInstr &FirstMI = *I; |
1083 | ++MBBI; |
1084 | |
1085 | bool MayLoad = FirstMI.mayLoad(); |
1086 | bool IsUnscaled = TII->isUnscaledLdSt(FirstMI); |
1087 | unsigned Reg = getLdStRegOp(FirstMI).getReg(); |
1088 | unsigned BaseReg = getLdStBaseOp(FirstMI).getReg(); |
1089 | int Offset = getLdStOffsetOp(FirstMI).getImm(); |
1090 | int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1; |
1091 | bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI); |
1092 | |
1093 | // Track which registers have been modified and used between the first insn |
1094 | // (inclusive) and the second insn. |
1095 | ModifiedRegs.reset(); |
1096 | UsedRegs.reset(); |
1097 | |
1098 | // Remember any instructions that read/write memory between FirstMI and MI. |
1099 | SmallVector<MachineInstr *, 4> MemInsns; |
1100 | |
1101 | for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) { |
1102 | MachineInstr &MI = *MBBI; |
1103 | |
1104 | // Don't count transient instructions towards the search limit since there |
1105 | // may be different numbers of them if e.g. debug information is present. |
1106 | if (!MI.isTransient()) |
1107 | ++Count; |
1108 | |
1109 | Flags.setSExtIdx(-1); |
1110 | if (areCandidatesToMergeOrPair(FirstMI, MI, Flags, TII) && |
1111 | getLdStOffsetOp(MI).isImm()) { |
1112 | assert(MI.mayLoadOrStore() && "Expected memory operation.");
1113 | // If we've found another instruction with the same opcode, check to see |
1114 | // if the base and offset are compatible with our starting instruction. |
1115 | // These instructions all have scaled immediate operands, so we just |
1116 | // check for +1/-1. Make sure to check the new instruction offset is |
1117 | // actually an immediate and not a symbolic reference destined for |
1118 | // a relocation. |
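 | // e.g. (illustrative): LDRWui [x0, #4] and LDRWui [x0, #5] have scaled
 | // offsets differing by 1, i.e. adjacent 4-byte slots at bytes 16 and 20.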
1119 | unsigned MIBaseReg = getLdStBaseOp(MI).getReg(); |
1120 | int MIOffset = getLdStOffsetOp(MI).getImm(); |
1121 | bool MIIsUnscaled = TII->isUnscaledLdSt(MI); |
1122 | if (IsUnscaled != MIIsUnscaled) { |
1123 | // We're trying to pair instructions that differ in how they are scaled. |
1124 | // If FirstMI is scaled then scale the offset of MI accordingly. |
1125 | // Otherwise, do the opposite (i.e., make MI's offset unscaled). |
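 | // e.g. (illustrative): FirstMI is a scaled LDRXui #2 (byte 16) and MI is
 | // an unscaled LDURXi #24; MIOffset becomes 24 / 8 = 3, adjacent to 2.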
1126 | int MemSize = getMemScale(MI); |
1127 | if (MIIsUnscaled) { |
1128 | // If the unscaled offset isn't a multiple of the MemSize, we can't |
1129 | // pair the operations together: bail and keep looking. |
1130 | if (MIOffset % MemSize) { |
1131 | trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI); |
1132 | MemInsns.push_back(&MI); |
1133 | continue; |
1134 | } |
1135 | MIOffset /= MemSize; |
1136 | } else { |
1137 | MIOffset *= MemSize; |
1138 | } |
1139 | } |
1140 | |
1141 | if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) || |
1142 | (Offset + OffsetStride == MIOffset))) { |
1143 | int MinOffset = Offset < MIOffset ? Offset : MIOffset; |
1144 | if (FindNarrowMerge) { |
1145 | // If the alignment requirements of the scaled wide load/store |
1146 | // instruction can't express the offset of the scaled narrow input, |
1147 | // bail and keep looking. For promotable zero stores, allow only when |
1148 | // the stored value is the same (i.e., WZR). |
1149 | if ((!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) || |
1150 | (IsPromotableZeroStore && Reg != getLdStRegOp(MI).getReg())) { |
1151 | trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI); |
1152 | MemInsns.push_back(&MI); |
1153 | continue; |
1154 | } |
1155 | } else { |
1156 | // Pairwise instructions have a 7-bit signed offset field. Single |
1157 | // insns have a 12-bit unsigned offset field. If the resultant |
1158 | // immediate offset of merging these instructions is out of range for |
1159 | // a pairwise instruction, bail and keep looking. |
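 | // e.g. (illustrative): for 8-byte pairs (LDPXi) the signed 7-bit scaled
 | // immediate covers byte offsets [-512, 504], versus [0, 32760] for LDRXui.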
1160 | if (!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) { |
1161 | trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI); |
1162 | MemInsns.push_back(&MI); |
1163 | continue; |
1164 | } |
1165 | // If the alignment requirements of the paired (scaled) instruction |
1166 | // can't express the offset of the unscaled input, bail and keep |
1167 | // looking. |
1168 | if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) { |
1169 | trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI); |
1170 | MemInsns.push_back(&MI); |
1171 | continue; |
1172 | } |
1173 | } |
1174 | // If the destination register of the loads is the same register, bail |
1175 | // and keep looking. A load-pair instruction with both destination |
1176 | // registers the same is UNPREDICTABLE and will result in an exception. |
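 | // e.g.: "ldp x3, x3, [x1]" is UNPREDICTABLE, so never form it.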
1177 | if (MayLoad && Reg == getLdStRegOp(MI).getReg()) { |
1178 | trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI); |
1179 | MemInsns.push_back(&MI); |
1180 | continue; |
1181 | } |
1182 | |
1183 | // If the Rt of the second instruction was not modified or used between |
1184 | // the two instructions and none of the instructions between the second |
1185 | // and first alias with the second, we can combine the second into the |
1186 | // first. |
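 | // Illustrative shape of the MergeForward=false case (not from the source):
 | //   ldr x0, [x20]        ; FirstMI, the merge point
 | //   ...                  ; no def/use of x1, no aliasing memory ops
 | //   ldr x1, [x20, #8]    ; MI, combined into "ldp x0, x1, [x20]" at FirstMI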
1187 | if (!ModifiedRegs[getLdStRegOp(MI).getReg()] && |
1188 | !(MI.mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) && |
1189 | !mayAlias(MI, MemInsns, AA)) { |
1190 | Flags.setMergeForward(false); |
1191 | return MBBI; |
1192 | } |
1193 | |
1194 | // Likewise, if the Rt of the first instruction is not modified or used |
1195 | // between the two instructions and none of the instructions between the |
1196 | // first and the second alias with the first, we can combine the first |
1197 | // into the second. |
1198 | if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] && |
1199 | !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) && |
1200 | !mayAlias(FirstMI, MemInsns, AA)) { |
1201 | Flags.setMergeForward(true); |
1202 | return MBBI; |
1203 | } |
1204 | // Unable to combine these instructions due to interference in between. |
1205 | // Keep looking. |
1206 | } |
1207 | } |
1208 | |
1209 | // The instruction wasn't a matching load or store. Stop searching if we
1210 | // encounter a call instruction that might modify memory.
1211 | if (MI.isCall()) |
1212 | return E; |
1213 | |
1214 | // Update modified / uses register lists. |
1215 | trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI); |
1216 | |
1217 | // Otherwise, if the base register is modified, we have no match, so |
1218 | // return early. |
1219 | if (ModifiedRegs[BaseReg]) |
1220 | return E; |
1221 | |
1222 | // Update list of instructions that read/write memory. |
1223 | if (MI.mayLoadOrStore()) |
1224 | MemInsns.push_back(&MI); |
1225 | } |
1226 | return E; |
1227 | } |
1228 | |
1229 | MachineBasicBlock::iterator |
1230 | AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I, |
1231 | MachineBasicBlock::iterator Update, |
1232 | bool IsPreIdx) { |
1233 | assert((Update->getOpcode() == AArch64::ADDXri ||
1234 | Update->getOpcode() == AArch64::SUBXri) &&
1235 | "Unexpected base register update instruction to merge!");
1236 | MachineBasicBlock::iterator NextI = I; |
1237 | // Return the instruction following the merged instruction, which is |
1238 | // the instruction following our unmerged load. Unless that's the add/sub |
1239 | // instruction we're merging, in which case it's the one after that. |
1240 | if (++NextI == Update) |
1241 | ++NextI; |
1242 | |
1243 | int Value = Update->getOperand(2).getImm(); |
1244 | assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
1245 | "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
1246 | if (Update->getOpcode() == AArch64::SUBXri) |
1247 | Value = -Value; |
1248 | |
1249 | unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode()) |
1250 | : getPostIndexedOpcode(I->getOpcode()); |
1251 | MachineInstrBuilder MIB; |
1252 | if (!isPairedLdSt(*I)) { |
1253 | // Non-paired instruction. |
1254 | MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc)) |
1255 | .add(getLdStRegOp(*Update)) |
1256 | .add(getLdStRegOp(*I)) |
1257 | .add(getLdStBaseOp(*I)) |
1258 | .addImm(Value) |
1259 | .setMemRefs(I->memoperands_begin(), I->memoperands_end()); |
1260 | } else { |
1261 | // Paired instruction. |
1262 | int Scale = getMemScale(*I); |
1263 | MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc)) |
1264 | .add(getLdStRegOp(*Update)) |
1265 | .add(getLdStRegOp(*I, 0)) |
1266 | .add(getLdStRegOp(*I, 1)) |
1267 | .add(getLdStBaseOp(*I)) |
1268 | .addImm(Value / Scale) |
1269 | .setMemRefs(I->memoperands_begin(), I->memoperands_end()); |
1270 | } |
1271 | (void)MIB; |
1272 | |
1273 | if (IsPreIdx) |
1274 | DEBUG(dbgs() << "Creating pre-indexed load/store.")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << "Creating pre-indexed load/store." ; } } while (false); |
1275 | else |
1276 | DEBUG(dbgs() << "Creating post-indexed load/store.")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << "Creating post-indexed load/store." ; } } while (false); |
1277 | DEBUG(dbgs() << " Replacing instructions:\n ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << " Replacing instructions:\n " ; } } while (false); |
1278 | DEBUG(I->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { I->print(dbgs()); } } while (false ); |
1279 | DEBUG(dbgs() << " ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << " "; } } while (false ); |
1280 | DEBUG(Update->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { Update->print(dbgs()); } } while ( false); |
1281 | DEBUG(dbgs() << " with instruction:\n ")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << " with instruction:\n " ; } } while (false); |
1282 | DEBUG(((MachineInstr *)MIB)->print(dbgs()))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { ((MachineInstr *)MIB)->print(dbgs( )); } } while (false); |
1283 | DEBUG(dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("aarch64-ldst-opt")) { dbgs() << "\n"; } } while (false ); |
1284 | |
1285 | // Erase the old instructions for the block. |
1286 | I->eraseFromParent(); |
1287 | Update->eraseFromParent(); |
1288 | |
1289 | return NextI; |
1290 | } |
1291 | |
1292 | bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI, |
1293 | MachineInstr &MI, |
1294 | unsigned BaseReg, int Offset) { |
1295 | switch (MI.getOpcode()) { |
1296 | default: |
1297 | break; |
1298 | case AArch64::SUBXri: |
1299 | case AArch64::ADDXri: |
1300 | // Make sure it's a vanilla immediate operand, not a relocation or |
1301 | // anything else we can't handle. |
1302 | if (!MI.getOperand(2).isImm()) |
1303 | break; |
1304 | // Watch out for 1 << 12 shifted value. |
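 | // e.g.: "add x0, x0, #1, lsl #12" really adds 4096 and cannot be folded
 | // into a pre-/post-indexed form, so it is rejected here.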
1305 | if (AArch64_AM::getShiftValue(MI.getOperand(3).getImm())) |
1306 | break; |
1307 | |
1308 | // The update instruction source and destination register must be the |
1309 | // same as the load/store base register. |
1310 | if (MI.getOperand(0).getReg() != BaseReg || |
1311 | MI.getOperand(1).getReg() != BaseReg) |
1312 | break; |
1313 | |
1314 | bool IsPairedInsn = isPairedLdSt(MemMI); |
1315 | int UpdateOffset = MI.getOperand(2).getImm(); |
1316 | if (MI.getOpcode() == AArch64::SUBXri) |
1317 | UpdateOffset = -UpdateOffset; |
1318 | |
1319 | // For non-paired load/store instructions, the immediate must fit in a |
1320 | // signed 9-bit integer. |
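 | // e.g. (illustrative): an update of #255 fits "ldr x0, [x1], #255", but
 | // #256 exceeds the signed 9-bit range [-256, 255] and is rejected.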
1321 | if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256)) |
1322 | break; |
1323 | |
1324 | // For paired load/store instructions, the immediate must be a multiple of |
1325 | // the scaling factor. The scaled offset must also fit into a signed 7-bit |
1326 | // integer. |
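 | // e.g. (illustrative): for "ldp x0, x1, [x2]" (scale 8) an update of #24
 | // folds as a scaled offset of 3; #20 is rejected (not a multiple of 8),
 | // as is #520 (scaled offset 65 > 63).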
1327 | if (IsPairedInsn) { |
1328 | int Scale = getMemScale(MemMI); |
1329 | if (UpdateOffset % Scale != 0) |
1330 | break; |
1331 | |
1332 | int ScaledOffset = UpdateOffset / Scale; |
1333 | if (ScaledOffset > 63 || ScaledOffset < -64) |
1334 | break; |
1335 | } |
1336 | |
1337 | // If we have a non-zero Offset, we check that it matches the amount |
1338 | // we're adding to the register. |
1339 | if (!Offset || Offset == UpdateOffset) |
1340 | return true; |
1341 | break; |
1342 | } |
1343 | return false; |
1344 | } |
1345 | |
1346 | MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward( |
1347 | MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) { |
1348 | MachineBasicBlock::iterator E = I->getParent()->end(); |
1349 | MachineInstr &MemMI = *I; |
1350 | MachineBasicBlock::iterator MBBI = I; |
1351 | |
1352 | unsigned BaseReg = getLdStBaseOp(MemMI).getReg(); |
1353 | int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI); |
1354 | |
1355 | // Scan forward looking for post-index opportunities. Updating instructions |
1356 | // can't be formed if the memory instruction doesn't have the offset we're |
1357 | // looking for. |
1358 | if (MIUnscaledOffset != UnscaledOffset) |
1359 | return E; |
1360 | |
1361 | // If the base register overlaps a destination register, we can't |
1362 | // merge the update. |
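 | // e.g.: "ldr x0, [x0]" followed by "add x0, x0, #8" must not become
 | // "ldr x0, [x0], #8": writeback with Rn == Rt is UNPREDICTABLE.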
1363 | bool IsPairedInsn = isPairedLdSt(MemMI); |
1364 | for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) { |
1365 | unsigned DestReg = getLdStRegOp(MemMI, i).getReg(); |
1366 | if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg)) |
1367 | return E; |
1368 | } |
1369 | |
1370 | // Track which registers have been modified and used between the first insn |
1371 | // (inclusive) and the second insn. |
1372 | ModifiedRegs.reset(); |
1373 | UsedRegs.reset(); |
1374 | ++MBBI; |
1375 | for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) { |
1376 | MachineInstr &MI = *MBBI; |
1377 | |
1378 | // Don't count transient instructions towards the search limit since there |
1379 | // may be different numbers of them if e.g. debug information is present. |
1380 | if (!MI.isTransient()) |
1381 | ++Count; |
1382 | |
1383 | // If we found a match, return it. |
1384 | if (isMatchingUpdateInsn(*I, MI, BaseReg, UnscaledOffset)) |
1385 | return MBBI; |
1386 | |
1387 | // Update the status of what the instruction clobbered and used. |
1388 | trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI); |
1389 | |
1390 | // Otherwise, if the base register is used or modified, we have no match, so |
1391 | // return early. |
1392 | if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg]) |
1393 | return E; |
1394 | } |
1395 | return E; |
1396 | } |
1397 | |
1398 | MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward( |
1399 | MachineBasicBlock::iterator I, unsigned Limit) { |
1400 | MachineBasicBlock::iterator B = I->getParent()->begin(); |
1401 | MachineBasicBlock::iterator E = I->getParent()->end(); |
1402 | MachineInstr &MemMI = *I; |
1403 | MachineBasicBlock::iterator MBBI = I; |
1404 | |
1405 | unsigned BaseReg = getLdStBaseOp(MemMI).getReg(); |
1406 | int Offset = getLdStOffsetOp(MemMI).getImm(); |
1407 | |
1408 | // If the load/store is the first instruction in the block, there's obviously |
1409 | // not any matching update. Ditto if the memory offset isn't zero. |
1410 | if (MBBI == B || Offset != 0) |
1411 | return E; |
1412 | // If the base register overlaps a destination register, we can't |
1413 | // merge the update. |
1414 | bool IsPairedInsn = isPairedLdSt(MemMI); |
1415 | for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) { |
1416 | unsigned DestReg = getLdStRegOp(MemMI, i).getReg(); |
1417 | if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg)) |
1418 | return E; |
1419 | } |
1420 | |
1421 | // Track which registers have been modified and used between the first insn |
1422 | // (inclusive) and the second insn. |
1423 | ModifiedRegs.reset(); |
1424 | UsedRegs.reset(); |
1425 | unsigned Count = 0; |
1426 | do { |
1427 | --MBBI; |
1428 | MachineInstr &MI = *MBBI; |
1429 | |
1430 | // Don't count transient instructions towards the search limit since there |
1431 | // may be different numbers of them if e.g. debug information is present. |
1432 | if (!MI.isTransient()) |
1433 | ++Count; |
1434 | |
1435 | // If we found a match, return it. |
1436 | if (isMatchingUpdateInsn(*I, MI, BaseReg, Offset)) |
1437 | return MBBI; |
1438 | |
1439 | // Update the status of what the instruction clobbered and used. |
1440 | trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI); |
1441 | |
1442 | // Otherwise, if the base register is used or modified, we have no match, so |
1443 | // return early. |
1444 | if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg]) |
1445 | return E; |
1446 | } while (MBBI != B && Count < Limit); |
1447 | return E; |
1448 | } |
1449 | |
1450 | bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore( |
1451 | MachineBasicBlock::iterator &MBBI) { |
1452 | MachineInstr &MI = *MBBI; |
1453 | // If this is a volatile load, don't mess with it. |
1454 | if (MI.hasOrderedMemoryRef()) |
1455 | return false; |
1456 | |
1457 | // Make sure this is a reg+imm. |
1458 | // FIXME: It is possible to extend it to handle reg+reg cases. |
1459 | if (!getLdStOffsetOp(MI).isImm()) |
1460 | return false; |
1461 | |
1462 | // Look backward up to LdStLimit instructions. |
1463 | MachineBasicBlock::iterator StoreI; |
1464 | if (findMatchingStore(MBBI, LdStLimit, StoreI)) { |
1465 | ++NumLoadsFromStoresPromoted; |
1466 | // Promote the load. Keeping the iterator straight is a |
1467 | // pain, so we let the merge routine tell us what the next instruction |
1468 | // is after it's done mucking about. |
1469 | MBBI = promoteLoadFromStore(MBBI, StoreI); |
1470 | return true; |
1471 | } |
1472 | return false; |
1473 | } |
1474 | |
1475 | // Merge adjacent zero stores into a wider store. |
1476 | bool AArch64LoadStoreOpt::tryToMergeZeroStInst( |
1477 | MachineBasicBlock::iterator &MBBI) { |
1478 | assert(isPromotableZeroStoreInst(*MBBI) && "Expected narrow store.");
1479 | MachineInstr &MI = *MBBI; |
1480 | MachineBasicBlock::iterator E = MI.getParent()->end(); |
1481 | |
1482 | if (!TII->isCandidateToMergeOrPair(MI)) |
1483 | return false; |
1484 | |
1485 | // Look ahead up to LdStLimit instructions for a mergable instruction. |
1486 | LdStPairFlags Flags; |
1487 | MachineBasicBlock::iterator MergeMI = |
1488 | findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ true); |
1489 | if (MergeMI != E) { |
1490 | ++NumZeroStoresPromoted; |
1491 | |
1492 | // Keeping the iterator straight is a pain, so we let the merge routine tell |
1493 | // us what the next instruction is after it's done mucking about. |
1494 | MBBI = mergeNarrowZeroStores(MBBI, MergeMI, Flags); |
1495 | return true; |
1496 | } |
1497 | return false; |
1498 | } |
1499 | |
1500 | // Find loads and stores that can be merged into a single load or store pair |
1501 | // instruction. |
1502 | bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) { |
1503 | MachineInstr &MI = *MBBI; |
1504 | MachineBasicBlock::iterator E = MI.getParent()->end(); |
1505 | |
1506 | if (!TII->isCandidateToMergeOrPair(MI)) |
1507 | return false; |
1508 | |
1509 | // Early exit if the offset is not possible to match. (6 bits of positive |
1510 | // range, plus allow an extra one in case we find a later insn that matches |
1511 | // with Offset-1) |
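 | // e.g. (illustrative): a scaled 8-byte access at offset 64 is outside the
 | // pair's positive range [0, 63], but it can still become the high half of
 | // a pair anchored at offset 63, hence the OffsetStride allowance below.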
1512 | bool IsUnscaled = TII->isUnscaledLdSt(MI); |
1513 | int Offset = getLdStOffsetOp(MI).getImm(); |
1514 | int OffsetStride = IsUnscaled ? getMemScale(MI) : 1; |
1515 | // Allow one more for offset. |
1516 | if (Offset > 0) |
1517 | Offset -= OffsetStride; |
1518 | if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride)) |
1519 | return false; |
1520 | |
1521 | // Look ahead up to LdStLimit instructions for a pairable instruction. |
1522 | LdStPairFlags Flags; |
1523 | MachineBasicBlock::iterator Paired = |
1524 | findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false); |
1525 | if (Paired != E) { |
1526 | ++NumPairCreated; |
1527 | if (TII->isUnscaledLdSt(MI)) |
1528 | ++NumUnscaledPairCreated; |
1529 | // Keeping the iterator straight is a pain, so we let the merge routine tell |
1530 | // us what the next instruction is after it's done mucking about. |
1531 | MBBI = mergePairedInsns(MBBI, Paired, Flags); |
1532 | return true; |
1533 | } |
1534 | return false; |
1535 | } |
1536 | |
1537 | bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB, |
1538 | bool EnableNarrowZeroStOpt) { |
1539 | bool Modified = false; |
1540 | // Four transformations to do here:
1541 | // 1) Find loads that directly read from stores and promote them by
1542 | // replacing them with mov instructions. If the store is wider than the load,
1543 | // the load will be replaced with a bitfield extract. |
1544 | // e.g., |
1545 | // str w1, [x0, #4] |
1546 | // ldrh w2, [x0, #6] |
1547 | // ; becomes |
1548 | // str w1, [x0, #4] |
1549 | // lsr w2, w1, #16 |
1550 | for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end(); |
1551 | MBBI != E;) { |
1552 | MachineInstr &MI = *MBBI; |
1553 | switch (MI.getOpcode()) { |
1554 | default: |
1555 | // Just move on to the next instruction. |
1556 | ++MBBI; |
1557 | break; |
1558 | // Scaled instructions. |
1559 | case AArch64::LDRBBui: |
1560 | case AArch64::LDRHHui: |
1561 | case AArch64::LDRWui: |
1562 | case AArch64::LDRXui: |
1563 | // Unscaled instructions. |
1564 | case AArch64::LDURBBi: |
1565 | case AArch64::LDURHHi: |
1566 | case AArch64::LDURWi: |
1567 | case AArch64::LDURXi: |
1568 | if (tryToPromoteLoadFromStore(MBBI)) { |
1569 | Modified = true; |
1570 | break; |
1571 | } |
1572 | ++MBBI; |
1573 | break; |
1574 | } |
1575 | } |
1576 | // 2) Merge adjacent zero stores into a wider store. |
1577 | // e.g., |
1578 | // strh wzr, [x0] |
1579 | // strh wzr, [x0, #2] |
1580 | // ; becomes |
1581 | // str wzr, [x0] |
1582 | // e.g., |
1583 | // str wzr, [x0] |
1584 | // str wzr, [x0, #4] |
1585 | // ; becomes |
1586 | // str xzr, [x0] |
1587 | for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end(); |
1588 | EnableNarrowZeroStOpt && MBBI != E;) { |
1589 | if (isPromotableZeroStoreInst(*MBBI)) { |
1590 | if (tryToMergeZeroStInst(MBBI)) { |
1591 | Modified = true; |
1592 | } else |
1593 | ++MBBI; |
1594 | } else |
1595 | ++MBBI; |
1596 | } |
1597 | |
1598 | // 3) Find loads and stores that can be merged into a single load or store |
1599 | // pair instruction. |
1600 | // e.g., |
1601 | // ldr x0, [x2] |
1602 | // ldr x1, [x2, #8] |
1603 | // ; becomes |
1604 | // ldp x0, x1, [x2] |
1605 | for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end(); |
1606 | MBBI != E;) { |
1607 | if (TII->isPairableLdStInst(*MBBI) && tryToPairLdStInst(MBBI)) |
1608 | Modified = true; |
1609 | else |
1610 | ++MBBI; |
1611 | } |
1612 | // 4) Find base register updates that can be merged into the load or store |
1613 | // as a base-reg writeback. |
1614 | // e.g., |
1615 | // ldr x0, [x2] |
1616 | // add x2, x2, #4 |
1617 | // ; becomes |
1618 | // ldr x0, [x2], #4 |
1619 | for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end(); |
1620 | MBBI != E;) { |
1621 | MachineInstr &MI = *MBBI; |
1622 | // Do update merging. It's simpler to keep this separate from the above
1623 | // switches, though not strictly necessary.
1624 | unsigned Opc = MI.getOpcode(); |
1625 | switch (Opc) { |
1626 | default: |
1627 | // Just move on to the next instruction. |
1628 | ++MBBI; |
1629 | break; |
1630 | // Scaled instructions. |
1631 | case AArch64::STRSui: |
1632 | case AArch64::STRDui: |
1633 | case AArch64::STRQui: |
1634 | case AArch64::STRXui: |
1635 | case AArch64::STRWui: |
1636 | case AArch64::STRHHui: |
1637 | case AArch64::STRBBui: |
1638 | case AArch64::LDRSui: |
1639 | case AArch64::LDRDui: |
1640 | case AArch64::LDRQui: |
1641 | case AArch64::LDRXui: |
1642 | case AArch64::LDRWui: |
1643 | case AArch64::LDRHHui: |
1644 | case AArch64::LDRBBui: |
1645 | // Unscaled instructions. |
1646 | case AArch64::STURSi: |
1647 | case AArch64::STURDi: |
1648 | case AArch64::STURQi: |
1649 | case AArch64::STURWi: |
1650 | case AArch64::STURXi: |
1651 | case AArch64::LDURSi: |
1652 | case AArch64::LDURDi: |
1653 | case AArch64::LDURQi: |
1654 | case AArch64::LDURWi: |
1655 | case AArch64::LDURXi: |
1656 | // Paired instructions. |
1657 | case AArch64::LDPSi: |
1658 | case AArch64::LDPSWi: |
1659 | case AArch64::LDPDi: |
1660 | case AArch64::LDPQi: |
1661 | case AArch64::LDPWi: |
1662 | case AArch64::LDPXi: |
1663 | case AArch64::STPSi: |
1664 | case AArch64::STPDi: |
1665 | case AArch64::STPQi: |
1666 | case AArch64::STPWi: |
1667 | case AArch64::STPXi: { |
1668 | // Make sure this is a reg+imm (as opposed to an address reloc). |
1669 | if (!getLdStOffsetOp(MI).isImm()) { |
1670 | ++MBBI; |
1671 | break; |
1672 | } |
1673 | // Look forward to try to form a post-index instruction. For example, |
1674 | // ldr x0, [x20] |
1675 | // add x20, x20, #32 |
1676 | // merged into: |
1677 | // ldr x0, [x20], #32 |
1678 | MachineBasicBlock::iterator Update = |
1679 | findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit); |
1680 | if (Update != E) { |
1681 | // Merge the update into the ld/st. |
1682 | MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false); |
1683 | Modified = true; |
1684 | ++NumPostFolded; |
1685 | break; |
1686 | } |
1687 | // Don't know how to handle pre/post-index versions, so move to the next |
1688 | // instruction. |
1689 | if (TII->isUnscaledLdSt(Opc)) { |
1690 | ++MBBI; |
1691 | break; |
1692 | } |
1693 | |
1694 | // Look back to try to find a pre-index instruction. For example, |
1695 | // add x0, x0, #8 |
1696 | // ldr x1, [x0] |
1697 | // merged into: |
1698 | // ldr x1, [x0, #8]! |
1699 | Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit); |
1700 | if (Update != E) { |
1701 | // Merge the update into the ld/st. |
1702 | MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true); |
1703 | Modified = true; |
1704 | ++NumPreFolded; |
1705 | break; |
1706 | } |
1707 | // The immediate in the load/store is scaled by the size of the memory |
1708 | // operation. The immediate in the add we're looking for, |
1709 | // however, is not, so adjust here. |
1710 | int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI); |
1711 | |
1712 | // Look forward for an update that can fold as a pre-index instruction
1712 | // (note the writeback form below and IsPreIdx=true). For example,
1713 | // ldr x1, [x0, #64] |
1714 | // add x0, x0, #64 |
1715 | // merged into: |
1716 | // ldr x1, [x0, #64]! |
1717 | Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit); |
1718 | if (Update != E) { |
1719 | // Merge the update into the ld/st. |
1720 | MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true); |
1721 | Modified = true; |
1722 | ++NumPreFolded; |
1723 | break; |
1724 | } |
1725 | |
1726 | // Nothing found. Just move to the next instruction. |
1727 | ++MBBI; |
1728 | break; |
1729 | } |
1730 | } |
1731 | } |
1732 | |
1733 | return Modified; |
1734 | } |
1735 | |
1736 | bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) { |
1737 | if (skipFunction(*Fn.getFunction())) |
1738 | return false; |
1739 | |
1740 | Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget()); |
1741 | TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo()); |
1742 | TRI = Subtarget->getRegisterInfo(); |
1743 | AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); |
1744 | |
1745 | // Resize the modified and used register bitfield trackers. We do this once |
1746 | // per function and then clear the bitfield each time we optimize a load or |
1747 | // store. |
1748 | ModifiedRegs.resize(TRI->getNumRegs()); |
1749 | UsedRegs.resize(TRI->getNumRegs()); |
1750 | |
1751 | bool Modified = false; |
1752 | bool enableNarrowZeroStOpt = !Subtarget->requiresStrictAlign(); |
1753 | for (auto &MBB : Fn) |
1754 | Modified |= optimizeBlock(MBB, enableNarrowZeroStOpt); |
1755 | |
1756 | return Modified; |
1757 | } |
1758 | |
1759 | // FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep loads and |
1760 | // stores near one another? Note: The pre-RA instruction scheduler already has |
1761 | // hooks to try and schedule pairable loads/stores together to improve pairing |
1762 | // opportunities. Thus, a pre-RA pairing pass may not be worth the effort.
1763 | |
1764 | // FIXME: When pairing store instructions it's very possible for this pass to |
1765 | // hoist a store with a KILL marker above another use (without a KILL marker). |
1766 | // The resulting IR is invalid, but nothing uses the KILL markers after this |
1767 | // pass, so it's never caused a problem in practice. |
1768 | |
1769 | /// createAArch64LoadStoreOptimizationPass - returns an instance of the |
1770 | /// load / store optimization pass. |
1771 | FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() { |
1772 | return new AArch64LoadStoreOpt(); |
1773 | } |