File: lib/Target/AArch64/AArch64InstrInfo.cpp
Warning: line 3134, column 5: Value stored to 'SrcReg' is never read
1 | //===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains the AArch64 implementation of the TargetInstrInfo class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "AArch64InstrInfo.h" |
14 | #include "AArch64MachineFunctionInfo.h" |
15 | #include "AArch64Subtarget.h" |
16 | #include "MCTargetDesc/AArch64AddressingModes.h" |
17 | #include "Utils/AArch64BaseInfo.h" |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/ADT/STLExtras.h" |
20 | #include "llvm/ADT/SmallVector.h" |
21 | #include "llvm/CodeGen/MachineBasicBlock.h" |
22 | #include "llvm/CodeGen/MachineFrameInfo.h" |
23 | #include "llvm/CodeGen/MachineFunction.h" |
24 | #include "llvm/CodeGen/MachineInstr.h" |
25 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
26 | #include "llvm/CodeGen/MachineMemOperand.h" |
27 | #include "llvm/CodeGen/MachineOperand.h" |
28 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
29 | #include "llvm/CodeGen/MachineModuleInfo.h" |
30 | #include "llvm/CodeGen/StackMaps.h" |
31 | #include "llvm/CodeGen/TargetRegisterInfo.h" |
32 | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
33 | #include "llvm/IR/DebugLoc.h" |
34 | #include "llvm/IR/GlobalValue.h" |
35 | #include "llvm/MC/MCAsmInfo.h" |
36 | #include "llvm/MC/MCInst.h" |
37 | #include "llvm/MC/MCInstrDesc.h" |
38 | #include "llvm/Support/Casting.h" |
39 | #include "llvm/Support/CodeGen.h" |
40 | #include "llvm/Support/CommandLine.h" |
41 | #include "llvm/Support/Compiler.h" |
42 | #include "llvm/Support/ErrorHandling.h" |
43 | #include "llvm/Support/MathExtras.h" |
44 | #include "llvm/Target/TargetMachine.h" |
45 | #include "llvm/Target/TargetOptions.h" |
46 | #include <cassert> |
47 | #include <cstdint> |
48 | #include <iterator> |
49 | #include <utility> |
50 | |
51 | using namespace llvm; |
52 | |
53 | #define GET_INSTRINFO_CTOR_DTOR |
54 | #include "AArch64GenInstrInfo.inc" |
55 | |
56 | static cl::opt<unsigned> TBZDisplacementBits( |
57 | "aarch64-tbz-offset-bits", cl::Hidden, cl::init(14), |
58 | cl::desc("Restrict range of TB[N]Z instructions (DEBUG)")); |
59 | |
60 | static cl::opt<unsigned> CBZDisplacementBits( |
61 | "aarch64-cbz-offset-bits", cl::Hidden, cl::init(19), |
62 | cl::desc("Restrict range of CB[N]Z instructions (DEBUG)")); |
63 | |
64 | static cl::opt<unsigned> |
65 | BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19), |
66 | cl::desc("Restrict range of Bcc instructions (DEBUG)")); |
67 | |
68 | AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI) |
69 | : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP, |
70 | AArch64::CATCHRET), |
71 | RI(STI.getTargetTriple()), Subtarget(STI) {} |
72 | |
73 | /// getInstSizeInBytes - Return the maximum number of bytes of code that |
74 | /// the specified instruction may occupy. |
75 | unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { |
76 | const MachineBasicBlock &MBB = *MI.getParent(); |
77 | const MachineFunction *MF = MBB.getParent(); |
78 | const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo(); |
79 | |
80 | { |
81 | auto Op = MI.getOpcode(); |
82 | if (Op == AArch64::INLINEASM || Op == AArch64::INLINEASM_BR) |
83 | return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI); |
84 | } |
85 | |
86 | // Meta-instructions emit no code. |
87 | if (MI.isMetaInstruction()) |
88 | return 0; |
89 | |
90 | // FIXME: We currently only handle pseudoinstructions that don't get expanded |
91 | // before the assembly printer. |
92 | unsigned NumBytes = 0; |
93 | const MCInstrDesc &Desc = MI.getDesc(); |
94 | switch (Desc.getOpcode()) { |
95 | default: |
96 | // Anything not explicitly designated otherwise is a normal 4-byte insn. |
97 | NumBytes = 4; |
98 | break; |
99 | case TargetOpcode::STACKMAP: |
100 | // The upper bound for a stackmap intrinsic is the full length of its shadow |
101 | NumBytes = StackMapOpers(&MI).getNumPatchBytes(); |
102 | assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!"); |
103 | break; |
104 | case TargetOpcode::PATCHPOINT: |
105 | // The size of the patchpoint intrinsic is the number of bytes requested |
106 | NumBytes = PatchPointOpers(&MI).getNumPatchBytes(); |
107 | assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!"); |
108 | break; |
109 | case AArch64::TLSDESC_CALLSEQ: |
110 | // This gets lowered to an instruction sequence which takes 16 bytes |
111 | NumBytes = 16; |
112 | break; |
113 | case AArch64::JumpTableDest32: |
114 | case AArch64::JumpTableDest16: |
115 | case AArch64::JumpTableDest8: |
116 | NumBytes = 12; |
117 | break; |
118 | case AArch64::SPACE: |
119 | NumBytes = MI.getOperand(1).getImm(); |
120 | break; |
121 | } |
122 | |
123 | return NumBytes; |
124 | } |
125 | |
126 | static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, |
127 | SmallVectorImpl<MachineOperand> &Cond) { |
128 | // Block ends with fall-through condbranch. |
129 | switch (LastInst->getOpcode()) { |
130 | default: |
131 | llvm_unreachable("Unknown branch instruction?"); |
132 | case AArch64::Bcc: |
133 | Target = LastInst->getOperand(1).getMBB(); |
134 | Cond.push_back(LastInst->getOperand(0)); |
135 | break; |
136 | case AArch64::CBZW: |
137 | case AArch64::CBZX: |
138 | case AArch64::CBNZW: |
139 | case AArch64::CBNZX: |
140 | Target = LastInst->getOperand(1).getMBB(); |
141 | Cond.push_back(MachineOperand::CreateImm(-1)); |
142 | Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode())); |
143 | Cond.push_back(LastInst->getOperand(0)); |
144 | break; |
145 | case AArch64::TBZW: |
146 | case AArch64::TBZX: |
147 | case AArch64::TBNZW: |
148 | case AArch64::TBNZX: |
149 | Target = LastInst->getOperand(2).getMBB(); |
150 | Cond.push_back(MachineOperand::CreateImm(-1)); |
151 | Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode())); |
152 | Cond.push_back(LastInst->getOperand(0)); |
153 | Cond.push_back(LastInst->getOperand(1)); |
154 | } |
155 | } |
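| // Illustration of the Cond encoding built above (example MIs are |
| // hypothetical): "tbz w0, #3, %bb.1" is parsed into |
| //   Cond = { -1, TBZW, w0, 3 }   (sentinel, opcode, register, bit index) |
| // "cbnz x2, %bb.1" yields Cond = { -1, CBNZX, x2 }, and "b.eq %bb.1" |
| // yields Cond = { EQ }. The -1 sentinel distinguishes the folded |
| // compare-and-branch forms from a plain Bcc condition code. |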
156 | |
157 | static unsigned getBranchDisplacementBits(unsigned Opc) { |
158 | switch (Opc) { |
159 | default: |
160 | llvm_unreachable("unexpected opcode!"); |
161 | case AArch64::B: |
162 | return 64; |
163 | case AArch64::TBNZW: |
164 | case AArch64::TBZW: |
165 | case AArch64::TBNZX: |
166 | case AArch64::TBZX: |
167 | return TBZDisplacementBits; |
168 | case AArch64::CBNZW: |
169 | case AArch64::CBZW: |
170 | case AArch64::CBNZX: |
171 | case AArch64::CBZX: |
172 | return CBZDisplacementBits; |
173 | case AArch64::Bcc: |
174 | return BCCDisplacementBits; |
175 | } |
176 | } |
177 | |
178 | bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp, |
179 | int64_t BrOffset) const { |
180 | unsigned Bits = getBranchDisplacementBits(BranchOp); |
181 | assert(Bits >= 3 && "max branch displacement must be enough to jump " |
182 |                     "over conditional branch expansion"); |
183 | return isIntN(Bits, BrOffset / 4); |
184 | } |
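| // For example, with the default option values above: TB(N)Z has 14 |
| // displacement bits, i.e. a signed range of +/-2^13 instructions, about |
| // +/-32KiB since offsets are scaled by 4; CB(N)Z and Bcc have 19 bits, |
| // about +/-1MiB. B reports 64 bits, so it is always considered in range. |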
185 | |
186 | MachineBasicBlock * |
187 | AArch64InstrInfo::getBranchDestBlock(const MachineInstr &MI) const { |
188 | switch (MI.getOpcode()) { |
189 | default: |
190 | llvm_unreachable("unexpected opcode!"); |
191 | case AArch64::B: |
192 | return MI.getOperand(0).getMBB(); |
193 | case AArch64::TBZW: |
194 | case AArch64::TBNZW: |
195 | case AArch64::TBZX: |
196 | case AArch64::TBNZX: |
197 | return MI.getOperand(2).getMBB(); |
198 | case AArch64::CBZW: |
199 | case AArch64::CBNZW: |
200 | case AArch64::CBZX: |
201 | case AArch64::CBNZX: |
202 | case AArch64::Bcc: |
203 | return MI.getOperand(1).getMBB(); |
204 | } |
205 | } |
206 | |
207 | // Branch analysis. |
208 | bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB, |
209 | MachineBasicBlock *&TBB, |
210 | MachineBasicBlock *&FBB, |
211 | SmallVectorImpl<MachineOperand> &Cond, |
212 | bool AllowModify) const { |
213 | // If the block has no terminators, it just falls into the block after it. |
214 | MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); |
215 | if (I == MBB.end()) |
216 | return false; |
217 | |
218 | if (!isUnpredicatedTerminator(*I)) |
219 | return false; |
220 | |
221 | // Get the last instruction in the block. |
222 | MachineInstr *LastInst = &*I; |
223 | |
224 | // If there is only one terminator instruction, process it. |
225 | unsigned LastOpc = LastInst->getOpcode(); |
226 | if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) { |
227 | if (isUncondBranchOpcode(LastOpc)) { |
228 | TBB = LastInst->getOperand(0).getMBB(); |
229 | return false; |
230 | } |
231 | if (isCondBranchOpcode(LastOpc)) { |
232 | // Block ends with fall-through condbranch. |
233 | parseCondBranch(LastInst, TBB, Cond); |
234 | return false; |
235 | } |
236 | return true; // Can't handle indirect branch. |
237 | } |
238 | |
239 | // Get the instruction before it if it is a terminator. |
240 | MachineInstr *SecondLastInst = &*I; |
241 | unsigned SecondLastOpc = SecondLastInst->getOpcode(); |
242 | |
243 | // If AllowModify is true and the block ends with two or more unconditional |
244 | // branches, delete all but the first unconditional branch. |
245 | if (AllowModify && isUncondBranchOpcode(LastOpc)) { |
246 | while (isUncondBranchOpcode(SecondLastOpc)) { |
247 | LastInst->eraseFromParent(); |
248 | LastInst = SecondLastInst; |
249 | LastOpc = LastInst->getOpcode(); |
250 | if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) { |
251 | // Return now; the only terminator is an unconditional branch. |
252 | TBB = LastInst->getOperand(0).getMBB(); |
253 | return false; |
254 | } else { |
255 | SecondLastInst = &*I; |
256 | SecondLastOpc = SecondLastInst->getOpcode(); |
257 | } |
258 | } |
259 | } |
260 | |
261 | // If there are three terminators, we don't know what sort of block this is. |
262 | if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I)) |
263 | return true; |
264 | |
265 | // If the block ends with a B and a Bcc, handle it. |
266 | if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { |
267 | parseCondBranch(SecondLastInst, TBB, Cond); |
268 | FBB = LastInst->getOperand(0).getMBB(); |
269 | return false; |
270 | } |
271 | |
272 | // If the block ends with two unconditional branches, handle it. The second |
273 | // one is not executed, so remove it. |
274 | if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { |
275 | TBB = SecondLastInst->getOperand(0).getMBB(); |
276 | I = LastInst; |
277 | if (AllowModify) |
278 | I->eraseFromParent(); |
279 | return false; |
280 | } |
281 | |
282 | // ...likewise if it ends with an indirect branch followed by an unconditional |
283 | // branch. |
284 | if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { |
285 | I = LastInst; |
286 | if (AllowModify) |
287 | I->eraseFromParent(); |
288 | return true; |
289 | } |
290 | |
291 | // Otherwise, can't handle this. |
292 | return true; |
293 | } |
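| // A sketch of the contract on hypothetical blocks: a block ending in |
| //   "b.eq %bb.1; b %bb.2" |
| // returns false with TBB = %bb.1, FBB = %bb.2, Cond = { EQ }; a block |
| // ending in just "b %bb.2" returns false with TBB = %bb.2 and Cond empty. |
| // Returning true means the terminators could not be analyzed (e.g. an |
| // indirect branch). |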
294 | |
295 | bool AArch64InstrInfo::reverseBranchCondition( |
296 | SmallVectorImpl<MachineOperand> &Cond) const { |
297 | if (Cond[0].getImm() != -1) { |
298 | // Regular Bcc |
299 | AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm(); |
300 | Cond[0].setImm(AArch64CC::getInvertedCondCode(CC)); |
301 | } else { |
302 | // Folded compare-and-branch |
303 | switch (Cond[1].getImm()) { |
304 | default: |
305 | llvm_unreachable("Unknown conditional branch!"); |
306 | case AArch64::CBZW: |
307 | Cond[1].setImm(AArch64::CBNZW); |
308 | break; |
309 | case AArch64::CBNZW: |
310 | Cond[1].setImm(AArch64::CBZW); |
311 | break; |
312 | case AArch64::CBZX: |
313 | Cond[1].setImm(AArch64::CBNZX); |
314 | break; |
315 | case AArch64::CBNZX: |
316 | Cond[1].setImm(AArch64::CBZX); |
317 | break; |
318 | case AArch64::TBZW: |
319 | Cond[1].setImm(AArch64::TBNZW); |
320 | break; |
321 | case AArch64::TBNZW: |
322 | Cond[1].setImm(AArch64::TBZW); |
323 | break; |
324 | case AArch64::TBZX: |
325 | Cond[1].setImm(AArch64::TBNZX); |
326 | break; |
327 | case AArch64::TBNZX: |
328 | Cond[1].setImm(AArch64::TBZX); |
329 | break; |
330 | } |
331 | } |
332 | |
333 | return false; |
334 | } |
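| // For instance, Cond = { EQ } becomes { NE } via getInvertedCondCode, and |
| // a folded compare-and-branch Cond = { -1, CBZW, w0 } becomes |
| // { -1, CBNZW, w0 }; only the condition/opcode slot changes, the operands |
| // stay intact. |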
335 | |
336 | unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB, |
337 | int *BytesRemoved) const { |
338 | MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); |
339 | if (I == MBB.end()) |
340 | return 0; |
341 | |
342 | if (!isUncondBranchOpcode(I->getOpcode()) && |
343 | !isCondBranchOpcode(I->getOpcode())) |
344 | return 0; |
345 | |
346 | // Remove the branch. |
347 | I->eraseFromParent(); |
348 | |
349 | I = MBB.end(); |
350 | |
351 | if (I == MBB.begin()) { |
352 | if (BytesRemoved) |
353 | *BytesRemoved = 4; |
354 | return 1; |
355 | } |
356 | --I; |
357 | if (!isCondBranchOpcode(I->getOpcode())) { |
358 | if (BytesRemoved) |
359 | *BytesRemoved = 4; |
360 | return 1; |
361 | } |
362 | |
363 | // Remove the branch. |
364 | I->eraseFromParent(); |
365 | if (BytesRemoved) |
366 | *BytesRemoved = 8; |
367 | |
368 | return 2; |
369 | } |
370 | |
371 | void AArch64InstrInfo::instantiateCondBranch( |
372 | MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB, |
373 | ArrayRef<MachineOperand> Cond) const { |
374 | if (Cond[0].getImm() != -1) { |
375 | // Regular Bcc |
376 | BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB); |
377 | } else { |
378 | // Folded compare-and-branch |
379 | // Note that we use addOperand instead of addReg to keep the flags. |
380 | const MachineInstrBuilder MIB = |
381 | BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]); |
382 | if (Cond.size() > 3) |
383 | MIB.addImm(Cond[3].getImm()); |
384 | MIB.addMBB(TBB); |
385 | } |
386 | } |
387 | |
388 | unsigned AArch64InstrInfo::insertBranch( |
389 | MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, |
390 | ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const { |
391 | // Shouldn't be a fall through. |
392 | assert(TBB && "insertBranch must not be told to insert a fallthrough"); |
393 | |
394 | if (!FBB) { |
395 | if (Cond.empty()) // Unconditional branch? |
396 | BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB); |
397 | else |
398 | instantiateCondBranch(MBB, DL, TBB, Cond); |
399 | |
400 | if (BytesAdded) |
401 | *BytesAdded = 4; |
402 | |
403 | return 1; |
404 | } |
405 | |
406 | // Two-way conditional branch. |
407 | instantiateCondBranch(MBB, DL, TBB, Cond); |
408 | BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB); |
409 | |
410 | if (BytesAdded) |
411 | *BytesAdded = 8; |
412 | |
413 | return 2; |
414 | } |
415 | |
416 | // Find the original register that VReg is copied from. |
417 | static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) { |
418 | while (Register::isVirtualRegister(VReg)) { |
419 | const MachineInstr *DefMI = MRI.getVRegDef(VReg); |
420 | if (!DefMI->isFullCopy()) |
421 | return VReg; |
422 | VReg = DefMI->getOperand(1).getReg(); |
423 | } |
424 | return VReg; |
425 | } |
426 | |
427 | // Determine if VReg is defined by an instruction that can be folded into a |
428 | // csel instruction. If so, return the folded opcode, and the replacement |
429 | // register. |
430 | static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg, |
431 | unsigned *NewVReg = nullptr) { |
432 | VReg = removeCopies(MRI, VReg); |
433 | if (!Register::isVirtualRegister(VReg)) |
434 | return 0; |
435 | |
436 | bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg)); |
437 | const MachineInstr *DefMI = MRI.getVRegDef(VReg); |
438 | unsigned Opc = 0; |
439 | unsigned SrcOpNum = 0; |
440 | switch (DefMI->getOpcode()) { |
441 | case AArch64::ADDSXri: |
442 | case AArch64::ADDSWri: |
443 | // if NZCV is used, do not fold. |
444 | if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1) |
445 | return 0; |
446 | // fall-through to ADDXri and ADDWri. |
447 | LLVM_FALLTHROUGH; |
448 | case AArch64::ADDXri: |
449 | case AArch64::ADDWri: |
450 | // add x, 1 -> csinc. |
451 | if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 || |
452 | DefMI->getOperand(3).getImm() != 0) |
453 | return 0; |
454 | SrcOpNum = 1; |
455 | Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr; |
456 | break; |
457 | |
458 | case AArch64::ORNXrr: |
459 | case AArch64::ORNWrr: { |
460 | // not x -> csinv, represented as orn dst, xzr, src. |
461 | unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg()); |
462 | if (ZReg != AArch64::XZR && ZReg != AArch64::WZR) |
463 | return 0; |
464 | SrcOpNum = 2; |
465 | Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr; |
466 | break; |
467 | } |
468 | |
469 | case AArch64::SUBSXrr: |
470 | case AArch64::SUBSWrr: |
471 | // if NZCV is used, do not fold. |
472 | if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1) |
473 | return 0; |
474 | // fall-through to SUBXrr and SUBWrr. |
475 | LLVM_FALLTHROUGH; |
476 | case AArch64::SUBXrr: |
477 | case AArch64::SUBWrr: { |
478 | // neg x -> csneg, represented as sub dst, xzr, src. |
479 | unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg()); |
480 | if (ZReg != AArch64::XZR && ZReg != AArch64::WZR) |
481 | return 0; |
482 | SrcOpNum = 2; |
483 | Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr; |
484 | break; |
485 | } |
486 | default: |
487 | return 0; |
488 | } |
489 | assert(Opc && SrcOpNum && "Missing parameters"); |
490 | |
491 | if (NewVReg) |
492 | *NewVReg = DefMI->getOperand(SrcOpNum).getReg(); |
493 | return Opc; |
494 | } |
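| // A hypothetical example of the folding this enables: if TrueReg is |
| // defined by "%t = ADDWri %a, 1, 0" (i.e. %a + 1, no shift), this returns |
| // CSINCWr with *NewVReg = %a, so instead of "csel %d, %t, %f, cc" the |
| // caller can emit "csinc %d, %f, %a, !cc" and drop the add. Likewise |
| // "orn %t, wzr, %a" (~%a) maps to CSINV and "sub %t, wzr, %a" (-%a) maps |
| // to CSNEG. |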
495 | |
496 | bool AArch64InstrInfo::canInsertSelect(const MachineBasicBlock &MBB, |
497 | ArrayRef<MachineOperand> Cond, |
498 | unsigned TrueReg, unsigned FalseReg, |
499 | int &CondCycles, int &TrueCycles, |
500 | int &FalseCycles) const { |
501 | // Check register classes. |
502 | const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
503 | const TargetRegisterClass *RC = |
504 | RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); |
505 | if (!RC) |
506 | return false; |
507 | |
508 | // Expanding cbz/tbz requires an extra cycle of latency on the condition. |
509 | unsigned ExtraCondLat = Cond.size() != 1; |
510 | |
511 | // GPRs are handled by csel. |
512 | // FIXME: Fold in x+1, -x, and ~x when applicable. |
513 | if (AArch64::GPR64allRegClass.hasSubClassEq(RC) || |
514 | AArch64::GPR32allRegClass.hasSubClassEq(RC)) { |
515 | // Single-cycle csel, csinc, csinv, and csneg. |
516 | CondCycles = 1 + ExtraCondLat; |
517 | TrueCycles = FalseCycles = 1; |
518 | if (canFoldIntoCSel(MRI, TrueReg)) |
519 | TrueCycles = 0; |
520 | else if (canFoldIntoCSel(MRI, FalseReg)) |
521 | FalseCycles = 0; |
522 | return true; |
523 | } |
524 | |
525 | // Scalar floating point is handled by fcsel. |
526 | // FIXME: Form fabs, fmin, and fmax when applicable. |
527 | if (AArch64::FPR64RegClass.hasSubClassEq(RC) || |
528 | AArch64::FPR32RegClass.hasSubClassEq(RC)) { |
529 | CondCycles = 5 + ExtraCondLat; |
530 | TrueCycles = FalseCycles = 2; |
531 | return true; |
532 | } |
533 | |
534 | // Can't do vectors. |
535 | return false; |
536 | } |
537 | |
538 | void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB, |
539 | MachineBasicBlock::iterator I, |
540 | const DebugLoc &DL, unsigned DstReg, |
541 | ArrayRef<MachineOperand> Cond, |
542 | unsigned TrueReg, unsigned FalseReg) const { |
543 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
544 | |
545 | // Parse the condition code, see parseCondBranch() above. |
546 | AArch64CC::CondCode CC; |
547 | switch (Cond.size()) { |
548 | default: |
549 | llvm_unreachable("Unknown condition opcode in Cond"); |
550 | case 1: // b.cc |
551 | CC = AArch64CC::CondCode(Cond[0].getImm()); |
552 | break; |
553 | case 3: { // cbz/cbnz |
554 | // We must insert a compare against 0. |
555 | bool Is64Bit; |
556 | switch (Cond[1].getImm()) { |
557 | default: |
558 | llvm_unreachable("Unknown branch opcode in Cond"); |
559 | case AArch64::CBZW: |
560 | Is64Bit = false; |
561 | CC = AArch64CC::EQ; |
562 | break; |
563 | case AArch64::CBZX: |
564 | Is64Bit = true; |
565 | CC = AArch64CC::EQ; |
566 | break; |
567 | case AArch64::CBNZW: |
568 | Is64Bit = false; |
569 | CC = AArch64CC::NE; |
570 | break; |
571 | case AArch64::CBNZX: |
572 | Is64Bit = true; |
573 | CC = AArch64CC::NE; |
574 | break; |
575 | } |
576 | Register SrcReg = Cond[2].getReg(); |
577 | if (Is64Bit) { |
578 | // cmp reg, #0 is actually subs xzr, reg, #0. |
579 | MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass); |
580 | BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR) |
581 | .addReg(SrcReg) |
582 | .addImm(0) |
583 | .addImm(0); |
584 | } else { |
585 | MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass); |
586 | BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR) |
587 | .addReg(SrcReg) |
588 | .addImm(0) |
589 | .addImm(0); |
590 | } |
591 | break; |
592 | } |
593 | case 4: { // tbz/tbnz |
594 | // We must insert a tst instruction. |
595 | switch (Cond[1].getImm()) { |
596 | default: |
597 | llvm_unreachable("Unknown branch opcode in Cond"); |
598 | case AArch64::TBZW: |
599 | case AArch64::TBZX: |
600 | CC = AArch64CC::EQ; |
601 | break; |
602 | case AArch64::TBNZW: |
603 | case AArch64::TBNZX: |
604 | CC = AArch64CC::NE; |
605 | break; |
606 | } |
607 | // cmp reg, #foo is actually ands xzr, reg, #1<<foo. |
608 | if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW) |
609 | BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR) |
610 | .addReg(Cond[2].getReg()) |
611 | .addImm( |
612 | AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32)); |
613 | else |
614 | BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR) |
615 | .addReg(Cond[2].getReg()) |
616 | .addImm( |
617 | AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64)); |
618 | break; |
619 | } |
620 | } |
621 | |
622 | unsigned Opc = 0; |
623 | const TargetRegisterClass *RC = nullptr; |
624 | bool TryFold = false; |
625 | if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) { |
626 | RC = &AArch64::GPR64RegClass; |
627 | Opc = AArch64::CSELXr; |
628 | TryFold = true; |
629 | } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) { |
630 | RC = &AArch64::GPR32RegClass; |
631 | Opc = AArch64::CSELWr; |
632 | TryFold = true; |
633 | } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) { |
634 | RC = &AArch64::FPR64RegClass; |
635 | Opc = AArch64::FCSELDrrr; |
636 | } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) { |
637 | RC = &AArch64::FPR32RegClass; |
638 | Opc = AArch64::FCSELSrrr; |
639 | } |
640 | assert(RC && "Unsupported regclass"); |
641 | |
642 | // Try folding simple instructions into the csel. |
643 | if (TryFold) { |
644 | unsigned NewVReg = 0; |
645 | unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg); |
646 | if (FoldedOpc) { |
647 | // The folded opcodes csinc, csinv and csneg apply the operation to |
648 | // FalseReg, so we need to invert the condition. |
649 | CC = AArch64CC::getInvertedCondCode(CC); |
650 | TrueReg = FalseReg; |
651 | } else |
652 | FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg); |
653 | |
654 | // Fold the operation. Leave any dead instructions for DCE to clean up. |
655 | if (FoldedOpc) { |
656 | FalseReg = NewVReg; |
657 | Opc = FoldedOpc; |
658 | // This extends the live range of NewVReg. |
659 | MRI.clearKillFlags(NewVReg); |
660 | } |
661 | } |
662 | |
663 | // Pull all virtual registers into the appropriate class. |
664 | MRI.constrainRegClass(TrueReg, RC); |
665 | MRI.constrainRegClass(FalseReg, RC); |
666 | |
667 | // Insert the csel. |
668 | BuildMI(MBB, I, DL, get(Opc), DstReg) |
669 | .addReg(TrueReg) |
670 | .addReg(FalseReg) |
671 | .addImm(CC); |
672 | } |
673 | |
674 | /// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx. |
675 | static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) { |
676 | uint64_t Imm = MI.getOperand(1).getImm(); |
677 | uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize); |
678 | uint64_t Encoding; |
679 | return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding); |
680 | } |
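| // Illustrative values: 0x5555555555555555 (alternating bits) is a valid |
| // logical immediate, so "mov x0, #0x5555555555555555" can become a single |
| // "orr x0, xzr, #0x5555555555555555", whereas a constant such as |
| // 0x1234567812345678 is not a replicated, rotated run of ones and fails |
| // the check. |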
681 | |
682 | // FIXME: this implementation should be micro-architecture dependent, so a |
683 | // micro-architecture target hook should be introduced here in future. |
684 | bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const { |
685 | if (!Subtarget.hasCustomCheapAsMoveHandling()) |
686 | return MI.isAsCheapAsAMove(); |
687 | |
688 | const unsigned Opcode = MI.getOpcode(); |
689 | |
690 | // Firstly, check cases gated by features. |
691 | |
692 | if (Subtarget.hasZeroCycleZeroingFP()) { |
693 | if (Opcode == AArch64::FMOVH0 || |
694 | Opcode == AArch64::FMOVS0 || |
695 | Opcode == AArch64::FMOVD0) |
696 | return true; |
697 | } |
698 | |
699 | if (Subtarget.hasZeroCycleZeroingGP()) { |
700 | if (Opcode == TargetOpcode::COPY && |
701 | (MI.getOperand(1).getReg() == AArch64::WZR || |
702 | MI.getOperand(1).getReg() == AArch64::XZR)) |
703 | return true; |
704 | } |
705 | |
706 | // Secondly, check cases specific to sub-targets. |
707 | |
708 | if (Subtarget.hasExynosCheapAsMoveHandling()) { |
709 | if (isExynosCheapAsMove(MI)) |
710 | return true; |
711 | |
712 | return MI.isAsCheapAsAMove(); |
713 | } |
714 | |
715 | // Finally, check generic cases. |
716 | |
717 | switch (Opcode) { |
718 | default: |
719 | return false; |
720 | |
721 | // add/sub on register without shift |
722 | case AArch64::ADDWri: |
723 | case AArch64::ADDXri: |
724 | case AArch64::SUBWri: |
725 | case AArch64::SUBXri: |
726 | return (MI.getOperand(3).getImm() == 0); |
727 | |
728 | // logical ops on immediate |
729 | case AArch64::ANDWri: |
730 | case AArch64::ANDXri: |
731 | case AArch64::EORWri: |
732 | case AArch64::EORXri: |
733 | case AArch64::ORRWri: |
734 | case AArch64::ORRXri: |
735 | return true; |
736 | |
737 | // logical ops on register without shift |
738 | case AArch64::ANDWrr: |
739 | case AArch64::ANDXrr: |
740 | case AArch64::BICWrr: |
741 | case AArch64::BICXrr: |
742 | case AArch64::EONWrr: |
743 | case AArch64::EONXrr: |
744 | case AArch64::EORWrr: |
745 | case AArch64::EORXrr: |
746 | case AArch64::ORNWrr: |
747 | case AArch64::ORNXrr: |
748 | case AArch64::ORRWrr: |
749 | case AArch64::ORRXrr: |
750 | return true; |
751 | |
752 | // If MOVi32imm or MOVi64imm can be expanded into ORRWri or |
753 | // ORRXri, it is as cheap as MOV |
754 | case AArch64::MOVi32imm: |
755 | return canBeExpandedToORR(MI, 32); |
756 | case AArch64::MOVi64imm: |
757 | return canBeExpandedToORR(MI, 64); |
758 | } |
759 | |
760 | llvm_unreachable("Unknown opcode to check as cheap as a move!"); |
761 | } |
762 | |
763 | bool AArch64InstrInfo::isFalkorShiftExtFast(const MachineInstr &MI) { |
764 | switch (MI.getOpcode()) { |
765 | default: |
766 | return false; |
767 | |
768 | case AArch64::ADDWrs: |
769 | case AArch64::ADDXrs: |
770 | case AArch64::ADDSWrs: |
771 | case AArch64::ADDSXrs: { |
772 | unsigned Imm = MI.getOperand(3).getImm(); |
773 | unsigned ShiftVal = AArch64_AM::getShiftValue(Imm); |
774 | if (ShiftVal == 0) |
775 | return true; |
776 | return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5; |
777 | } |
778 | |
779 | case AArch64::ADDWrx: |
780 | case AArch64::ADDXrx: |
781 | case AArch64::ADDXrx64: |
782 | case AArch64::ADDSWrx: |
783 | case AArch64::ADDSXrx: |
784 | case AArch64::ADDSXrx64: { |
785 | unsigned Imm = MI.getOperand(3).getImm(); |
786 | switch (AArch64_AM::getArithExtendType(Imm)) { |
787 | default: |
788 | return false; |
789 | case AArch64_AM::UXTB: |
790 | case AArch64_AM::UXTH: |
791 | case AArch64_AM::UXTW: |
792 | case AArch64_AM::UXTX: |
793 | return AArch64_AM::getArithShiftValue(Imm) <= 4; |
794 | } |
795 | } |
796 | |
797 | case AArch64::SUBWrs: |
798 | case AArch64::SUBSWrs: { |
799 | unsigned Imm = MI.getOperand(3).getImm(); |
800 | unsigned ShiftVal = AArch64_AM::getShiftValue(Imm); |
801 | return ShiftVal == 0 || |
802 | (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 31); |
803 | } |
804 | |
805 | case AArch64::SUBXrs: |
806 | case AArch64::SUBSXrs: { |
807 | unsigned Imm = MI.getOperand(3).getImm(); |
808 | unsigned ShiftVal = AArch64_AM::getShiftValue(Imm); |
809 | return ShiftVal == 0 || |
810 | (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 63); |
811 | } |
812 | |
813 | case AArch64::SUBWrx: |
814 | case AArch64::SUBXrx: |
815 | case AArch64::SUBXrx64: |
816 | case AArch64::SUBSWrx: |
817 | case AArch64::SUBSXrx: |
818 | case AArch64::SUBSXrx64: { |
819 | unsigned Imm = MI.getOperand(3).getImm(); |
820 | switch (AArch64_AM::getArithExtendType(Imm)) { |
821 | default: |
822 | return false; |
823 | case AArch64_AM::UXTB: |
824 | case AArch64_AM::UXTH: |
825 | case AArch64_AM::UXTW: |
826 | case AArch64_AM::UXTX: |
827 | return AArch64_AM::getArithShiftValue(Imm) == 0; |
828 | } |
829 | } |
830 | |
831 | case AArch64::LDRBBroW: |
832 | case AArch64::LDRBBroX: |
833 | case AArch64::LDRBroW: |
834 | case AArch64::LDRBroX: |
835 | case AArch64::LDRDroW: |
836 | case AArch64::LDRDroX: |
837 | case AArch64::LDRHHroW: |
838 | case AArch64::LDRHHroX: |
839 | case AArch64::LDRHroW: |
840 | case AArch64::LDRHroX: |
841 | case AArch64::LDRQroW: |
842 | case AArch64::LDRQroX: |
843 | case AArch64::LDRSBWroW: |
844 | case AArch64::LDRSBWroX: |
845 | case AArch64::LDRSBXroW: |
846 | case AArch64::LDRSBXroX: |
847 | case AArch64::LDRSHWroW: |
848 | case AArch64::LDRSHWroX: |
849 | case AArch64::LDRSHXroW: |
850 | case AArch64::LDRSHXroX: |
851 | case AArch64::LDRSWroW: |
852 | case AArch64::LDRSWroX: |
853 | case AArch64::LDRSroW: |
854 | case AArch64::LDRSroX: |
855 | case AArch64::LDRWroW: |
856 | case AArch64::LDRWroX: |
857 | case AArch64::LDRXroW: |
858 | case AArch64::LDRXroX: |
859 | case AArch64::PRFMroW: |
860 | case AArch64::PRFMroX: |
861 | case AArch64::STRBBroW: |
862 | case AArch64::STRBBroX: |
863 | case AArch64::STRBroW: |
864 | case AArch64::STRBroX: |
865 | case AArch64::STRDroW: |
866 | case AArch64::STRDroX: |
867 | case AArch64::STRHHroW: |
868 | case AArch64::STRHHroX: |
869 | case AArch64::STRHroW: |
870 | case AArch64::STRHroX: |
871 | case AArch64::STRQroW: |
872 | case AArch64::STRQroX: |
873 | case AArch64::STRSroW: |
874 | case AArch64::STRSroX: |
875 | case AArch64::STRWroW: |
876 | case AArch64::STRWroX: |
877 | case AArch64::STRXroW: |
878 | case AArch64::STRXroX: { |
879 | unsigned IsSigned = MI.getOperand(3).getImm(); |
880 | return !IsSigned; |
881 | } |
882 | } |
883 | } |
884 | |
885 | bool AArch64InstrInfo::isSEHInstruction(const MachineInstr &MI) { |
886 | unsigned Opc = MI.getOpcode(); |
887 | switch (Opc) { |
888 | default: |
889 | return false; |
890 | case AArch64::SEH_StackAlloc: |
891 | case AArch64::SEH_SaveFPLR: |
892 | case AArch64::SEH_SaveFPLR_X: |
893 | case AArch64::SEH_SaveReg: |
894 | case AArch64::SEH_SaveReg_X: |
895 | case AArch64::SEH_SaveRegP: |
896 | case AArch64::SEH_SaveRegP_X: |
897 | case AArch64::SEH_SaveFReg: |
898 | case AArch64::SEH_SaveFReg_X: |
899 | case AArch64::SEH_SaveFRegP: |
900 | case AArch64::SEH_SaveFRegP_X: |
901 | case AArch64::SEH_SetFP: |
902 | case AArch64::SEH_AddFP: |
903 | case AArch64::SEH_Nop: |
904 | case AArch64::SEH_PrologEnd: |
905 | case AArch64::SEH_EpilogStart: |
906 | case AArch64::SEH_EpilogEnd: |
907 | return true; |
908 | } |
909 | } |
910 | |
911 | bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI, |
912 | unsigned &SrcReg, unsigned &DstReg, |
913 | unsigned &SubIdx) const { |
914 | switch (MI.getOpcode()) { |
915 | default: |
916 | return false; |
917 | case AArch64::SBFMXri: // aka sxtw |
918 | case AArch64::UBFMXri: // aka uxtw |
919 | // Check for the 32 -> 64 bit extension case, these instructions can do |
920 | // much more. |
921 | if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31) |
922 | return false; |
923 | // This is a signed or unsigned 32 -> 64 bit extension. |
924 | SrcReg = MI.getOperand(1).getReg(); |
925 | DstReg = MI.getOperand(0).getReg(); |
926 | SubIdx = AArch64::sub_32; |
927 | return true; |
928 | } |
929 | } |
930 | |
931 | bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint( |
932 | const MachineInstr &MIa, const MachineInstr &MIb) const { |
933 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
934 | const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr; |
935 | int64_t OffsetA = 0, OffsetB = 0; |
936 | unsigned WidthA = 0, WidthB = 0; |
937 | |
938 | assert(MIa.mayLoadOrStore() && "MIa must be a load or store."); |
939 | assert(MIb.mayLoadOrStore() && "MIb must be a load or store."); |
940 | |
941 | if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() || |
942 | MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) |
943 | return false; |
944 | |
945 | // Retrieve the base, the offset from the base, and the width. Width is |
946 | // the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If |
947 | // the base operands are identical, and the offset of the lower memory |
948 | // access plus its width does not overlap the offset of the higher memory |
949 | // access, then the memory accesses are disjoint. |
950 | if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) && |
951 | getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) { |
952 | if (BaseOpA->isIdenticalTo(*BaseOpB)) { |
953 | int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; |
954 | int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; |
955 | int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; |
956 | if (LowOffset + LowWidth <= HighOffset) |
957 | return true; |
958 | } |
959 | } |
960 | return false; |
961 | } |
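| // A worked example on hypothetical accesses: "str x1, [x0]" has offset 0, |
| // width 8; "str w2, [x0, #8]" has offset 8, width 4. The bases match and |
| // 0 + 8 <= 8, so the accesses are provably disjoint and this returns |
| // true. With offsets 0 and 4 instead, 0 + 8 > 4, so nothing is proven and |
| // this conservatively returns false. |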
962 | |
963 | bool AArch64InstrInfo::isSchedulingBoundary(const MachineInstr &MI, |
964 | const MachineBasicBlock *MBB, |
965 | const MachineFunction &MF) const { |
966 | if (TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF)) |
967 | return true; |
968 | switch (MI.getOpcode()) { |
969 | case AArch64::HINT: |
970 | // CSDB hints are scheduling barriers. |
971 | if (MI.getOperand(0).getImm() == 0x14) |
972 | return true; |
973 | break; |
974 | case AArch64::DSB: |
975 | case AArch64::ISB: |
976 | // DSB and ISB also are scheduling barriers. |
977 | return true; |
978 | default:; |
979 | } |
980 | return isSEHInstruction(MI); |
981 | } |
982 | |
983 | /// analyzeCompare - For a comparison instruction, return the source registers |
984 | /// in SrcReg and SrcReg2, and the value it compares against in CmpValue. |
985 | /// Return true if the comparison instruction can be analyzed. |
986 | bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, |
987 | unsigned &SrcReg2, int &CmpMask, |
988 | int &CmpValue) const { |
989 | // The first operand can be a frame index where we'd normally expect a |
990 | // register. |
991 | assert(MI.getNumOperands() >= 2 && "All AArch64 cmps should have 2 operands"); |
992 | if (!MI.getOperand(1).isReg()) |
993 | return false; |
994 | |
995 | switch (MI.getOpcode()) { |
996 | default: |
997 | break; |
998 | case AArch64::SUBSWrr: |
999 | case AArch64::SUBSWrs: |
1000 | case AArch64::SUBSWrx: |
1001 | case AArch64::SUBSXrr: |
1002 | case AArch64::SUBSXrs: |
1003 | case AArch64::SUBSXrx: |
1004 | case AArch64::ADDSWrr: |
1005 | case AArch64::ADDSWrs: |
1006 | case AArch64::ADDSWrx: |
1007 | case AArch64::ADDSXrr: |
1008 | case AArch64::ADDSXrs: |
1009 | case AArch64::ADDSXrx: |
1010 | // Replace SUBSWrr with SUBWrr if NZCV is not used. |
1011 | SrcReg = MI.getOperand(1).getReg(); |
1012 | SrcReg2 = MI.getOperand(2).getReg(); |
1013 | CmpMask = ~0; |
1014 | CmpValue = 0; |
1015 | return true; |
1016 | case AArch64::SUBSWri: |
1017 | case AArch64::ADDSWri: |
1018 | case AArch64::SUBSXri: |
1019 | case AArch64::ADDSXri: |
1020 | SrcReg = MI.getOperand(1).getReg(); |
1021 | SrcReg2 = 0; |
1022 | CmpMask = ~0; |
1023 | // FIXME: CmpValue is only compared against zero, so normalize it to 0 or 1. |
1024 | CmpValue = MI.getOperand(2).getImm() != 0; |
1025 | return true; |
1026 | case AArch64::ANDSWri: |
1027 | case AArch64::ANDSXri: |
1028 | // ANDS does not use the same encoding scheme as the other xxxS |
1029 | // instructions. |
1030 | SrcReg = MI.getOperand(1).getReg(); |
1031 | SrcReg2 = 0; |
1032 | CmpMask = ~0; |
1033 | // FIXME: The return value type of decodeLogicalImmediate is uint64_t, |
1034 | // while the type of CmpValue is int. When converting uint64_t to int, |
1035 | // the high 32 bits of the uint64_t will be lost. |
1036 | // In fact, this caused a bug in spec2006-483.xalancbmk. |
1037 | // CmpValue is only used to compare with zero in optimizeCompareInstr. |
1038 | CmpValue = AArch64_AM::decodeLogicalImmediate( |
1039 | MI.getOperand(2).getImm(), |
1040 | MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0; |
1041 | return true; |
1042 | } |
1043 | |
1044 | return false; |
1045 | } |
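| // Example outputs (hypothetical registers): "subs w0, w1, w2" yields |
| // SrcReg = w1, SrcReg2 = w2, CmpValue = 0; "subs wzr, w1, #0" (cmp w1, #0) |
| // yields SrcReg = w1, SrcReg2 = 0, CmpValue = 0. CmpValue only records |
| // whether the immediate is nonzero. |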
1046 | |
1047 | static bool UpdateOperandRegClass(MachineInstr &Instr) { |
1048 | MachineBasicBlock *MBB = Instr.getParent(); |
1049 | assert(MBB && "Can't get MachineBasicBlock here"); |
1050 | MachineFunction *MF = MBB->getParent(); |
1051 | assert(MF && "Can't get MachineFunction here"); |
1052 | const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); |
1053 | const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); |
1054 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
1055 | |
1056 | for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx; |
1057 | ++OpIdx) { |
1058 | MachineOperand &MO = Instr.getOperand(OpIdx); |
1059 | const TargetRegisterClass *OpRegCstraints = |
1060 | Instr.getRegClassConstraint(OpIdx, TII, TRI); |
1061 | |
1062 | // If there's no constraint, there's nothing to do. |
1063 | if (!OpRegCstraints) |
1064 | continue; |
1065 | // If the operand is a frame index, there's nothing to do here. |
1066 | // A frame index operand will resolve correctly during PEI. |
1067 | if (MO.isFI()) |
1068 | continue; |
1069 | |
1070 | assert(MO.isReg() && |
1071 |        "Operand has register constraints without being a register!"); |
1072 | |
1073 | Register Reg = MO.getReg(); |
1074 | if (Register::isPhysicalRegister(Reg)) { |
1075 | if (!OpRegCstraints->contains(Reg)) |
1076 | return false; |
1077 | } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) && |
1078 | !MRI->constrainRegClass(Reg, OpRegCstraints)) |
1079 | return false; |
1080 | } |
1081 | |
1082 | return true; |
1083 | } |
1084 | |
1085 | /// Return the opcode that does not set flags when possible - otherwise |
1086 | /// return the original opcode. The caller is responsible for the actual |
1087 | /// substitution and legality checking. |
1088 | static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) { |
1089 | // Don't convert all compare instructions, because for some the zero register |
1090 | // encoding becomes the sp register. |
1091 | bool MIDefinesZeroReg = false; |
1092 | if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR)) |
1093 | MIDefinesZeroReg = true; |
1094 | |
1095 | switch (MI.getOpcode()) { |
1096 | default: |
1097 | return MI.getOpcode(); |
1098 | case AArch64::ADDSWrr: |
1099 | return AArch64::ADDWrr; |
1100 | case AArch64::ADDSWri: |
1101 | return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri; |
1102 | case AArch64::ADDSWrs: |
1103 | return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs; |
1104 | case AArch64::ADDSWrx: |
1105 | return AArch64::ADDWrx; |
1106 | case AArch64::ADDSXrr: |
1107 | return AArch64::ADDXrr; |
1108 | case AArch64::ADDSXri: |
1109 | return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri; |
1110 | case AArch64::ADDSXrs: |
1111 | return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs; |
1112 | case AArch64::ADDSXrx: |
1113 | return AArch64::ADDXrx; |
1114 | case AArch64::SUBSWrr: |
1115 | return AArch64::SUBWrr; |
1116 | case AArch64::SUBSWri: |
1117 | return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri; |
1118 | case AArch64::SUBSWrs: |
1119 | return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs; |
1120 | case AArch64::SUBSWrx: |
1121 | return AArch64::SUBWrx; |
1122 | case AArch64::SUBSXrr: |
1123 | return AArch64::SUBXrr; |
1124 | case AArch64::SUBSXri: |
1125 | return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri; |
1126 | case AArch64::SUBSXrs: |
1127 | return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs; |
1128 | case AArch64::SUBSXrx: |
1129 | return AArch64::SUBXrx; |
1130 | } |
1131 | } |
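| // For example, ADDSWri "adds w0, w1, #4" maps to ADDWri "add w0, w1, #4", |
| // but if the instruction defines wzr (e.g. "cmn w1, #4", i.e. |
| // "adds wzr, w1, #4") the S form is kept: in the non-flag-setting |
| // encoding, destination register 31 means wsp rather than wzr, which |
| // would change the semantics. |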
1132 | |
1133 | enum AccessKind { AK_Write = 0x01, AK_Read = 0x10, AK_All = 0x11 }; |
1134 | |
1135 | /// True when condition flags are accessed (either by writing or reading) |
1136 | /// on the instruction trace starting at From and ending at To. |
1137 | /// |
1138 | /// Note: If From and To are from different blocks it's assumed CC are accessed |
1139 | /// on the path. |
1140 | static bool areCFlagsAccessedBetweenInstrs( |
1141 | MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, |
1142 | const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) { |
1143 | // Early exit if To is at the beginning of the BB. |
1144 | if (To == To->getParent()->begin()) |
1145 | return true; |
1146 | |
1147 | // Check whether the instructions are in the same basic block |
1148 | // If not, assume the condition flags might get modified somewhere. |
1149 | if (To->getParent() != From->getParent()) |
1150 | return true; |
1151 | |
1152 | // From must be above To. |
1153 | assert(std::find_if(++To.getReverse(), To->getParent()->rend(), |
1154 |                     [From](MachineInstr &MI) { |
1155 |                       return MI.getIterator() == From; |
1156 |                     }) != To->getParent()->rend()); |
1157 | |
1158 | // We iterate backward starting \p To until we hit \p From. |
1159 | for (--To; To != From; --To) { |
1160 | const MachineInstr &Instr = *To; |
1161 | |
1162 | if (((AccessToCheck & AK_Write) && |
1163 | Instr.modifiesRegister(AArch64::NZCV, TRI)) || |
1164 | ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI))) |
1165 | return true; |
1166 | } |
1167 | return false; |
1168 | } |
1169 | |
1170 | /// Try to optimize a compare instruction. A compare instruction is an |
1171 | /// instruction which produces AArch64::NZCV. It is only truly a |
1172 | /// compare instruction when there are no uses of its destination |
1173 | /// register. |
1174 | /// |
1175 | /// The following steps are tried in order: |
1176 | /// 1. Convert CmpInstr into an unconditional version. |
1177 | /// 2. Remove CmpInstr if above there is an instruction producing a needed |
1178 | /// condition code or an instruction which can be converted into such an |
1179 | /// instruction. |
1180 | /// Only comparison with zero is supported. |
1181 | bool AArch64InstrInfo::optimizeCompareInstr( |
1182 | MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask, |
1183 | int CmpValue, const MachineRegisterInfo *MRI) const { |
1184 | assert(CmpInstr.getParent()); |
1185 | assert(MRI); |
1186 | |
1187 | // Replace SUBSWrr with SUBWrr if NZCV is not used. |
1188 | int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true); |
1189 | if (DeadNZCVIdx != -1) { |
1190 | if (CmpInstr.definesRegister(AArch64::WZR) || |
1191 | CmpInstr.definesRegister(AArch64::XZR)) { |
1192 | CmpInstr.eraseFromParent(); |
1193 | return true; |
1194 | } |
1195 | unsigned Opc = CmpInstr.getOpcode(); |
1196 | unsigned NewOpc = convertToNonFlagSettingOpc(CmpInstr); |
1197 | if (NewOpc == Opc) |
1198 | return false; |
1199 | const MCInstrDesc &MCID = get(NewOpc); |
1200 | CmpInstr.setDesc(MCID); |
1201 | CmpInstr.RemoveOperand(DeadNZCVIdx); |
1202 | bool succeeded = UpdateOperandRegClass(CmpInstr); |
1203 | (void)succeeded; |
1204 | assert(succeeded && "Some operands reg class are incompatible!"); |
1205 | return true; |
1206 | } |
1207 | |
1208 | // Continue only if we have a "ri" where immediate is zero. |
1209 | // FIXME: CmpValue has already been converted to 0 or 1 in analyzeCompare |
1210 | // function. |
1211 | assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!"); |
1212 | if (CmpValue != 0 || SrcReg2 != 0) |
1213 | return false; |
1214 | |
1215 | // CmpInstr is a compare instruction if the destination register is not used. |
1216 | if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg())) |
1217 | return false; |
1218 | |
1219 | return substituteCmpToZero(CmpInstr, SrcReg, MRI); |
1220 | } |
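| // A sketch of step 2 on hypothetical code: given |
| //   %3 = SUBWrr %1, %2 |
| //   ...                     ; no NZCV reads or writes in between |
| //   %4 = SUBSWri %3, 0, 0   ; cmp %3, #0, with %4 unused |
| // the SUBWrr is converted to its S form SUBSWrr and the compare deleted, |
| // since the subtraction then produces the needed flags itself. |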
1221 | |
1222 | /// Get the opcode of the S (flag-setting) version of Instr. |
1223 | /// If Instr is already the S version, its opcode is returned. |
1224 | /// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have an S |
1225 | /// version or we are not interested in it. |
1226 | static unsigned sForm(MachineInstr &Instr) { |
1227 | switch (Instr.getOpcode()) { |
1228 | default: |
1229 | return AArch64::INSTRUCTION_LIST_END; |
1230 | |
1231 | case AArch64::ADDSWrr: |
1232 | case AArch64::ADDSWri: |
1233 | case AArch64::ADDSXrr: |
1234 | case AArch64::ADDSXri: |
1235 | case AArch64::SUBSWrr: |
1236 | case AArch64::SUBSWri: |
1237 | case AArch64::SUBSXrr: |
1238 | case AArch64::SUBSXri: |
1239 | return Instr.getOpcode(); |
1240 | |
1241 | case AArch64::ADDWrr: |
1242 | return AArch64::ADDSWrr; |
1243 | case AArch64::ADDWri: |
1244 | return AArch64::ADDSWri; |
1245 | case AArch64::ADDXrr: |
1246 | return AArch64::ADDSXrr; |
1247 | case AArch64::ADDXri: |
1248 | return AArch64::ADDSXri; |
1249 | case AArch64::ADCWr: |
1250 | return AArch64::ADCSWr; |
1251 | case AArch64::ADCXr: |
1252 | return AArch64::ADCSXr; |
1253 | case AArch64::SUBWrr: |
1254 | return AArch64::SUBSWrr; |
1255 | case AArch64::SUBWri: |
1256 | return AArch64::SUBSWri; |
1257 | case AArch64::SUBXrr: |
1258 | return AArch64::SUBSXrr; |
1259 | case AArch64::SUBXri: |
1260 | return AArch64::SUBSXri; |
1261 | case AArch64::SBCWr: |
1262 | return AArch64::SBCSWr; |
1263 | case AArch64::SBCXr: |
1264 | return AArch64::SBCSXr; |
1265 | case AArch64::ANDWri: |
1266 | return AArch64::ANDSWri; |
1267 | case AArch64::ANDXri: |
1268 | return AArch64::ANDSXri; |
1269 | } |
1270 | } |
1271 | |
1272 | /// Check if AArch64::NZCV should be alive in successors of MBB. |
1273 | static bool areCFlagsAliveInSuccessors(MachineBasicBlock *MBB) { |
1274 | for (auto *BB : MBB->successors()) |
1275 | if (BB->isLiveIn(AArch64::NZCV)) |
1276 | return true; |
1277 | return false; |
1278 | } |
1279 | |
1280 | namespace { |
1281 | |
1282 | struct UsedNZCV { |
1283 | bool N = false; |
1284 | bool Z = false; |
1285 | bool C = false; |
1286 | bool V = false; |
1287 | |
1288 | UsedNZCV() = default; |
1289 | |
1290 | UsedNZCV &operator|=(const UsedNZCV &UsedFlags) { |
1291 | this->N |= UsedFlags.N; |
1292 | this->Z |= UsedFlags.Z; |
1293 | this->C |= UsedFlags.C; |
1294 | this->V |= UsedFlags.V; |
1295 | return *this; |
1296 | } |
1297 | }; |
1298 | |
1299 | } // end anonymous namespace |
1300 | |
1301 | /// Find a condition code used by the instruction. |
1302 | /// Returns AArch64CC::Invalid if either the instruction does not use condition |
1303 | /// codes or we don't optimize CmpInstr in the presence of such instructions. |
1304 | static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) { |
1305 | switch (Instr.getOpcode()) { |
1306 | default: |
1307 | return AArch64CC::Invalid; |
1308 | |
1309 | case AArch64::Bcc: { |
1310 | int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV); |
1311 | assert(Idx >= 2); |
1312 | return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 2).getImm()); |
1313 | } |
1314 | |
1315 | case AArch64::CSINVWr: |
1316 | case AArch64::CSINVXr: |
1317 | case AArch64::CSINCWr: |
1318 | case AArch64::CSINCXr: |
1319 | case AArch64::CSELWr: |
1320 | case AArch64::CSELXr: |
1321 | case AArch64::CSNEGWr: |
1322 | case AArch64::CSNEGXr: |
1323 | case AArch64::FCSELSrrr: |
1324 | case AArch64::FCSELDrrr: { |
1325 | int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV); |
1326 | assert(Idx >= 1); |
1327 | return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 1).getImm()); |
1328 | } |
1329 | } |
1330 | } |
1331 | |
1332 | static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) { |
1333 | assert(CC != AArch64CC::Invalid); |
1334 | UsedNZCV UsedFlags; |
1335 | switch (CC) { |
1336 | default: |
1337 | break; |
1338 | |
1339 | case AArch64CC::EQ: // Z set |
1340 | case AArch64CC::NE: // Z clear |
1341 | UsedFlags.Z = true; |
1342 | break; |
1343 | |
1344 | case AArch64CC::HI: // Z clear and C set |
1345 | case AArch64CC::LS: // Z set or C clear |
1346 | UsedFlags.Z = true; |
1347 | LLVM_FALLTHROUGH; |
1348 | case AArch64CC::HS: // C set |
1349 | case AArch64CC::LO: // C clear |
1350 | UsedFlags.C = true; |
1351 | break; |
1352 | |
1353 | case AArch64CC::MI: // N set |
1354 | case AArch64CC::PL: // N clear |
1355 | UsedFlags.N = true; |
1356 | break; |
1357 | |
1358 | case AArch64CC::VS: // V set |
1359 | case AArch64CC::VC: // V clear |
1360 | UsedFlags.V = true; |
1361 | break; |
1362 | |
1363 | case AArch64CC::GT: // Z clear, N and V the same |
1364 | case AArch64CC::LE: // Z set, N and V differ |
1365 | UsedFlags.Z = true; |
1366 | LLVM_FALLTHROUGH; |
1367 | case AArch64CC::GE: // N and V the same |
1368 | case AArch64CC::LT: // N and V differ |
1369 | UsedFlags.N = true; |
1370 | UsedFlags.V = true; |
1371 | break; |
1372 | } |
1373 | return UsedFlags; |
1374 | } |
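// [Editorial worked example] getUsedNZCV(AArch64CC::GT) returns
// {N=1, Z=1, C=0, V=1} via the GT -> GE fallthrough above, while
// getUsedNZCV(AArch64CC::HS) returns {N=0, Z=0, C=1, V=0}. Only the
// flags marked here must be reproduced faithfully by any instruction
// that is substituted for the compare.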
1375 | |
1376 | static bool isADDSRegImm(unsigned Opcode) { |
1377 | return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri; |
1378 | } |
1379 | |
1380 | static bool isSUBSRegImm(unsigned Opcode) { |
1381 | return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri; |
1382 | } |
1383 | |
1384 | /// Check if CmpInstr can be substituted by MI. |
1385 | /// |
1386 | /// CmpInstr can be substituted: |
1387 | /// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0' |
1388 | /// - and, MI and CmpInstr are from the same MachineBB |
1389 | /// - and, condition flags are not alive in successors of the CmpInstr parent |
1390 | /// - and, if MI opcode is the S form, there must be no defs of flags between |
1391 | /// MI and CmpInstr; |
1392 | /// or, if MI opcode is not the S form, there must be neither defs nor uses |
1393 | /// of flags between MI and CmpInstr. |
1394 | /// - and C/V flags are not used after CmpInstr |
1395 | static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr, |
1396 | const TargetRegisterInfo *TRI) { |
1397 | assert(MI); |
1398 | assert(sForm(*MI) != AArch64::INSTRUCTION_LIST_END); |
1399 | assert(CmpInstr); |
1400 | |
1401 | const unsigned CmpOpcode = CmpInstr->getOpcode(); |
1402 | if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode)) |
1403 | return false; |
1404 | |
1405 | if (MI->getParent() != CmpInstr->getParent()) |
1406 | return false; |
1407 | |
1408 | if (areCFlagsAliveInSuccessors(CmpInstr->getParent())) |
1409 | return false; |
1410 | |
1411 | AccessKind AccessToCheck = AK_Write; |
1412 | if (sForm(*MI) != MI->getOpcode()) |
1413 | AccessToCheck = AK_All; |
1414 | if (areCFlagsAccessedBetweenInstrs(MI, CmpInstr, TRI, AccessToCheck)) |
1415 | return false; |
1416 | |
1417 | UsedNZCV NZCVUsedAfterCmp; |
1418 | for (auto I = std::next(CmpInstr->getIterator()), |
1419 | E = CmpInstr->getParent()->instr_end(); |
1420 | I != E; ++I) { |
1421 | const MachineInstr &Instr = *I; |
1422 | if (Instr.readsRegister(AArch64::NZCV, TRI)) { |
1423 | AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr); |
1424 | if (CC == AArch64CC::Invalid) // Unsupported conditional instruction |
1425 | return false; |
1426 | NZCVUsedAfterCmp |= getUsedNZCV(CC); |
1427 | } |
1428 | |
1429 | if (Instr.modifiesRegister(AArch64::NZCV, TRI)) |
1430 | break; |
1431 | } |
1432 | |
1433 | return !NZCVUsedAfterCmp.C && !NZCVUsedAfterCmp.V; |
1434 | } |
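// [Editorial note on the final C/V check, values assumed] N and Z depend
// only on the result value, so 'SUBSWrr %2, %0, %1' and a following
// 'SUBSWri %2, 0' (i.e. cmp %2, #0) agree on them. C and V depend on the
// operands: with %0 = INT32_MIN and %1 = 1 the subtraction overflows
// (V=1), while comparing the result 0x7fffffff against zero gives V=0.
// A later consumer of GE/LT/GT/LE or HS/LO would therefore see different
// flags, which is why any use of C or V after CmpInstr blocks the
// substitution.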
1435 | |
1436 | /// Substitute an instruction comparing to zero with another instruction |
1437 | /// which produces needed condition flags. |
1438 | /// |
1439 | /// Return true on success. |
1440 | bool AArch64InstrInfo::substituteCmpToZero( |
1441 | MachineInstr &CmpInstr, unsigned SrcReg, |
1442 | const MachineRegisterInfo *MRI) const { |
1443 | assert(MRI); |
1444 | // Get the unique definition of SrcReg. |
1445 | MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); |
1446 | if (!MI) |
1447 | return false; |
1448 | |
1449 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
1450 | |
1451 | unsigned NewOpc = sForm(*MI); |
1452 | if (NewOpc == AArch64::INSTRUCTION_LIST_END) |
1453 | return false; |
1454 | |
1455 | if (!canInstrSubstituteCmpInstr(MI, &CmpInstr, TRI)) |
1456 | return false; |
1457 | |
1458 | // Update the instruction to set NZCV. |
1459 | MI->setDesc(get(NewOpc)); |
1460 | CmpInstr.eraseFromParent(); |
1461 | bool succeeded = UpdateOperandRegClass(*MI); |
1462 | (void)succeeded; |
1463 | assert(succeeded && "Some operands reg class are incompatible!"); |
1464 | MI->addRegisterDefined(AArch64::NZCV, TRI); |
1465 | return true; |
1466 | } |
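// [Editorial end-to-end sketch, assumed MIR] The transform performed by
// substituteCmpToZero:
//
//   Before:  %2:gpr32 = SUBWrr %0, %1
//            dead %3:gpr32 = SUBSWri %2, 0, 0, implicit-def $nzcv  ; cmp %2, #0
//            Bcc 1, %bb.1, implicit $nzcv                          ; b.ne
//
//   After:   %2:gpr32 = SUBSWrr %0, %1, implicit-def $nzcv
//            Bcc 1, %bb.1, implicit $nzcv
//
// NE reads only Z, which the S form computes from the same result value,
// so the separate compare can be erased.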
1467 | |
1468 | bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { |
1469 | if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD && |
1470 | MI.getOpcode() != AArch64::CATCHRET) |
1471 | return false; |
1472 | |
1473 | MachineBasicBlock &MBB = *MI.getParent(); |
1474 | auto &Subtarget = MBB.getParent()->getSubtarget<AArch64Subtarget>(); |
1475 | auto TRI = Subtarget.getRegisterInfo(); |
1476 | DebugLoc DL = MI.getDebugLoc(); |
1477 | |
1478 | if (MI.getOpcode() == AArch64::CATCHRET) { |
1479 | // Skip to the first instruction before the epilog. |
1480 | const TargetInstrInfo *TII = |
1481 | MBB.getParent()->getSubtarget().getInstrInfo(); |
1482 | MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB(); |
1483 | auto MBBI = MachineBasicBlock::iterator(MI); |
1484 | MachineBasicBlock::iterator FirstEpilogSEH = std::prev(MBBI); |
1485 | while (FirstEpilogSEH->getFlag(MachineInstr::FrameDestroy) && |
1486 | FirstEpilogSEH != MBB.begin()) |
1487 | FirstEpilogSEH = std::prev(FirstEpilogSEH); |
1488 | if (FirstEpilogSEH != MBB.begin()) |
1489 | FirstEpilogSEH = std::next(FirstEpilogSEH); |
1490 | BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADRP)) |
1491 | .addReg(AArch64::X0, RegState::Define) |
1492 | .addMBB(TargetMBB); |
1493 | BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADDXri)) |
1494 | .addReg(AArch64::X0, RegState::Define) |
1495 | .addReg(AArch64::X0) |
1496 | .addMBB(TargetMBB) |
1497 | .addImm(0); |
1498 | return true; |
1499 | } |
1500 | |
1501 | Register Reg = MI.getOperand(0).getReg(); |
1502 | const GlobalValue *GV = |
1503 | cast<GlobalValue>((*MI.memoperands_begin())->getValue()); |
1504 | const TargetMachine &TM = MBB.getParent()->getTarget(); |
1505 | unsigned OpFlags = Subtarget.ClassifyGlobalReference(GV, TM); |
1506 | const unsigned char MO_NC = AArch64II::MO_NC; |
1507 | |
1508 | if ((OpFlags & AArch64II::MO_GOT) != 0) { |
1509 | BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg) |
1510 | .addGlobalAddress(GV, 0, OpFlags); |
1511 | if (Subtarget.isTargetILP32()) { |
1512 | unsigned Reg32 = TRI->getSubReg(Reg, AArch64::sub_32); |
1513 | BuildMI(MBB, MI, DL, get(AArch64::LDRWui)) |
1514 | .addDef(Reg32, RegState::Dead) |
1515 | .addUse(Reg, RegState::Kill) |
1516 | .addImm(0) |
1517 | .addMemOperand(*MI.memoperands_begin()) |
1518 | .addDef(Reg, RegState::Implicit); |
1519 | } else { |
1520 | BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg) |
1521 | .addReg(Reg, RegState::Kill) |
1522 | .addImm(0) |
1523 | .addMemOperand(*MI.memoperands_begin()); |
1524 | } |
1525 | } else if (TM.getCodeModel() == CodeModel::Large) { |
1526 | assert(!Subtarget.isTargetILP32() && "how can large exist in ILP32?"); |
1527 | BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg) |
1528 | .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC) |
1529 | .addImm(0); |
1530 | BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg) |
1531 | .addReg(Reg, RegState::Kill) |
1532 | .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC) |
1533 | .addImm(16); |
1534 | BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg) |
1535 | .addReg(Reg, RegState::Kill) |
1536 | .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC) |
1537 | .addImm(32); |
1538 | BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg) |
1539 | .addReg(Reg, RegState::Kill) |
1540 | .addGlobalAddress(GV, 0, AArch64II::MO_G3) |
1541 | .addImm(48); |
1542 | BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg) |
1543 | .addReg(Reg, RegState::Kill) |
1544 | .addImm(0) |
1545 | .addMemOperand(*MI.memoperands_begin()); |
1546 | } else if (TM.getCodeModel() == CodeModel::Tiny) { |
1547 | BuildMI(MBB, MI, DL, get(AArch64::ADR), Reg) |
1548 | .addGlobalAddress(GV, 0, OpFlags); |
1549 | } else { |
1550 | BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg) |
1551 | .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE); |
1552 | unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC; |
1553 | if (Subtarget.isTargetILP32()) { |
1554 | unsigned Reg32 = TRI->getSubReg(Reg, AArch64::sub_32); |
1555 | BuildMI(MBB, MI, DL, get(AArch64::LDRWui)) |
1556 | .addDef(Reg32, RegState::Dead) |
1557 | .addUse(Reg, RegState::Kill) |
1558 | .addGlobalAddress(GV, 0, LoFlags) |
1559 | .addMemOperand(*MI.memoperands_begin()) |
1560 | .addDef(Reg, RegState::Implicit); |
1561 | } else { |
1562 | BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg) |
1563 | .addReg(Reg, RegState::Kill) |
1564 | .addGlobalAddress(GV, 0, LoFlags) |
1565 | .addMemOperand(*MI.memoperands_begin()); |
1566 | } |
1567 | } |
1568 | |
1569 | MBB.erase(MI); |
1570 | |
1571 | return true; |
1572 | } |
1573 | |
1574 | // Return true if this instruction simply sets its single destination register |
1575 | // to zero. This is equivalent to a register rename of the zero-register. |
1576 | bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) { |
1577 | switch (MI.getOpcode()) { |
1578 | default: |
1579 | break; |
1580 | case AArch64::MOVZWi: |
1581 | case AArch64::MOVZXi: // movz Rd, #0 (LSL #0) |
1582 | if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) { |
1583 | assert(MI.getDesc().getNumOperands() == 3 && |
1584 | MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands"); |
1585 | return true; |
1586 | } |
1587 | break; |
1588 | case AArch64::ANDWri: // and Rd, Rzr, #imm |
1589 | return MI.getOperand(1).getReg() == AArch64::WZR; |
1590 | case AArch64::ANDXri: |
1591 | return MI.getOperand(1).getReg() == AArch64::XZR; |
1592 | case TargetOpcode::COPY: |
1593 | return MI.getOperand(1).getReg() == AArch64::WZR; |
1594 | } |
1595 | return false; |
1596 | } |
1597 | |
1598 | // Return true if this instruction simply renames a general register without |
1599 | // modifying bits. |
1600 | bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) { |
1601 | switch (MI.getOpcode()) { |
1602 | default: |
1603 | break; |
1604 | case TargetOpcode::COPY: { |
1605 | // GPR32 copies will be lowered to ORRXrs |
1606 | Register DstReg = MI.getOperand(0).getReg(); |
1607 | return (AArch64::GPR32RegClass.contains(DstReg) || |
1608 | AArch64::GPR64RegClass.contains(DstReg)); |
1609 | } |
1610 | case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0) |
1611 | if (MI.getOperand(1).getReg() == AArch64::XZR) { |
1612 | assert(MI.getDesc().getNumOperands() == 4 && |
1613 | MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands"); |
1614 | return true; |
1615 | } |
1616 | break; |
1617 | case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0) |
1618 | if (MI.getOperand(2).getImm() == 0) { |
1619 | assert(MI.getDesc().getNumOperands() == 4 && |
1620 | MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands"); |
1621 | return true; |
1622 | } |
1623 | break; |
1624 | } |
1625 | return false; |
1626 | } |
1627 | |
1628 | // Return true if this instruction simply renames an FP register without |
1629 | // modifying bits. |
1630 | bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) { |
1631 | switch (MI.getOpcode()) { |
1632 | default: |
1633 | break; |
1634 | case TargetOpcode::COPY: { |
1635 | // FPR64 copies will be lowered to ORR.16b |
1636 | Register DstReg = MI.getOperand(0).getReg(); |
1637 | return (AArch64::FPR64RegClass.contains(DstReg) || |
1638 | AArch64::FPR128RegClass.contains(DstReg)); |
1639 | } |
1640 | case AArch64::ORRv16i8: |
1641 | if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) { |
1642 | assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() && |
1643 | "invalid ORRv16i8 operands"); |
1644 | return true; |
1645 | } |
1646 | break; |
1647 | } |
1648 | return false; |
1649 | } |
1650 | |
1651 | unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI, |
1652 | int &FrameIndex) const { |
1653 | switch (MI.getOpcode()) { |
1654 | default: |
1655 | break; |
1656 | case AArch64::LDRWui: |
1657 | case AArch64::LDRXui: |
1658 | case AArch64::LDRBui: |
1659 | case AArch64::LDRHui: |
1660 | case AArch64::LDRSui: |
1661 | case AArch64::LDRDui: |
1662 | case AArch64::LDRQui: |
1663 | if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() && |
1664 | MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) { |
1665 | FrameIndex = MI.getOperand(1).getIndex(); |
1666 | return MI.getOperand(0).getReg(); |
1667 | } |
1668 | break; |
1669 | } |
1670 | |
1671 | return 0; |
1672 | } |
1673 | |
1674 | unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI, |
1675 | int &FrameIndex) const { |
1676 | switch (MI.getOpcode()) { |
1677 | default: |
1678 | break; |
1679 | case AArch64::STRWui: |
1680 | case AArch64::STRXui: |
1681 | case AArch64::STRBui: |
1682 | case AArch64::STRHui: |
1683 | case AArch64::STRSui: |
1684 | case AArch64::STRDui: |
1685 | case AArch64::STRQui: |
1686 | if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() && |
1687 | MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) { |
1688 | FrameIndex = MI.getOperand(1).getIndex(); |
1689 | return MI.getOperand(0).getReg(); |
1690 | } |
1691 | break; |
1692 | } |
1693 | return 0; |
1694 | } |
1695 | |
1696 | /// Check all MachineMemOperands for a hint to suppress pairing. |
1697 | bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) { |
1698 | return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) { |
1699 | return MMO->getFlags() & MOSuppressPair; |
1700 | }); |
1701 | } |
1702 | |
1703 | /// Set a flag on the first MachineMemOperand to suppress pairing. |
1704 | void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) { |
1705 | if (MI.memoperands_empty()) |
1706 | return; |
1707 | (*MI.memoperands_begin())->setFlags(MOSuppressPair); |
1708 | } |
1709 | |
1710 | /// Check all MachineMemOperands for a hint that the load/store is strided. |
1711 | bool AArch64InstrInfo::isStridedAccess(const MachineInstr &MI) { |
1712 | return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) { |
1713 | return MMO->getFlags() & MOStridedAccess; |
1714 | }); |
1715 | } |
1716 | |
1717 | bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) { |
1718 | switch (Opc) { |
1719 | default: |
1720 | return false; |
1721 | case AArch64::STURSi: |
1722 | case AArch64::STURDi: |
1723 | case AArch64::STURQi: |
1724 | case AArch64::STURBBi: |
1725 | case AArch64::STURHHi: |
1726 | case AArch64::STURWi: |
1727 | case AArch64::STURXi: |
1728 | case AArch64::LDURSi: |
1729 | case AArch64::LDURDi: |
1730 | case AArch64::LDURQi: |
1731 | case AArch64::LDURWi: |
1732 | case AArch64::LDURXi: |
1733 | case AArch64::LDURSWi: |
1734 | case AArch64::LDURHHi: |
1735 | case AArch64::LDURBBi: |
1736 | case AArch64::LDURSBWi: |
1737 | case AArch64::LDURSHWi: |
1738 | return true; |
1739 | } |
1740 | } |
1741 | |
1742 | Optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) { |
1743 | switch (Opc) { |
1744 | default: return {}; |
1745 | case AArch64::PRFMui: return AArch64::PRFUMi; |
1746 | case AArch64::LDRXui: return AArch64::LDURXi; |
1747 | case AArch64::LDRWui: return AArch64::LDURWi; |
1748 | case AArch64::LDRBui: return AArch64::LDURBi; |
1749 | case AArch64::LDRHui: return AArch64::LDURHi; |
1750 | case AArch64::LDRSui: return AArch64::LDURSi; |
1751 | case AArch64::LDRDui: return AArch64::LDURDi; |
1752 | case AArch64::LDRQui: return AArch64::LDURQi; |
1753 | case AArch64::LDRBBui: return AArch64::LDURBBi; |
1754 | case AArch64::LDRHHui: return AArch64::LDURHHi; |
1755 | case AArch64::LDRSBXui: return AArch64::LDURSBXi; |
1756 | case AArch64::LDRSBWui: return AArch64::LDURSBWi; |
1757 | case AArch64::LDRSHXui: return AArch64::LDURSHXi; |
1758 | case AArch64::LDRSHWui: return AArch64::LDURSHWi; |
1759 | case AArch64::LDRSWui: return AArch64::LDURSWi; |
1760 | case AArch64::STRXui: return AArch64::STURXi; |
1761 | case AArch64::STRWui: return AArch64::STURWi; |
1762 | case AArch64::STRBui: return AArch64::STURBi; |
1763 | case AArch64::STRHui: return AArch64::STURHi; |
1764 | case AArch64::STRSui: return AArch64::STURSi; |
1765 | case AArch64::STRDui: return AArch64::STURDi; |
1766 | case AArch64::STRQui: return AArch64::STURQi; |
1767 | case AArch64::STRBBui: return AArch64::STURBBi; |
1768 | case AArch64::STRHHui: return AArch64::STURHHi; |
1769 | } |
1770 | } |
1771 | |
1772 | unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) { |
1773 | switch (Opc) { |
1774 | default: |
1775 | return 2; |
1776 | case AArch64::LDPXi: |
1777 | case AArch64::LDPDi: |
1778 | case AArch64::STPXi: |
1779 | case AArch64::STPDi: |
1780 | case AArch64::LDNPXi: |
1781 | case AArch64::LDNPDi: |
1782 | case AArch64::STNPXi: |
1783 | case AArch64::STNPDi: |
1784 | case AArch64::LDPQi: |
1785 | case AArch64::STPQi: |
1786 | case AArch64::LDNPQi: |
1787 | case AArch64::STNPQi: |
1788 | case AArch64::LDPWi: |
1789 | case AArch64::LDPSi: |
1790 | case AArch64::STPWi: |
1791 | case AArch64::STPSi: |
1792 | case AArch64::LDNPWi: |
1793 | case AArch64::LDNPSi: |
1794 | case AArch64::STNPWi: |
1795 | case AArch64::STNPSi: |
1796 | case AArch64::LDG: |
1797 | case AArch64::STGPi: |
1798 | return 3; |
1799 | case AArch64::ADDG: |
1800 | case AArch64::STGOffset: |
1801 | return 2; |
1802 | } |
1803 | } |
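// [Editorial operand-layout sketch; assembly shown for orientation]
//   ldr x1, [x0, #8]       -> operands (x1, x0, imm)     : imm at index 2
//   ldp x1, x2, [x0, #16]  -> operands (x1, x2, x0, imm) : imm at index 3
// which is exactly the 2-vs-3 split encoded in the switch above.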
1804 | |
1805 | bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) { |
1806 | switch (MI.getOpcode()) { |
1807 | default: |
1808 | return false; |
1809 | // Scaled instructions. |
1810 | case AArch64::STRSui: |
1811 | case AArch64::STRDui: |
1812 | case AArch64::STRQui: |
1813 | case AArch64::STRXui: |
1814 | case AArch64::STRWui: |
1815 | case AArch64::LDRSui: |
1816 | case AArch64::LDRDui: |
1817 | case AArch64::LDRQui: |
1818 | case AArch64::LDRXui: |
1819 | case AArch64::LDRWui: |
1820 | case AArch64::LDRSWui: |
1821 | // Unscaled instructions. |
1822 | case AArch64::STURSi: |
1823 | case AArch64::STURDi: |
1824 | case AArch64::STURQi: |
1825 | case AArch64::STURWi: |
1826 | case AArch64::STURXi: |
1827 | case AArch64::LDURSi: |
1828 | case AArch64::LDURDi: |
1829 | case AArch64::LDURQi: |
1830 | case AArch64::LDURWi: |
1831 | case AArch64::LDURXi: |
1832 | case AArch64::LDURSWi: |
1833 | return true; |
1834 | } |
1835 | } |
1836 | |
1837 | unsigned AArch64InstrInfo::convertToFlagSettingOpc(unsigned Opc, |
1838 | bool &Is64Bit) { |
1839 | switch (Opc) { |
1840 | default: |
1841 | llvm_unreachable("Opcode has no flag setting equivalent!"); |
1842 | // 32-bit cases: |
1843 | case AArch64::ADDWri: |
1844 | Is64Bit = false; |
1845 | return AArch64::ADDSWri; |
1846 | case AArch64::ADDWrr: |
1847 | Is64Bit = false; |
1848 | return AArch64::ADDSWrr; |
1849 | case AArch64::ADDWrs: |
1850 | Is64Bit = false; |
1851 | return AArch64::ADDSWrs; |
1852 | case AArch64::ADDWrx: |
1853 | Is64Bit = false; |
1854 | return AArch64::ADDSWrx; |
1855 | case AArch64::ANDWri: |
1856 | Is64Bit = false; |
1857 | return AArch64::ANDSWri; |
1858 | case AArch64::ANDWrr: |
1859 | Is64Bit = false; |
1860 | return AArch64::ANDSWrr; |
1861 | case AArch64::ANDWrs: |
1862 | Is64Bit = false; |
1863 | return AArch64::ANDSWrs; |
1864 | case AArch64::BICWrr: |
1865 | Is64Bit = false; |
1866 | return AArch64::BICSWrr; |
1867 | case AArch64::BICWrs: |
1868 | Is64Bit = false; |
1869 | return AArch64::BICSWrs; |
1870 | case AArch64::SUBWri: |
1871 | Is64Bit = false; |
1872 | return AArch64::SUBSWri; |
1873 | case AArch64::SUBWrr: |
1874 | Is64Bit = false; |
1875 | return AArch64::SUBSWrr; |
1876 | case AArch64::SUBWrs: |
1877 | Is64Bit = false; |
1878 | return AArch64::SUBSWrs; |
1879 | case AArch64::SUBWrx: |
1880 | Is64Bit = false; |
1881 | return AArch64::SUBSWrx; |
1882 | // 64-bit cases: |
1883 | case AArch64::ADDXri: |
1884 | Is64Bit = true; |
1885 | return AArch64::ADDSXri; |
1886 | case AArch64::ADDXrr: |
1887 | Is64Bit = true; |
1888 | return AArch64::ADDSXrr; |
1889 | case AArch64::ADDXrs: |
1890 | Is64Bit = true; |
1891 | return AArch64::ADDSXrs; |
1892 | case AArch64::ADDXrx: |
1893 | Is64Bit = true; |
1894 | return AArch64::ADDSXrx; |
1895 | case AArch64::ANDXri: |
1896 | Is64Bit = true; |
1897 | return AArch64::ANDSXri; |
1898 | case AArch64::ANDXrr: |
1899 | Is64Bit = true; |
1900 | return AArch64::ANDSXrr; |
1901 | case AArch64::ANDXrs: |
1902 | Is64Bit = true; |
1903 | return AArch64::ANDSXrs; |
1904 | case AArch64::BICXrr: |
1905 | Is64Bit = true; |
1906 | return AArch64::BICSXrr; |
1907 | case AArch64::BICXrs: |
1908 | Is64Bit = true; |
1909 | return AArch64::BICSXrs; |
1910 | case AArch64::SUBXri: |
1911 | Is64Bit = true; |
1912 | return AArch64::SUBSXri; |
1913 | case AArch64::SUBXrr: |
1914 | Is64Bit = true; |
1915 | return AArch64::SUBSXrr; |
1916 | case AArch64::SUBXrs: |
1917 | Is64Bit = true; |
1918 | return AArch64::SUBSXrs; |
1919 | case AArch64::SUBXrx: |
1920 | Is64Bit = true; |
1921 | return AArch64::SUBSXrx; |
1922 | } |
1923 | } |
1924 | |
1925 | // Is this a candidate for ld/st merging or pairing? For example, we don't |
1926 | // touch volatiles or load/stores that have a hint to avoid pair formation. |
1927 | bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const { |
1928 | // If this is a volatile load/store, don't mess with it. |
1929 | if (MI.hasOrderedMemoryRef()) |
1930 | return false; |
1931 | |
1932 | // Make sure this is a reg/fi+imm (as opposed to an address reloc). |
1933 | assert((MI.getOperand(1).isReg() || MI.getOperand(1).isFI()) && |
1934 | "Expected a reg or frame index operand."); |
1935 | if (!MI.getOperand(2).isImm()) |
1936 | return false; |
1937 | |
1938 | // Can't merge/pair if the instruction modifies the base register. |
1939 | // e.g., ldr x0, [x0] |
1940 | // This case will never occur with an FI base. |
1941 | if (MI.getOperand(1).isReg()) { |
1942 | Register BaseReg = MI.getOperand(1).getReg(); |
1943 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
1944 | if (MI.modifiesRegister(BaseReg, TRI)) |
1945 | return false; |
1946 | } |
1947 | |
1948 | // Check if this load/store has a hint to avoid pair formation. |
1949 | // MachineMemOperands hints are set by the AArch64StorePairSuppress pass. |
1950 | if (isLdStPairSuppressed(MI)) |
1951 | return false; |
1952 | |
1953 | // Do not pair any callee-save store/reload instructions in the |
1954 | // prologue/epilogue if the CFI information encoded the operations as separate |
1955 | // instructions, as that will cause the size of the actual prologue to mismatch |
1956 | // with the prologue size recorded in the Windows CFI. |
1957 | const MCAsmInfo *MAI = MI.getMF()->getTarget().getMCAsmInfo(); |
1958 | bool NeedsWinCFI = MAI->usesWindowsCFI() && |
1959 | MI.getMF()->getFunction().needsUnwindTableEntry(); |
1960 | if (NeedsWinCFI && (MI.getFlag(MachineInstr::FrameSetup) || |
1961 | MI.getFlag(MachineInstr::FrameDestroy))) |
1962 | return false; |
1963 | |
1964 | // On some CPUs quad load/store pairs are slower than two single load/stores. |
1965 | if (Subtarget.isPaired128Slow()) { |
1966 | switch (MI.getOpcode()) { |
1967 | default: |
1968 | break; |
1969 | case AArch64::LDURQi: |
1970 | case AArch64::STURQi: |
1971 | case AArch64::LDRQui: |
1972 | case AArch64::STRQui: |
1973 | return false; |
1974 | } |
1975 | } |
1976 | |
1977 | return true; |
1978 | } |
1979 | |
1980 | bool AArch64InstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt, |
1981 | const MachineOperand *&BaseOp, |
1982 | int64_t &Offset, |
1983 | const TargetRegisterInfo *TRI) const { |
1984 | unsigned Width; |
1985 | return getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI); |
1986 | } |
1987 | |
1988 | bool AArch64InstrInfo::getMemOperandWithOffsetWidth( |
1989 | const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, |
1990 | unsigned &Width, const TargetRegisterInfo *TRI) const { |
1991 | assert(LdSt.mayLoadOrStore() && "Expected a memory operation."); |
1992 | // Handle only loads/stores with base register followed by immediate offset. |
1993 | if (LdSt.getNumExplicitOperands() == 3) { |
1994 | // Non-paired instruction (e.g., ldr x1, [x0, #8]). |
1995 | if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) || |
1996 | !LdSt.getOperand(2).isImm()) |
1997 | return false; |
1998 | } else if (LdSt.getNumExplicitOperands() == 4) { |
1999 | // Paired instruction (e.g., ldp x1, x2, [x0, #8]). |
2000 | if (!LdSt.getOperand(1).isReg() || |
2001 | (!LdSt.getOperand(2).isReg() && !LdSt.getOperand(2).isFI()) || |
2002 | !LdSt.getOperand(3).isImm()) |
2003 | return false; |
2004 | } else |
2005 | return false; |
2006 | |
2007 | // Get the scaling factor for the instruction and set the width for the |
2008 | // instruction. |
2009 | unsigned Scale = 0; |
2010 | int64_t Dummy1, Dummy2; |
2011 | |
2012 | // If this returns false, then it's an instruction we don't want to handle. |
2013 | if (!getMemOpInfo(LdSt.getOpcode(), Scale, Width, Dummy1, Dummy2)) |
2014 | return false; |
2015 | |
2016 | // Compute the offset. Offset is calculated as the immediate operand |
2017 | // multiplied by the scaling factor. Unscaled instructions have scaling factor |
2018 | // set to 1. |
2019 | if (LdSt.getNumExplicitOperands() == 3) { |
2020 | BaseOp = &LdSt.getOperand(1); |
2021 | Offset = LdSt.getOperand(2).getImm() * Scale; |
2022 | } else { |
2023 | assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands"); |
2024 | BaseOp = &LdSt.getOperand(2); |
2025 | Offset = LdSt.getOperand(3).getImm() * Scale; |
2026 | } |
2027 | |
2028 | assert((BaseOp->isReg() || BaseOp->isFI()) && |
2029 | "getMemOperandWithOffset only supports base " |
2030 | "operands of type register or frame index."); |
2031 | |
2032 | return true; |
2033 | } |
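// [Editorial worked example] For 'ldr x1, [x0, #8]' (LDRXui) the encoded
// immediate is 1 and getMemOpInfo reports Scale = 8, so this returns
// BaseOp = x0, Offset = 1 * 8 = 8 bytes, and Width = 8. Unscaled forms
// such as LDURXi keep Scale = 1, so their immediate already is the byte
// offset.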
2034 | |
2035 | MachineOperand & |
2036 | AArch64InstrInfo::getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const { |
2037 | assert(LdSt.mayLoadOrStore() && "Expected a memory operation."); |
2038 | MachineOperand &OfsOp = LdSt.getOperand(LdSt.getNumExplicitOperands() - 1); |
2039 | assert(OfsOp.isImm() && "Offset operand wasn't immediate."); |
2040 | return OfsOp; |
2041 | } |
2042 | |
2043 | bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, unsigned &Scale, |
2044 | unsigned &Width, int64_t &MinOffset, |
2045 | int64_t &MaxOffset) { |
2046 | switch (Opcode) { |
2047 | // Not a memory operation or something we want to handle. |
2048 | default: |
2049 | Scale = Width = 0; |
2050 | MinOffset = MaxOffset = 0; |
2051 | return false; |
2052 | case AArch64::STRWpost: |
2053 | case AArch64::LDRWpost: |
2054 | Width = 32; |
2055 | Scale = 4; |
2056 | MinOffset = -256; |
2057 | MaxOffset = 255; |
2058 | break; |
2059 | case AArch64::LDURQi: |
2060 | case AArch64::STURQi: |
2061 | Width = 16; |
2062 | Scale = 1; |
2063 | MinOffset = -256; |
2064 | MaxOffset = 255; |
2065 | break; |
2066 | case AArch64::PRFUMi: |
2067 | case AArch64::LDURXi: |
2068 | case AArch64::LDURDi: |
2069 | case AArch64::STURXi: |
2070 | case AArch64::STURDi: |
2071 | Width = 8; |
2072 | Scale = 1; |
2073 | MinOffset = -256; |
2074 | MaxOffset = 255; |
2075 | break; |
2076 | case AArch64::LDURWi: |
2077 | case AArch64::LDURSi: |
2078 | case AArch64::LDURSWi: |
2079 | case AArch64::STURWi: |
2080 | case AArch64::STURSi: |
2081 | Width = 4; |
2082 | Scale = 1; |
2083 | MinOffset = -256; |
2084 | MaxOffset = 255; |
2085 | break; |
2086 | case AArch64::LDURHi: |
2087 | case AArch64::LDURHHi: |
2088 | case AArch64::LDURSHXi: |
2089 | case AArch64::LDURSHWi: |
2090 | case AArch64::STURHi: |
2091 | case AArch64::STURHHi: |
2092 | Width = 2; |
2093 | Scale = 1; |
2094 | MinOffset = -256; |
2095 | MaxOffset = 255; |
2096 | break; |
2097 | case AArch64::LDURBi: |
2098 | case AArch64::LDURBBi: |
2099 | case AArch64::LDURSBXi: |
2100 | case AArch64::LDURSBWi: |
2101 | case AArch64::STURBi: |
2102 | case AArch64::STURBBi: |
2103 | Width = 1; |
2104 | Scale = 1; |
2105 | MinOffset = -256; |
2106 | MaxOffset = 255; |
2107 | break; |
2108 | case AArch64::LDPQi: |
2109 | case AArch64::LDNPQi: |
2110 | case AArch64::STPQi: |
2111 | case AArch64::STNPQi: |
2112 | Scale = 16; |
2113 | Width = 32; |
2114 | MinOffset = -64; |
2115 | MaxOffset = 63; |
2116 | break; |
2117 | case AArch64::LDRQui: |
2118 | case AArch64::STRQui: |
2119 | Scale = Width = 16; |
2120 | MinOffset = 0; |
2121 | MaxOffset = 4095; |
2122 | break; |
2123 | case AArch64::LDPXi: |
2124 | case AArch64::LDPDi: |
2125 | case AArch64::LDNPXi: |
2126 | case AArch64::LDNPDi: |
2127 | case AArch64::STPXi: |
2128 | case AArch64::STPDi: |
2129 | case AArch64::STNPXi: |
2130 | case AArch64::STNPDi: |
2131 | Scale = 8; |
2132 | Width = 16; |
2133 | MinOffset = -64; |
2134 | MaxOffset = 63; |
2135 | break; |
2136 | case AArch64::PRFMui: |
2137 | case AArch64::LDRXui: |
2138 | case AArch64::LDRDui: |
2139 | case AArch64::STRXui: |
2140 | case AArch64::STRDui: |
2141 | Scale = Width = 8; |
2142 | MinOffset = 0; |
2143 | MaxOffset = 4095; |
2144 | break; |
2145 | case AArch64::LDPWi: |
2146 | case AArch64::LDPSi: |
2147 | case AArch64::LDNPWi: |
2148 | case AArch64::LDNPSi: |
2149 | case AArch64::STPWi: |
2150 | case AArch64::STPSi: |
2151 | case AArch64::STNPWi: |
2152 | case AArch64::STNPSi: |
2153 | Scale = 4; |
2154 | Width = 8; |
2155 | MinOffset = -64; |
2156 | MaxOffset = 63; |
2157 | break; |
2158 | case AArch64::LDRWui: |
2159 | case AArch64::LDRSui: |
2160 | case AArch64::LDRSWui: |
2161 | case AArch64::STRWui: |
2162 | case AArch64::STRSui: |
2163 | Scale = Width = 4; |
2164 | MinOffset = 0; |
2165 | MaxOffset = 4095; |
2166 | break; |
2167 | case AArch64::LDRHui: |
2168 | case AArch64::LDRHHui: |
2169 | case AArch64::LDRSHWui: |
2170 | case AArch64::LDRSHXui: |
2171 | case AArch64::STRHui: |
2172 | case AArch64::STRHHui: |
2173 | Scale = Width = 2; |
2174 | MinOffset = 0; |
2175 | MaxOffset = 4095; |
2176 | break; |
2177 | case AArch64::LDRBui: |
2178 | case AArch64::LDRBBui: |
2179 | case AArch64::LDRSBWui: |
2180 | case AArch64::LDRSBXui: |
2181 | case AArch64::STRBui: |
2182 | case AArch64::STRBBui: |
2183 | Scale = Width = 1; |
2184 | MinOffset = 0; |
2185 | MaxOffset = 4095; |
2186 | break; |
2187 | case AArch64::ADDG: |
2188 | case AArch64::TAGPstack: |
2189 | Scale = 16; |
2190 | Width = 0; |
2191 | MinOffset = 0; |
2192 | MaxOffset = 63; |
2193 | break; |
2194 | case AArch64::LDG: |
2195 | case AArch64::STGOffset: |
2196 | case AArch64::STZGOffset: |
2197 | Scale = Width = 16; |
2198 | MinOffset = -256; |
2199 | MaxOffset = 255; |
2200 | break; |
2201 | case AArch64::ST2GOffset: |
2202 | case AArch64::STZ2GOffset: |
2203 | Scale = 16; |
2204 | Width = 32; |
2205 | MinOffset = -256; |
2206 | MaxOffset = 255; |
2207 | break; |
2208 | case AArch64::STGPi: |
2209 | Scale = Width = 16; |
2210 | MinOffset = -64; |
2211 | MaxOffset = 63; |
2212 | break; |
2213 | } |
2214 | |
2215 | return true; |
2216 | } |
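// [Editorial usage sketch; a hypothetical caller, not part of this file]
//   unsigned Scale, Width;
//   int64_t MinOff, MaxOff;
//   if (AArch64InstrInfo::getMemOpInfo(AArch64::LDRXui, Scale, Width,
//                                      MinOff, MaxOff)) {
//     // Scale == 8, Width == 8, MinOff == 0, MaxOff == 4095: the
//     // reachable byte range is [0, 4095 * 8] in steps of 8.
//   }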
2217 | |
2218 | static unsigned getOffsetStride(unsigned Opc) { |
2219 | switch (Opc) { |
2220 | default: |
2221 | return 0; |
2222 | case AArch64::LDURQi: |
2223 | case AArch64::STURQi: |
2224 | return 16; |
2225 | case AArch64::LDURXi: |
2226 | case AArch64::LDURDi: |
2227 | case AArch64::STURXi: |
2228 | case AArch64::STURDi: |
2229 | return 8; |
2230 | case AArch64::LDURWi: |
2231 | case AArch64::LDURSi: |
2232 | case AArch64::LDURSWi: |
2233 | case AArch64::STURWi: |
2234 | case AArch64::STURSi: |
2235 | return 4; |
2236 | } |
2237 | } |
2238 | |
2239 | // Scale the unscaled offsets. Returns false if the unscaled offset can't be |
2240 | // scaled. |
2241 | static bool scaleOffset(unsigned Opc, int64_t &Offset) { |
2242 | unsigned OffsetStride = getOffsetStride(Opc); |
2243 | if (OffsetStride == 0) |
2244 | return false; |
2245 | // If the byte-offset isn't a multiple of the stride, we can't scale this |
2246 | // offset. |
2247 | if (Offset % OffsetStride != 0) |
2248 | return false; |
2249 | |
2250 | // Convert the byte-offset used by unscaled into an "element" offset used |
2251 | // by the scaled pair load/store instructions. |
2252 | Offset /= OffsetStride; |
2253 | return true; |
2254 | } |
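// [Editorial worked example] Pairing two LDURXi accesses at byte offsets
// 16 and 24: getOffsetStride(LDURXi) == 8, so scaleOffset turns 16 into
// element offset 2 and 24 into 3 (adjacent). A byte offset of 20 fails
// the divisibility check and the pairing is rejected.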
2255 | |
2256 | // Unscale the scaled offsets. Returns false if the scaled offset can't be |
2257 | // unscaled. |
2258 | static bool unscaleOffset(unsigned Opc, int64_t &Offset) { |
2259 | unsigned OffsetStride = getOffsetStride(Opc); |
2260 | if (OffsetStride == 0) |
2261 | return false; |
2262 | |
2263 | // Convert the "element" offset used by scaled pair load/store instructions |
2264 | // into the byte-offset used by unscaled. |
2265 | Offset *= OffsetStride; |
2266 | return true; |
2267 | } |
2268 | |
2269 | static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) { |
2270 | if (FirstOpc == SecondOpc) |
2271 | return true; |
2272 | // We can also pair sign-ext and zero-ext instructions. |
2273 | switch (FirstOpc) { |
2274 | default: |
2275 | return false; |
2276 | case AArch64::LDRWui: |
2277 | case AArch64::LDURWi: |
2278 | return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi; |
2279 | case AArch64::LDRSWui: |
2280 | case AArch64::LDURSWi: |
2281 | return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi; |
2282 | } |
2283 | // These instructions can't be paired based on their opcodes. |
2284 | return false; |
2285 | } |
2286 | |
2287 | static bool shouldClusterFI(const MachineFrameInfo &MFI, int FI1, |
2288 | int64_t Offset1, unsigned Opcode1, int FI2, |
2289 | int64_t Offset2, unsigned Opcode2) { |
2290 | // Accesses through fixed stack object frame indices may access a different |
2291 | // fixed stack slot. Check that the object offsets + offsets match. |
2292 | if (MFI.isFixedObjectIndex(FI1) && MFI.isFixedObjectIndex(FI2)) { |
2293 | int64_t ObjectOffset1 = MFI.getObjectOffset(FI1); |
2294 | int64_t ObjectOffset2 = MFI.getObjectOffset(FI2); |
2295 | assert(ObjectOffset1 <= ObjectOffset2 && "Object offsets are not ordered."); |
2296 | // Get the byte-offset from the object offset. |
2297 | if (!unscaleOffset(Opcode1, Offset1) || !unscaleOffset(Opcode2, Offset2)) |
2298 | return false; |
2299 | ObjectOffset1 += Offset1; |
2300 | ObjectOffset2 += Offset2; |
2301 | // Get the "element" index in the object. |
2302 | if (!scaleOffset(Opcode1, ObjectOffset1) || |
2303 | !scaleOffset(Opcode2, ObjectOffset2)) |
2304 | return false; |
2305 | return ObjectOffset1 + 1 == ObjectOffset2; |
2306 | } |
2307 | |
2308 | return FI1 == FI2; |
2309 | } |
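// [Editorial worked example, offsets assumed] Two fixed stack objects at
// ObjectOffset -16 and -8, each stored to by STURXi with instruction
// offset 0: unscaling leaves the offsets at 0, the summed byte addresses
// are -16 and -8, and scaling by the 8-byte stride gives element indices
// -2 and -1. They are consecutive, so the stores may be clustered even
// though their frame indices differ.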
2310 | |
2311 | /// Detect opportunities for ldp/stp formation. |
2312 | /// |
2313 | /// Only called for LdSt for which getMemOperandWithOffset returns true. |
2314 | bool AArch64InstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1, |
2315 | const MachineOperand &BaseOp2, |
2316 | unsigned NumLoads) const { |
2317 | const MachineInstr &FirstLdSt = *BaseOp1.getParent(); |
2318 | const MachineInstr &SecondLdSt = *BaseOp2.getParent(); |
2319 | if (BaseOp1.getType() != BaseOp2.getType()) |
2320 | return false; |
2321 | |
2322 | assert((BaseOp1.isReg() || BaseOp1.isFI()) && |
2323 | "Only base registers and frame indices are supported."); |
2324 | |
2325 | // Check for both base regs and base FI. |
2326 | if (BaseOp1.isReg() && BaseOp1.getReg() != BaseOp2.getReg()) |
2327 | return false; |
2328 | |
2329 | // Only cluster up to a single pair. |
2330 | if (NumLoads > 1) |
2331 | return false; |
2332 | |
2333 | if (!isPairableLdStInst(FirstLdSt) || !isPairableLdStInst(SecondLdSt)) |
2334 | return false; |
2335 | |
2336 | // Can we pair these instructions based on their opcodes? |
2337 | unsigned FirstOpc = FirstLdSt.getOpcode(); |
2338 | unsigned SecondOpc = SecondLdSt.getOpcode(); |
2339 | if (!canPairLdStOpc(FirstOpc, SecondOpc)) |
2340 | return false; |
2341 | |
2342 | // Can't merge volatiles or load/stores that have a hint to avoid pair |
2343 | // formation, for example. |
2344 | if (!isCandidateToMergeOrPair(FirstLdSt) || |
2345 | !isCandidateToMergeOrPair(SecondLdSt)) |
2346 | return false; |
2347 | |
2348 | // isCandidateToMergeOrPair guarantees that operand 2 is an immediate. |
2349 | int64_t Offset1 = FirstLdSt.getOperand(2).getImm(); |
2350 | if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1)) |
2351 | return false; |
2352 | |
2353 | int64_t Offset2 = SecondLdSt.getOperand(2).getImm(); |
2354 | if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2)) |
2355 | return false; |
2356 | |
2357 | // Pairwise instructions have a 7-bit signed offset field. |
2358 | if (Offset1 > 63 || Offset1 < -64) |
2359 | return false; |
2360 | |
2361 | // The caller should already have ordered First/SecondLdSt by offset. |
2362 | // Note: except for non-equal frame index bases |
2363 | if (BaseOp1.isFI()) { |
2364 | assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 >= Offset2) && |
2365 | "Caller should have ordered offsets."); |
2366 | |
2367 | const MachineFrameInfo &MFI = |
2368 | FirstLdSt.getParent()->getParent()->getFrameInfo(); |
2369 | return shouldClusterFI(MFI, BaseOp1.getIndex(), Offset1, FirstOpc, |
2370 | BaseOp2.getIndex(), Offset2, SecondOpc); |
2371 | } |
2372 | |
2373 | assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 <= Offset2) && |
2374 | "Caller should have ordered offsets."); |
2375 | |
2376 | return Offset1 + 1 == Offset2; |
2377 | } |
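// [Editorial sketch of the whole check] For
//   ldr x1, [x0, #8]    ; LDRXui, element offset 1
//   ldr x2, [x0, #16]   ; LDRXui, element offset 2
// the bases match, both opcodes are pairable, Offset1 + 1 == Offset2,
// and offset 1 fits the signed 7-bit pair field, so the scheduler is
// told to cluster the two loads; a later pass may then form
// 'ldp x1, x2, [x0, #8]'.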
2378 | |
2379 | static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB, |
2380 | unsigned Reg, unsigned SubIdx, |
2381 | unsigned State, |
2382 | const TargetRegisterInfo *TRI) { |
2383 | if (!SubIdx) |
2384 | return MIB.addReg(Reg, State); |
2385 | |
2386 | if (Register::isPhysicalRegister(Reg)) |
2387 | return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State); |
2388 | return MIB.addReg(Reg, State, SubIdx); |
2389 | } |
2390 | |
2391 | static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, |
2392 | unsigned NumRegs) { |
2393 | // We really want the positive remainder mod 32 here; that happens to be |
2394 | // easily obtainable with a mask. |
2395 | return ((DestReg - SrcReg) & 0x1f) < NumRegs; |
2396 | } |
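// [Editorial worked example] Copying the 3-register tuple starting at
// encoding 1 into the one starting at encoding 2: (2 - 1) & 0x1f == 1,
// and 1 < 3, so a forward sub-register copy would overwrite the second
// source register before it is read and the copy must run backwards.
// With destination encoding 5: (5 - 1) & 0x1f == 4 >= 3, so the ranges
// do not overlap and forward order is safe.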
2397 | |
2398 | void AArch64InstrInfo::copyPhysRegTuple(MachineBasicBlock &MBB, |
2399 | MachineBasicBlock::iterator I, |
2400 | const DebugLoc &DL, unsigned DestReg, |
2401 | unsigned SrcReg, bool KillSrc, |
2402 | unsigned Opcode, |
2403 | ArrayRef<unsigned> Indices) const { |
2404 | assert(Subtarget.hasNEON() && "Unexpected register copy without NEON"); |
2405 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
2406 | uint16_t DestEncoding = TRI->getEncodingValue(DestReg); |
2407 | uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg); |
2408 | unsigned NumRegs = Indices.size(); |
2409 | |
2410 | int SubReg = 0, End = NumRegs, Incr = 1; |
2411 | if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) { |
2412 | SubReg = NumRegs - 1; |
2413 | End = -1; |
2414 | Incr = -1; |
2415 | } |
2416 | |
2417 | for (; SubReg != End; SubReg += Incr) { |
2418 | const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode)); |
2419 | AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI); |
2420 | AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI); |
2421 | AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI); |
2422 | } |
2423 | } |
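// [Editorial sketch of the emitted sequence] Copying Q1_Q2 into Q0_Q1
// with Opcode ORRv16i8 runs forward ((0 - 1) & 0x1f == 31 >= 2):
//   ORRv16i8 q0, q1, q1
//   ORRv16i8 q1, q2, q2
// Copying Q0_Q1 into Q1_Q2 instead runs backwards, so q1 is read before
// the sequence overwrites it.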
2424 | |
2425 | void AArch64InstrInfo::copyGPRRegTuple(MachineBasicBlock &MBB, |
2426 | MachineBasicBlock::iterator I, |
2427 | DebugLoc DL, unsigned DestReg, |
2428 | unsigned SrcReg, bool KillSrc, |
2429 | unsigned Opcode, unsigned ZeroReg, |
2430 | llvm::ArrayRef<unsigned> Indices) const { |
2431 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
2432 | unsigned NumRegs = Indices.size(); |
2433 | |
2434 | #ifndef NDEBUG |
2435 | uint16_t DestEncoding = TRI->getEncodingValue(DestReg); |
2436 | uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg); |
2437 | assert(DestEncoding % NumRegs == 0 && SrcEncoding % NumRegs == 0 && |
2438 | "GPR reg sequences should not be able to overlap"); |
2439 | #endif |
2440 | |
2441 | for (unsigned SubReg = 0; SubReg != NumRegs; ++SubReg) { |
2442 | const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode)); |
2443 | AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI); |
2444 | MIB.addReg(ZeroReg); |
2445 | AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI); |
2446 | MIB.addImm(0); |
2447 | } |
2448 | } |
2449 | |
2450 | void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB, |
2451 | MachineBasicBlock::iterator I, |
2452 | const DebugLoc &DL, unsigned DestReg, |
2453 | unsigned SrcReg, bool KillSrc) const { |
2454 | if (AArch64::GPR32spRegClass.contains(DestReg) && |
2455 | (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) { |
2456 | const TargetRegisterInfo *TRI = &getRegisterInfo(); |
2457 | |
2458 | if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) { |
2459 | // If either operand is WSP, expand to ADD #0. |
2460 | if (Subtarget.hasZeroCycleRegMove()) { |
2461 | // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move. |
2462 | unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32, |
2463 | &AArch64::GPR64spRegClass); |
2464 | unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32, |
2465 | &AArch64::GPR64spRegClass); |
2466 | // This instruction is reading and writing X registers. This may upset |
2467 | // the register scavenger and machine verifier, so we need to indicate |
2468 | // that we are reading an undefined value from SrcRegX, but a proper |
2469 | // value from SrcReg. |
2470 | BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX) |
2471 | .addReg(SrcRegX, RegState::Undef) |
2472 | .addImm(0) |
2473 | .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)) |
2474 | .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc)); |
2475 | } else { |
2476 | BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg) |
2477 | .addReg(SrcReg, getKillRegState(KillSrc)) |
2478 | .addImm(0) |
2479 | .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); |
2480 | } |
2481 | } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroingGP()) { |
2482 | BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg) |
2483 | .addImm(0) |
2484 | .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); |
2485 | } else { |
2486 | if (Subtarget.hasZeroCycleRegMove()) { |
2487 | // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move. |
2488 | unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32, |
2489 | &AArch64::GPR64spRegClass); |
2490 | unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32, |
2491 | &AArch64::GPR64spRegClass); |
2492 | // This instruction is reading and writing X registers. This may upset |
2493 | // the register scavenger and machine verifier, so we need to indicate |
2494 | // that we are reading an undefined value from SrcRegX, but a proper |
2495 | // value from SrcReg. |
2496 | BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX) |
2497 | .addReg(AArch64::XZR) |
2498 | .addReg(SrcRegX, RegState::Undef) |
2499 | .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc)); |
2500 | } else { |
2501 | // Otherwise, expand to ORR WZR. |
2502 | BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg) |
2503 | .addReg(AArch64::WZR) |
2504 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2505 | } |
2506 | } |
2507 | return; |
2508 | } |
2509 | |
2510 | // Copy a Predicate register by ORRing with itself. |
2511 | if (AArch64::PPRRegClass.contains(DestReg) && |
2512 | AArch64::PPRRegClass.contains(SrcReg)) { |
2513 | assert(Subtarget.hasSVE() && "Unexpected SVE register."); |
2514 | BuildMI(MBB, I, DL, get(AArch64::ORR_PPzPP), DestReg) |
2515 | .addReg(SrcReg) // Pg |
2516 | .addReg(SrcReg) |
2517 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2518 | return; |
2519 | } |
2520 | |
2521 | // Copy a Z register by ORRing with itself. |
2522 | if (AArch64::ZPRRegClass.contains(DestReg) && |
2523 | AArch64::ZPRRegClass.contains(SrcReg)) { |
2524 | assert(Subtarget.hasSVE() && "Unexpected SVE register."); |
2525 | BuildMI(MBB, I, DL, get(AArch64::ORR_ZZZ), DestReg) |
2526 | .addReg(SrcReg) |
2527 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2528 | return; |
2529 | } |
2530 | |
2531 | if (AArch64::GPR64spRegClass.contains(DestReg) && |
2532 | (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) { |
2533 | if (DestReg == AArch64::SP || SrcReg == AArch64::SP) { |
2534 | // If either operand is SP, expand to ADD #0. |
2535 | BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg) |
2536 | .addReg(SrcReg, getKillRegState(KillSrc)) |
2537 | .addImm(0) |
2538 | .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); |
2539 | } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroingGP()) { |
2540 | BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg) |
2541 | .addImm(0) |
2542 | .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); |
2543 | } else { |
2544 | // Otherwise, expand to ORR XZR. |
2545 | BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg) |
2546 | .addReg(AArch64::XZR) |
2547 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2548 | } |
2549 | return; |
2550 | } |
2551 | |
2552 | // Copy a DDDD register quad by copying the individual sub-registers. |
2553 | if (AArch64::DDDDRegClass.contains(DestReg) && |
2554 | AArch64::DDDDRegClass.contains(SrcReg)) { |
2555 | static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1, |
2556 | AArch64::dsub2, AArch64::dsub3}; |
2557 | copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8, |
2558 | Indices); |
2559 | return; |
2560 | } |
2561 | |
2562 | // Copy a DDD register triple by copying the individual sub-registers. |
2563 | if (AArch64::DDDRegClass.contains(DestReg) && |
2564 | AArch64::DDDRegClass.contains(SrcReg)) { |
2565 | static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1, |
2566 | AArch64::dsub2}; |
2567 | copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8, |
2568 | Indices); |
2569 | return; |
2570 | } |
2571 | |
2572 | // Copy a DD register pair by copying the individual sub-registers. |
2573 | if (AArch64::DDRegClass.contains(DestReg) && |
2574 | AArch64::DDRegClass.contains(SrcReg)) { |
2575 | static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1}; |
2576 | copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8, |
2577 | Indices); |
2578 | return; |
2579 | } |
2580 | |
2581 | // Copy a QQQQ register quad by copying the individual sub-registers. |
2582 | if (AArch64::QQQQRegClass.contains(DestReg) && |
2583 | AArch64::QQQQRegClass.contains(SrcReg)) { |
2584 | static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1, |
2585 | AArch64::qsub2, AArch64::qsub3}; |
2586 | copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8, |
2587 | Indices); |
2588 | return; |
2589 | } |
2590 | |
2591 | // Copy a QQQ register triple by copying the individual sub-registers. |
2592 | if (AArch64::QQQRegClass.contains(DestReg) && |
2593 | AArch64::QQQRegClass.contains(SrcReg)) { |
2594 | static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1, |
2595 | AArch64::qsub2}; |
2596 | copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8, |
2597 | Indices); |
2598 | return; |
2599 | } |
2600 | |
2601 | // Copy a QQ register pair by copying the individual sub-registers. |
2602 | if (AArch64::QQRegClass.contains(DestReg) && |
2603 | AArch64::QQRegClass.contains(SrcReg)) { |
2604 | static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1}; |
2605 | copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8, |
2606 | Indices); |
2607 | return; |
2608 | } |
2609 | |
2610 | if (AArch64::XSeqPairsClassRegClass.contains(DestReg) && |
2611 | AArch64::XSeqPairsClassRegClass.contains(SrcReg)) { |
2612 | static const unsigned Indices[] = {AArch64::sube64, AArch64::subo64}; |
2613 | copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRXrs, |
2614 | AArch64::XZR, Indices); |
2615 | return; |
2616 | } |
2617 | |
2618 | if (AArch64::WSeqPairsClassRegClass.contains(DestReg) && |
2619 | AArch64::WSeqPairsClassRegClass.contains(SrcReg)) { |
2620 | static const unsigned Indices[] = {AArch64::sube32, AArch64::subo32}; |
2621 | copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRWrs, |
2622 | AArch64::WZR, Indices); |
2623 | return; |
2624 | } |
2625 | |
2626 | if (AArch64::FPR128RegClass.contains(DestReg) && |
2627 | AArch64::FPR128RegClass.contains(SrcReg)) { |
2628 | if (Subtarget.hasNEON()) { |
2629 | BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg) |
2630 | .addReg(SrcReg) |
2631 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2632 | } else { |
2633 | BuildMI(MBB, I, DL, get(AArch64::STRQpre)) |
2634 | .addReg(AArch64::SP, RegState::Define) |
2635 | .addReg(SrcReg, getKillRegState(KillSrc)) |
2636 | .addReg(AArch64::SP) |
2637 | .addImm(-16); |
2638 | BuildMI(MBB, I, DL, get(AArch64::LDRQpre)) |
2639 | .addReg(AArch64::SP, RegState::Define) |
2640 | .addReg(DestReg, RegState::Define) |
2641 | .addReg(AArch64::SP) |
2642 | .addImm(16); |
2643 | } |
2644 | return; |
2645 | } |
2646 | |
2647 | if (AArch64::FPR64RegClass.contains(DestReg) && |
2648 | AArch64::FPR64RegClass.contains(SrcReg)) { |
2649 | if (Subtarget.hasNEON()) { |
2650 | DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub, |
2651 | &AArch64::FPR128RegClass); |
2652 | SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub, |
2653 | &AArch64::FPR128RegClass); |
2654 | BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg) |
2655 | .addReg(SrcReg) |
2656 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2657 | } else { |
2658 | BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg) |
2659 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2660 | } |
2661 | return; |
2662 | } |
2663 | |
2664 | if (AArch64::FPR32RegClass.contains(DestReg) && |
2665 | AArch64::FPR32RegClass.contains(SrcReg)) { |
2666 | if (Subtarget.hasNEON()) { |
2667 | DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub, |
2668 | &AArch64::FPR128RegClass); |
2669 | SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub, |
2670 | &AArch64::FPR128RegClass); |
2671 | BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg) |
2672 | .addReg(SrcReg) |
2673 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2674 | } else { |
2675 | BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg) |
2676 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2677 | } |
2678 | return; |
2679 | } |
2680 | |
2681 | if (AArch64::FPR16RegClass.contains(DestReg) && |
2682 | AArch64::FPR16RegClass.contains(SrcReg)) { |
2683 | if (Subtarget.hasNEON()) { |
2684 | DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub, |
2685 | &AArch64::FPR128RegClass); |
2686 | SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub, |
2687 | &AArch64::FPR128RegClass); |
2688 | BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg) |
2689 | .addReg(SrcReg) |
2690 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2691 | } else { |
2692 | DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub, |
2693 | &AArch64::FPR32RegClass); |
2694 | SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub, |
2695 | &AArch64::FPR32RegClass); |
2696 | BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg) |
2697 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2698 | } |
2699 | return; |
2700 | } |
2701 | |
2702 | if (AArch64::FPR8RegClass.contains(DestReg) && |
2703 | AArch64::FPR8RegClass.contains(SrcReg)) { |
2704 | if (Subtarget.hasNEON()) { |
2705 | DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub, |
2706 | &AArch64::FPR128RegClass); |
2707 | SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub, |
2708 | &AArch64::FPR128RegClass); |
2709 | BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg) |
2710 | .addReg(SrcReg) |
2711 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2712 | } else { |
2713 | DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub, |
2714 | &AArch64::FPR32RegClass); |
2715 | SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub, |
2716 | &AArch64::FPR32RegClass); |
2717 | BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg) |
2718 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2719 | } |
2720 | return; |
2721 | } |
2722 | |
2723 | // Copies between GPR64 and FPR64. |
2724 | if (AArch64::FPR64RegClass.contains(DestReg) && |
2725 | AArch64::GPR64RegClass.contains(SrcReg)) { |
2726 | BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg) |
2727 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2728 | return; |
2729 | } |
2730 | if (AArch64::GPR64RegClass.contains(DestReg) && |
2731 | AArch64::FPR64RegClass.contains(SrcReg)) { |
2732 | BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg) |
2733 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2734 | return; |
2735 | } |
2736 | // Copies between GPR32 and FPR32. |
2737 | if (AArch64::FPR32RegClass.contains(DestReg) && |
2738 | AArch64::GPR32RegClass.contains(SrcReg)) { |
2739 | BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg) |
2740 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2741 | return; |
2742 | } |
2743 | if (AArch64::GPR32RegClass.contains(DestReg) && |
2744 | AArch64::FPR32RegClass.contains(SrcReg)) { |
2745 | BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg) |
2746 | .addReg(SrcReg, getKillRegState(KillSrc)); |
2747 | return; |
2748 | } |
2749 | |
2750 | if (DestReg == AArch64::NZCV) { |
2751 | assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy"); |
2752 | BuildMI(MBB, I, DL, get(AArch64::MSR)) |
2753 | .addImm(AArch64SysReg::NZCV) |
2754 | .addReg(SrcReg, getKillRegState(KillSrc)) |
2755 | .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define); |
2756 | return; |
2757 | } |
2758 | |
2759 | if (SrcReg == AArch64::NZCV) { |
2760 | assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy"); |
2761 | BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg) |
2762 | .addImm(AArch64SysReg::NZCV) |
2763 | .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc)); |
2764 | return; |
2765 | } |
2766 | |
2767 | llvm_unreachable("unimplemented reg-to-reg copy"); |
2768 | } |
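// Editor's note (not part of the original source): a worked example of the
// GPR copy lowering above, with assumed registers. Copying w1 into w0 on a
// core without zero-cycle register moves emits:
//   orr w0, wzr, w1
// while on a core with zero-cycle moves (e.g. Cyclone) the copy is widened
// to the X form the hardware recognizes, with an implicit use of w1 so the
// machine verifier still sees a read of the real source:
//   orr x0, xzr, x1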
2769 | |
2770 | static void storeRegPairToStackSlot(const TargetRegisterInfo &TRI, |
2771 | MachineBasicBlock &MBB, |
2772 | MachineBasicBlock::iterator InsertBefore, |
2773 | const MCInstrDesc &MCID, |
2774 | unsigned SrcReg, bool IsKill, |
2775 | unsigned SubIdx0, unsigned SubIdx1, int FI, |
2776 | MachineMemOperand *MMO) { |
2777 | unsigned SrcReg0 = SrcReg; |
2778 | unsigned SrcReg1 = SrcReg; |
2779 | if (Register::isPhysicalRegister(SrcReg)) { |
2780 | SrcReg0 = TRI.getSubReg(SrcReg, SubIdx0); |
2781 | SubIdx0 = 0; |
2782 | SrcReg1 = TRI.getSubReg(SrcReg, SubIdx1); |
2783 | SubIdx1 = 0; |
2784 | } |
2785 | BuildMI(MBB, InsertBefore, DebugLoc(), MCID) |
2786 | .addReg(SrcReg0, getKillRegState(IsKill), SubIdx0) |
2787 | .addReg(SrcReg1, getKillRegState(IsKill), SubIdx1) |
2788 | .addFrameIndex(FI) |
2789 | .addImm(0) |
2790 | .addMemOperand(MMO); |
2791 | } |
2792 | |
2793 | void AArch64InstrInfo::storeRegToStackSlot( |
2794 | MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, |
2795 | bool isKill, int FI, const TargetRegisterClass *RC, |
2796 | const TargetRegisterInfo *TRI) const { |
2797 | MachineFunction &MF = *MBB.getParent(); |
2798 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
2799 | unsigned Align = MFI.getObjectAlignment(FI); |
2800 | |
2801 | MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI); |
2802 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
2803 | PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align); |
2804 | unsigned Opc = 0; |
2805 | bool Offset = true; |
2806 | switch (TRI->getSpillSize(*RC)) { |
2807 | case 1: |
2808 | if (AArch64::FPR8RegClass.hasSubClassEq(RC)) |
2809 | Opc = AArch64::STRBui; |
2810 | break; |
2811 | case 2: |
2812 | if (AArch64::FPR16RegClass.hasSubClassEq(RC)) |
2813 | Opc = AArch64::STRHui; |
2814 | break; |
2815 | case 4: |
2816 | if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) { |
2817 | Opc = AArch64::STRWui; |
2818 | if (Register::isVirtualRegister(SrcReg)) |
2819 | MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass); |
2820 | else |
2821 | assert(SrcReg != AArch64::WSP); |
2822 | } else if (AArch64::FPR32RegClass.hasSubClassEq(RC)) |
2823 | Opc = AArch64::STRSui; |
2824 | break; |
2825 | case 8: |
2826 | if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) { |
2827 | Opc = AArch64::STRXui; |
2828 | if (Register::isVirtualRegister(SrcReg)) |
2829 | MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass); |
2830 | else |
2831 | assert(SrcReg != AArch64::SP); |
2832 | } else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) { |
2833 | Opc = AArch64::STRDui; |
2834 | } else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) { |
2835 | storeRegPairToStackSlot(getRegisterInfo(), MBB, MBBI, |
2836 | get(AArch64::STPWi), SrcReg, isKill, |
2837 | AArch64::sube32, AArch64::subo32, FI, MMO); |
2838 | return; |
2839 | } |
2840 | break; |
2841 | case 16: |
2842 | if (AArch64::FPR128RegClass.hasSubClassEq(RC)) |
2843 | Opc = AArch64::STRQui; |
2844 | else if (AArch64::DDRegClass.hasSubClassEq(RC)) { |
2845 | assert(Subtarget.hasNEON() && "Unexpected register store without NEON"); |
2846 | Opc = AArch64::ST1Twov1d; |
2847 | Offset = false; |
2848 | } else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) { |
2849 | storeRegPairToStackSlot(getRegisterInfo(), MBB, MBBI, |
2850 | get(AArch64::STPXi), SrcReg, isKill, |
2851 | AArch64::sube64, AArch64::subo64, FI, MMO); |
2852 | return; |
2853 | } |
2854 | break; |
2855 | case 24: |
2856 | if (AArch64::DDDRegClass.hasSubClassEq(RC)) { |
2857 | assert(Subtarget.hasNEON() && "Unexpected register store without NEON"); |
2858 | Opc = AArch64::ST1Threev1d; |
2859 | Offset = false; |
2860 | } |
2861 | break; |
2862 | case 32: |
2863 | if (AArch64::DDDDRegClass.hasSubClassEq(RC)) { |
2864 | assert(Subtarget.hasNEON() && "Unexpected register store without NEON"); |
2865 | Opc = AArch64::ST1Fourv1d; |
2866 | Offset = false; |
2867 | } else if (AArch64::QQRegClass.hasSubClassEq(RC)) { |
2868 | assert(Subtarget.hasNEON() && "Unexpected register store without NEON"); |
2869 | Opc = AArch64::ST1Twov2d; |
2870 | Offset = false; |
2871 | } |
2872 | break; |
2873 | case 48: |
2874 | if (AArch64::QQQRegClass.hasSubClassEq(RC)) { |
2875 | assert(Subtarget.hasNEON() && "Unexpected register store without NEON"); |
2876 | Opc = AArch64::ST1Threev2d; |
2877 | Offset = false; |
2878 | } |
2879 | break; |
2880 | case 64: |
2881 | if (AArch64::QQQQRegClass.hasSubClassEq(RC)) { |
2882 | assert(Subtarget.hasNEON() && "Unexpected register store without NEON"); |
2883 | Opc = AArch64::ST1Fourv2d; |
2884 | Offset = false; |
2885 | } |
2886 | break; |
2887 | } |
2888 | assert(Opc && "Unknown register class"); |
2889 | |
2890 | const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc)) |
2891 | .addReg(SrcReg, getKillRegState(isKill)) |
2892 | .addFrameIndex(FI); |
2893 | |
2894 | if (Offset) |
2895 | MI.addImm(0); |
2896 | MI.addMemOperand(MMO); |
2897 | } |
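// Editor's note (illustrative, assumed frame index): for a plain GPR64 the
// switch above picks STRXui and the spill becomes "STRXui %reg, %stack.0, 0";
// for a QQ pair (32-byte spill) there is no immediate-offset form, so
// ST1Twov2d is used with Offset = false and no trailing #0 operand is added.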
2898 | |
2899 | static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI, |
2900 | MachineBasicBlock &MBB, |
2901 | MachineBasicBlock::iterator InsertBefore, |
2902 | const MCInstrDesc &MCID, |
2903 | unsigned DestReg, unsigned SubIdx0, |
2904 | unsigned SubIdx1, int FI, |
2905 | MachineMemOperand *MMO) { |
2906 | unsigned DestReg0 = DestReg; |
2907 | unsigned DestReg1 = DestReg; |
2908 | bool IsUndef = true; |
2909 | if (Register::isPhysicalRegister(DestReg)) { |
2910 | DestReg0 = TRI.getSubReg(DestReg, SubIdx0); |
2911 | SubIdx0 = 0; |
2912 | DestReg1 = TRI.getSubReg(DestReg, SubIdx1); |
2913 | SubIdx1 = 0; |
2914 | IsUndef = false; |
2915 | } |
2916 | BuildMI(MBB, InsertBefore, DebugLoc(), MCID) |
2917 | .addReg(DestReg0, RegState::Define | getUndefRegState(IsUndef), SubIdx0) |
2918 | .addReg(DestReg1, RegState::Define | getUndefRegState(IsUndef), SubIdx1) |
2919 | .addFrameIndex(FI) |
2920 | .addImm(0) |
2921 | .addMemOperand(MMO); |
2922 | } |
2923 | |
2924 | void AArch64InstrInfo::loadRegFromStackSlot( |
2925 | MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, |
2926 | int FI, const TargetRegisterClass *RC, |
2927 | const TargetRegisterInfo *TRI) const { |
2928 | MachineFunction &MF = *MBB.getParent(); |
2929 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
2930 | unsigned Align = MFI.getObjectAlignment(FI); |
2931 | MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI); |
2932 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
2933 | PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align); |
2934 | |
2935 | unsigned Opc = 0; |
2936 | bool Offset = true; |
2937 | switch (TRI->getSpillSize(*RC)) { |
2938 | case 1: |
2939 | if (AArch64::FPR8RegClass.hasSubClassEq(RC)) |
2940 | Opc = AArch64::LDRBui; |
2941 | break; |
2942 | case 2: |
2943 | if (AArch64::FPR16RegClass.hasSubClassEq(RC)) |
2944 | Opc = AArch64::LDRHui; |
2945 | break; |
2946 | case 4: |
2947 | if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) { |
2948 | Opc = AArch64::LDRWui; |
2949 | if (Register::isVirtualRegister(DestReg)) |
2950 | MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass); |
2951 | else |
2952 | assert(DestReg != AArch64::WSP); |
2953 | } else if (AArch64::FPR32RegClass.hasSubClassEq(RC)) |
2954 | Opc = AArch64::LDRSui; |
2955 | break; |
2956 | case 8: |
2957 | if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) { |
2958 | Opc = AArch64::LDRXui; |
2959 | if (Register::isVirtualRegister(DestReg)) |
2960 | MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass); |
2961 | else |
2962 | assert(DestReg != AArch64::SP); |
2963 | } else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) { |
2964 | Opc = AArch64::LDRDui; |
2965 | } else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) { |
2966 | loadRegPairFromStackSlot(getRegisterInfo(), MBB, MBBI, |
2967 | get(AArch64::LDPWi), DestReg, AArch64::sube32, |
2968 | AArch64::subo32, FI, MMO); |
2969 | return; |
2970 | } |
2971 | break; |
2972 | case 16: |
2973 | if (AArch64::FPR128RegClass.hasSubClassEq(RC)) |
2974 | Opc = AArch64::LDRQui; |
2975 | else if (AArch64::DDRegClass.hasSubClassEq(RC)) { |
2976 | assert(Subtarget.hasNEON() && "Unexpected register load without NEON"); |
2977 | Opc = AArch64::LD1Twov1d; |
2978 | Offset = false; |
2979 | } else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) { |
2980 | loadRegPairFromStackSlot(getRegisterInfo(), MBB, MBBI, |
2981 | get(AArch64::LDPXi), DestReg, AArch64::sube64, |
2982 | AArch64::subo64, FI, MMO); |
2983 | return; |
2984 | } |
2985 | break; |
2986 | case 24: |
2987 | if (AArch64::DDDRegClass.hasSubClassEq(RC)) { |
2988 | assert(Subtarget.hasNEON() && "Unexpected register load without NEON"); |
2989 | Opc = AArch64::LD1Threev1d; |
2990 | Offset = false; |
2991 | } |
2992 | break; |
2993 | case 32: |
2994 | if (AArch64::DDDDRegClass.hasSubClassEq(RC)) { |
2995 | assert(Subtarget.hasNEON() && "Unexpected register load without NEON"); |
2996 | Opc = AArch64::LD1Fourv1d; |
2997 | Offset = false; |
2998 | } else if (AArch64::QQRegClass.hasSubClassEq(RC)) { |
2999 | assert(Subtarget.hasNEON() && "Unexpected register load without NEON"); |
3000 | Opc = AArch64::LD1Twov2d; |
3001 | Offset = false; |
3002 | } |
3003 | break; |
3004 | case 48: |
3005 | if (AArch64::QQQRegClass.hasSubClassEq(RC)) { |
3006 | assert(Subtarget.hasNEON() && "Unexpected register load without NEON"); |
3007 | Opc = AArch64::LD1Threev2d; |
3008 | Offset = false; |
3009 | } |
3010 | break; |
3011 | case 64: |
3012 | if (AArch64::QQQQRegClass.hasSubClassEq(RC)) { |
3013 | assert(Subtarget.hasNEON() && "Unexpected register load without NEON"); |
3014 | Opc = AArch64::LD1Fourv2d; |
3015 | Offset = false; |
3016 | } |
3017 | break; |
3018 | } |
3019 | assert(Opc && "Unknown register class"); |
3020 | |
3021 | const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc)) |
3022 | .addReg(DestReg, getDefRegState(true)) |
3023 | .addFrameIndex(FI); |
3024 | if (Offset) |
3025 | MI.addImm(0); |
3026 | MI.addMemOperand(MMO); |
3027 | } |
3028 | |
3029 | // Helper function to emit a frame offset adjustment from a given |
3030 | // pointer (SrcReg), stored into DestReg. The function is explicit in |
3031 | // that it requires the caller to supply the opcode. |
3032 | static void emitFrameOffsetAdj(MachineBasicBlock &MBB, |
3033 | MachineBasicBlock::iterator MBBI, |
3034 | const DebugLoc &DL, unsigned DestReg, |
3035 | unsigned SrcReg, int64_t Offset, unsigned Opc, |
3036 | const TargetInstrInfo *TII, |
3037 | MachineInstr::MIFlag Flag, bool NeedsWinCFI, |
3038 | bool *HasWinCFI) { |
3039 | int Sign = 1; |
3040 | unsigned MaxEncoding, ShiftSize; |
3041 | switch (Opc) { |
3042 | case AArch64::ADDXri: |
3043 | case AArch64::ADDSXri: |
3044 | case AArch64::SUBXri: |
3045 | case AArch64::SUBSXri: |
3046 | MaxEncoding = 0xfff; |
3047 | ShiftSize = 12; |
3048 | break; |
3049 | default: |
3050 | llvm_unreachable("Unsupported opcode"); |
3051 | } |
3052 | |
3053 | // FIXME: If the offset won't fit in 24-bits, compute the offset into a |
3054 | // scratch register. If DestReg is a virtual register, use it as the |
3055 | // scratch register; otherwise, create a new virtual register (to be |
3056 | // replaced by the scavenger at the end of PEI). That case can be optimized |
3057 | // slightly if DestReg is SP which is always 16-byte aligned, so the scratch |
3058 | // register can be loaded with offset%8 and the add/sub can use an extending |
3059 | // instruction with LSL#3. |
3060 | // Currently the function handles any offsets but generates a poor sequence |
3061 | // of code. |
3062 | // assert(Offset < (1 << 24) && "unimplemented reg plus immediate"); |
3063 | |
3064 | const unsigned MaxEncodableValue = MaxEncoding << ShiftSize; |
3065 | do { |
3066 | unsigned ThisVal = std::min<unsigned>(Offset, MaxEncodableValue); |
3067 | unsigned LocalShiftSize = 0; |
3068 | if (ThisVal > MaxEncoding) { |
3069 | ThisVal = ThisVal >> ShiftSize; |
3070 | LocalShiftSize = ShiftSize; |
3071 | } |
3072 | assert((ThisVal >> ShiftSize) <= MaxEncoding && |
3073 | "Encoding cannot handle value that big"); |
3074 | auto MBI = BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg) |
3075 | .addReg(SrcReg) |
3076 | .addImm(Sign * (int)ThisVal); |
3077 | if (ShiftSize) |
3078 | MBI = MBI.addImm( |
3079 | AArch64_AM::getShifterImm(AArch64_AM::LSL, LocalShiftSize)); |
3080 | MBI = MBI.setMIFlag(Flag); |
3081 | |
3082 | if (NeedsWinCFI) { |
3083 | assert(Sign == 1 && "SEH directives should always have a positive sign"); |
3084 | int Imm = (int)(ThisVal << LocalShiftSize); |
3085 | if ((DestReg == AArch64::FP && SrcReg == AArch64::SP) || |
3086 | (SrcReg == AArch64::FP && DestReg == AArch64::SP)) { |
3087 | if (HasWinCFI) |
3088 | *HasWinCFI = true; |
3089 | if (Imm == 0) |
3090 | BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_SetFP)).setMIFlag(Flag); |
3091 | else |
3092 | BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_AddFP)) |
3093 | .addImm(Imm) |
3094 | .setMIFlag(Flag); |
3095 | assert((Offset - Imm) == 0 && "Expected remaining offset to be zero to " |
3096 | "emit a single SEH directive"); |
3097 | } else if (DestReg == AArch64::SP) { |
3098 | if (HasWinCFI) |
3099 | *HasWinCFI = true; |
3100 | assert(SrcReg == AArch64::SP && "Unexpected SrcReg for SEH_StackAlloc"); |
3101 | BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc)) |
3102 | .addImm(Imm) |
3103 | .setMIFlag(Flag); |
3104 | } |
3105 | if (HasWinCFI) |
3106 | *HasWinCFI = true; |
3107 | } |
3108 | |
3109 | SrcReg = DestReg; |
3110 | Offset -= ThisVal << LocalShiftSize; |
3111 | } while (Offset); |
3112 | } |
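// Editor's note: a sketch of how the loop above splits an offset that does
// not fit a single 12-bit immediate (hypothetical values). With
// Opc = ADDXri and Offset = 0x45678 it emits:
//   add x0, x1, #0x45, lsl #12   ; consumes 0x45000
//   add x0, x0, #0x678           ; remaining low 12 bits
// Note how SrcReg is rewritten to DestReg after the first iteration so the
// chain accumulates into the destination.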
3113 | |
3114 | void llvm::emitFrameOffset(MachineBasicBlock &MBB, |
3115 | MachineBasicBlock::iterator MBBI, const DebugLoc &DL, |
3116 | unsigned DestReg, unsigned SrcReg, |
3117 | StackOffset Offset, const TargetInstrInfo *TII, |
3118 | MachineInstr::MIFlag Flag, bool SetNZCV, |
3119 | bool NeedsWinCFI, bool *HasWinCFI) { |
3120 | int64_t Bytes; |
3121 | Offset.getForFrameOffset(Bytes); |
3122 | |
3123 | // First emit non-scalable frame offsets, or a simple 'mov'. |
3124 | if (Bytes || (!Offset && SrcReg != DestReg)) { |
3125 | assert((DestReg != AArch64::SP || Bytes % 16 == 0) && |
3126 | "SP increment/decrement not 16-byte aligned"); |
3127 | unsigned Opc = SetNZCV ? AArch64::ADDSXri : AArch64::ADDXri; |
3128 | if (Bytes < 0) { |
3129 | Bytes = -Bytes; |
3130 | Opc = SetNZCV ? AArch64::SUBSXri : AArch64::SUBXri; |
3131 | } |
3132 | emitFrameOffsetAdj(MBB, MBBI, DL, DestReg, SrcReg, Bytes, Opc, TII, Flag, |
3133 | NeedsWinCFI, HasWinCFI); |
3134 | SrcReg = DestReg; |
Value stored to 'SrcReg' is never read | |
3135 | } |
3136 | } |
3137 | |
3138 | MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl( |
3139 | MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, |
3140 | MachineBasicBlock::iterator InsertPt, int FrameIndex, |
3141 | LiveIntervals *LIS, VirtRegMap *VRM) const { |
3142 | // This is a bit of a hack. Consider this instruction: |
3143 | // |
3144 | // %0 = COPY %sp; GPR64all:%0 |
3145 | // |
3146 | // We explicitly chose GPR64all for the virtual register so such a copy might |
3147 | // be eliminated by RegisterCoalescer. However, that may not be possible, and |
3148 | // %0 may even spill. We can't spill %sp, and since it is in the GPR64all |
3149 | // register class, TargetInstrInfo::foldMemoryOperand() is going to try. |
3150 | // |
3151 | // To prevent that, we are going to constrain the %0 register class here. |
3152 | // |
3153 | // <rdar://problem/11522048> |
3154 | // |
3155 | if (MI.isFullCopy()) { |
3156 | Register DstReg = MI.getOperand(0).getReg(); |
3157 | Register SrcReg = MI.getOperand(1).getReg(); |
3158 | if (SrcReg == AArch64::SP && Register::isVirtualRegister(DstReg)) { |
3159 | MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass); |
3160 | return nullptr; |
3161 | } |
3162 | if (DstReg == AArch64::SP && Register::isVirtualRegister(SrcReg)) { |
3163 | MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass); |
3164 | return nullptr; |
3165 | } |
3166 | } |
3167 | |
3168 | // Handle the case where a copy is being spilled or filled but the source |
3169 | // and destination register classes don't match. For example: |
3170 | // |
3171 | // %0 = COPY %xzr; GPR64common:%0 |
3172 | // |
3173 | // In this case we can still safely fold away the COPY and generate the |
3174 | // following spill code: |
3175 | // |
3176 | // STRXui %xzr, %stack.0 |
3177 | // |
3178 | // This also eliminates spilled cross register class COPYs (e.g. between x and |
3179 | // d regs) of the same size. For example: |
3180 | // |
3181 | // %0 = COPY %1; GPR64:%0, FPR64:%1 |
3182 | // |
3183 | // will be filled as |
3184 | // |
3185 | // LDRDui %0, fi<#0> |
3186 | // |
3187 | // instead of |
3188 | // |
3189 | // LDRXui %Temp, fi<#0> |
3190 | // %0 = FMOV %Temp |
3191 | // |
3192 | if (MI.isCopy() && Ops.size() == 1 && |
3193 | // Make sure we're only folding the explicit COPY defs/uses. |
3194 | (Ops[0] == 0 || Ops[0] == 1)) { |
3195 | bool IsSpill = Ops[0] == 0; |
3196 | bool IsFill = !IsSpill; |
3197 | const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); |
3198 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
3199 | MachineBasicBlock &MBB = *MI.getParent(); |
3200 | const MachineOperand &DstMO = MI.getOperand(0); |
3201 | const MachineOperand &SrcMO = MI.getOperand(1); |
3202 | Register DstReg = DstMO.getReg(); |
3203 | Register SrcReg = SrcMO.getReg(); |
3204 | // This is slightly expensive to compute for physical regs since |
3205 | // getMinimalPhysRegClass is slow. |
3206 | auto getRegClass = [&](unsigned Reg) { |
3207 | return Register::isVirtualRegister(Reg) ? MRI.getRegClass(Reg) |
3208 | : TRI.getMinimalPhysRegClass(Reg); |
3209 | }; |
3210 | |
3211 | if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) { |
3212 | assert(TRI.getRegSizeInBits(*getRegClass(DstReg)) == |
3213 | TRI.getRegSizeInBits(*getRegClass(SrcReg)) && |
3214 | "Mismatched register size in non subreg COPY"); |
3215 | if (IsSpill) |
3216 | storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex, |
3217 | getRegClass(SrcReg), &TRI); |
3218 | else |
3219 | loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, |
3220 | getRegClass(DstReg), &TRI); |
3221 | return &*--InsertPt; |
3222 | } |
3223 | |
3224 | // Handle cases like spilling def of: |
3225 | // |
3226 | // %0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%0 |
3227 | // |
3228 | // where the physical register source can be widened and stored to the full |
3229 | // virtual reg destination stack slot, in this case producing: |
3230 | // |
3231 | // STRXui %xzr, %stack.0 |
3232 | // |
3233 | if (IsSpill && DstMO.isUndef() && Register::isPhysicalRegister(SrcReg)) { |
3234 | assert(SrcMO.getSubReg() == 0 && |
3235 | "Unexpected subreg on physical register"); |
3236 | const TargetRegisterClass *SpillRC; |
3237 | unsigned SpillSubreg; |
3238 | switch (DstMO.getSubReg()) { |
3239 | default: |
3240 | SpillRC = nullptr; |
3241 | break; |
3242 | case AArch64::sub_32: |
3243 | case AArch64::ssub: |
3244 | if (AArch64::GPR32RegClass.contains(SrcReg)) { |
3245 | SpillRC = &AArch64::GPR64RegClass; |
3246 | SpillSubreg = AArch64::sub_32; |
3247 | } else if (AArch64::FPR32RegClass.contains(SrcReg)) { |
3248 | SpillRC = &AArch64::FPR64RegClass; |
3249 | SpillSubreg = AArch64::ssub; |
3250 | } else |
3251 | SpillRC = nullptr; |
3252 | break; |
3253 | case AArch64::dsub: |
3254 | if (AArch64::FPR64RegClass.contains(SrcReg)) { |
3255 | SpillRC = &AArch64::FPR128RegClass; |
3256 | SpillSubreg = AArch64::dsub; |
3257 | } else |
3258 | SpillRC = nullptr; |
3259 | break; |
3260 | } |
3261 | |
3262 | if (SpillRC) |
3263 | if (unsigned WidenedSrcReg = |
3264 | TRI.getMatchingSuperReg(SrcReg, SpillSubreg, SpillRC)) { |
3265 | storeRegToStackSlot(MBB, InsertPt, WidenedSrcReg, SrcMO.isKill(), |
3266 | FrameIndex, SpillRC, &TRI); |
3267 | return &*--InsertPt; |
3268 | } |
3269 | } |
3270 | |
3271 | // Handle cases like filling use of: |
3272 | // |
3273 | // %0:sub_32<def,read-undef> = COPY %1; GPR64:%0, GPR32:%1 |
3274 | // |
3275 | // where we can load the full virtual reg source stack slot, into the subreg |
3276 | // destination, in this case producing: |
3277 | // |
3278 | // LDRWui %0:sub_32<def,read-undef>, %stack.0 |
3279 | // |
3280 | if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) { |
3281 | const TargetRegisterClass *FillRC; |
3282 | switch (DstMO.getSubReg()) { |
3283 | default: |
3284 | FillRC = nullptr; |
3285 | break; |
3286 | case AArch64::sub_32: |
3287 | FillRC = &AArch64::GPR32RegClass; |
3288 | break; |
3289 | case AArch64::ssub: |
3290 | FillRC = &AArch64::FPR32RegClass; |
3291 | break; |
3292 | case AArch64::dsub: |
3293 | FillRC = &AArch64::FPR64RegClass; |
3294 | break; |
3295 | } |
3296 | |
3297 | if (FillRC) { |
3298 | assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) == |
3299 | TRI.getRegSizeInBits(*FillRC) && |
3300 | "Mismatched regclass size on folded subreg COPY"); |
3301 | loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI); |
3302 | MachineInstr &LoadMI = *--InsertPt; |
3303 | MachineOperand &LoadDst = LoadMI.getOperand(0); |
3304 | assert(LoadDst.getSubReg() == 0 && "unexpected subreg on fill load"); |
3305 | LoadDst.setSubReg(DstMO.getSubReg()); |
3306 | LoadDst.setIsUndef(); |
3307 | return &LoadMI; |
3308 | } |
3309 | } |
3310 | } |
3311 | |
3312 | // Cannot fold. |
3313 | return nullptr; |
3314 | } |
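// Editor's note: each successful fold above returns the freshly inserted
// spill/fill instruction (the one just before InsertPt); returning nullptr
// tells TargetInstrInfo::foldMemoryOperand that the COPY could not be folded.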
3315 | |
3316 | int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, |
3317 | StackOffset &SOffset, |
3318 | bool *OutUseUnscaledOp, |
3319 | unsigned *OutUnscaledOp, |
3320 | int *EmittableOffset) { |
3321 | // Set output values in case of early exit. |
3322 | if (EmittableOffset) |
3323 | *EmittableOffset = 0; |
3324 | if (OutUseUnscaledOp) |
3325 | *OutUseUnscaledOp = false; |
3326 | if (OutUnscaledOp) |
3327 | *OutUnscaledOp = 0; |
3328 | |
3329 | // Exit early for structured vector spills/fills as they can't take an |
3330 | // immediate offset. |
3331 | switch (MI.getOpcode()) { |
3332 | default: |
3333 | break; |
3334 | case AArch64::LD1Twov2d: |
3335 | case AArch64::LD1Threev2d: |
3336 | case AArch64::LD1Fourv2d: |
3337 | case AArch64::LD1Twov1d: |
3338 | case AArch64::LD1Threev1d: |
3339 | case AArch64::LD1Fourv1d: |
3340 | case AArch64::ST1Twov2d: |
3341 | case AArch64::ST1Threev2d: |
3342 | case AArch64::ST1Fourv2d: |
3343 | case AArch64::ST1Twov1d: |
3344 | case AArch64::ST1Threev1d: |
3345 | case AArch64::ST1Fourv1d: |
3346 | case AArch64::IRG: |
3347 | case AArch64::IRGstack: |
3348 | return AArch64FrameOffsetCannotUpdate; |
3349 | } |
3350 | |
3351 | // Get the min/max offset and the scale. |
3352 | unsigned Scale, Width; |
3353 | int64_t MinOff, MaxOff; |
3354 | if (!AArch64InstrInfo::getMemOpInfo(MI.getOpcode(), Scale, Width, MinOff, |
3355 | MaxOff)) |
3356 | llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal"); |
3357 | |
3358 | // Construct the complete offset. |
3359 | const MachineOperand &ImmOpnd = |
3360 | MI.getOperand(AArch64InstrInfo::getLoadStoreImmIdx(MI.getOpcode())); |
3361 | int Offset = SOffset.getBytes() + ImmOpnd.getImm() * Scale; |
3362 | |
3363 | // If the offset doesn't match the scale, we rewrite the instruction to |
3364 | // use the unscaled instruction instead. Likewise, if we have a negative |
3365 | // offset and there is an unscaled op to use. |
3366 | Optional<unsigned> UnscaledOp = |
3367 | AArch64InstrInfo::getUnscaledLdSt(MI.getOpcode()); |
3368 | bool useUnscaledOp = UnscaledOp && (Offset % Scale || Offset < 0); |
3369 | if (useUnscaledOp && |
3370 | !AArch64InstrInfo::getMemOpInfo(*UnscaledOp, Scale, Width, MinOff, MaxOff)) |
3371 | llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal"); |
3372 | |
3373 | int64_t Remainder = Offset % Scale; |
3374 | assert(!(Remainder && useUnscaledOp) && |
3375 | "Cannot have remainder when using unscaled op"); |
3376 | |
3377 | assert(MinOff < MaxOff && "Unexpected Min/Max offsets"); |
3378 | int NewOffset = Offset / Scale; |
3379 | if (MinOff <= NewOffset && NewOffset <= MaxOff) |
3380 | Offset = Remainder; |
3381 | else { |
3382 | NewOffset = NewOffset < 0 ? MinOff : MaxOff; |
3383 | Offset = Offset - NewOffset * Scale + Remainder; |
3384 | } |
3385 | |
3386 | if (EmittableOffset) |
3387 | *EmittableOffset = NewOffset; |
3388 | if (OutUseUnscaledOp) |
3389 | *OutUseUnscaledOp = useUnscaledOp; |
3390 | if (OutUnscaledOp && UnscaledOp) |
3391 | *OutUnscaledOp = *UnscaledOp; |
3392 | |
3393 | SOffset = StackOffset(Offset, MVT::i8); |
3394 | return AArch64FrameOffsetCanUpdate | |
3395 | (Offset == 0 ? AArch64FrameOffsetIsLegal : 0); |
3396 | } |
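// Editor's note: an illustrative case for the scaled/unscaled rewrite above
// (assumed numbers). LDRXui scales its immediate by 8, so a byte offset of
// 20 is not directly encodable; since an unscaled twin exists, the access
// is rewritten to LDURXi, whose signed 9-bit byte immediate encodes 20
// directly, and the function reports the offset as fully legal.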
3397 | |
3398 | bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, |
3399 | unsigned FrameReg, StackOffset &Offset, |
3400 | const AArch64InstrInfo *TII) { |
3401 | unsigned Opcode = MI.getOpcode(); |
3402 | unsigned ImmIdx = FrameRegIdx + 1; |
3403 | |
3404 | if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) { |
3405 | Offset += StackOffset(MI.getOperand(ImmIdx).getImm(), MVT::i8); |
3406 | emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(), |
3407 | MI.getOperand(0).getReg(), FrameReg, Offset, TII, |
3408 | MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri)); |
3409 | MI.eraseFromParent(); |
3410 | Offset = StackOffset(); |
3411 | return true; |
3412 | } |
3413 | |
3414 | int NewOffset; |
3415 | unsigned UnscaledOp; |
3416 | bool UseUnscaledOp; |
3417 | int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp, |
3418 | &UnscaledOp, &NewOffset); |
3419 | if (Status & AArch64FrameOffsetCanUpdate) { |
3420 | if (Status & AArch64FrameOffsetIsLegal) |
3421 | // Replace the FrameIndex with FrameReg. |
3422 | MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false); |
3423 | if (UseUnscaledOp) |
3424 | MI.setDesc(TII->get(UnscaledOp)); |
3425 | |
3426 | MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset); |
3427 | return !Offset; |
3428 | } |
3429 | |
3430 | return false; |
3431 | } |
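// Editor's note (assumed operands): for an "%x0 = ADDXri <fi#0>, 16" with
// FrameReg = sp, the fast path above folds the existing immediate into
// Offset, re-emits the add via emitFrameOffset, and erases the original
// instruction, reporting the offset as fully consumed.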
3432 | |
3433 | void AArch64InstrInfo::getNoop(MCInst &NopInst) const { |
3434 | NopInst.setOpcode(AArch64::HINT); |
3435 | NopInst.addOperand(MCOperand::createImm(0)); |
3436 | } |
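// Editor's note: HINT #0 is the architectural encoding of NOP on AArch64,
// so the MCInst built above prints and disassembles as a plain "nop".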
3437 | |
3438 | // AArch64 supports MachineCombiner. |
3439 | bool AArch64InstrInfo::useMachineCombiner() const { return true; } |
3440 | |
3441 | // True when Opc sets the NZCV flags. |
3442 | static bool isCombineInstrSettingFlag(unsigned Opc) { |
3443 | switch (Opc) { |
3444 | case AArch64::ADDSWrr: |
3445 | case AArch64::ADDSWri: |
3446 | case AArch64::ADDSXrr: |
3447 | case AArch64::ADDSXri: |
3448 | case AArch64::SUBSWrr: |
3449 | case AArch64::SUBSXrr: |
3450 | // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi. |
3451 | case AArch64::SUBSWri: |
3452 | case AArch64::SUBSXri: |
3453 | return true; |
3454 | default: |
3455 | break; |
3456 | } |
3457 | return false; |
3458 | } |
3459 | |
3460 | // 32b Opcodes that can be combined with a MUL |
3461 | static bool isCombineInstrCandidate32(unsigned Opc) { |
3462 | switch (Opc) { |
3463 | case AArch64::ADDWrr: |
3464 | case AArch64::ADDWri: |
3465 | case AArch64::SUBWrr: |
3466 | case AArch64::ADDSWrr: |
3467 | case AArch64::ADDSWri: |
3468 | case AArch64::SUBSWrr: |
3469 | // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi. |
3470 | case AArch64::SUBWri: |
3471 | case AArch64::SUBSWri: |
3472 | return true; |
3473 | default: |
3474 | break; |
3475 | } |
3476 | return false; |
3477 | } |
3478 | |
3479 | // 64b Opcodes that can be combined with a MUL |
3480 | static bool isCombineInstrCandidate64(unsigned Opc) { |
3481 | switch (Opc) { |
3482 | case AArch64::ADDXrr: |
3483 | case AArch64::ADDXri: |
3484 | case AArch64::SUBXrr: |
3485 | case AArch64::ADDSXrr: |
3486 | case AArch64::ADDSXri: |
3487 | case AArch64::SUBSXrr: |
3488 | // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi. |
3489 | case AArch64::SUBXri: |
3490 | case AArch64::SUBSXri: |
3491 | return true; |
3492 | default: |
3493 | break; |
3494 | } |
3495 | return false; |
3496 | } |
3497 | |
3498 | // FP Opcodes that can be combined with a FMUL |
3499 | static bool isCombineInstrCandidateFP(const MachineInstr &Inst) { |
3500 | switch (Inst.getOpcode()) { |
3501 | default: |
3502 | break; |
3503 | case AArch64::FADDHrr: |
3504 | case AArch64::FADDSrr: |
3505 | case AArch64::FADDDrr: |
3506 | case AArch64::FADDv4f16: |
3507 | case AArch64::FADDv8f16: |
3508 | case AArch64::FADDv2f32: |
3509 | case AArch64::FADDv2f64: |
3510 | case AArch64::FADDv4f32: |
3511 | case AArch64::FSUBHrr: |
3512 | case AArch64::FSUBSrr: |
3513 | case AArch64::FSUBDrr: |
3514 | case AArch64::FSUBv4f16: |
3515 | case AArch64::FSUBv8f16: |
3516 | case AArch64::FSUBv2f32: |
3517 | case AArch64::FSUBv2f64: |
3518 | case AArch64::FSUBv4f32: |
3519 | TargetOptions Options = Inst.getParent()->getParent()->getTarget().Options; |
3520 | return (Options.UnsafeFPMath || |
3521 | Options.AllowFPOpFusion == FPOpFusion::Fast); |
3522 | } |
3523 | return false; |
3524 | } |
3525 | |
3526 | // Opcodes that can be combined with a MUL |
3527 | static bool isCombineInstrCandidate(unsigned Opc) { |
3528 | return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc)); |
3529 | } |
3530 | |
3531 | // |
3532 | // Utility routine that checks if \param MO is defined by an |
3533 | // \param CombineOpc instruction in the basic block \param MBB |
3534 | static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, |
3535 | unsigned CombineOpc, unsigned ZeroReg = 0, |
3536 | bool CheckZeroReg = false) { |
3537 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
3538 | MachineInstr *MI = nullptr; |
3539 | |
3540 | if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) |
3541 | MI = MRI.getUniqueVRegDef(MO.getReg()); |
3542 | // And it needs to be in the trace (otherwise, it won't have a depth). |
3543 | if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc) |
3544 | return false; |
3545 | // Must only be used by the user we combine with. |
3546 | if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg())) |
3547 | return false; |
3548 | |
3549 | if (CheckZeroReg) { |
3550 | assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() && |
3551 | MI->getOperand(1).isReg() && MI->getOperand(2).isReg() && |
3552 | MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs"); |
3553 | // The third input reg must be zero. |
3554 | if (MI->getOperand(3).getReg() != ZeroReg) |
3555 | return false; |
3556 | } |
3557 | |
3558 | return true; |
3559 | } |
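// Editor's note: a sketch of the shape this check enables (hypothetical
// vregs). With CheckZeroReg set, a MADD whose accumulator (operand 3) is the
// zero register is just a multiply, so a later add can absorb it:
//   %2 = MADDWrrr %0, %1, $wzr   ; i.e. mul %0, %1
//   %4 = ADDWrr %2, %3
// becomes a single:
//   %4 = MADDWrrr %0, %1, %3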
3560 | |
3561 | // |
3562 | // Is \param MO defined by an integer multiply and can be combined? |
3563 | static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO, |
3564 | unsigned MulOpc, unsigned ZeroReg) { |
3565 | return canCombine(MBB, MO, MulOpc, ZeroReg, true); |
3566 | } |
3567 | |
3568 | // |
3569 | // Is \param MO defined by a floating-point multiply and can be combined? |
3570 | static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO, |
3571 | unsigned MulOpc) { |
3572 | return canCombine(MBB, MO, MulOpc); |
3573 | } |
3574 | |
3575 | // TODO: There are many more machine instruction opcodes to match: |
3576 | // 1. Other data types (integer, vectors) |
3577 | // 2. Other math / logic operations (xor, or) |
3578 | // 3. Other forms of the same operation (intrinsics and other variants) |
3579 | bool AArch64InstrInfo::isAssociativeAndCommutative( |
3580 | const MachineInstr &Inst) const { |
3581 | switch (Inst.getOpcode()) { |
3582 | case AArch64::FADDDrr: |
3583 | case AArch64::FADDSrr: |
3584 | case AArch64::FADDv2f32: |
3585 | case AArch64::FADDv2f64: |
3586 | case AArch64::FADDv4f32: |
3587 | case AArch64::FMULDrr: |
3588 | case AArch64::FMULSrr: |
3589 | case AArch64::FMULX32: |
3590 | case AArch64::FMULX64: |
3591 | case AArch64::FMULXv2f32: |
3592 | case AArch64::FMULXv2f64: |
3593 | case AArch64::FMULXv4f32: |
3594 | case AArch64::FMULv2f32: |
3595 | case AArch64::FMULv2f64: |
3596 | case AArch64::FMULv4f32: |
3597 | return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath; |
3598 | default: |
3599 | return false; |
3600 | } |
3601 | } |
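// Editor's note: for the opcodes listed above the MachineCombiner may
// reassociate, e.g. rewriting ((a fadd b) fadd c) as (a fadd (b fadd c)) to
// shorten the critical path; this is gated on UnsafeFPMath because it can
// change the rounded result.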
3602 | |
3603 | /// Find instructions that can be turned into madd. |
3604 | static bool getMaddPatterns(MachineInstr &Root, |
3605 | SmallVectorImpl<MachineCombinerPattern> &Patterns) { |
3606 | unsigned Opc = Root.getOpcode(); |
3607 | MachineBasicBlock &MBB = *Root.getParent(); |
3608 | bool Found = false; |
3609 | |
3610 | if (!isCombineInstrCandidate(Opc)) |
3611 | return false; |
3612 | if (isCombineInstrSettingFlag(Opc)) { |
3613 | int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true); |
3614 | // When NZCV is live, bail out.
3615 | if (Cmp_NZCV == -1) |
3616 | return false; |
3617 | unsigned NewOpc = convertToNonFlagSettingOpc(Root); |
3618 | // When the opcode can't change, bail out.
3619 | // CHECKME: do we miss any cases for opcode conversion? |
3620 | if (NewOpc == Opc) |
3621 | return false; |
3622 | Opc = NewOpc; |
3623 | } |
3624 | |
3625 | auto setFound = [&](int Opcode, int Operand, unsigned ZeroReg, |
3626 | MachineCombinerPattern Pattern) { |
3627 | if (canCombineWithMUL(MBB, Root.getOperand(Operand), Opcode, ZeroReg)) { |
3628 | Patterns.push_back(Pattern); |
3629 | Found = true; |
3630 | } |
3631 | }; |
3632 | |
3633 | typedef MachineCombinerPattern MCP; |
3634 | |
3635 | switch (Opc) { |
3636 | default: |
3637 | break; |
3638 | case AArch64::ADDWrr: |
3639 | assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
3640 | "ADDWrr does not have register operands");
3641 | setFound(AArch64::MADDWrrr, 1, AArch64::WZR, MCP::MULADDW_OP1); |
3642 | setFound(AArch64::MADDWrrr, 2, AArch64::WZR, MCP::MULADDW_OP2); |
3643 | break; |
3644 | case AArch64::ADDXrr: |
3645 | setFound(AArch64::MADDXrrr, 1, AArch64::XZR, MCP::MULADDX_OP1); |
3646 | setFound(AArch64::MADDXrrr, 2, AArch64::XZR, MCP::MULADDX_OP2); |
3647 | break; |
3648 | case AArch64::SUBWrr: |
3649 | setFound(AArch64::MADDWrrr, 1, AArch64::WZR, MCP::MULSUBW_OP1); |
3650 | setFound(AArch64::MADDWrrr, 2, AArch64::WZR, MCP::MULSUBW_OP2); |
3651 | break; |
3652 | case AArch64::SUBXrr: |
3653 | setFound(AArch64::MADDXrrr, 1, AArch64::XZR, MCP::MULSUBX_OP1); |
3654 | setFound(AArch64::MADDXrrr, 2, AArch64::XZR, MCP::MULSUBX_OP2); |
3655 | break; |
3656 | case AArch64::ADDWri: |
3657 | setFound(AArch64::MADDWrrr, 1, AArch64::WZR, MCP::MULADDWI_OP1); |
3658 | break; |
3659 | case AArch64::ADDXri: |
3660 | setFound(AArch64::MADDXrrr, 1, AArch64::XZR, MCP::MULADDXI_OP1); |
3661 | break; |
3662 | case AArch64::SUBWri: |
3663 | setFound(AArch64::MADDWrrr, 1, AArch64::WZR, MCP::MULSUBWI_OP1); |
3664 | break; |
3665 | case AArch64::SUBXri: |
3666 | setFound(AArch64::MADDXrrr, 1, AArch64::XZR, MCP::MULSUBXI_OP1); |
3667 | break; |
3668 | } |
3669 | return Found; |
3670 | } |
3671 |
3672 | /// Floating-Point Support
3673 | /// Find floating-point instructions that can be turned into fused multiply-adds.
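     | /// For example:
     | ///   fmul s0, s1, s2
     | ///   fadd s3, s0, s4
     | /// ==> fmadd s3, s1, s2, s4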
3674 | static bool getFMAPatterns(MachineInstr &Root, |
3675 | SmallVectorImpl<MachineCombinerPattern> &Patterns) { |
3676 | |
3677 | if (!isCombineInstrCandidateFP(Root)) |
3678 | return false; |
3679 | |
3680 | MachineBasicBlock &MBB = *Root.getParent(); |
3681 | bool Found = false; |
3682 | |
3683 | auto Match = [&](int Opcode, int Operand, |
3684 | MachineCombinerPattern Pattern) -> bool { |
3685 | if (canCombineWithFMUL(MBB, Root.getOperand(Operand), Opcode)) { |
3686 | Patterns.push_back(Pattern); |
3687 | return true; |
3688 | } |
3689 | return false; |
3690 | }; |
3691 | |
3692 | typedef MachineCombinerPattern MCP; |
3693 | |
3694 | switch (Root.getOpcode()) { |
3695 | default: |
3696 | assert(false && "Unsupported FP instruction in combiner\n");
3697 | break; |
3698 | case AArch64::FADDHrr: |
3699 | assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
3700 | "FADDHrr does not have register operands");
3701 | |
3702 | Found = Match(AArch64::FMULHrr, 1, MCP::FMULADDH_OP1); |
3703 | Found |= Match(AArch64::FMULHrr, 2, MCP::FMULADDH_OP2); |
3704 | break; |
3705 | case AArch64::FADDSrr: |
3706 | assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
3707 | "FADDSrr does not have register operands");
3708 | |
3709 | Found |= Match(AArch64::FMULSrr, 1, MCP::FMULADDS_OP1) || |
3710 | Match(AArch64::FMULv1i32_indexed, 1, MCP::FMLAv1i32_indexed_OP1); |
3711 | |
3712 | Found |= Match(AArch64::FMULSrr, 2, MCP::FMULADDS_OP2) || |
3713 | Match(AArch64::FMULv1i32_indexed, 2, MCP::FMLAv1i32_indexed_OP2); |
3714 | break; |
3715 | case AArch64::FADDDrr: |
3716 | Found |= Match(AArch64::FMULDrr, 1, MCP::FMULADDD_OP1) || |
3717 | Match(AArch64::FMULv1i64_indexed, 1, MCP::FMLAv1i64_indexed_OP1); |
3718 | |
3719 | Found |= Match(AArch64::FMULDrr, 2, MCP::FMULADDD_OP2) || |
3720 | Match(AArch64::FMULv1i64_indexed, 2, MCP::FMLAv1i64_indexed_OP2); |
3721 | break; |
3722 | case AArch64::FADDv4f16: |
3723 | Found |= Match(AArch64::FMULv4i16_indexed, 1, MCP::FMLAv4i16_indexed_OP1) || |
3724 | Match(AArch64::FMULv4f16, 1, MCP::FMLAv4f16_OP1); |
3725 | |
3726 | Found |= Match(AArch64::FMULv4i16_indexed, 2, MCP::FMLAv4i16_indexed_OP2) || |
3727 | Match(AArch64::FMULv4f16, 2, MCP::FMLAv4f16_OP2); |
3728 | break; |
3729 | case AArch64::FADDv8f16: |
3730 | Found |= Match(AArch64::FMULv8i16_indexed, 1, MCP::FMLAv8i16_indexed_OP1) || |
3731 | Match(AArch64::FMULv8f16, 1, MCP::FMLAv8f16_OP1); |
3732 | |
3733 | Found |= Match(AArch64::FMULv8i16_indexed, 2, MCP::FMLAv8i16_indexed_OP2) || |
3734 | Match(AArch64::FMULv8f16, 2, MCP::FMLAv8f16_OP2); |
3735 | break; |
3736 | case AArch64::FADDv2f32: |
3737 | Found |= Match(AArch64::FMULv2i32_indexed, 1, MCP::FMLAv2i32_indexed_OP1) || |
3738 | Match(AArch64::FMULv2f32, 1, MCP::FMLAv2f32_OP1); |
3739 | |
3740 | Found |= Match(AArch64::FMULv2i32_indexed, 2, MCP::FMLAv2i32_indexed_OP2) || |
3741 | Match(AArch64::FMULv2f32, 2, MCP::FMLAv2f32_OP2); |
3742 | break; |
3743 | case AArch64::FADDv2f64: |
3744 | Found |= Match(AArch64::FMULv2i64_indexed, 1, MCP::FMLAv2i64_indexed_OP1) || |
3745 | Match(AArch64::FMULv2f64, 1, MCP::FMLAv2f64_OP1); |
3746 | |
3747 | Found |= Match(AArch64::FMULv2i64_indexed, 2, MCP::FMLAv2i64_indexed_OP2) || |
3748 | Match(AArch64::FMULv2f64, 2, MCP::FMLAv2f64_OP2); |
3749 | break; |
3750 | case AArch64::FADDv4f32: |
3751 | Found |= Match(AArch64::FMULv4i32_indexed, 1, MCP::FMLAv4i32_indexed_OP1) || |
3752 | Match(AArch64::FMULv4f32, 1, MCP::FMLAv4f32_OP1); |
3753 | |
3754 | Found |= Match(AArch64::FMULv4i32_indexed, 2, MCP::FMLAv4i32_indexed_OP2) || |
3755 | Match(AArch64::FMULv4f32, 2, MCP::FMLAv4f32_OP2); |
3756 | break; |
3757 | case AArch64::FSUBHrr: |
3758 | Found = Match(AArch64::FMULHrr, 1, MCP::FMULSUBH_OP1); |
3759 | Found |= Match(AArch64::FMULHrr, 2, MCP::FMULSUBH_OP2); |
3760 | Found |= Match(AArch64::FNMULHrr, 1, MCP::FNMULSUBH_OP1); |
3761 | break; |
3762 | case AArch64::FSUBSrr: |
3763 | Found = Match(AArch64::FMULSrr, 1, MCP::FMULSUBS_OP1); |
3764 | |
3765 | Found |= Match(AArch64::FMULSrr, 2, MCP::FMULSUBS_OP2) || |
3766 | Match(AArch64::FMULv1i32_indexed, 2, MCP::FMLSv1i32_indexed_OP2); |
3767 | |
3768 | Found |= Match(AArch64::FNMULSrr, 1, MCP::FNMULSUBS_OP1); |
3769 | break; |
3770 | case AArch64::FSUBDrr: |
3771 | Found = Match(AArch64::FMULDrr, 1, MCP::FMULSUBD_OP1); |
3772 | |
3773 | Found |= Match(AArch64::FMULDrr, 2, MCP::FMULSUBD_OP2) || |
3774 | Match(AArch64::FMULv1i64_indexed, 2, MCP::FMLSv1i64_indexed_OP2); |
3775 | |
3776 | Found |= Match(AArch64::FNMULDrr, 1, MCP::FNMULSUBD_OP1); |
3777 | break; |
3778 | case AArch64::FSUBv4f16: |
3779 | Found |= Match(AArch64::FMULv4i16_indexed, 2, MCP::FMLSv4i16_indexed_OP2) || |
3780 | Match(AArch64::FMULv4f16, 2, MCP::FMLSv4f16_OP2); |
3781 | |
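     | // FIXME: the OP1 matches below push the v2i32/v2f32 FMLS patterns even
     | // though the operands are v4f16; this looks like a copy/paste from the
     | // FSUBv2f32 case.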
3782 | Found |= Match(AArch64::FMULv4i16_indexed, 1, MCP::FMLSv2i32_indexed_OP1) || |
3783 | Match(AArch64::FMULv4f16, 1, MCP::FMLSv2f32_OP1); |
3784 | break; |
3785 | case AArch64::FSUBv8f16: |
3786 | Found |= Match(AArch64::FMULv8i16_indexed, 2, MCP::FMLSv8i16_indexed_OP2) || |
3787 | Match(AArch64::FMULv8f16, 2, MCP::FMLSv8f16_OP2); |
3788 | |
3789 | Found |= Match(AArch64::FMULv8i16_indexed, 1, MCP::FMLSv8i16_indexed_OP1) || |
3790 | Match(AArch64::FMULv8f16, 1, MCP::FMLSv8f16_OP1); |
3791 | break; |
3792 | case AArch64::FSUBv2f32: |
3793 | Found |= Match(AArch64::FMULv2i32_indexed, 2, MCP::FMLSv2i32_indexed_OP2) || |
3794 | Match(AArch64::FMULv2f32, 2, MCP::FMLSv2f32_OP2); |
3795 | |
3796 | Found |= Match(AArch64::FMULv2i32_indexed, 1, MCP::FMLSv2i32_indexed_OP1) || |
3797 | Match(AArch64::FMULv2f32, 1, MCP::FMLSv2f32_OP1); |
3798 | break; |
3799 | case AArch64::FSUBv2f64: |
3800 | Found |= Match(AArch64::FMULv2i64_indexed, 2, MCP::FMLSv2i64_indexed_OP2) || |
3801 | Match(AArch64::FMULv2f64, 2, MCP::FMLSv2f64_OP2); |
3802 | |
3803 | Found |= Match(AArch64::FMULv2i64_indexed, 1, MCP::FMLSv2i64_indexed_OP1) || |
3804 | Match(AArch64::FMULv2f64, 1, MCP::FMLSv2f64_OP1); |
3805 | break; |
3806 | case AArch64::FSUBv4f32: |
3807 | Found |= Match(AArch64::FMULv4i32_indexed, 2, MCP::FMLSv4i32_indexed_OP2) || |
3808 | Match(AArch64::FMULv4f32, 2, MCP::FMLSv4f32_OP2); |
3809 | |
3810 | Found |= Match(AArch64::FMULv4i32_indexed, 1, MCP::FMLSv4i32_indexed_OP1) || |
3811 | Match(AArch64::FMULv4f32, 1, MCP::FMLSv4f32_OP1); |
3812 | break; |
3813 | } |
3814 | return Found; |
3815 | } |
3816 | |
3817 | /// Return true when a code sequence can improve throughput. It |
3818 | /// should be called only for instructions in loops. |
3819 | /// \param Pattern - combiner pattern |
3820 | bool AArch64InstrInfo::isThroughputPattern( |
3821 | MachineCombinerPattern Pattern) const { |
3822 | switch (Pattern) { |
3823 | default: |
3824 | break; |
3825 | case MachineCombinerPattern::FMULADDH_OP1: |
3826 | case MachineCombinerPattern::FMULADDH_OP2: |
3827 | case MachineCombinerPattern::FMULSUBH_OP1: |
3828 | case MachineCombinerPattern::FMULSUBH_OP2: |
3829 | case MachineCombinerPattern::FMULADDS_OP1: |
3830 | case MachineCombinerPattern::FMULADDS_OP2: |
3831 | case MachineCombinerPattern::FMULSUBS_OP1: |
3832 | case MachineCombinerPattern::FMULSUBS_OP2: |
3833 | case MachineCombinerPattern::FMULADDD_OP1: |
3834 | case MachineCombinerPattern::FMULADDD_OP2: |
3835 | case MachineCombinerPattern::FMULSUBD_OP1: |
3836 | case MachineCombinerPattern::FMULSUBD_OP2: |
3837 | case MachineCombinerPattern::FNMULSUBH_OP1: |
3838 | case MachineCombinerPattern::FNMULSUBS_OP1: |
3839 | case MachineCombinerPattern::FNMULSUBD_OP1: |
3840 | case MachineCombinerPattern::FMLAv4i16_indexed_OP1: |
3841 | case MachineCombinerPattern::FMLAv4i16_indexed_OP2: |
3842 | case MachineCombinerPattern::FMLAv8i16_indexed_OP1: |
3843 | case MachineCombinerPattern::FMLAv8i16_indexed_OP2: |
3844 | case MachineCombinerPattern::FMLAv1i32_indexed_OP1: |
3845 | case MachineCombinerPattern::FMLAv1i32_indexed_OP2: |
3846 | case MachineCombinerPattern::FMLAv1i64_indexed_OP1: |
3847 | case MachineCombinerPattern::FMLAv1i64_indexed_OP2: |
3848 | case MachineCombinerPattern::FMLAv4f16_OP2: |
3849 | case MachineCombinerPattern::FMLAv4f16_OP1: |
3850 | case MachineCombinerPattern::FMLAv8f16_OP1: |
3851 | case MachineCombinerPattern::FMLAv8f16_OP2: |
3852 | case MachineCombinerPattern::FMLAv2f32_OP2: |
3853 | case MachineCombinerPattern::FMLAv2f32_OP1: |
3854 | case MachineCombinerPattern::FMLAv2f64_OP1: |
3855 | case MachineCombinerPattern::FMLAv2f64_OP2: |
3856 | case MachineCombinerPattern::FMLAv2i32_indexed_OP1: |
3857 | case MachineCombinerPattern::FMLAv2i32_indexed_OP2: |
3858 | case MachineCombinerPattern::FMLAv2i64_indexed_OP1: |
3859 | case MachineCombinerPattern::FMLAv2i64_indexed_OP2: |
3860 | case MachineCombinerPattern::FMLAv4f32_OP1: |
3861 | case MachineCombinerPattern::FMLAv4f32_OP2: |
3862 | case MachineCombinerPattern::FMLAv4i32_indexed_OP1: |
3863 | case MachineCombinerPattern::FMLAv4i32_indexed_OP2: |
3864 | case MachineCombinerPattern::FMLSv4i16_indexed_OP2: |
3865 | case MachineCombinerPattern::FMLSv8i16_indexed_OP1: |
3866 | case MachineCombinerPattern::FMLSv8i16_indexed_OP2: |
3867 | case MachineCombinerPattern::FMLSv1i32_indexed_OP2: |
3868 | case MachineCombinerPattern::FMLSv1i64_indexed_OP2: |
3869 | case MachineCombinerPattern::FMLSv2i32_indexed_OP2: |
3870 | case MachineCombinerPattern::FMLSv2i64_indexed_OP2: |
3871 | case MachineCombinerPattern::FMLSv4f16_OP2: |
3872 | case MachineCombinerPattern::FMLSv8f16_OP1: |
3873 | case MachineCombinerPattern::FMLSv8f16_OP2: |
3874 | case MachineCombinerPattern::FMLSv2f32_OP2: |
3875 | case MachineCombinerPattern::FMLSv2f64_OP2: |
3876 | case MachineCombinerPattern::FMLSv4i32_indexed_OP2: |
3877 | case MachineCombinerPattern::FMLSv4f32_OP2: |
3878 | return true; |
3879 | } // end switch (Pattern) |
3880 | return false; |
3881 | } |
3882 | /// Return true when there is potentially a faster code sequence for an |
3883 | /// instruction chain ending in \p Root. All potential patterns are listed in |
3884 | /// the \p Patterns vector. Patterns should be sorted in priority order since the
3885 | /// pattern evaluator stops checking as soon as it finds a faster sequence. |
3886 | |
3887 | bool AArch64InstrInfo::getMachineCombinerPatterns( |
3888 | MachineInstr &Root, |
3889 | SmallVectorImpl<MachineCombinerPattern> &Patterns) const { |
3890 | // Integer patterns |
3891 | if (getMaddPatterns(Root, Patterns)) |
3892 | return true; |
3893 | // Floating point patterns |
3894 | if (getFMAPatterns(Root, Patterns)) |
3895 | return true; |
3896 | |
3897 | return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns); |
3898 | } |
3899 | |
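     | // Operand layout of the generated instruction:
     | //  Default:     scalar (F)MADD style - mul operands first, addend last.
     | //  Indexed:     lane-indexed FMLA/FMLS - addend first, lane immediate last.
     | //  Accumulator: vector FMLA/FMLS - addend (accumulator) first.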
3900 | enum class FMAInstKind { Default, Indexed, Accumulator }; |
3901 | /// genFusedMultiply - Generate fused multiply instructions. |
3902 | /// This function supports both integer and floating point instructions. |
3903 | /// A typical example: |
3904 | /// F|MUL I=A,B,0 |
3905 | /// F|ADD R,I,C |
3906 | /// ==> F|MADD R,A,B,C |
3907 | /// \param MF Containing MachineFunction |
3908 | /// \param MRI Register information |
3909 | /// \param TII Target information |
3910 | /// \param Root is the F|ADD instruction |
3911 | /// \param [out] InsInstrs is a vector of machine instructions and will |
3912 | /// contain the generated madd instruction |
3913 | /// \param IdxMulOpd is index of operand in Root that is the result of |
3914 | /// the F|MUL. In the example above IdxMulOpd is 1. |
3915 | /// \param MaddOpc the opcode of the f|madd instruction
3916 | /// \param RC Register class of operands |
3917 | /// \param kind The kind of fma instruction (addressing mode) to be generated
3918 | /// \param ReplacedAddend is the result register from the instruction |
3919 | /// replacing the non-combined operand, if any. |
3920 | static MachineInstr * |
3921 | genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI, |
3922 | const TargetInstrInfo *TII, MachineInstr &Root, |
3923 | SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd, |
3924 | unsigned MaddOpc, const TargetRegisterClass *RC, |
3925 | FMAInstKind kind = FMAInstKind::Default, |
3926 | const Register *ReplacedAddend = nullptr) { |
3927 | assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3928 | |
3929 | unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1; |
3930 | MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg()); |
3931 | Register ResultReg = Root.getOperand(0).getReg(); |
3932 | Register SrcReg0 = MUL->getOperand(1).getReg(); |
3933 | bool Src0IsKill = MUL->getOperand(1).isKill(); |
3934 | Register SrcReg1 = MUL->getOperand(2).getReg(); |
3935 | bool Src1IsKill = MUL->getOperand(2).isKill(); |
3936 | |
3937 | unsigned SrcReg2; |
3938 | bool Src2IsKill; |
3939 | if (ReplacedAddend) { |
3940 | // If we just generated a new addend, we must be its only use.
3941 | SrcReg2 = *ReplacedAddend; |
3942 | Src2IsKill = true; |
3943 | } else { |
3944 | SrcReg2 = Root.getOperand(IdxOtherOpd).getReg(); |
3945 | Src2IsKill = Root.getOperand(IdxOtherOpd).isKill(); |
3946 | } |
3947 | |
3948 | if (Register::isVirtualRegister(ResultReg)) |
3949 | MRI.constrainRegClass(ResultReg, RC); |
3950 | if (Register::isVirtualRegister(SrcReg0)) |
3951 | MRI.constrainRegClass(SrcReg0, RC); |
3952 | if (Register::isVirtualRegister(SrcReg1)) |
3953 | MRI.constrainRegClass(SrcReg1, RC); |
3954 | if (Register::isVirtualRegister(SrcReg2)) |
3955 | MRI.constrainRegClass(SrcReg2, RC); |
3956 | |
3957 | MachineInstrBuilder MIB; |
3958 | if (kind == FMAInstKind::Default) |
3959 | MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg) |
3960 | .addReg(SrcReg0, getKillRegState(Src0IsKill)) |
3961 | .addReg(SrcReg1, getKillRegState(Src1IsKill)) |
3962 | .addReg(SrcReg2, getKillRegState(Src2IsKill)); |
3963 | else if (kind == FMAInstKind::Indexed) |
3964 | MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg) |
3965 | .addReg(SrcReg2, getKillRegState(Src2IsKill)) |
3966 | .addReg(SrcReg0, getKillRegState(Src0IsKill)) |
3967 | .addReg(SrcReg1, getKillRegState(Src1IsKill)) |
3968 | .addImm(MUL->getOperand(3).getImm()); |
3969 | else if (kind == FMAInstKind::Accumulator) |
3970 | MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg) |
3971 | .addReg(SrcReg2, getKillRegState(Src2IsKill)) |
3972 | .addReg(SrcReg0, getKillRegState(Src0IsKill)) |
3973 | .addReg(SrcReg1, getKillRegState(Src1IsKill)); |
3974 | else |
3975 | assert(false && "Invalid FMA instruction kind \n");
3976 | // Insert the MADD (MADD, FMA, FMS, FMLA, FMLS)
3977 | InsInstrs.push_back(MIB); |
3978 | return MUL; |
3979 | } |
3980 | |
3981 | /// genMaddR - Generate madd instruction and combine mul and add using |
3982 | /// an extra virtual register |
3983 | /// Example - an ADD intermediate needs to be stored in a register: |
3984 | /// MUL I=A,B,0 |
3985 | /// ADD R,I,Imm |
3986 | /// ==> ORR V, ZR, Imm |
3987 | /// ==> MADD R,A,B,V |
3988 | /// \param MF Containing MachineFunction |
3989 | /// \param MRI Register information |
3990 | /// \param TII Target information |
3991 | /// \param Root is the ADD instruction |
3992 | /// \param [out] InsInstrs is a vector of machine instructions and will |
3993 | /// contain the generated madd instruction |
3994 | /// \param IdxMulOpd is index of operand in Root that is the result of |
3995 | /// the MUL. In the example above IdxMulOpd is 1. |
3996 | /// \param MaddOpc the opcode of the madd instruction
3997 | /// \param VR is a virtual register that holds the value of an ADD operand |
3998 | /// (V in the example above). |
3999 | /// \param RC Register class of operands |
4000 | static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI, |
4001 | const TargetInstrInfo *TII, MachineInstr &Root, |
4002 | SmallVectorImpl<MachineInstr *> &InsInstrs, |
4003 | unsigned IdxMulOpd, unsigned MaddOpc, unsigned VR, |
4004 | const TargetRegisterClass *RC) { |
4005 | assert(IdxMulOpd == 1 || IdxMulOpd == 2);
4006 | |
4007 | MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg()); |
4008 | Register ResultReg = Root.getOperand(0).getReg(); |
4009 | Register SrcReg0 = MUL->getOperand(1).getReg(); |
4010 | bool Src0IsKill = MUL->getOperand(1).isKill(); |
4011 | Register SrcReg1 = MUL->getOperand(2).getReg(); |
4012 | bool Src1IsKill = MUL->getOperand(2).isKill(); |
4013 | |
4014 | if (Register::isVirtualRegister(ResultReg)) |
4015 | MRI.constrainRegClass(ResultReg, RC); |
4016 | if (Register::isVirtualRegister(SrcReg0)) |
4017 | MRI.constrainRegClass(SrcReg0, RC); |
4018 | if (Register::isVirtualRegister(SrcReg1)) |
4019 | MRI.constrainRegClass(SrcReg1, RC); |
4020 | if (Register::isVirtualRegister(VR)) |
4021 | MRI.constrainRegClass(VR, RC); |
4022 | |
4023 | MachineInstrBuilder MIB = |
4024 | BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg) |
4025 | .addReg(SrcReg0, getKillRegState(Src0IsKill)) |
4026 | .addReg(SrcReg1, getKillRegState(Src1IsKill)) |
4027 | .addReg(VR); |
4028 | // Insert the MADD |
4029 | InsInstrs.push_back(MIB); |
4030 | return MUL; |
4031 | } |
4032 | |
4033 | /// When getMachineCombinerPatterns() finds potential patterns, |
4034 | /// this function generates the instructions that could replace the |
4035 | /// original code sequence.
4036 | void AArch64InstrInfo::genAlternativeCodeSequence( |
4037 | MachineInstr &Root, MachineCombinerPattern Pattern, |
4038 | SmallVectorImpl<MachineInstr *> &InsInstrs, |
4039 | SmallVectorImpl<MachineInstr *> &DelInstrs, |
4040 | DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const { |
4041 | MachineBasicBlock &MBB = *Root.getParent(); |
4042 | MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
4043 | MachineFunction &MF = *MBB.getParent(); |
4044 | const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); |
4045 | |
4046 | MachineInstr *MUL = nullptr;
4047 | const TargetRegisterClass *RC; |
4048 | unsigned Opc; |
4049 | switch (Pattern) { |
4050 | default: |
4051 | // Reassociate instructions. |
4052 | TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs, |
4053 | DelInstrs, InstrIdxForVirtReg); |
4054 | return; |
4055 | case MachineCombinerPattern::MULADDW_OP1: |
4056 | case MachineCombinerPattern::MULADDX_OP1: |
4057 | // MUL I=A,B,0 |
4058 | // ADD R,I,C |
4059 | // ==> MADD R,A,B,C |
4060 | // --- Create(MADD); |
4061 | if (Pattern == MachineCombinerPattern::MULADDW_OP1) { |
4062 | Opc = AArch64::MADDWrrr; |
4063 | RC = &AArch64::GPR32RegClass; |
4064 | } else { |
4065 | Opc = AArch64::MADDXrrr; |
4066 | RC = &AArch64::GPR64RegClass; |
4067 | } |
4068 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC); |
4069 | break; |
4070 | case MachineCombinerPattern::MULADDW_OP2: |
4071 | case MachineCombinerPattern::MULADDX_OP2: |
4072 | // MUL I=A,B,0 |
4073 | // ADD R,C,I |
4074 | // ==> MADD R,A,B,C |
4075 | // --- Create(MADD); |
4076 | if (Pattern == MachineCombinerPattern::MULADDW_OP2) { |
4077 | Opc = AArch64::MADDWrrr; |
4078 | RC = &AArch64::GPR32RegClass; |
4079 | } else { |
4080 | Opc = AArch64::MADDXrrr; |
4081 | RC = &AArch64::GPR64RegClass; |
4082 | } |
4083 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC); |
4084 | break; |
4085 | case MachineCombinerPattern::MULADDWI_OP1: |
4086 | case MachineCombinerPattern::MULADDXI_OP1: { |
4087 | // MUL I=A,B,0 |
4088 | // ADD R,I,Imm |
4089 | // ==> ORR V, ZR, Imm |
4090 | // ==> MADD R,A,B,V |
4091 | // --- Create(MADD); |
4092 | const TargetRegisterClass *OrrRC; |
4093 | unsigned BitSize, OrrOpc, ZeroReg; |
4094 | if (Pattern == MachineCombinerPattern::MULADDWI_OP1) { |
4095 | OrrOpc = AArch64::ORRWri; |
4096 | OrrRC = &AArch64::GPR32spRegClass; |
4097 | BitSize = 32; |
4098 | ZeroReg = AArch64::WZR; |
4099 | Opc = AArch64::MADDWrrr; |
4100 | RC = &AArch64::GPR32RegClass; |
4101 | } else { |
4102 | OrrOpc = AArch64::ORRXri; |
4103 | OrrRC = &AArch64::GPR64spRegClass; |
4104 | BitSize = 64; |
4105 | ZeroReg = AArch64::XZR; |
4106 | Opc = AArch64::MADDXrrr; |
4107 | RC = &AArch64::GPR64RegClass; |
4108 | } |
4109 | Register NewVR = MRI.createVirtualRegister(OrrRC); |
4110 | uint64_t Imm = Root.getOperand(2).getImm(); |
4111 | |
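     | // ADDWri/ADDXri may carry an optional 'LSL #12' on the immediate
     | // (operand 3); fold the shift in before checking encodability.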
4112 | if (Root.getOperand(3).isImm()) { |
4113 | unsigned Val = Root.getOperand(3).getImm(); |
4114 | Imm = Imm << Val; |
4115 | } |
4116 | uint64_t UImm = SignExtend64(Imm, BitSize); |
4117 | uint64_t Encoding; |
4118 | if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) { |
4119 | MachineInstrBuilder MIB1 = |
4120 | BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR) |
4121 | .addReg(ZeroReg) |
4122 | .addImm(Encoding); |
4123 | InsInstrs.push_back(MIB1); |
4124 | InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0)); |
4125 | MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC); |
4126 | } |
4127 | break; |
4128 | } |
4129 | case MachineCombinerPattern::MULSUBW_OP1: |
4130 | case MachineCombinerPattern::MULSUBX_OP1: { |
4131 | // MUL I=A,B,0 |
4132 | // SUB R,I, C |
4133 | // ==> SUB V, 0, C |
4134 | // ==> MADD R,A,B,V // = -C + A*B |
4135 | // --- Create(MADD); |
4136 | const TargetRegisterClass *SubRC; |
4137 | unsigned SubOpc, ZeroReg; |
4138 | if (Pattern == MachineCombinerPattern::MULSUBW_OP1) { |
4139 | SubOpc = AArch64::SUBWrr; |
4140 | SubRC = &AArch64::GPR32spRegClass; |
4141 | ZeroReg = AArch64::WZR; |
4142 | Opc = AArch64::MADDWrrr; |
4143 | RC = &AArch64::GPR32RegClass; |
4144 | } else { |
4145 | SubOpc = AArch64::SUBXrr; |
4146 | SubRC = &AArch64::GPR64spRegClass; |
4147 | ZeroReg = AArch64::XZR; |
4148 | Opc = AArch64::MADDXrrr; |
4149 | RC = &AArch64::GPR64RegClass; |
4150 | } |
4151 | Register NewVR = MRI.createVirtualRegister(SubRC); |
4152 | // SUB NewVR, 0, C |
4153 | MachineInstrBuilder MIB1 = |
4154 | BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR) |
4155 | .addReg(ZeroReg) |
4156 | .add(Root.getOperand(2)); |
4157 | InsInstrs.push_back(MIB1); |
4158 | InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0)); |
4159 | MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC); |
4160 | break; |
4161 | } |
4162 | case MachineCombinerPattern::MULSUBW_OP2: |
4163 | case MachineCombinerPattern::MULSUBX_OP2: |
4164 | // MUL I=A,B,0 |
4165 | // SUB R,C,I |
4166 | // ==> MSUB R,A,B,C (computes C - A*B) |
4167 | // --- Create(MSUB); |
4168 | if (Pattern == MachineCombinerPattern::MULSUBW_OP2) { |
4169 | Opc = AArch64::MSUBWrrr; |
4170 | RC = &AArch64::GPR32RegClass; |
4171 | } else { |
4172 | Opc = AArch64::MSUBXrrr; |
4173 | RC = &AArch64::GPR64RegClass; |
4174 | } |
4175 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC); |
4176 | break; |
4177 | case MachineCombinerPattern::MULSUBWI_OP1: |
4178 | case MachineCombinerPattern::MULSUBXI_OP1: { |
4179 | // MUL I=A,B,0 |
4180 | // SUB R,I, Imm |
4181 | // ==> ORR V, ZR, -Imm |
4182 | // ==> MADD R,A,B,V // = -Imm + A*B |
4183 | // --- Create(MADD); |
4184 | const TargetRegisterClass *OrrRC; |
4185 | unsigned BitSize, OrrOpc, ZeroReg; |
4186 | if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) { |
4187 | OrrOpc = AArch64::ORRWri; |
4188 | OrrRC = &AArch64::GPR32spRegClass; |
4189 | BitSize = 32; |
4190 | ZeroReg = AArch64::WZR; |
4191 | Opc = AArch64::MADDWrrr; |
4192 | RC = &AArch64::GPR32RegClass; |
4193 | } else { |
4194 | OrrOpc = AArch64::ORRXri; |
4195 | OrrRC = &AArch64::GPR64spRegClass; |
4196 | BitSize = 64; |
4197 | ZeroReg = AArch64::XZR; |
4198 | Opc = AArch64::MADDXrrr; |
4199 | RC = &AArch64::GPR64RegClass; |
4200 | } |
4201 | Register NewVR = MRI.createVirtualRegister(OrrRC); |
4202 | uint64_t Imm = Root.getOperand(2).getImm(); |
4203 | if (Root.getOperand(3).isImm()) { |
4204 | unsigned Val = Root.getOperand(3).getImm(); |
4205 | Imm = Imm << Val; |
4206 | } |
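     | // Negate the immediate so that MADD with the ORR'd value computes
     | // A*B - Imm, matching the original SUB.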
4207 | uint64_t UImm = SignExtend64(-Imm, BitSize); |
4208 | uint64_t Encoding; |
4209 | if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) { |
4210 | MachineInstrBuilder MIB1 = |
4211 | BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR) |
4212 | .addReg(ZeroReg) |
4213 | .addImm(Encoding); |
4214 | InsInstrs.push_back(MIB1); |
4215 | InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0)); |
4216 | MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC); |
4217 | } |
4218 | break; |
4219 | } |
4220 | // Floating Point Support |
4221 | case MachineCombinerPattern::FMULADDH_OP1: |
4222 | Opc = AArch64::FMADDHrrr; |
4223 | RC = &AArch64::FPR16RegClass; |
4224 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC); |
4225 | break; |
4226 | case MachineCombinerPattern::FMULADDS_OP1: |
4227 | Opc = AArch64::FMADDSrrr; |
4228 | RC = &AArch64::FPR32RegClass; |
4229 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC); |
4230 | break; |
4231 | case MachineCombinerPattern::FMULADDD_OP1: |
4232 | Opc = AArch64::FMADDDrrr; |
4233 | RC = &AArch64::FPR64RegClass; |
4234 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC); |
4235 | break; |
4236 | |
4237 | case MachineCombinerPattern::FMULADDH_OP2: |
4238 | Opc = AArch64::FMADDHrrr; |
4239 | RC = &AArch64::FPR16RegClass; |
4240 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC); |
4241 | break; |
4242 | case MachineCombinerPattern::FMULADDS_OP2: |
4243 | Opc = AArch64::FMADDSrrr; |
4244 | RC = &AArch64::FPR32RegClass; |
4245 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC); |
4246 | break; |
4247 | case MachineCombinerPattern::FMULADDD_OP2: |
4248 | Opc = AArch64::FMADDDrrr; |
4249 | RC = &AArch64::FPR64RegClass; |
4250 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC); |
4251 | break; |
4252 | |
4253 | case MachineCombinerPattern::FMLAv1i32_indexed_OP1: |
4254 | Opc = AArch64::FMLAv1i32_indexed; |
4255 | RC = &AArch64::FPR32RegClass; |
4256 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4257 | FMAInstKind::Indexed); |
4258 | break; |
4259 | case MachineCombinerPattern::FMLAv1i32_indexed_OP2: |
4260 | Opc = AArch64::FMLAv1i32_indexed; |
4261 | RC = &AArch64::FPR32RegClass; |
4262 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4263 | FMAInstKind::Indexed); |
4264 | break; |
4265 | |
4266 | case MachineCombinerPattern::FMLAv1i64_indexed_OP1: |
4267 | Opc = AArch64::FMLAv1i64_indexed; |
4268 | RC = &AArch64::FPR64RegClass; |
4269 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4270 | FMAInstKind::Indexed); |
4271 | break; |
4272 | case MachineCombinerPattern::FMLAv1i64_indexed_OP2: |
4273 | Opc = AArch64::FMLAv1i64_indexed; |
4274 | RC = &AArch64::FPR64RegClass; |
4275 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4276 | FMAInstKind::Indexed); |
4277 | break; |
4278 | |
4279 | case MachineCombinerPattern::FMLAv4i16_indexed_OP1: |
4280 | RC = &AArch64::FPR64RegClass; |
4281 | Opc = AArch64::FMLAv4i16_indexed; |
4282 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4283 | FMAInstKind::Indexed); |
4284 | break; |
4285 | case MachineCombinerPattern::FMLAv4f16_OP1: |
4286 | RC = &AArch64::FPR64RegClass; |
4287 | Opc = AArch64::FMLAv4f16; |
4288 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4289 | FMAInstKind::Accumulator); |
4290 | break; |
4291 | case MachineCombinerPattern::FMLAv4i16_indexed_OP2: |
4292 | RC = &AArch64::FPR64RegClass; |
4293 | Opc = AArch64::FMLAv4i16_indexed; |
4294 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4295 | FMAInstKind::Indexed); |
4296 | break; |
4297 | case MachineCombinerPattern::FMLAv4f16_OP2: |
4298 | RC = &AArch64::FPR64RegClass; |
4299 | Opc = AArch64::FMLAv4f16; |
4300 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4301 | FMAInstKind::Accumulator); |
4302 | break; |
4303 | |
4304 | case MachineCombinerPattern::FMLAv2i32_indexed_OP1: |
4305 | case MachineCombinerPattern::FMLAv2f32_OP1: |
4306 | RC = &AArch64::FPR64RegClass; |
4307 | if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP1) { |
4308 | Opc = AArch64::FMLAv2i32_indexed; |
4309 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4310 | FMAInstKind::Indexed); |
4311 | } else { |
4312 | Opc = AArch64::FMLAv2f32; |
4313 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4314 | FMAInstKind::Accumulator); |
4315 | } |
4316 | break; |
4317 | case MachineCombinerPattern::FMLAv2i32_indexed_OP2: |
4318 | case MachineCombinerPattern::FMLAv2f32_OP2: |
4319 | RC = &AArch64::FPR64RegClass; |
4320 | if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP2) { |
4321 | Opc = AArch64::FMLAv2i32_indexed; |
4322 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4323 | FMAInstKind::Indexed); |
4324 | } else { |
4325 | Opc = AArch64::FMLAv2f32; |
4326 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4327 | FMAInstKind::Accumulator); |
4328 | } |
4329 | break; |
4330 | |
4331 | case MachineCombinerPattern::FMLAv8i16_indexed_OP1: |
4332 | RC = &AArch64::FPR128RegClass; |
4333 | Opc = AArch64::FMLAv8i16_indexed; |
4334 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4335 | FMAInstKind::Indexed); |
4336 | break; |
4337 | case MachineCombinerPattern::FMLAv8f16_OP1: |
4338 | RC = &AArch64::FPR128RegClass; |
4339 | Opc = AArch64::FMLAv8f16; |
4340 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4341 | FMAInstKind::Accumulator); |
4342 | break; |
4343 | case MachineCombinerPattern::FMLAv8i16_indexed_OP2: |
4344 | RC = &AArch64::FPR128RegClass; |
4345 | Opc = AArch64::FMLAv8i16_indexed; |
4346 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4347 | FMAInstKind::Indexed); |
4348 | break; |
4349 | case MachineCombinerPattern::FMLAv8f16_OP2: |
4350 | RC = &AArch64::FPR128RegClass; |
4351 | Opc = AArch64::FMLAv8f16; |
4352 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4353 | FMAInstKind::Accumulator); |
4354 | break; |
4355 | |
4356 | case MachineCombinerPattern::FMLAv2i64_indexed_OP1: |
4357 | case MachineCombinerPattern::FMLAv2f64_OP1: |
4358 | RC = &AArch64::FPR128RegClass; |
4359 | if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP1) { |
4360 | Opc = AArch64::FMLAv2i64_indexed; |
4361 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4362 | FMAInstKind::Indexed); |
4363 | } else { |
4364 | Opc = AArch64::FMLAv2f64; |
4365 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4366 | FMAInstKind::Accumulator); |
4367 | } |
4368 | break; |
4369 | case MachineCombinerPattern::FMLAv2i64_indexed_OP2: |
4370 | case MachineCombinerPattern::FMLAv2f64_OP2: |
4371 | RC = &AArch64::FPR128RegClass; |
4372 | if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP2) { |
4373 | Opc = AArch64::FMLAv2i64_indexed; |
4374 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4375 | FMAInstKind::Indexed); |
4376 | } else { |
4377 | Opc = AArch64::FMLAv2f64; |
4378 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4379 | FMAInstKind::Accumulator); |
4380 | } |
4381 | break; |
4382 | |
4383 | case MachineCombinerPattern::FMLAv4i32_indexed_OP1: |
4384 | case MachineCombinerPattern::FMLAv4f32_OP1: |
4385 | RC = &AArch64::FPR128RegClass; |
4386 | if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP1) { |
4387 | Opc = AArch64::FMLAv4i32_indexed; |
4388 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4389 | FMAInstKind::Indexed); |
4390 | } else { |
4391 | Opc = AArch64::FMLAv4f32; |
4392 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4393 | FMAInstKind::Accumulator); |
4394 | } |
4395 | break; |
4396 | |
4397 | case MachineCombinerPattern::FMLAv4i32_indexed_OP2: |
4398 | case MachineCombinerPattern::FMLAv4f32_OP2: |
4399 | RC = &AArch64::FPR128RegClass; |
4400 | if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP2) { |
4401 | Opc = AArch64::FMLAv4i32_indexed; |
4402 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4403 | FMAInstKind::Indexed); |
4404 | } else { |
4405 | Opc = AArch64::FMLAv4f32; |
4406 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4407 | FMAInstKind::Accumulator); |
4408 | } |
4409 | break; |
4410 | |
4411 | case MachineCombinerPattern::FMULSUBH_OP1: |
4412 | Opc = AArch64::FNMSUBHrrr; |
4413 | RC = &AArch64::FPR16RegClass; |
4414 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC); |
4415 | break; |
4416 | case MachineCombinerPattern::FMULSUBS_OP1: |
4417 | Opc = AArch64::FNMSUBSrrr; |
4418 | RC = &AArch64::FPR32RegClass; |
4419 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC); |
4420 | break; |
4421 | case MachineCombinerPattern::FMULSUBD_OP1: |
4422 | Opc = AArch64::FNMSUBDrrr; |
4423 | RC = &AArch64::FPR64RegClass; |
4424 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC); |
4425 | break; |
4426 | |
4427 | case MachineCombinerPattern::FNMULSUBH_OP1: |
4428 | Opc = AArch64::FNMADDHrrr; |
4429 | RC = &AArch64::FPR16RegClass; |
4430 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC); |
4431 | break; |
4432 | case MachineCombinerPattern::FNMULSUBS_OP1: |
4433 | Opc = AArch64::FNMADDSrrr; |
4434 | RC = &AArch64::FPR32RegClass; |
4435 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC); |
4436 | break; |
4437 | case MachineCombinerPattern::FNMULSUBD_OP1: |
4438 | Opc = AArch64::FNMADDDrrr; |
4439 | RC = &AArch64::FPR64RegClass; |
4440 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC); |
4441 | break; |
4442 | |
4443 | case MachineCombinerPattern::FMULSUBH_OP2: |
4444 | Opc = AArch64::FMSUBHrrr; |
4445 | RC = &AArch64::FPR16RegClass; |
4446 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC); |
4447 | break; |
4448 | case MachineCombinerPattern::FMULSUBS_OP2: |
4449 | Opc = AArch64::FMSUBSrrr; |
4450 | RC = &AArch64::FPR32RegClass; |
4451 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC); |
4452 | break; |
4453 | case MachineCombinerPattern::FMULSUBD_OP2: |
4454 | Opc = AArch64::FMSUBDrrr; |
4455 | RC = &AArch64::FPR64RegClass; |
4456 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC); |
4457 | break; |
4458 | |
4459 | case MachineCombinerPattern::FMLSv1i32_indexed_OP2: |
4460 | Opc = AArch64::FMLSv1i32_indexed; |
4461 | RC = &AArch64::FPR32RegClass; |
4462 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4463 | FMAInstKind::Indexed); |
4464 | break; |
4465 | |
4466 | case MachineCombinerPattern::FMLSv1i64_indexed_OP2: |
4467 | Opc = AArch64::FMLSv1i64_indexed; |
4468 | RC = &AArch64::FPR64RegClass; |
4469 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4470 | FMAInstKind::Indexed); |
4471 | break; |
4472 | |
4473 | case MachineCombinerPattern::FMLSv4f16_OP2: |
4474 | RC = &AArch64::FPR64RegClass; |
4475 | Opc = AArch64::FMLSv4f16; |
4476 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4477 | FMAInstKind::Accumulator); |
4478 | break; |
4479 | case MachineCombinerPattern::FMLSv4i16_indexed_OP2: |
4480 | RC = &AArch64::FPR64RegClass; |
4481 | Opc = AArch64::FMLSv4i16_indexed; |
4482 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4483 | FMAInstKind::Indexed); |
4484 | break; |
4485 | |
4486 | case MachineCombinerPattern::FMLSv2f32_OP2: |
4487 | case MachineCombinerPattern::FMLSv2i32_indexed_OP2: |
4488 | RC = &AArch64::FPR64RegClass; |
4489 | if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP2) { |
4490 | Opc = AArch64::FMLSv2i32_indexed; |
4491 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4492 | FMAInstKind::Indexed); |
4493 | } else { |
4494 | Opc = AArch64::FMLSv2f32; |
4495 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4496 | FMAInstKind::Accumulator); |
4497 | } |
4498 | break; |
4499 | |
4500 | case MachineCombinerPattern::FMLSv8f16_OP1: |
4501 | RC = &AArch64::FPR128RegClass; |
4502 | Opc = AArch64::FMLSv8f16; |
4503 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4504 | FMAInstKind::Accumulator); |
4505 | break; |
4506 | case MachineCombinerPattern::FMLSv8i16_indexed_OP1: |
4507 | RC = &AArch64::FPR128RegClass; |
4508 | Opc = AArch64::FMLSv8i16_indexed; |
4509 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4510 | FMAInstKind::Indexed); |
4511 | break; |
4512 | |
4513 | case MachineCombinerPattern::FMLSv8f16_OP2: |
4514 | RC = &AArch64::FPR128RegClass; |
4515 | Opc = AArch64::FMLSv8f16; |
4516 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4517 | FMAInstKind::Accumulator); |
4518 | break; |
4519 | case MachineCombinerPattern::FMLSv8i16_indexed_OP2: |
4520 | RC = &AArch64::FPR128RegClass; |
4521 | Opc = AArch64::FMLSv8i16_indexed; |
4522 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4523 | FMAInstKind::Indexed); |
4524 | break; |
4525 | |
4526 | case MachineCombinerPattern::FMLSv2f64_OP2: |
4527 | case MachineCombinerPattern::FMLSv2i64_indexed_OP2: |
4528 | RC = &AArch64::FPR128RegClass; |
4529 | if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP2) { |
4530 | Opc = AArch64::FMLSv2i64_indexed; |
4531 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4532 | FMAInstKind::Indexed); |
4533 | } else { |
4534 | Opc = AArch64::FMLSv2f64; |
4535 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4536 | FMAInstKind::Accumulator); |
4537 | } |
4538 | break; |
4539 | |
4540 | case MachineCombinerPattern::FMLSv4f32_OP2: |
4541 | case MachineCombinerPattern::FMLSv4i32_indexed_OP2: |
4542 | RC = &AArch64::FPR128RegClass; |
4543 | if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP2) { |
4544 | Opc = AArch64::FMLSv4i32_indexed; |
4545 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4546 | FMAInstKind::Indexed); |
4547 | } else { |
4548 | Opc = AArch64::FMLSv4f32; |
4549 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC, |
4550 | FMAInstKind::Accumulator); |
4551 | } |
4552 | break; |
4553 | case MachineCombinerPattern::FMLSv2f32_OP1: |
4554 | case MachineCombinerPattern::FMLSv2i32_indexed_OP1: { |
4555 | RC = &AArch64::FPR64RegClass; |
4556 | Register NewVR = MRI.createVirtualRegister(RC); |
4557 | MachineInstrBuilder MIB1 = |
4558 | BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f32), NewVR) |
4559 | .add(Root.getOperand(2)); |
4560 | InsInstrs.push_back(MIB1); |
4561 | InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0)); |
4562 | if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP1) { |
4563 | Opc = AArch64::FMLAv2i32_indexed; |
4564 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4565 | FMAInstKind::Indexed, &NewVR); |
4566 | } else { |
4567 | Opc = AArch64::FMLAv2f32; |
4568 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4569 | FMAInstKind::Accumulator, &NewVR); |
4570 | } |
4571 | break; |
4572 | } |
4573 | case MachineCombinerPattern::FMLSv4f32_OP1: |
4574 | case MachineCombinerPattern::FMLSv4i32_indexed_OP1: { |
4575 | RC = &AArch64::FPR128RegClass; |
4576 | Register NewVR = MRI.createVirtualRegister(RC); |
4577 | MachineInstrBuilder MIB1 = |
4578 | BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv4f32), NewVR) |
4579 | .add(Root.getOperand(2)); |
4580 | InsInstrs.push_back(MIB1); |
4581 | InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0)); |
4582 | if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP1) { |
4583 | Opc = AArch64::FMLAv4i32_indexed; |
4584 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4585 | FMAInstKind::Indexed, &NewVR); |
4586 | } else { |
4587 | Opc = AArch64::FMLAv4f32; |
4588 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4589 | FMAInstKind::Accumulator, &NewVR); |
4590 | } |
4591 | break; |
4592 | } |
4593 | case MachineCombinerPattern::FMLSv2f64_OP1: |
4594 | case MachineCombinerPattern::FMLSv2i64_indexed_OP1: { |
4595 | RC = &AArch64::FPR128RegClass; |
4596 | Register NewVR = MRI.createVirtualRegister(RC); |
4597 | MachineInstrBuilder MIB1 = |
4598 | BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f64), NewVR) |
4599 | .add(Root.getOperand(2)); |
4600 | InsInstrs.push_back(MIB1); |
4601 | InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0)); |
4602 | if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP1) { |
4603 | Opc = AArch64::FMLAv2i64_indexed; |
4604 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4605 | FMAInstKind::Indexed, &NewVR); |
4606 | } else { |
4607 | Opc = AArch64::FMLAv2f64; |
4608 | MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC, |
4609 | FMAInstKind::Accumulator, &NewVR); |
4610 | } |
4611 | break; |
4612 | } |
4613 | } // end switch (Pattern) |
4614 | // Record MUL and ADD/SUB for deletion. MUL may be left unset if a
     | // pattern above bailed out (e.g. an immediate that can't be encoded).
4615 | if (MUL)
     | DelInstrs.push_back(MUL);
4616 | DelInstrs.push_back(&Root); |
4617 | } |
4618 | |
4619 | /// Replace csincr-branch sequence by simple conditional branch |
4620 | /// |
4621 | /// Examples: |
4622 | /// 1. \code |
4623 | /// csinc w9, wzr, wzr, <condition code> |
4624 | /// tbnz w9, #0, 0x44 |
4625 | /// \endcode |
4626 | /// to |
4627 | /// \code |
4628 | /// b.<inverted condition code> |
4629 | /// \endcode |
4630 | /// |
4631 | /// 2. \code |
4632 | /// csinc w9, wzr, wzr, <condition code> |
4633 | /// tbz w9, #0, 0x44 |
4634 | /// \endcode |
4635 | /// to |
4636 | /// \code |
4637 | /// b.<condition code> |
4638 | /// \endcode |
4639 | /// |
4640 | /// Replace compare and branch sequence by TBZ/TBNZ instruction when the |
4641 | /// compare's constant operand is a power of 2.
4642 | /// |
4643 | /// Examples: |
4644 | /// \code |
4645 | /// and w8, w8, #0x400 |
4646 | /// cbnz w8, L1 |
4647 | /// \endcode |
4648 | /// to |
4649 | /// \code |
4650 | /// tbnz w8, #10, L1 |
4651 | /// \endcode |
4652 | /// |
4653 | /// \param MI Conditional Branch |
4654 | /// \return True when the simple conditional branch is generated |
4655 | /// |
4656 | bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const { |
4657 | bool IsNegativeBranch = false; |
4658 | bool IsTestAndBranch = false; |
4659 | unsigned TargetBBInMI = 0; |
4660 | switch (MI.getOpcode()) { |
4661 | default: |
4662 | llvm_unreachable("Unknown branch instruction?");
4663 | case AArch64::Bcc: |
4664 | return false; |
4665 | case AArch64::CBZW: |
4666 | case AArch64::CBZX: |
4667 | TargetBBInMI = 1; |
4668 | break; |
4669 | case AArch64::CBNZW: |
4670 | case AArch64::CBNZX: |
4671 | TargetBBInMI = 1; |
4672 | IsNegativeBranch = true; |
4673 | break; |
4674 | case AArch64::TBZW: |
4675 | case AArch64::TBZX: |
4676 | TargetBBInMI = 2; |
4677 | IsTestAndBranch = true; |
4678 | break; |
4679 | case AArch64::TBNZW: |
4680 | case AArch64::TBNZX: |
4681 | TargetBBInMI = 2; |
4682 | IsNegativeBranch = true; |
4683 | IsTestAndBranch = true; |
4684 | break; |
4685 | } |
4686 | // So we increment a zero register and test for bits other |
4687 | // than bit 0? Conservatively bail out in case the verifier |
4688 | // missed this case. |
4689 | if (IsTestAndBranch && MI.getOperand(1).getImm()) |
4690 | return false; |
4691 | |
4692 | // Find Definition. |
4693 | assert(MI.getParent() && "Incomplete machine instruction\n");
4694 | MachineBasicBlock *MBB = MI.getParent(); |
4695 | MachineFunction *MF = MBB->getParent(); |
4696 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
4697 | Register VReg = MI.getOperand(0).getReg(); |
4698 | if (!Register::isVirtualRegister(VReg)) |
4699 | return false; |
4700 | |
4701 | MachineInstr *DefMI = MRI->getVRegDef(VReg); |
4702 | |
4703 | // Look through COPY instructions to find definition. |
4704 | while (DefMI->isCopy()) { |
4705 | Register CopyVReg = DefMI->getOperand(1).getReg(); |
4706 | if (!MRI->hasOneNonDBGUse(CopyVReg)) |
4707 | return false; |
4708 | if (!MRI->hasOneDef(CopyVReg)) |
4709 | return false; |
4710 | DefMI = MRI->getVRegDef(CopyVReg); |
4711 | } |
4712 | |
4713 | switch (DefMI->getOpcode()) { |
4714 | default: |
4715 | return false; |
4716 | // Fold AND into a TBZ/TBNZ if constant operand is power of 2. |
4717 | case AArch64::ANDWri: |
4718 | case AArch64::ANDXri: { |
4719 | if (IsTestAndBranch) |
4720 | return false; |
4721 | if (DefMI->getParent() != MBB) |
4722 | return false; |
4723 | if (!MRI->hasOneNonDBGUse(VReg)) |
4724 | return false; |
4725 | |
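     | // TB(N)Z tests a single bit, so the transformation only applies when
     | // the AND mask isolates exactly one bit.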
4726 | bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri); |
4727 | uint64_t Mask = AArch64_AM::decodeLogicalImmediate( |
4728 | DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64); |
4729 | if (!isPowerOf2_64(Mask)) |
4730 | return false; |
4731 | |
4732 | MachineOperand &MO = DefMI->getOperand(1); |
4733 | Register NewReg = MO.getReg(); |
4734 | if (!Register::isVirtualRegister(NewReg)) |
4735 | return false; |
4736 | |
4737 | assert(!MRI->def_empty(NewReg) && "Register must be defined.");
4738 | |
4739 | MachineBasicBlock &RefToMBB = *MBB; |
4740 | MachineBasicBlock *TBB = MI.getOperand(1).getMBB(); |
4741 | DebugLoc DL = MI.getDebugLoc(); |
4742 | unsigned Imm = Log2_64(Mask); |
4743 | unsigned Opc = (Imm < 32) |
4744 | ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW) |
4745 | : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX); |
4746 | MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc)) |
4747 | .addReg(NewReg) |
4748 | .addImm(Imm) |
4749 | .addMBB(TBB); |
4750 | // Register lives on to the TB(N)Z now.
4751 | MO.setIsKill(false); |
4752 | |
4753 | // For immediates smaller than 32, we need to use the 32-bit
4754 | // variant (W) in all cases, since the 64-bit variant cannot
4755 | // encode them.
4756 | // Therefore, if the input register is 64-bit, we need to take the
4757 | // 32-bit sub-register.
4758 | if (!Is32Bit && Imm < 32) |
4759 | NewMI->getOperand(0).setSubReg(AArch64::sub_32); |
4760 | MI.eraseFromParent(); |
4761 | return true; |
4762 | } |
4763 | // Look for CSINC |
4764 | case AArch64::CSINCWr: |
4765 | case AArch64::CSINCXr: { |
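     | // csinc Rd, zr, zr, cc materializes (cc ? 0 : 1), so a branch on Rd's
     | // low bit is equivalent to a direct conditional branch on cc.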
4766 | if (!(DefMI->getOperand(1).getReg() == AArch64::WZR && |
4767 | DefMI->getOperand(2).getReg() == AArch64::WZR) && |
4768 | !(DefMI->getOperand(1).getReg() == AArch64::XZR && |
4769 | DefMI->getOperand(2).getReg() == AArch64::XZR)) |
4770 | return false; |
4771 | |
4772 | if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1) |
4773 | return false; |
4774 | |
4775 | AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm(); |
4776 | // Convert only when the condition code is not modified between |
4777 | // the CSINC and the branch. The CC may be used by other |
4778 | // instructions in between. |
4779 | if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write)) |
4780 | return false; |
4781 | MachineBasicBlock &RefToMBB = *MBB; |
4782 | MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB(); |
4783 | DebugLoc DL = MI.getDebugLoc(); |
4784 | if (IsNegativeBranch) |
4785 | CC = AArch64CC::getInvertedCondCode(CC); |
4786 | BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB); |
4787 | MI.eraseFromParent(); |
4788 | return true; |
4789 | } |
4790 | } |
4791 | } |
4792 | |
4793 | std::pair<unsigned, unsigned> |
4794 | AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { |
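     | // The MO_FRAGMENT bits form one exclusive field (which fragment of the
     | // symbol is addressed); the remaining bits are independent bitmask flags.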
4795 | const unsigned Mask = AArch64II::MO_FRAGMENT; |
4796 | return std::make_pair(TF & Mask, TF & ~Mask); |
4797 | } |
4798 | |
4799 | ArrayRef<std::pair<unsigned, const char *>> |
4800 | AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const { |
4801 | using namespace AArch64II; |
4802 | |
4803 | static const std::pair<unsigned, const char *> TargetFlags[] = { |
4804 | {MO_PAGE, "aarch64-page"}, {MO_PAGEOFF, "aarch64-pageoff"}, |
4805 | {MO_G3, "aarch64-g3"}, {MO_G2, "aarch64-g2"}, |
4806 | {MO_G1, "aarch64-g1"}, {MO_G0, "aarch64-g0"}, |
4807 | {MO_HI12, "aarch64-hi12"}}; |
4808 | return makeArrayRef(TargetFlags); |
4809 | } |
4810 | |
4811 | ArrayRef<std::pair<unsigned, const char *>> |
4812 | AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const { |
4813 | using namespace AArch64II; |
4814 | |
4815 | static const std::pair<unsigned, const char *> TargetFlags[] = { |
4816 | {MO_COFFSTUB, "aarch64-coffstub"}, |
4817 | {MO_GOT, "aarch64-got"}, |
4818 | {MO_NC, "aarch64-nc"}, |
4819 | {MO_S, "aarch64-s"}, |
4820 | {MO_TLS, "aarch64-tls"}, |
4821 | {MO_DLLIMPORT, "aarch64-dllimport"}, |
4822 | {MO_PREL, "aarch64-prel"}, |
4823 | {MO_TAGGED, "aarch64-tagged"}}; |
4824 | return makeArrayRef(TargetFlags); |
4825 | } |
4826 | |
4827 | ArrayRef<std::pair<MachineMemOperand::Flags, const char *>> |
4828 | AArch64InstrInfo::getSerializableMachineMemOperandTargetFlags() const { |
4829 | static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] = |
4830 | {{MOSuppressPair, "aarch64-suppress-pair"}, |
4831 | {MOStridedAccess, "aarch64-strided-access"}}; |
4832 | return makeArrayRef(TargetFlags); |
4833 | } |
4834 | |
4835 | /// Constants defining how certain sequences should be outlined. |
4836 | /// This encompasses how an outlined function should be called, and what kind of |
4837 | /// frame should be emitted for that outlined function. |
4838 | /// |
4839 | /// \p MachineOutlinerDefault implies that the function should be called with |
4840 | /// a save and restore of LR to the stack. |
4841 | /// |
4842 | /// That is, |
4843 | /// |
4844 | /// I1      Save LR                    OUTLINED_FUNCTION:
4845 | /// I2 -->  BL OUTLINED_FUNCTION       I1
4846 | /// I3      Restore LR                 I2
4847 | ///                                    I3
4848 | ///                                    RET
4849 | /// |
4850 | /// * Call construction overhead: 3 (save + BL + restore) |
4851 | /// * Frame construction overhead: 1 (ret) |
4852 | /// * Requires stack fixups? Yes |
4853 | /// |
4854 | /// \p MachineOutlinerTailCall implies that the function is being created from |
4855 | /// a sequence of instructions ending in a return. |
4856 | /// |
4857 | /// That is, |
4858 | /// |
4859 | /// I1                                 OUTLINED_FUNCTION:
4860 | /// I2 -->  B OUTLINED_FUNCTION        I1
4861 | /// RET                                I2
4862 | ///                                    RET
4863 | /// |
4864 | /// * Call construction overhead: 1 (B) |
4865 | /// * Frame construction overhead: 0 (Return included in sequence) |
4866 | /// * Requires stack fixups? No |
4867 | /// |
4868 | /// \p MachineOutlinerNoLRSave implies that the function should be called using |
4869 | /// a BL instruction, but doesn't require LR to be saved and restored. This |
4870 | /// happens when LR is known to be dead. |
4871 | /// |
4872 | /// That is, |
4873 | /// |
4874 | /// I1                                 OUTLINED_FUNCTION:
4875 | /// I2 -->  BL OUTLINED_FUNCTION       I1
4876 | /// I3                                 I2
4877 | ///                                    I3
4878 | ///                                    RET
4879 | /// |
4880 | /// * Call construction overhead: 1 (BL) |
4881 | /// * Frame construction overhead: 1 (RET) |
4882 | /// * Requires stack fixups? No |
4883 | /// |
4884 | /// \p MachineOutlinerThunk implies that the function is being created from |
4885 | /// a sequence of instructions ending in a call. The outlined function is |
4886 | /// called with a BL instruction, and the outlined function tail-calls the |
4887 | /// original call destination. |
4888 | /// |
4889 | /// That is, |
4890 | /// |
4891 | /// I1 OUTLINED_FUNCTION: |
4892 | /// I2 --> BL OUTLINED_FUNCTION I1 |
4893 | /// BL f I2 |
4894 | /// B f |
4895 | /// * Call construction overhead: 1 (BL) |
4896 | /// * Frame construction overhead: 0 |
4897 | /// * Requires stack fixups? No |
4898 | /// |
4899 | /// \p MachineOutlinerRegSave implies that the function should be called with a |
4900 | /// save and restore of LR to an available register. This allows us to avoid |
4901 | /// stack fixups. Note that this outlining variant is compatible with the |
4902 | /// NoLRSave case. |
4903 | /// |
4904 | /// That is, |
4905 | /// |
4906 | /// I1 Save LR OUTLINED_FUNCTION: |
4907 | /// I2 --> BL OUTLINED_FUNCTION I1 |
4908 | /// I3 Restore LR I2 |
4909 | /// I3 |
4910 | /// RET |
4911 | /// |
4912 | /// * Call construction overhead: 3 (save + BL + restore) |
4913 | /// * Frame construction overhead: 1 (ret) |
4914 | /// * Requires stack fixups? No |
4915 | enum MachineOutlinerClass { |
4916 | MachineOutlinerDefault, /// Emit a save, restore, call, and return. |
4917 | MachineOutlinerTailCall, /// Only emit a branch. |
4918 | MachineOutlinerNoLRSave, /// Emit a call and return. |
4919 | MachineOutlinerThunk, /// Emit a call and tail-call. |
4920 | MachineOutlinerRegSave /// Same as default, but save to a register. |
4921 | }; |
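     | // [Editor's summary of the doc comment above, at 4 bytes per instruction]
     | //   Variant    Call bytes  Frame bytes  Stack fixups?
     | //   Default        12           4           yes
     | //   TailCall        4           0           no
     | //   NoLRSave        4           4           no
     | //   Thunk           4           0           no
     | //   RegSave        12           4           no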
4922 | |
4923 | enum MachineOutlinerMBBFlags { |
4924 | LRUnavailableSomewhere = 0x2, |
4925 | HasCalls = 0x4, |
4926 | UnsafeRegsDead = 0x8 |
4927 | }; |
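     | // [Editor's sketch] isMBBSafeToOutlineFrom() below ORs these bits into a
     | // per-block Flags word; e.g. a block containing a call in which W16, W17,
     | // and NZCV are all dead would carry Flags == (HasCalls | UnsafeRegsDead)
     | // == 0xC. getOutliningCandidateInfo() then ANDs the words of all
     | // candidates into FlagsSetInAll.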
4928 | |
4929 | unsigned |
4930 | AArch64InstrInfo::findRegisterToSaveLRTo(const outliner::Candidate &C) const { |
4931 | assert(C.LRUWasSet && "LRU wasn't set?");
4932 | MachineFunction *MF = C.getMF(); |
4933 | const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>( |
4934 | MF->getSubtarget().getRegisterInfo()); |
4935 | |
4936 | // Check if there is an available register across the sequence that we can |
4937 | // use. |
4938 | for (unsigned Reg : AArch64::GPR64RegClass) { |
4939 | if (!ARI->isReservedReg(*MF, Reg) && |
4940 | Reg != AArch64::LR && // LR is not reserved, but don't use it. |
4941 | Reg != AArch64::X16 && // X16 is not guaranteed to be preserved. |
4942 | Reg != AArch64::X17 && // Ditto for X17. |
4943 | C.LRU.available(Reg) && C.UsedInSequence.available(Reg)) |
4944 | return Reg; |
4945 | } |
4946 | |
4947 | // No suitable register. Return 0. |
4948 | return 0u; |
4949 | } |
4950 | |
4951 | outliner::OutlinedFunction |
4952 | AArch64InstrInfo::getOutliningCandidateInfo( |
4953 | std::vector<outliner::Candidate> &RepeatedSequenceLocs) const { |
4954 | outliner::Candidate &FirstCand = RepeatedSequenceLocs[0]; |
4955 | unsigned SequenceSize = |
4956 | std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0, |
4957 | [this](unsigned Sum, const MachineInstr &MI) { |
4958 | return Sum + getInstSizeInBytes(MI); |
4959 | }); |
4960 | |
4961 | // Properties about candidate MBBs that hold for all of them. |
4962 | unsigned FlagsSetInAll = 0xF; |
4963 | |
4964 | // Intersect the flags of every candidate into FlagsSetInAll; liveness
4965 | const TargetRegisterInfo &TRI = getRegisterInfo(); |
4966 | std::for_each(RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(), |
4967 | [&FlagsSetInAll](outliner::Candidate &C) { |
4968 | FlagsSetInAll &= C.Flags; |
4969 | }); |
4970 | |
4971 | // According to the AArch64 Procedure Call Standard, the following are |
4972 | // undefined on entry/exit from a function call: |
4973 | // |
4974 | // * Registers x16, x17, (and thus w16, w17) |
4975 | // * Condition codes (and thus the NZCV register) |
4976 | // |
4977 | // Because of this, we can't outline any sequence of instructions where
4978 | // one of these registers is live into/across it. Thus, we need to delete
4979 | // those candidates.
4982 | auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) { |
4983 | // If the unsafe registers in this block are all dead, then we don't need |
4984 | // to compute liveness here. |
4985 | if (C.Flags & UnsafeRegsDead) |
4986 | return false; |
4987 | C.initLRU(TRI); |
4988 | LiveRegUnits LRU = C.LRU; |
4989 | return (!LRU.available(AArch64::W16) || !LRU.available(AArch64::W17) || |
4990 | !LRU.available(AArch64::NZCV)); |
4991 | }; |
4992 | |
4993 | // Are there any candidates where those registers are live? |
4994 | if (!(FlagsSetInAll & UnsafeRegsDead)) { |
4995 | // Erase every candidate that violates the restrictions above. (It could be |
4996 | // true that we have viable candidates, so it's not worth bailing out in |
4997 | // the case that, say, 1 out of 20 candidates violates the restrictions.)
4998 | RepeatedSequenceLocs.erase(std::remove_if(RepeatedSequenceLocs.begin(), |
4999 | RepeatedSequenceLocs.end(), |
5000 | CantGuaranteeValueAcrossCall), |
5001 | RepeatedSequenceLocs.end()); |
5002 | |
5003 | // If the sequence doesn't have enough candidates left, then we're done. |
5004 | if (RepeatedSequenceLocs.size() < 2) |
5005 | return outliner::OutlinedFunction(); |
5006 | } |
5007 | |
5008 | // At this point, we have only "safe" candidates to outline. Figure out |
5009 | // frame + call instruction information. |
5010 | |
5011 | unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode(); |
5012 | |
5013 | // Helper lambda which sets call information for every candidate. |
5014 | auto SetCandidateCallInfo = |
5015 | [&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) { |
5016 | for (outliner::Candidate &C : RepeatedSequenceLocs) |
5017 | C.setCallInfo(CallID, NumBytesForCall); |
5018 | }; |
5019 | |
5020 | unsigned FrameID = MachineOutlinerDefault; |
5021 | unsigned NumBytesToCreateFrame = 4; |
5022 | |
5023 | bool HasBTI = any_of(RepeatedSequenceLocs, [](outliner::Candidate &C) { |
5024 | return C.getMF()->getFunction().hasFnAttribute("branch-target-enforcement"); |
5025 | }); |
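     | // [Editor's note, rationale as understood from the guard further below]
     | // With branch-target-enforcement, rewriting a trailing BLR into a thunk's
     | // tail branch would turn it into an indirect BR, whose destination would
     | // then need a "BTI j" landing pad we cannot guarantee; hence the
     | // (LastInstrOpcode == AArch64::BLR && !HasBTI) condition on the thunk case.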
5026 | |
5027 | // Returns true if an instruction is safe to fix up, false otherwise.
5028 | auto IsSafeToFixup = [this, &TRI](MachineInstr &MI) { |
5029 | if (MI.isCall()) |
5030 | return true; |
5031 | |
5032 | if (!MI.modifiesRegister(AArch64::SP, &TRI) && |
5033 | !MI.readsRegister(AArch64::SP, &TRI)) |
5034 | return true; |
5035 | |
5036 | // Any modification of SP will break our code to save/restore LR. |
5037 | // FIXME: We could handle some instructions which add a constant |
5038 | // offset to SP, with a bit more work. |
5039 | if (MI.modifiesRegister(AArch64::SP, &TRI)) |
5040 | return false; |
5041 | |
5042 | // At this point, we have a stack instruction that we might need to |
5043 | // fix up. We'll handle it if it's a load or store. |
5044 | if (MI.mayLoadOrStore()) { |
5045 | const MachineOperand *Base; // Filled with the base operand of MI. |
5046 | int64_t Offset; // Filled with the offset of MI. |
5047 | |
5048 | // Does it allow us to offset the base operand and is the base the |
5049 | // register SP? |
5050 | if (!getMemOperandWithOffset(MI, Base, Offset, &TRI) || !Base->isReg() || |
5051 | Base->getReg() != AArch64::SP) |
5052 | return false; |
5053 | |
5054 | // Find the minimum/maximum offset for this instruction and check |
5055 | // if fixing it up would be in range. |
5056 | int64_t MinOffset, |
5057 | MaxOffset; // Unscaled offsets for the instruction. |
5058 | unsigned Scale; // The scale to multiply the offsets by. |
5059 | unsigned DummyWidth; |
5060 | getMemOpInfo(MI.getOpcode(), Scale, DummyWidth, MinOffset, MaxOffset); |
5061 | |
5062 | Offset += 16; // Update the offset to what it would be if we outlined. |
5063 | if (Offset < MinOffset * Scale || Offset > MaxOffset * Scale) |
5064 | return false; |
5065 | |
5066 | // It's in range, so we can outline it. |
5067 | return true; |
5068 | } |
5069 | |
5070 | // FIXME: Add handling for instructions like "add x0, sp, #8". |
5071 | |
5072 | // We can't fix it up, so don't outline it. |
5073 | return false; |
5074 | }; |
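     | // [Editor's worked example, assuming STRXui's Scale = 8 and unscaled
     | // offset range [0, 4095]] For
     | //   str x0, [sp, #8]
     | // outlining spills LR 16 bytes below SP, so the access must become
     | //   str x0, [sp, #24]
     | // 24 lies within [0 * 8, 4095 * 8], so IsSafeToFixup returns true; an
     | // offset that the +16 adjustment pushes out of the scaled range rejects
     | // the instruction instead.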
5075 | |
5076 | // True if it's possible to fix up each stack instruction in this sequence. |
5077 | // Important for frames/call variants that modify the stack. |
5078 | bool AllStackInstrsSafe = std::all_of( |
5079 | FirstCand.front(), std::next(FirstCand.back()), IsSafeToFixup); |
5080 | |
5081 | // If the last instruction in any candidate is a terminator, then we should |
5082 | // tail call all of the candidates. |
5083 | if (RepeatedSequenceLocs[0].back()->isTerminator()) { |
5084 | FrameID = MachineOutlinerTailCall; |
5085 | NumBytesToCreateFrame = 0; |
5086 | SetCandidateCallInfo(MachineOutlinerTailCall, 4); |
5087 | } |
5088 | |
5089 | else if (LastInstrOpcode == AArch64::BL || |
5090 | (LastInstrOpcode == AArch64::BLR && !HasBTI)) { |
5091 | // FIXME: Do we need to check if the code after this uses the value of LR? |
5092 | FrameID = MachineOutlinerThunk; |
5093 | NumBytesToCreateFrame = 0; |
5094 | SetCandidateCallInfo(MachineOutlinerThunk, 4); |
5095 | } |
5096 | |
5097 | else { |
5098 | // We need to decide how to emit calls + frames. We can always emit the same |
5099 | // frame if we don't need to save to the stack. If we have to save to the |
5100 | // stack, then we need a different frame. |
5101 | unsigned NumBytesNoStackCalls = 0; |
5102 | std::vector<outliner::Candidate> CandidatesWithoutStackFixups; |
5103 | |
5104 | for (outliner::Candidate &C : RepeatedSequenceLocs) { |
5105 | C.initLRU(TRI); |
5106 | |
5107 | // Is LR available? If so, we don't need a save. |
5108 | if (C.LRU.available(AArch64::LR)) { |
5109 | NumBytesNoStackCalls += 4; |
5110 | C.setCallInfo(MachineOutlinerNoLRSave, 4); |
5111 | CandidatesWithoutStackFixups.push_back(C); |
5112 | } |
5113 | |
5114 | // Is an unused register available? If so, we won't modify the stack, so |
5115 | // we can outline with the same frame type as those that don't save LR. |
5116 | else if (findRegisterToSaveLRTo(C)) { |
5117 | NumBytesNoStackCalls += 12; |
5118 | C.setCallInfo(MachineOutlinerRegSave, 12); |
5119 | CandidatesWithoutStackFixups.push_back(C); |
5120 | } |
5121 | |
5122 | // Is SP used in the sequence at all? If not, we don't have to modify |
5123 | // the stack, so we are guaranteed to get the same frame. |
5124 | else if (C.UsedInSequence.available(AArch64::SP)) { |
5125 | NumBytesNoStackCalls += 12; |
5126 | C.setCallInfo(MachineOutlinerDefault, 12); |
5127 | CandidatesWithoutStackFixups.push_back(C); |
5128 | } |
5129 | |
5130 | // If we outline this, we need to modify the stack. Pretend we don't |
5131 | // outline this by saving all of its bytes. |
5132 | else { |
5133 | NumBytesNoStackCalls += SequenceSize; |
5134 | } |
5135 | } |
5136 | |
5137 | // If there are no places where we have to save LR, then note that we |
5138 | // don't have to update the stack. Otherwise, give every candidate the |
5139 | // default call type, as long as it's safe to do so. |
5140 | if (!AllStackInstrsSafe || |
5141 | NumBytesNoStackCalls <= RepeatedSequenceLocs.size() * 12) { |
5142 | RepeatedSequenceLocs = CandidatesWithoutStackFixups; |
5143 | FrameID = MachineOutlinerNoLRSave; |
5144 | } else { |
5145 | SetCandidateCallInfo(MachineOutlinerDefault, 12); |
5146 | } |
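     | // [Editor's arithmetic sketch] With, say, three candidates, the
     | // all-default cost is 3 * 12 = 36 bytes. NumBytesNoStackCalls charges 4
     | // bytes per LR-free site, 12 per reg-save or SP-free site, and the full
     | // SequenceSize for sites that would need stack fixups; whenever that
     | // total is at most 36 (or fixups are unsafe), the stack-neutral variants
     | // win above.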
5147 | |
5148 | // If we dropped all of the candidates, bail out here. |
5149 | if (RepeatedSequenceLocs.size() < 2) { |
5150 | RepeatedSequenceLocs.clear(); |
5151 | return outliner::OutlinedFunction(); |
5152 | } |
5153 | } |
5154 | |
5155 | // Does every candidate's MBB contain a call? If so, then we might have a call |
5156 | // in the range. |
5157 | if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) { |
5158 | // Check if the range contains a call. These require a save + restore of the |
5159 | // link register. |
5160 | bool ModStackToSaveLR = false; |
5161 | if (std::any_of(FirstCand.front(), FirstCand.back(), |
5162 | [](const MachineInstr &MI) { return MI.isCall(); })) |
5163 | ModStackToSaveLR = true; |
5164 | |
5165 | // Handle the last instruction separately. If this is a tail call, then the |
5166 | // last instruction is a call. We don't want to save + restore in this case. |
5167 | // However, it is possible that the last instruction is a call without
5168 | // it being valid to tail call this sequence. We should consider this as |
5169 | // well. |
5170 | else if (FrameID != MachineOutlinerThunk && |
5171 | FrameID != MachineOutlinerTailCall && FirstCand.back()->isCall()) |
5172 | ModStackToSaveLR = true; |
5173 | |
5174 | if (ModStackToSaveLR) { |
5175 | // We can't fix up the stack. Bail out. |
5176 | if (!AllStackInstrsSafe) { |
5177 | RepeatedSequenceLocs.clear(); |
5178 | return outliner::OutlinedFunction(); |
5179 | } |
5180 | |
5181 | // Save + restore LR. |
5182 | NumBytesToCreateFrame += 8; |
5183 | } |
5184 | } |
5185 | |
5186 | return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, |
5187 | NumBytesToCreateFrame, FrameID); |
5188 | } |
5189 | |
5190 | bool AArch64InstrInfo::isFunctionSafeToOutlineFrom( |
5191 | MachineFunction &MF, bool OutlineFromLinkOnceODRs) const { |
5192 | const Function &F = MF.getFunction(); |
5193 | |
5194 | // Can F be deduplicated by the linker? If it can, don't outline from it. |
5195 | if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage()) |
5196 | return false; |
5197 | |
5198 | // Don't outline from functions with section markings; the program could |
5199 | // expect that all the code is in the named section. |
5200 | // FIXME: Allow outlining from multiple functions with the same section |
5201 | // marking. |
5202 | if (F.hasSection()) |
5203 | return false; |
5204 | |
5205 | // Outlining from functions with redzones is unsafe since the outliner may |
5206 | // modify the stack. Check if hasRedZone is true or unknown; if yes, don't |
5207 | // outline from it. |
5208 | AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); |
5209 | if (!AFI || AFI->hasRedZone().getValueOr(true)) |
5210 | return false; |
5211 | |
5212 | // It's safe to outline from MF. |
5213 | return true; |
5214 | } |
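     | // [Editor's note, illustrative hazard] A leaf function with a red zone
     | // may address locals below SP without adjusting it, e.g.
     | //   str x0, [sp, #-8]
     | // The outliner's own LR spill (str x30, [sp, #-16]!) would clobber that
     | // slot, which is why hasRedZone() being true or unknown disqualifies the
     | // whole function above.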
5215 | |
5216 | bool AArch64InstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, |
5217 | unsigned &Flags) const { |
5218 | // Check if LR is available through all of the MBB. If it's not, then set |
5219 | // a flag. |
5220 | assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
5221 | "Suitable Machine Function for outlining must track liveness");
5222 | LiveRegUnits LRU(getRegisterInfo()); |
5223 | |
5224 | std::for_each(MBB.rbegin(), MBB.rend(), |
5225 | [&LRU](MachineInstr &MI) { LRU.accumulate(MI); }); |
5226 | |
5227 | // Check if each of the unsafe registers is available...
5228 | bool W16AvailableInBlock = LRU.available(AArch64::W16); |
5229 | bool W17AvailableInBlock = LRU.available(AArch64::W17); |
5230 | bool NZCVAvailableInBlock = LRU.available(AArch64::NZCV); |
5231 | |
5232 | // If all of these are dead (and not live out), we know we don't have to check |
5233 | // them later. |
5234 | if (W16AvailableInBlock && W17AvailableInBlock && NZCVAvailableInBlock) |
5235 | Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead; |
5236 | |
5237 | // Now, add the live outs to the set. |
5238 | LRU.addLiveOuts(MBB); |
5239 | |
5240 | // If any of these registers is available in the MBB, but also a live out of |
5241 | // the block, then we know outlining is unsafe. |
5242 | if (W16AvailableInBlock && !LRU.available(AArch64::W16)) |
5243 | return false; |
5244 | if (W17AvailableInBlock && !LRU.available(AArch64::W17)) |
5245 | return false; |
5246 | if (NZCVAvailableInBlock && !LRU.available(AArch64::NZCV)) |
5247 | return false; |
5248 | |
5249 | // Check if there's a call inside this MachineBasicBlock. If there is, then |
5250 | // set a flag. |
5251 | if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); })) |
5252 | Flags |= MachineOutlinerMBBFlags::HasCalls; |
5253 | |
5254 | MachineFunction *MF = MBB.getParent(); |
5255 | |
5256 | // In the event that we outline, we may have to save LR. If there is an |
5257 | // available register in the MBB, then we'll always save LR there. Check if |
5258 | // this is true. |
5259 | bool CanSaveLR = false; |
5260 | const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>( |
5261 | MF->getSubtarget().getRegisterInfo()); |
5262 | |
5263 | // Check if there is an available register across the sequence that we can |
5264 | // use. |
5265 | for (unsigned Reg : AArch64::GPR64RegClass) { |
5266 | if (!ARI->isReservedReg(*MF, Reg) && Reg != AArch64::LR && |
5267 | Reg != AArch64::X16 && Reg != AArch64::X17 && LRU.available(Reg)) { |
5268 | CanSaveLR = true; |
5269 | break; |
5270 | } |
5271 | } |
5272 | |
5273 | // Check if we have a register we can save LR to, and if LR was used |
5274 | // somewhere. If both of those things are true, then we need to evaluate the |
5275 | // safety of outlining stack instructions later. |
5276 | if (!CanSaveLR && !LRU.available(AArch64::LR)) |
5277 | Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere; |
5278 | |
5279 | return true; |
5280 | } |
5281 | |
5282 | outliner::InstrType |
5283 | AArch64InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT, |
5284 | unsigned Flags) const { |
5285 | MachineInstr &MI = *MIT; |
5286 | MachineBasicBlock *MBB = MI.getParent(); |
5287 | MachineFunction *MF = MBB->getParent(); |
5288 | AArch64FunctionInfo *FuncInfo = MF->getInfo<AArch64FunctionInfo>(); |
5289 | |
5290 | // Don't outline LOHs. |
5291 | if (FuncInfo->getLOHRelated().count(&MI)) |
5292 | return outliner::InstrType::Illegal; |
5293 | |
5294 | // Don't allow debug values to impact outlining type. |
5295 | if (MI.isDebugInstr() || MI.isIndirectDebugValue()) |
5296 | return outliner::InstrType::Invisible; |
5297 | |
5298 | // At this point, KILL instructions don't really tell us much so we can go |
5299 | // ahead and skip over them. |
5300 | if (MI.isKill()) |
5301 | return outliner::InstrType::Invisible; |
5302 | |
5303 | // Is this a terminator for a basic block? |
5304 | if (MI.isTerminator()) { |
5305 | |
5306 | // Is this the end of a function? |
5307 | if (MI.getParent()->succ_empty()) |
5308 | return outliner::InstrType::Legal; |
5309 | |
5310 | // It's not, so don't outline it. |
5311 | return outliner::InstrType::Illegal; |
5312 | } |
5313 | |
5314 | // Make sure none of the operands are un-outlinable. |
5315 | for (const MachineOperand &MOP : MI.operands()) { |
5316 | if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() || |
5317 | MOP.isTargetIndex()) |
5318 | return outliner::InstrType::Illegal; |
5319 | |
5320 | // If it uses LR or W30 explicitly, then don't touch it. |
5321 | if (MOP.isReg() && !MOP.isImplicit() && |
5322 | (MOP.getReg() == AArch64::LR || MOP.getReg() == AArch64::W30)) |
5323 | return outliner::InstrType::Illegal; |
5324 | } |
5325 | |
5326 | // Special cases for instructions that can always be outlined, but will
5327 | // fail the later tests. For example, ADRPs are PC-relative and use LR, but
5328 | // can always be outlined because they don't require a *specific* value to be in LR.
5329 | if (MI.getOpcode() == AArch64::ADRP) |
5330 | return outliner::InstrType::Legal; |
5331 | |
5332 | // If MI is a call we might be able to outline it. We don't want to outline |
5333 | // any calls that rely on the position of items on the stack. When we outline |
5334 | // something containing a call, we have to emit a save and restore of LR in |
5335 | // the outlined function. Currently, this always happens by saving LR to the |
5336 | // stack. Thus, if we outline, say, half the parameters for a function call |
5337 | // plus the call, then we'll break the callee's expectations for the layout |
5338 | // of the stack. |
5339 | // |
5340 | // FIXME: Allow calls to functions which construct a stack frame, as long |
5341 | // as they don't access arguments on the stack. |
5342 | // FIXME: Figure out some way to analyze functions defined in other modules. |
5343 | // We should be able to compute the memory usage based on the IR calling |
5344 | // convention, even if we can't see the definition. |
5345 | if (MI.isCall()) { |
5346 | // Get the function associated with the call. Look at each operand and find |
5347 | // the one that represents the callee and get its name. |
5348 | const Function *Callee = nullptr; |
5349 | for (const MachineOperand &MOP : MI.operands()) { |
5350 | if (MOP.isGlobal()) { |
5351 | Callee = dyn_cast<Function>(MOP.getGlobal()); |
5352 | break; |
5353 | } |
5354 | } |
5355 | |
5356 | // Never outline calls to mcount. There isn't any rule that would require |
5357 | // this, but the Linux kernel's "ftrace" feature depends on it. |
5358 | if (Callee && Callee->getName() == "\01_mcount") |
5359 | return outliner::InstrType::Illegal; |
5360 | |
5361 | // If we don't know anything about the callee, assume it depends on the |
5362 | // stack layout of the caller. In that case, it's only legal to outline |
5363 | // as a tail-call. Whitelist the call instructions we know about so we |
5364 | // don't get unexpected results with call pseudo-instructions. |
5365 | auto UnknownCallOutlineType = outliner::InstrType::Illegal; |
5366 | if (MI.getOpcode() == AArch64::BLR || MI.getOpcode() == AArch64::BL) |
5367 | UnknownCallOutlineType = outliner::InstrType::LegalTerminator; |
5368 | |
5369 | if (!Callee) |
5370 | return UnknownCallOutlineType; |
5371 | |
5372 | // We have a function we have information about. Check if it's something we
5373 | // can safely outline.
5374 | MachineFunction *CalleeMF = MF->getMMI().getMachineFunction(*Callee); |
5375 | |
5376 | // We don't know what's going on with the callee at all. Don't touch it. |
5377 | if (!CalleeMF) |
5378 | return UnknownCallOutlineType; |
5379 | |
5380 | // Check if we know anything about the callee saves on the function. If we |
5381 | // don't, then don't touch it, since that implies that we haven't |
5382 | // computed anything about its stack frame yet. |
5383 | MachineFrameInfo &MFI = CalleeMF->getFrameInfo(); |
5384 | if (!MFI.isCalleeSavedInfoValid() || MFI.getStackSize() > 0 || |
5385 | MFI.getNumObjects() > 0) |
5386 | return UnknownCallOutlineType; |
5387 | |
5388 | // At this point, we can say that CalleeMF ought to not pass anything on the |
5389 | // stack. Therefore, we can outline it. |
5390 | return outliner::InstrType::Legal; |
5391 | } |
5392 | |
5393 | // Don't outline positions. |
5394 | if (MI.isPosition()) |
5395 | return outliner::InstrType::Illegal; |
5396 | |
5397 | // Don't touch the link register or W30. |
5398 | if (MI.readsRegister(AArch64::W30, &getRegisterInfo()) || |
5399 | MI.modifiesRegister(AArch64::W30, &getRegisterInfo())) |
5400 | return outliner::InstrType::Illegal; |
5401 | |
5402 | // Don't outline BTI instructions, because that will prevent the outlining |
5403 | // site from being indirectly callable. |
5404 | if (MI.getOpcode() == AArch64::HINT) { |
5405 | int64_t Imm = MI.getOperand(0).getImm(); |
5406 | if (Imm == 32 || Imm == 34 || Imm == 36 || Imm == 38) |
5407 | return outliner::InstrType::Illegal; |
5408 | } |
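     | // [Editor's note] The guarded immediates are the BTI aliases of HINT:
     | //   HINT #32 = bti, #34 = bti c, #36 = bti j, #38 = bti jc.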
5409 | |
5410 | return outliner::InstrType::Legal; |
5411 | } |
5412 | |
5413 | void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const { |
5414 | for (MachineInstr &MI : MBB) { |
5415 | const MachineOperand *Base; |
5416 | unsigned Width; |
5417 | int64_t Offset; |
5418 | |
5419 | // Is this a load or store with an immediate offset with SP as the base? |
5420 | if (!MI.mayLoadOrStore() || |
5421 | !getMemOperandWithOffsetWidth(MI, Base, Offset, Width, &RI) || |
5422 | (Base->isReg() && Base->getReg() != AArch64::SP)) |
5423 | continue; |
5424 | |
5425 | // It is, so we have to fix it up. |
5426 | unsigned Scale; |
5427 | int64_t Dummy1, Dummy2; |
5428 | |
5429 | MachineOperand &StackOffsetOperand = getMemOpBaseRegImmOfsOffsetOperand(MI); |
5430 | assert(StackOffsetOperand.isImm() && "Stack offset wasn't immediate!");
5431 | getMemOpInfo(MI.getOpcode(), Scale, Width, Dummy1, Dummy2); |
5432 | assert(Scale != 0 && "Unexpected opcode!");
5433 | |
5434 | // We've pushed the return address to the stack, so add 16 to the offset. |
5435 | // This is safe, since we already checked if it would overflow when we |
5436 | // checked if this instruction was legal to outline. |
5437 | int64_t NewImm = (Offset + 16) / Scale; |
5438 | StackOffsetOperand.setImm(NewImm); |
5439 | } |
5440 | } |
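     | // [Editor's worked example, assuming STRXui with Scale = 8] For
     | //   str x0, [sp, #8]      ; byte Offset = 8, encoded immediate = 1
     | // the loop above computes NewImm = (8 + 16) / 8 = 3, i.e. the access is
     | // rewritten to str x0, [sp, #24], stepping over the 16-byte LR slot.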
5441 | |
5442 | void AArch64InstrInfo::buildOutlinedFrame( |
5443 | MachineBasicBlock &MBB, MachineFunction &MF, |
5444 | const outliner::OutlinedFunction &OF) const { |
5445 | // For thunk outlining, rewrite the last instruction from a call to a |
5446 | // tail-call. |
5447 | if (OF.FrameConstructionID == MachineOutlinerThunk) { |
5448 | MachineInstr *Call = &*--MBB.instr_end(); |
5449 | unsigned TailOpcode; |
5450 | if (Call->getOpcode() == AArch64::BL) { |
5451 | TailOpcode = AArch64::TCRETURNdi; |
5452 | } else { |
5453 | assert(Call->getOpcode() == AArch64::BLR);
5454 | TailOpcode = AArch64::TCRETURNriALL; |
5455 | } |
5456 | MachineInstr *TC = BuildMI(MF, DebugLoc(), get(TailOpcode)) |
5457 | .add(Call->getOperand(0)) |
5458 | .addImm(0); |
5459 | MBB.insert(MBB.end(), TC); |
5460 | Call->eraseFromParent(); |
5461 | } |
5462 | |
5463 | // Is there a call in the outlined range? |
5464 | auto IsNonTailCall = [](MachineInstr &MI) { |
5465 | return MI.isCall() && !MI.isReturn(); |
5466 | }; |
5467 | if (std::any_of(MBB.instr_begin(), MBB.instr_end(), IsNonTailCall)) { |
5468 | // Fix up the instructions in the range, since we're going to modify the |
5469 | // stack. |
5470 | assert(OF.FrameConstructionID != MachineOutlinerDefault &&
5471 | "Can only fix up stack references once");
5472 | fixupPostOutline(MBB); |
5473 | |
5474 | // LR has to be a live in so that we can save it. |
5475 | MBB.addLiveIn(AArch64::LR); |
5476 | |
5477 | MachineBasicBlock::iterator It = MBB.begin(); |
5478 | MachineBasicBlock::iterator Et = MBB.end(); |
5479 | |
5480 | if (OF.FrameConstructionID == MachineOutlinerTailCall || |
5481 | OF.FrameConstructionID == MachineOutlinerThunk) |
5482 | Et = std::prev(MBB.end()); |
5483 | |
5484 | // Insert a save before the outlined region |
5485 | MachineInstr *STRXpre = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre)) |
5486 | .addReg(AArch64::SP, RegState::Define) |
5487 | .addReg(AArch64::LR) |
5488 | .addReg(AArch64::SP) |
5489 | .addImm(-16); |
5490 | It = MBB.insert(It, STRXpre); |
5491 | |
5492 | const TargetSubtargetInfo &STI = MF.getSubtarget(); |
5493 | const MCRegisterInfo *MRI = STI.getRegisterInfo(); |
5494 | unsigned DwarfReg = MRI->getDwarfRegNum(AArch64::LR, true); |
5495 | |
5496 | // Add a CFI saying the stack was moved 16 B down. |
5497 | int64_t StackPosEntry = |
5498 | MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, 16)); |
5499 | BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION)) |
5500 | .addCFIIndex(StackPosEntry) |
5501 | .setMIFlags(MachineInstr::FrameSetup); |
5502 | |
5503 | // Add a CFI saying that the LR that we want to find is now 16 B higher than |
5504 | // before. |
5505 | int64_t LRPosEntry = |
5506 | MF.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg, 16)); |
5507 | BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION)) |
5508 | .addCFIIndex(LRPosEntry) |
5509 | .setMIFlags(MachineInstr::FrameSetup); |
5510 | |
5511 | // Insert a restore before the terminator for the function. |
5512 | MachineInstr *LDRXpost = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost)) |
5513 | .addReg(AArch64::SP, RegState::Define) |
5514 | .addReg(AArch64::LR, RegState::Define) |
5515 | .addReg(AArch64::SP) |
5516 | .addImm(16); |
5517 | Et = MBB.insert(Et, LDRXpost); |
5518 | } |
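     | // [Editor's sketch of the frame emitted above, default case; CFI shown
     | // informally since MCCFIInstruction applies its own sign conventions]
     | //   str x30, [sp, #-16]!      ; STRXpre: spill LR, SP -= 16
     | //   <CFI: stack moved 16 bytes; saved LR is 16 bytes from the CFA>
     | //   <outlined instructions>
     | //   ldr x30, [sp], #16        ; LDRXpost: reload LR, SP += 16
     | //   ret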
5519 | |
5520 | // If this is a tail call outlined function, then there's already a return. |
5521 | if (OF.FrameConstructionID == MachineOutlinerTailCall || |
5522 | OF.FrameConstructionID == MachineOutlinerThunk) |
5523 | return; |
5524 | |
5525 | // It's not a tail call, so we have to insert the return ourselves. |
5526 | MachineInstr *ret = BuildMI(MF, DebugLoc(), get(AArch64::RET)) |
5527 | .addReg(AArch64::LR, RegState::Undef); |
5528 | MBB.insert(MBB.end(), ret); |
5529 | |
5530 | // Did we have to modify the stack by saving the link register? |
5531 | if (OF.FrameConstructionID != MachineOutlinerDefault) |
5532 | return; |
5533 | |
5534 | // We modified the stack. |
5535 | // Walk over the basic block and fix up all the stack accesses. |
5536 | fixupPostOutline(MBB); |
5537 | } |
5538 | |
5539 | MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall( |
5540 | Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, |
5541 | MachineFunction &MF, const outliner::Candidate &C) const { |
5542 | |
5543 | // Are we tail calling? |
5544 | if (C.CallConstructionID == MachineOutlinerTailCall) { |
5545 | // If yes, then we can just branch to the label. |
5546 | It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::TCRETURNdi)) |
5547 | .addGlobalAddress(M.getNamedValue(MF.getName())) |
5548 | .addImm(0)); |
5549 | return It; |
5550 | } |
5551 | |
5552 | // Are we saving the link register? |
5553 | if (C.CallConstructionID == MachineOutlinerNoLRSave || |
5554 | C.CallConstructionID == MachineOutlinerThunk) { |
5555 | // No, so just insert the call. |
5556 | It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL)) |
5557 | .addGlobalAddress(M.getNamedValue(MF.getName()))); |
5558 | return It; |
5559 | } |
5560 | |
5561 | // We want to return the spot where we inserted the call. |
5562 | MachineBasicBlock::iterator CallPt; |
5563 | |
5564 | // Instructions for saving and restoring LR around the call instruction we're |
5565 | // going to insert. |
5566 | MachineInstr *Save; |
5567 | MachineInstr *Restore; |
5568 | // Can we save to a register? |
5569 | if (C.CallConstructionID == MachineOutlinerRegSave) { |
5570 | // FIXME: This logic should be sunk into a target-specific interface so that |
5571 | // we don't have to recompute the register. |
5572 | unsigned Reg = findRegisterToSaveLRTo(C); |
5573 | assert(Reg != 0 && "No callee-saved register available?");
5574 | |
5575 | // Save and restore LR from that register. |
5576 | Save = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), Reg) |
5577 | .addReg(AArch64::XZR) |
5578 | .addReg(AArch64::LR) |
5579 | .addImm(0); |
5580 | Restore = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), AArch64::LR) |
5581 | .addReg(AArch64::XZR) |
5582 | .addReg(Reg) |
5583 | .addImm(0); |
5584 | } else { |
5585 | // We have the default case. Save and restore from SP. |
5586 | Save = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre)) |
5587 | .addReg(AArch64::SP, RegState::Define) |
5588 | .addReg(AArch64::LR) |
5589 | .addReg(AArch64::SP) |
5590 | .addImm(-16); |
5591 | Restore = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost)) |
5592 | .addReg(AArch64::SP, RegState::Define) |
5593 | .addReg(AArch64::LR, RegState::Define) |
5594 | .addReg(AArch64::SP) |
5595 | .addImm(16); |
5596 | } |
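     | // [Editor's note] ORRXrs dst, XZR, src, #0 is the canonical encoding of
     | // "mov dst, src", so the RegSave call site effectively expands to
     | //   mov x<reg>, x30
     | //   bl  OUTLINED_FUNCTION_N
     | //   mov x30, x<reg>
     | // while the default case brackets the BL with the same STRXpre/LDRXpost
     | // spill pair used in buildOutlinedFrame().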
5597 | |
5598 | It = MBB.insert(It, Save); |
5599 | It++; |
5600 | |
5601 | // Insert the call. |
5602 | It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL)) |
5603 | .addGlobalAddress(M.getNamedValue(MF.getName()))); |
5604 | CallPt = It; |
5605 | It++; |
5606 | |
5607 | It = MBB.insert(It, Restore); |
5608 | return CallPt; |
5609 | } |
5610 | |
5611 | bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault( |
5612 | MachineFunction &MF) const { |
5613 | return MF.getFunction().hasMinSize(); |
5614 | } |
5615 | |
5616 | bool AArch64InstrInfo::isCopyInstrImpl( |
5617 | const MachineInstr &MI, const MachineOperand *&Source, |
5618 | const MachineOperand *&Destination) const { |
5619 | |
5620 | // AArch64::ORRWrs and AArch64::ORRXrs with WZR/XZR reg |
5621 | // and zero immediate operands used as an alias for mov instruction. |
5622 | if (MI.getOpcode() == AArch64::ORRWrs && |
5623 | MI.getOperand(1).getReg() == AArch64::WZR && |
5624 | MI.getOperand(3).getImm() == 0x0) { |
5625 | Destination = &MI.getOperand(0); |
5626 | Source = &MI.getOperand(2); |
5627 | return true; |
5628 | } |
5629 | |
5630 | if (MI.getOpcode() == AArch64::ORRXrs && |
5631 | MI.getOperand(1).getReg() == AArch64::XZR && |
5632 | MI.getOperand(3).getImm() == 0x0) { |
5633 | Destination = &MI.getOperand(0); |
5634 | Source = &MI.getOperand(2); |
5635 | return true; |
5636 | } |
5637 | |
5638 | return false; |
5639 | } |
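     | // [Editor's example] Both patterns recognized above are the architectural
     | // mov aliases:
     | //   orr w0, wzr, w1, lsl #0   ; ORRWrs == mov w0, w1
     | //   orr x0, xzr, x1, lsl #0   ; ORRXrs == mov x0, x1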
5640 | |
5641 | #define GET_INSTRINFO_HELPERS |
5642 | #include "AArch64GenInstrInfo.inc" |