1 //===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the AArch64 implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64InstrInfo.h"
14 #include "AArch64MachineFunctionInfo.h"
15 #include "AArch64Subtarget.h"
16 #include "MCTargetDesc/AArch64AddressingModes.h"
17 #include "Utils/AArch64BaseInfo.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/CodeGen/MachineBasicBlock.h"
22 #include "llvm/CodeGen/MachineCombinerPattern.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineMemOperand.h"
28 #include "llvm/CodeGen/MachineOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/StackMaps.h"
31 #include "llvm/CodeGen/TargetRegisterInfo.h"
32 #include "llvm/CodeGen/TargetSubtargetInfo.h"
33 #include "llvm/IR/DebugLoc.h"
34 #include "llvm/IR/GlobalValue.h"
35 #include "llvm/MC/MCInst.h"
36 #include "llvm/MC/MCInstrDesc.h"
37 #include "llvm/Support/Casting.h"
38 #include "llvm/Support/CodeGen.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/Compiler.h"
41 #include "llvm/Support/ErrorHandling.h"
42 #include "llvm/Support/MathExtras.h"
43 #include "llvm/Target/TargetMachine.h"
44 #include "llvm/Target/TargetOptions.h"
45 #include <cassert>
46 #include <cstdint>
47 #include <iterator>
48 #include <utility>
49 
50 using namespace llvm;
51 
52 #define GET_INSTRINFO_CTOR_DTOR
53 #include "AArch64GenInstrInfo.inc"
54 
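// Note: these are debug-only knobs; shrinking the allowed displacement makes
// it easy to exercise the branch relaxation pass in tests (an assumption
// based on their cl::desc strings, not stated elsewhere in this file).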
55 static cl::opt<unsigned> TBZDisplacementBits(
56     "aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
57  cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));
58 
59 static cl::opt<unsigned> CBZDisplacementBits(
60     "aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
61  cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));
62 
63 static cl::opt<unsigned>
64  BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
65  cl::desc("Restrict range of Bcc instructions (DEBUG)"));
66 
67 AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
68     : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP,
69  AArch64::CATCHRET),
70  RI(STI.getTargetTriple()), Subtarget(STI) {}
71 
72 /// GetInstSizeInBytes - Return the number of bytes of code the specified
73 /// instruction may be. This returns the maximum number of bytes.
74 unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
75  const MachineBasicBlock &MBB = *MI.getParent();
76  const MachineFunction *MF = MBB.getParent();
77  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
78 
79  {
80  auto Op = MI.getOpcode();
81     if (Op == AArch64::INLINEASM || Op == AArch64::INLINEASM_BR)
82       return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
83  }
84 
85  // FIXME: We currently only handle pseudoinstructions that don't get expanded
86  // before the assembly printer.
87  unsigned NumBytes = 0;
88  const MCInstrDesc &Desc = MI.getDesc();
89  switch (Desc.getOpcode()) {
90  default:
91  // Anything not explicitly designated otherwise is a normal 4-byte insn.
92  NumBytes = 4;
93  break;
94  case TargetOpcode::DBG_VALUE:
96  case TargetOpcode::IMPLICIT_DEF:
97  case TargetOpcode::KILL:
98  NumBytes = 0;
99  break;
100  case TargetOpcode::STACKMAP:
101  // The upper bound for a stackmap intrinsic is the full length of its shadow
102  NumBytes = StackMapOpers(&MI).getNumPatchBytes();
103  assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
104  break;
105  case TargetOpcode::PATCHPOINT:
106  // The size of the patchpoint intrinsic is the number of bytes requested
107  NumBytes = PatchPointOpers(&MI).getNumPatchBytes();
108  assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
109  break;
110   case AArch64::TLSDESC_CALLSEQ:
111     // This gets lowered to an instruction sequence which takes 16 bytes
112  NumBytes = 16;
113  break;
114  case AArch64::JumpTableDest32:
115  case AArch64::JumpTableDest16:
116  case AArch64::JumpTableDest8:
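    // Each of these pseudos is lowered to a three-instruction sequence
    // (adr + ldr + add), hence 12 bytes.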
117  NumBytes = 12;
118  break;
119  case AArch64::SPACE:
120  NumBytes = MI.getOperand(1).getImm();
121  break;
122  }
123 
124  return NumBytes;
125 }
126 
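// parseCondBranch() encodes the branch condition into Cond as either
//   {CondCode}                   for Bcc,
//   {-1, Opcode, Reg}            for CB(N)Z, or
//   {-1, Opcode, Reg, BitNumber} for TB(N)Z,
// where the leading -1 marks a folded compare-and-branch.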
127 static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
128                             SmallVectorImpl<MachineOperand> &Cond) {
129   // Block ends with fall-through condbranch.
130  switch (LastInst->getOpcode()) {
131  default:
132  llvm_unreachable("Unknown branch instruction?");
133  case AArch64::Bcc:
134  Target = LastInst->getOperand(1).getMBB();
135  Cond.push_back(LastInst->getOperand(0));
136  break;
137  case AArch64::CBZW:
138  case AArch64::CBZX:
139  case AArch64::CBNZW:
140  case AArch64::CBNZX:
141  Target = LastInst->getOperand(1).getMBB();
142     Cond.push_back(MachineOperand::CreateImm(-1));
143     Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
144  Cond.push_back(LastInst->getOperand(0));
145  break;
146  case AArch64::TBZW:
147  case AArch64::TBZX:
148  case AArch64::TBNZW:
149  case AArch64::TBNZX:
150  Target = LastInst->getOperand(2).getMBB();
151     Cond.push_back(MachineOperand::CreateImm(-1));
152     Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
153  Cond.push_back(LastInst->getOperand(0));
154  Cond.push_back(LastInst->getOperand(1));
155  }
156 }
157 
158 static unsigned getBranchDisplacementBits(unsigned Opc) {
159  switch (Opc) {
160  default:
161  llvm_unreachable("unexpected opcode!");
162  case AArch64::B:
163  return 64;
164  case AArch64::TBNZW:
165  case AArch64::TBZW:
166  case AArch64::TBNZX:
167  case AArch64::TBZX:
168  return TBZDisplacementBits;
169  case AArch64::CBNZW:
170  case AArch64::CBZW:
171  case AArch64::CBNZX:
172  case AArch64::CBZX:
173  return CBZDisplacementBits;
174  case AArch64::Bcc:
175  return BCCDisplacementBits;
176  }
177 }
178 
179 bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
180                                              int64_t BrOffset) const {
181  unsigned Bits = getBranchDisplacementBits(BranchOp);
182   assert(Bits >= 3 && "max branch displacement must be enough to jump "
183                       "over conditional branch expansion");
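  // Branch offsets are encoded as multiples of 4-byte instructions, hence the
  // scaling by 4 before the range check.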
184  return isIntN(Bits, BrOffset / 4);
185 }
186 
187 MachineBasicBlock *AArch64InstrInfo::getBranchDestBlock(
188     const MachineInstr &MI) const {
189   switch (MI.getOpcode()) {
190  default:
191  llvm_unreachable("unexpected opcode!");
192  case AArch64::B:
193  return MI.getOperand(0).getMBB();
194  case AArch64::TBZW:
195  case AArch64::TBNZW:
196  case AArch64::TBZX:
197  case AArch64::TBNZX:
198  return MI.getOperand(2).getMBB();
199  case AArch64::CBZW:
200  case AArch64::CBNZW:
201  case AArch64::CBZX:
202  case AArch64::CBNZX:
203  case AArch64::Bcc:
204  return MI.getOperand(1).getMBB();
205  }
206 }
207 
208 // Branch analysis.
209 bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
210                                      MachineBasicBlock *&TBB,
211                                      MachineBasicBlock *&FBB,
212                                      SmallVectorImpl<MachineOperand> &Cond,
213                                      bool AllowModify) const {
214   // If the block has no terminators, it just falls into the block after it.
215   MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
216   if (I == MBB.end())
217  return false;
218 
219  if (!isUnpredicatedTerminator(*I))
220  return false;
221 
222  // Get the last instruction in the block.
223  MachineInstr *LastInst = &*I;
224 
225  // If there is only one terminator instruction, process it.
226  unsigned LastOpc = LastInst->getOpcode();
227  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
228  if (isUncondBranchOpcode(LastOpc)) {
229  TBB = LastInst->getOperand(0).getMBB();
230  return false;
231  }
232  if (isCondBranchOpcode(LastOpc)) {
233  // Block ends with fall-through condbranch.
234  parseCondBranch(LastInst, TBB, Cond);
235  return false;
236  }
237  return true; // Can't handle indirect branch.
238  }
239 
240  // Get the instruction before it if it is a terminator.
241  MachineInstr *SecondLastInst = &*I;
242  unsigned SecondLastOpc = SecondLastInst->getOpcode();
243 
244  // If AllowModify is true and the block ends with two or more unconditional
245  // branches, delete all but the first unconditional branch.
246  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
247  while (isUncondBranchOpcode(SecondLastOpc)) {
248  LastInst->eraseFromParent();
249  LastInst = SecondLastInst;
250  LastOpc = LastInst->getOpcode();
251  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
252         // Return now; the only terminator is an unconditional branch.
253  TBB = LastInst->getOperand(0).getMBB();
254  return false;
255  } else {
256  SecondLastInst = &*I;
257  SecondLastOpc = SecondLastInst->getOpcode();
258  }
259  }
260  }
261 
262  // If there are three terminators, we don't know what sort of block this is.
263  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
264  return true;
265 
266  // If the block ends with a B and a Bcc, handle it.
267  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
268  parseCondBranch(SecondLastInst, TBB, Cond);
269  FBB = LastInst->getOperand(0).getMBB();
270  return false;
271  }
272 
273  // If the block ends with two unconditional branches, handle it. The second
274  // one is not executed, so remove it.
275  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
276  TBB = SecondLastInst->getOperand(0).getMBB();
277  I = LastInst;
278  if (AllowModify)
279  I->eraseFromParent();
280  return false;
281  }
282 
283  // ...likewise if it ends with an indirect branch followed by an unconditional
284  // branch.
285  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
286  I = LastInst;
287  if (AllowModify)
288  I->eraseFromParent();
289  return true;
290  }
291 
292  // Otherwise, can't handle this.
293  return true;
294 }
295 
296 bool AArch64InstrInfo::reverseBranchCondition(
297     SmallVectorImpl<MachineOperand> &Cond) const {
298  if (Cond[0].getImm() != -1) {
299  // Regular Bcc
300  AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
301  Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
302  } else {
303  // Folded compare-and-branch
304  switch (Cond[1].getImm()) {
305  default:
306  llvm_unreachable("Unknown conditional branch!");
307  case AArch64::CBZW:
308  Cond[1].setImm(AArch64::CBNZW);
309  break;
310  case AArch64::CBNZW:
311  Cond[1].setImm(AArch64::CBZW);
312  break;
313  case AArch64::CBZX:
314  Cond[1].setImm(AArch64::CBNZX);
315  break;
316  case AArch64::CBNZX:
317  Cond[1].setImm(AArch64::CBZX);
318  break;
319  case AArch64::TBZW:
320  Cond[1].setImm(AArch64::TBNZW);
321  break;
322  case AArch64::TBNZW:
323  Cond[1].setImm(AArch64::TBZW);
324  break;
325  case AArch64::TBZX:
326  Cond[1].setImm(AArch64::TBNZX);
327  break;
328  case AArch64::TBNZX:
329  Cond[1].setImm(AArch64::TBZX);
330  break;
331  }
332  }
333 
334  return false;
335 }
336 
337 unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
338                                         int *BytesRemoved) const {
339   MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
340   if (I == MBB.end())
341  return 0;
342 
343  if (!isUncondBranchOpcode(I->getOpcode()) &&
344  !isCondBranchOpcode(I->getOpcode()))
345  return 0;
346 
347  // Remove the branch.
348  I->eraseFromParent();
349 
350  I = MBB.end();
351 
352  if (I == MBB.begin()) {
353  if (BytesRemoved)
354  *BytesRemoved = 4;
355  return 1;
356  }
357  --I;
358  if (!isCondBranchOpcode(I->getOpcode())) {
359  if (BytesRemoved)
360  *BytesRemoved = 4;
361  return 1;
362  }
363 
364  // Remove the branch.
365  I->eraseFromParent();
366  if (BytesRemoved)
367  *BytesRemoved = 8;
368 
369  return 2;
370 }
371 
372 void AArch64InstrInfo::instantiateCondBranch(
373  MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
374  ArrayRef<MachineOperand> Cond) const {
375  if (Cond[0].getImm() != -1) {
376  // Regular Bcc
377  BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
378  } else {
379  // Folded compare-and-branch
380     // Note that we use add() instead of addReg() to keep the flags.
381  const MachineInstrBuilder MIB =
382  BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]);
383  if (Cond.size() > 3)
384  MIB.addImm(Cond[3].getImm());
385  MIB.addMBB(TBB);
386  }
387 }
388 
389 unsigned AArch64InstrInfo::insertBranch(
390     MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
391     ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
392  // Shouldn't be a fall through.
393  assert(TBB && "insertBranch must not be told to insert a fallthrough");
394 
395  if (!FBB) {
396  if (Cond.empty()) // Unconditional branch?
397  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
398  else
399  instantiateCondBranch(MBB, DL, TBB, Cond);
400 
401  if (BytesAdded)
402  *BytesAdded = 4;
403 
404  return 1;
405  }
406 
407  // Two-way conditional branch.
408  instantiateCondBranch(MBB, DL, TBB, Cond);
409  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
410 
411  if (BytesAdded)
412  *BytesAdded = 8;
413 
414  return 2;
415 }
416 
417 // Find the original register that VReg is copied from.
418 static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
419   while (TargetRegisterInfo::isVirtualRegister(VReg)) {
420     const MachineInstr *DefMI = MRI.getVRegDef(VReg);
421  if (!DefMI->isFullCopy())
422  return VReg;
423  VReg = DefMI->getOperand(1).getReg();
424  }
425  return VReg;
426 }
427 
428 // Determine if VReg is defined by an instruction that can be folded into a
429 // csel instruction. If so, return the folded opcode, and the replacement
430 // register.
431 static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
432  unsigned *NewVReg = nullptr) {
433  VReg = removeCopies(MRI, VReg);
434   if (!TargetRegisterInfo::isVirtualRegister(VReg))
435     return 0;
436 
437  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
438  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
439  unsigned Opc = 0;
440  unsigned SrcOpNum = 0;
441  switch (DefMI->getOpcode()) {
442  case AArch64::ADDSXri:
443  case AArch64::ADDSWri:
444  // if NZCV is used, do not fold.
445  if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
446  return 0;
447  // fall-through to ADDXri and ADDWri.
448     LLVM_FALLTHROUGH;
449   case AArch64::ADDXri:
450  case AArch64::ADDWri:
451  // add x, 1 -> csinc.
452  if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
453  DefMI->getOperand(3).getImm() != 0)
454  return 0;
455  SrcOpNum = 1;
456  Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
457  break;
458 
459  case AArch64::ORNXrr:
460  case AArch64::ORNWrr: {
461  // not x -> csinv, represented as orn dst, xzr, src.
462  unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
463  if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
464  return 0;
465  SrcOpNum = 2;
466  Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
467  break;
468  }
469 
470  case AArch64::SUBSXrr:
471  case AArch64::SUBSWrr:
472  // if NZCV is used, do not fold.
473  if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
474  return 0;
475  // fall-through to SUBXrr and SUBWrr.
476     LLVM_FALLTHROUGH;
477   case AArch64::SUBXrr:
478  case AArch64::SUBWrr: {
479  // neg x -> csneg, represented as sub dst, xzr, src.
480  unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
481  if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
482  return 0;
483  SrcOpNum = 2;
484  Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
485  break;
486  }
487  default:
488  return 0;
489  }
490  assert(Opc && SrcOpNum && "Missing parameters");
491 
492  if (NewVReg)
493  *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
494  return Opc;
495 }
496 
497 bool AArch64InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
498                                        ArrayRef<MachineOperand> Cond,
499                                        unsigned TrueReg, unsigned FalseReg,
500  int &CondCycles, int &TrueCycles,
501  int &FalseCycles) const {
502  // Check register classes.
503  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
504  const TargetRegisterClass *RC =
505  RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
506  if (!RC)
507  return false;
508 
509  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
510  unsigned ExtraCondLat = Cond.size() != 1;
511 
512  // GPRs are handled by csel.
513  // FIXME: Fold in x+1, -x, and ~x when applicable.
514  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
515  AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
516  // Single-cycle csel, csinc, csinv, and csneg.
517  CondCycles = 1 + ExtraCondLat;
518  TrueCycles = FalseCycles = 1;
519  if (canFoldIntoCSel(MRI, TrueReg))
520  TrueCycles = 0;
521  else if (canFoldIntoCSel(MRI, FalseReg))
522  FalseCycles = 0;
523  return true;
524  }
525 
526  // Scalar floating point is handled by fcsel.
527  // FIXME: Form fabs, fmin, and fmax when applicable.
528  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
529  AArch64::FPR32RegClass.hasSubClassEq(RC)) {
530  CondCycles = 5 + ExtraCondLat;
531  TrueCycles = FalseCycles = 2;
532  return true;
533  }
534 
535  // Can't do vectors.
536  return false;
537 }
538 
539 void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
540                                     MachineBasicBlock::iterator I,
541                                     const DebugLoc &DL, unsigned DstReg,
542                                     ArrayRef<MachineOperand> Cond,
543                                     unsigned TrueReg, unsigned FalseReg) const {
544   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
545 
546   // Parse the condition code, see parseCondBranch() above.
547   AArch64CC::CondCode CC;
548   switch (Cond.size()) {
549  default:
550  llvm_unreachable("Unknown condition opcode in Cond");
551  case 1: // b.cc
552  CC = AArch64CC::CondCode(Cond[0].getImm());
553  break;
554  case 3: { // cbz/cbnz
555  // We must insert a compare against 0.
556  bool Is64Bit;
557  switch (Cond[1].getImm()) {
558  default:
559  llvm_unreachable("Unknown branch opcode in Cond");
560  case AArch64::CBZW:
561  Is64Bit = false;
562  CC = AArch64CC::EQ;
563  break;
564  case AArch64::CBZX:
565  Is64Bit = true;
566  CC = AArch64CC::EQ;
567  break;
568  case AArch64::CBNZW:
569  Is64Bit = false;
570  CC = AArch64CC::NE;
571  break;
572  case AArch64::CBNZX:
573  Is64Bit = true;
574  CC = AArch64CC::NE;
575  break;
576  }
577  unsigned SrcReg = Cond[2].getReg();
578  if (Is64Bit) {
579  // cmp reg, #0 is actually subs xzr, reg, #0.
580  MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
581  BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
582  .addReg(SrcReg)
583  .addImm(0)
584  .addImm(0);
585  } else {
586  MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
587  BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
588  .addReg(SrcReg)
589  .addImm(0)
590  .addImm(0);
591  }
592  break;
593  }
594  case 4: { // tbz/tbnz
595  // We must insert a tst instruction.
596  switch (Cond[1].getImm()) {
597  default:
598  llvm_unreachable("Unknown branch opcode in Cond");
599  case AArch64::TBZW:
600  case AArch64::TBZX:
601  CC = AArch64CC::EQ;
602  break;
603  case AArch64::TBNZW:
604  case AArch64::TBNZX:
605  CC = AArch64CC::NE;
606  break;
607  }
608  // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
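    // encodeLogicalImmediate() packs the single-bit mask into the N:immr:imms
    // form that the ANDS immediate operand expects.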
609  if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
610  BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
611  .addReg(Cond[2].getReg())
612  .addImm(
613  AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
614  else
615  BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
616  .addReg(Cond[2].getReg())
617  .addImm(
618  AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
619  break;
620  }
621  }
622 
623  unsigned Opc = 0;
624  const TargetRegisterClass *RC = nullptr;
625  bool TryFold = false;
626  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
627  RC = &AArch64::GPR64RegClass;
628  Opc = AArch64::CSELXr;
629  TryFold = true;
630  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
631  RC = &AArch64::GPR32RegClass;
632  Opc = AArch64::CSELWr;
633  TryFold = true;
634  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
635  RC = &AArch64::FPR64RegClass;
636  Opc = AArch64::FCSELDrrr;
637  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
638  RC = &AArch64::FPR32RegClass;
639  Opc = AArch64::FCSELSrrr;
640  }
641  assert(RC && "Unsupported regclass");
642 
643  // Try folding simple instructions into the csel.
644  if (TryFold) {
645  unsigned NewVReg = 0;
646  unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
647  if (FoldedOpc) {
648       // The folded opcodes csinc, csinv and csneg apply the operation to
649       // FalseReg, so we need to invert the condition.
650       CC = AArch64CC::getInvertedCondCode(CC);
651       TrueReg = FalseReg;
652  } else
653  FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);
654 
655  // Fold the operation. Leave any dead instructions for DCE to clean up.
656  if (FoldedOpc) {
657  FalseReg = NewVReg;
658  Opc = FoldedOpc;
659       // This extends the live range of NewVReg.
660  MRI.clearKillFlags(NewVReg);
661  }
662  }
663 
664   // Pull all virtual registers into the appropriate class.
665  MRI.constrainRegClass(TrueReg, RC);
666  MRI.constrainRegClass(FalseReg, RC);
667 
668  // Insert the csel.
669  BuildMI(MBB, I, DL, get(Opc), DstReg)
670  .addReg(TrueReg)
671  .addReg(FalseReg)
672  .addImm(CC);
673 }
674 
675 /// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
676 static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
677  uint64_t Imm = MI.getOperand(1).getImm();
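  // Discard the bits above BitSize so only the relevant low bits are checked
  // for a valid logical-immediate encoding.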
678  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
679  uint64_t Encoding;
680  return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
681 }
682 
683 // FIXME: this implementation should be micro-architecture dependent, so a
684 // micro-architecture target hook should be introduced here in future.
685 bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
686   if (!Subtarget.hasCustomCheapAsMoveHandling())
687  return MI.isAsCheapAsAMove();
688 
689  const unsigned Opcode = MI.getOpcode();
690 
691  // Firstly, check cases gated by features.
692 
693  if (Subtarget.hasZeroCycleZeroingFP()) {
694  if (Opcode == AArch64::FMOVH0 ||
695  Opcode == AArch64::FMOVS0 ||
696  Opcode == AArch64::FMOVD0)
697  return true;
698  }
699 
700  if (Subtarget.hasZeroCycleZeroingGP()) {
701  if (Opcode == TargetOpcode::COPY &&
702  (MI.getOperand(1).getReg() == AArch64::WZR ||
703  MI.getOperand(1).getReg() == AArch64::XZR))
704  return true;
705  }
706 
707  // Secondly, check cases specific to sub-targets.
708 
709  if (Subtarget.hasExynosCheapAsMoveHandling()) {
710  if (isExynosCheapAsMove(MI))
711  return true;
712 
713  return MI.isAsCheapAsAMove();
714  }
715 
716  // Finally, check generic cases.
717 
718  switch (Opcode) {
719  default:
720  return false;
721 
722  // add/sub on register without shift
723  case AArch64::ADDWri:
724  case AArch64::ADDXri:
725  case AArch64::SUBWri:
726  case AArch64::SUBXri:
727  return (MI.getOperand(3).getImm() == 0);
728 
729  // logical ops on immediate
730  case AArch64::ANDWri:
731  case AArch64::ANDXri:
732  case AArch64::EORWri:
733  case AArch64::EORXri:
734  case AArch64::ORRWri:
735  case AArch64::ORRXri:
736  return true;
737 
738  // logical ops on register without shift
739  case AArch64::ANDWrr:
740  case AArch64::ANDXrr:
741  case AArch64::BICWrr:
742  case AArch64::BICXrr:
743  case AArch64::EONWrr:
744  case AArch64::EONXrr:
745  case AArch64::EORWrr:
746  case AArch64::EORXrr:
747  case AArch64::ORNWrr:
748  case AArch64::ORNXrr:
749  case AArch64::ORRWrr:
750  case AArch64::ORRXrr:
751  return true;
752 
753  // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
754  // ORRXri, it is as cheap as MOV
755  case AArch64::MOVi32imm:
756  return canBeExpandedToORR(MI, 32);
757  case AArch64::MOVi64imm:
758  return canBeExpandedToORR(MI, 64);
759  }
760 
761  llvm_unreachable("Unknown opcode to check as cheap as a move!");
762 }
763 
764 bool AArch64InstrInfo::isFalkorShiftExtFast(const MachineInstr &MI) {
765   switch (MI.getOpcode()) {
766  default:
767  return false;
768 
769  case AArch64::ADDWrs:
770  case AArch64::ADDXrs:
771  case AArch64::ADDSWrs:
772  case AArch64::ADDSXrs: {
773  unsigned Imm = MI.getOperand(3).getImm();
774  unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
775  if (ShiftVal == 0)
776  return true;
777  return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5;
778  }
779 
780  case AArch64::ADDWrx:
781  case AArch64::ADDXrx:
782  case AArch64::ADDXrx64:
783  case AArch64::ADDSWrx:
784  case AArch64::ADDSXrx:
785  case AArch64::ADDSXrx64: {
786  unsigned Imm = MI.getOperand(3).getImm();
787  switch (AArch64_AM::getArithExtendType(Imm)) {
788  default:
789  return false;
790  case AArch64_AM::UXTB:
791  case AArch64_AM::UXTH:
792  case AArch64_AM::UXTW:
793  case AArch64_AM::UXTX:
794  return AArch64_AM::getArithShiftValue(Imm) <= 4;
795  }
796  }
797 
798  case AArch64::SUBWrs:
799  case AArch64::SUBSWrs: {
800  unsigned Imm = MI.getOperand(3).getImm();
801  unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
802  return ShiftVal == 0 ||
803  (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 31);
804  }
805 
806  case AArch64::SUBXrs:
807  case AArch64::SUBSXrs: {
808  unsigned Imm = MI.getOperand(3).getImm();
809  unsigned ShiftVal = AArch64_AM::getShiftValue(Imm);
810  return ShiftVal == 0 ||
811  (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 63);
812  }
813 
814  case AArch64::SUBWrx:
815  case AArch64::SUBXrx:
816  case AArch64::SUBXrx64:
817  case AArch64::SUBSWrx:
818  case AArch64::SUBSXrx:
819  case AArch64::SUBSXrx64: {
820  unsigned Imm = MI.getOperand(3).getImm();
821  switch (AArch64_AM::getArithExtendType(Imm)) {
822  default:
823  return false;
824  case AArch64_AM::UXTB:
825  case AArch64_AM::UXTH:
826  case AArch64_AM::UXTW:
827  case AArch64_AM::UXTX:
828  return AArch64_AM::getArithShiftValue(Imm) == 0;
829  }
830  }
831 
832  case AArch64::LDRBBroW:
833  case AArch64::LDRBBroX:
834  case AArch64::LDRBroW:
835  case AArch64::LDRBroX:
836  case AArch64::LDRDroW:
837  case AArch64::LDRDroX:
838  case AArch64::LDRHHroW:
839  case AArch64::LDRHHroX:
840  case AArch64::LDRHroW:
841  case AArch64::LDRHroX:
842  case AArch64::LDRQroW:
843  case AArch64::LDRQroX:
844  case AArch64::LDRSBWroW:
845  case AArch64::LDRSBWroX:
846  case AArch64::LDRSBXroW:
847  case AArch64::LDRSBXroX:
848  case AArch64::LDRSHWroW:
849  case AArch64::LDRSHWroX:
850  case AArch64::LDRSHXroW:
851  case AArch64::LDRSHXroX:
852  case AArch64::LDRSWroW:
853  case AArch64::LDRSWroX:
854  case AArch64::LDRSroW:
855  case AArch64::LDRSroX:
856  case AArch64::LDRWroW:
857  case AArch64::LDRWroX:
858  case AArch64::LDRXroW:
859  case AArch64::LDRXroX:
860  case AArch64::PRFMroW:
861  case AArch64::PRFMroX:
862  case AArch64::STRBBroW:
863  case AArch64::STRBBroX:
864  case AArch64::STRBroW:
865  case AArch64::STRBroX:
866  case AArch64::STRDroW:
867  case AArch64::STRDroX:
868  case AArch64::STRHHroW:
869  case AArch64::STRHHroX:
870  case AArch64::STRHroW:
871  case AArch64::STRHroX:
872  case AArch64::STRQroW:
873  case AArch64::STRQroX:
874  case AArch64::STRSroW:
875  case AArch64::STRSroX:
876  case AArch64::STRWroW:
877  case AArch64::STRWroX:
878  case AArch64::STRXroW:
879  case AArch64::STRXroX: {
880  unsigned IsSigned = MI.getOperand(3).getImm();
881  return !IsSigned;
882  }
883  }
884 }
885 
886 bool AArch64InstrInfo::isSEHInstruction(const MachineInstr &MI) {
887   unsigned Opc = MI.getOpcode();
888  switch (Opc) {
889  default:
890  return false;
891  case AArch64::SEH_StackAlloc:
892  case AArch64::SEH_SaveFPLR:
893  case AArch64::SEH_SaveFPLR_X:
894  case AArch64::SEH_SaveReg:
895  case AArch64::SEH_SaveReg_X:
896  case AArch64::SEH_SaveRegP:
897  case AArch64::SEH_SaveRegP_X:
898  case AArch64::SEH_SaveFReg:
899  case AArch64::SEH_SaveFReg_X:
900  case AArch64::SEH_SaveFRegP:
901  case AArch64::SEH_SaveFRegP_X:
902  case AArch64::SEH_SetFP:
903  case AArch64::SEH_AddFP:
904  case AArch64::SEH_Nop:
905  case AArch64::SEH_PrologEnd:
906  case AArch64::SEH_EpilogStart:
907  case AArch64::SEH_EpilogEnd:
908  return true;
909  }
910 }
911 
912 bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
913                                              unsigned &SrcReg, unsigned &DstReg,
914  unsigned &SubIdx) const {
915  switch (MI.getOpcode()) {
916  default:
917  return false;
918  case AArch64::SBFMXri: // aka sxtw
919  case AArch64::UBFMXri: // aka uxtw
920  // Check for the 32 -> 64 bit extension case, these instructions can do
921  // much more.
922  if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
923  return false;
924  // This is a signed or unsigned 32 -> 64 bit extension.
925  SrcReg = MI.getOperand(1).getReg();
926  DstReg = MI.getOperand(0).getReg();
927  SubIdx = AArch64::sub_32;
928  return true;
929  }
930 }
931 
932 bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
933     const MachineInstr &MIa, const MachineInstr &MIb, AliasAnalysis *AA) const {
934   const TargetRegisterInfo *TRI = &getRegisterInfo();
935   const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
936  int64_t OffsetA = 0, OffsetB = 0;
937  unsigned WidthA = 0, WidthB = 0;
938 
939  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
940  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
941 
942   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
943       MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
944     return false;
945 
946  // Retrieve the base, offset from the base and width. Width
947  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
948   // the bases are identical, and the offset of a lower memory access +
949  // the width doesn't overlap the offset of a higher memory access,
950  // then the memory accesses are different.
951  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
952  getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
953  if (BaseOpA->isIdenticalTo(*BaseOpB)) {
954  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
955  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
956  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
957  if (LowOffset + LowWidth <= HighOffset)
958  return true;
959  }
960  }
961  return false;
962 }
963 
964 bool AArch64InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
965                                             const MachineBasicBlock *MBB,
966  const MachineFunction &MF) const {
967  if (TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF))
968  return true;
969  switch (MI.getOpcode()) {
970  case AArch64::HINT:
971  // CSDB hints are scheduling barriers.
972  if (MI.getOperand(0).getImm() == 0x14)
973  return true;
974  break;
975  case AArch64::DSB:
976  case AArch64::ISB:
977  // DSB and ISB also are scheduling barriers.
978  return true;
979  default:;
980  }
981  return isSEHInstruction(MI);
982 }
983 
984 /// analyzeCompare - For a comparison instruction, return the source registers
985 /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
986 /// Return true if the comparison instruction can be analyzed.
987 bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
988  unsigned &SrcReg2, int &CmpMask,
989  int &CmpValue) const {
990  // The first operand can be a frame index where we'd normally expect a
991  // register.
992  assert(MI.getNumOperands() >= 2 && "All AArch64 cmps should have 2 operands");
993  if (!MI.getOperand(1).isReg())
994  return false;
995 
996  switch (MI.getOpcode()) {
997  default:
998  break;
999  case AArch64::SUBSWrr:
1000  case AArch64::SUBSWrs:
1001  case AArch64::SUBSWrx:
1002  case AArch64::SUBSXrr:
1003  case AArch64::SUBSXrs:
1004  case AArch64::SUBSXrx:
1005  case AArch64::ADDSWrr:
1006  case AArch64::ADDSWrs:
1007  case AArch64::ADDSWrx:
1008  case AArch64::ADDSXrr:
1009  case AArch64::ADDSXrs:
1010  case AArch64::ADDSXrx:
1011  // Replace SUBSWrr with SUBWrr if NZCV is not used.
1012  SrcReg = MI.getOperand(1).getReg();
1013  SrcReg2 = MI.getOperand(2).getReg();
1014  CmpMask = ~0;
1015  CmpValue = 0;
1016  return true;
1017  case AArch64::SUBSWri:
1018  case AArch64::ADDSWri:
1019  case AArch64::SUBSXri:
1020  case AArch64::ADDSXri:
1021  SrcReg = MI.getOperand(1).getReg();
1022  SrcReg2 = 0;
1023  CmpMask = ~0;
1024  // FIXME: In order to convert CmpValue to 0 or 1
1025  CmpValue = MI.getOperand(2).getImm() != 0;
1026  return true;
1027  case AArch64::ANDSWri:
1028  case AArch64::ANDSXri:
1029     // ANDS does not use the same encoding scheme as the other xxxS
1030  // instructions.
1031  SrcReg = MI.getOperand(1).getReg();
1032  SrcReg2 = 0;
1033  CmpMask = ~0;
1034  // FIXME:The return val type of decodeLogicalImmediate is uint64_t,
1035  // while the type of CmpValue is int. When converting uint64_t to int,
1036  // the high 32 bits of uint64_t will be lost.
1037  // In fact it causes a bug in spec2006-483.xalancbmk
1038  // CmpValue is only used to compare with zero in OptimizeCompareInstr
1039     CmpValue = AArch64_AM::decodeLogicalImmediate(
1040                    MI.getOperand(2).getImm(),
1041  MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0;
1042  return true;
1043  }
1044 
1045  return false;
1046 }
1047 
1048 static bool UpdateOperandRegClass(MachineInstr &Instr) {
1049   MachineBasicBlock *MBB = Instr.getParent();
1050  assert(MBB && "Can't get MachineBasicBlock here");
1051  MachineFunction *MF = MBB->getParent();
1052  assert(MF && "Can't get MachineFunction here");
1053  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1054   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1055   MachineRegisterInfo *MRI = &MF->getRegInfo();
1056 
1057  for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
1058  ++OpIdx) {
1059  MachineOperand &MO = Instr.getOperand(OpIdx);
1060  const TargetRegisterClass *OpRegCstraints =
1061  Instr.getRegClassConstraint(OpIdx, TII, TRI);
1062 
1063  // If there's no constraint, there's nothing to do.
1064  if (!OpRegCstraints)
1065  continue;
1066  // If the operand is a frame index, there's nothing to do here.
1067  // A frame index operand will resolve correctly during PEI.
1068  if (MO.isFI())
1069  continue;
1070 
1071  assert(MO.isReg() &&
1072  "Operand has register constraints without being a register!");
1073 
1074  unsigned Reg = MO.getReg();
1075     if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
1076       if (!OpRegCstraints->contains(Reg))
1077  return false;
1078  } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
1079  !MRI->constrainRegClass(Reg, OpRegCstraints))
1080  return false;
1081  }
1082 
1083  return true;
1084 }
1085 
1086 /// Return the opcode that does not set flags when possible - otherwise
1087 /// return the original opcode. The caller is responsible to do the actual
1088 /// substitution and legality checking.
1089 static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) {
1090  // Don't convert all compare instructions, because for some the zero register
1091  // encoding becomes the sp register.
1092  bool MIDefinesZeroReg = false;
1093  if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
1094  MIDefinesZeroReg = true;
1095 
1096  switch (MI.getOpcode()) {
1097  default:
1098  return MI.getOpcode();
1099  case AArch64::ADDSWrr:
1100  return AArch64::ADDWrr;
1101  case AArch64::ADDSWri:
1102  return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
1103  case AArch64::ADDSWrs:
1104  return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
1105  case AArch64::ADDSWrx:
1106  return AArch64::ADDWrx;
1107  case AArch64::ADDSXrr:
1108  return AArch64::ADDXrr;
1109  case AArch64::ADDSXri:
1110  return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
1111  case AArch64::ADDSXrs:
1112  return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
1113  case AArch64::ADDSXrx:
1114  return AArch64::ADDXrx;
1115  case AArch64::SUBSWrr:
1116  return AArch64::SUBWrr;
1117  case AArch64::SUBSWri:
1118  return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
1119  case AArch64::SUBSWrs:
1120  return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
1121  case AArch64::SUBSWrx:
1122  return AArch64::SUBWrx;
1123  case AArch64::SUBSXrr:
1124  return AArch64::SUBXrr;
1125  case AArch64::SUBSXri:
1126  return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
1127  case AArch64::SUBSXrs:
1128  return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
1129  case AArch64::SUBSXrx:
1130  return AArch64::SUBXrx;
1131  }
1132 }
1133 
1134 enum AccessKind { AK_Write = 0x01, AK_Read = 0x10, AK_All = 0x11 };
1135 
1136 /// True when condition flags are accessed (either by writing or reading)
1137 /// on the instruction trace starting at From and ending at To.
1138 ///
1139 /// Note: If From and To are from different blocks it's assumed CC are accessed
1140 /// on the path.
1141 static bool areCFlagsAccessedBetweenInstrs(
1142     MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
1143     const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
1144  // Early exit if To is at the beginning of the BB.
1145  if (To == To->getParent()->begin())
1146  return true;
1147 
1148  // Check whether the instructions are in the same basic block
1149  // If not, assume the condition flags might get modified somewhere.
1150  if (To->getParent() != From->getParent())
1151  return true;
1152 
1153  // From must be above To.
1154  assert(std::find_if(++To.getReverse(), To->getParent()->rend(),
1155  [From](MachineInstr &MI) {
1156  return MI.getIterator() == From;
1157  }) != To->getParent()->rend());
1158 
1159   // We iterate backward starting at \p To until we hit \p From.
1160  for (--To; To != From; --To) {
1161  const MachineInstr &Instr = *To;
1162 
1163  if (((AccessToCheck & AK_Write) &&
1164  Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
1165  ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
1166  return true;
1167  }
1168  return false;
1169 }
1170 
1171 /// Try to optimize a compare instruction. A compare instruction is an
1172 /// instruction which produces AArch64::NZCV. It can be treated as a true
1173 /// compare instruction
1174 /// when there are no uses of its destination register.
1175 ///
1176 /// The following steps are tried in order:
1177 /// 1. Convert CmpInstr into an unconditional version.
1178 /// 2. Remove CmpInstr if above there is an instruction producing a needed
1179 /// condition code or an instruction which can be converted into such an
1180 /// instruction.
1181 /// Only comparison with zero is supported.
1182 bool AArch64InstrInfo::optimizeCompareInstr(
1183     MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
1184  int CmpValue, const MachineRegisterInfo *MRI) const {
1185  assert(CmpInstr.getParent());
1186  assert(MRI);
1187 
1188  // Replace SUBSWrr with SUBWrr if NZCV is not used.
1189  int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
1190  if (DeadNZCVIdx != -1) {
1191  if (CmpInstr.definesRegister(AArch64::WZR) ||
1192  CmpInstr.definesRegister(AArch64::XZR)) {
1193  CmpInstr.eraseFromParent();
1194  return true;
1195  }
1196  unsigned Opc = CmpInstr.getOpcode();
1197  unsigned NewOpc = convertToNonFlagSettingOpc(CmpInstr);
1198  if (NewOpc == Opc)
1199  return false;
1200  const MCInstrDesc &MCID = get(NewOpc);
1201  CmpInstr.setDesc(MCID);
1202  CmpInstr.RemoveOperand(DeadNZCVIdx);
1203  bool succeeded = UpdateOperandRegClass(CmpInstr);
1204  (void)succeeded;
1205  assert(succeeded && "Some operands reg class are incompatible!");
1206  return true;
1207  }
1208 
1209  // Continue only if we have a "ri" where immediate is zero.
1210  // FIXME:CmpValue has already been converted to 0 or 1 in analyzeCompare
1211  // function.
1212  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
1213  if (CmpValue != 0 || SrcReg2 != 0)
1214  return false;
1215 
1216  // CmpInstr is a Compare instruction if destination register is not used.
1217  if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
1218  return false;
1219 
1220  return substituteCmpToZero(CmpInstr, SrcReg, MRI);
1221 }
1222 
1223 /// Get opcode of S version of Instr.
1224 /// If Instr is S version its opcode is returned.
1225 /// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have S version
1226 /// or we are not interested in it.
1227 static unsigned sForm(MachineInstr &Instr) {
1228  switch (Instr.getOpcode()) {
1229  default:
1230  return AArch64::INSTRUCTION_LIST_END;
1231 
1232  case AArch64::ADDSWrr:
1233  case AArch64::ADDSWri:
1234  case AArch64::ADDSXrr:
1235  case AArch64::ADDSXri:
1236  case AArch64::SUBSWrr:
1237  case AArch64::SUBSWri:
1238  case AArch64::SUBSXrr:
1239  case AArch64::SUBSXri:
1240  return Instr.getOpcode();
1241 
1242  case AArch64::ADDWrr:
1243  return AArch64::ADDSWrr;
1244  case AArch64::ADDWri:
1245  return AArch64::ADDSWri;
1246  case AArch64::ADDXrr:
1247  return AArch64::ADDSXrr;
1248  case AArch64::ADDXri:
1249  return AArch64::ADDSXri;
1250  case AArch64::ADCWr:
1251  return AArch64::ADCSWr;
1252  case AArch64::ADCXr:
1253  return AArch64::ADCSXr;
1254  case AArch64::SUBWrr:
1255  return AArch64::SUBSWrr;
1256  case AArch64::SUBWri:
1257  return AArch64::SUBSWri;
1258  case AArch64::SUBXrr:
1259  return AArch64::SUBSXrr;
1260  case AArch64::SUBXri:
1261  return AArch64::SUBSXri;
1262  case AArch64::SBCWr:
1263  return AArch64::SBCSWr;
1264  case AArch64::SBCXr:
1265  return AArch64::SBCSXr;
1266  case AArch64::ANDWri:
1267  return AArch64::ANDSWri;
1268  case AArch64::ANDXri:
1269  return AArch64::ANDSXri;
1270  }
1271 }
1272 
1273 /// Check if AArch64::NZCV should be alive in successors of MBB.
1274 static bool areCFlagsAliveInSuccessors(MachineBasicBlock *MBB) {
1275   for (auto *BB : MBB->successors())
1276  if (BB->isLiveIn(AArch64::NZCV))
1277  return true;
1278  return false;
1279 }
1280 
1281 namespace {
1282 
1283 struct UsedNZCV {
1284  bool N = false;
1285  bool Z = false;
1286  bool C = false;
1287  bool V = false;
1288 
1289  UsedNZCV() = default;
1290 
1291  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
1292  this->N |= UsedFlags.N;
1293  this->Z |= UsedFlags.Z;
1294  this->C |= UsedFlags.C;
1295  this->V |= UsedFlags.V;
1296  return *this;
1297  }
1298 };
1299 
1300 } // end anonymous namespace
1301 
1302 /// Find a condition code used by the instruction.
1303 /// Returns AArch64CC::Invalid if either the instruction does not use condition
1304 /// codes or we don't optimize CmpInstr in the presence of such instructions.
1305 static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {
1306   switch (Instr.getOpcode()) {
1307  default:
1308  return AArch64CC::Invalid;
1309 
1310  case AArch64::Bcc: {
1311  int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
1312  assert(Idx >= 2);
1313  return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 2).getImm());
1314  }
1315 
1316  case AArch64::CSINVWr:
1317  case AArch64::CSINVXr:
1318  case AArch64::CSINCWr:
1319  case AArch64::CSINCXr:
1320  case AArch64::CSELWr:
1321  case AArch64::CSELXr:
1322  case AArch64::CSNEGWr:
1323  case AArch64::CSNEGXr:
1324  case AArch64::FCSELSrrr:
1325  case AArch64::FCSELDrrr: {
1326  int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
1327  assert(Idx >= 1);
1328  return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 1).getImm());
1329  }
1330  }
1331 }
1332 
1333 static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
1334  assert(CC != AArch64CC::Invalid);
1335  UsedNZCV UsedFlags;
1336  switch (CC) {
1337  default:
1338  break;
1339 
1340  case AArch64CC::EQ: // Z set
1341  case AArch64CC::NE: // Z clear
1342  UsedFlags.Z = true;
1343  break;
1344 
1345  case AArch64CC::HI: // Z clear and C set
1346  case AArch64CC::LS: // Z set or C clear
1347  UsedFlags.Z = true;
1348     LLVM_FALLTHROUGH;
1349   case AArch64CC::HS: // C set
1350  case AArch64CC::LO: // C clear
1351  UsedFlags.C = true;
1352  break;
1353 
1354  case AArch64CC::MI: // N set
1355  case AArch64CC::PL: // N clear
1356  UsedFlags.N = true;
1357  break;
1358 
1359  case AArch64CC::VS: // V set
1360  case AArch64CC::VC: // V clear
1361  UsedFlags.V = true;
1362  break;
1363 
1364  case AArch64CC::GT: // Z clear, N and V the same
1365  case AArch64CC::LE: // Z set, N and V differ
1366  UsedFlags.Z = true;
1367     LLVM_FALLTHROUGH;
1368   case AArch64CC::GE: // N and V the same
1369  case AArch64CC::LT: // N and V differ
1370  UsedFlags.N = true;
1371  UsedFlags.V = true;
1372  break;
1373  }
1374  return UsedFlags;
1375 }
1376 
1377 static bool isADDSRegImm(unsigned Opcode) {
1378  return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
1379 }
1380 
1381 static bool isSUBSRegImm(unsigned Opcode) {
1382  return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
1383 }
1384 
1385 /// Check if CmpInstr can be substituted by MI.
1386 ///
1387 /// CmpInstr can be substituted:
1388 /// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
1389 /// - and, MI and CmpInstr are from the same MachineBB
1390 /// - and, condition flags are not alive in successors of the CmpInstr parent
1391 /// - and, if MI opcode is the S form there must be no defs of flags between
1392 /// MI and CmpInstr
1393 /// or if MI opcode is not the S form there must be neither defs of flags
1394 /// nor uses of flags between MI and CmpInstr.
1395 /// - and C/V flags are not used after CmpInstr
1396 static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr,
1397                                        const TargetRegisterInfo *TRI) {
1398  assert(MI);
1399  assert(sForm(*MI) != AArch64::INSTRUCTION_LIST_END);
1400  assert(CmpInstr);
1401 
1402  const unsigned CmpOpcode = CmpInstr->getOpcode();
1403  if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
1404  return false;
1405 
1406  if (MI->getParent() != CmpInstr->getParent())
1407  return false;
1408 
1409  if (areCFlagsAliveInSuccessors(CmpInstr->getParent()))
1410  return false;
1411 
1412  AccessKind AccessToCheck = AK_Write;
1413  if (sForm(*MI) != MI->getOpcode())
1414  AccessToCheck = AK_All;
1415  if (areCFlagsAccessedBetweenInstrs(MI, CmpInstr, TRI, AccessToCheck))
1416  return false;
1417 
1418  UsedNZCV NZCVUsedAfterCmp;
1419  for (auto I = std::next(CmpInstr->getIterator()),
1420  E = CmpInstr->getParent()->instr_end();
1421  I != E; ++I) {
1422  const MachineInstr &Instr = *I;
1423  if (Instr.readsRegister(AArch64::NZCV, TRI)) {
1424       AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr);
1425       if (CC == AArch64CC::Invalid) // Unsupported conditional instruction
1426  return false;
1427  NZCVUsedAfterCmp |= getUsedNZCV(CC);
1428  }
1429 
1430  if (Instr.modifiesRegister(AArch64::NZCV, TRI))
1431  break;
1432  }
1433 
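  // The substituted instruction computes the same result, so it sets N and Z
  // exactly like the compare against zero; C and V, however, may differ, so
  // they must not be read after CmpInstr.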
1434  return !NZCVUsedAfterCmp.C && !NZCVUsedAfterCmp.V;
1435 }
1436 
1437 /// Substitute an instruction comparing to zero with another instruction
1438 /// which produces needed condition flags.
1439 ///
1440 /// Return true on success.
1441 bool AArch64InstrInfo::substituteCmpToZero(
1442  MachineInstr &CmpInstr, unsigned SrcReg,
1443  const MachineRegisterInfo *MRI) const {
1444  assert(MRI);
1445  // Get the unique definition of SrcReg.
1446  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
1447  if (!MI)
1448  return false;
1449 
1449 
1450   const TargetRegisterInfo *TRI = &getRegisterInfo();
1451 
1452  unsigned NewOpc = sForm(*MI);
1453  if (NewOpc == AArch64::INSTRUCTION_LIST_END)
1454  return false;
1455 
1456  if (!canInstrSubstituteCmpInstr(MI, &CmpInstr, TRI))
1457  return false;
1458 
1459  // Update the instruction to set NZCV.
1460  MI->setDesc(get(NewOpc));
1461  CmpInstr.eraseFromParent();
1462  bool succeeded = UpdateOperandRegClass(*MI);
1463  (void)succeeded;
1464  assert(succeeded && "Some operands reg class are incompatible!");
1465  MI->addRegisterDefined(AArch64::NZCV, TRI);
1466  return true;
1467 }
1468 
1469 bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1470   if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD &&
1471  MI.getOpcode() != AArch64::CATCHRET)
1472  return false;
1473 
1474  MachineBasicBlock &MBB = *MI.getParent();
1475  DebugLoc DL = MI.getDebugLoc();
1476 
1477  if (MI.getOpcode() == AArch64::CATCHRET) {
1478  // Skip to the first instruction before the epilog.
1479  const TargetInstrInfo *TII =
1480  MBB.getParent()->getSubtarget().getInstrInfo();
1481  MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
1482  auto MBBI = MachineBasicBlock::iterator(MI);
1483  MachineBasicBlock::iterator FirstEpilogSEH = std::prev(MBBI);
1484  while (FirstEpilogSEH->getFlag(MachineInstr::FrameDestroy) &&
1485  FirstEpilogSEH != MBB.begin())
1486  FirstEpilogSEH = std::prev(FirstEpilogSEH);
1487  if (FirstEpilogSEH != MBB.begin())
1488  FirstEpilogSEH = std::next(FirstEpilogSEH);
1489  BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADRP))
1490  .addReg(AArch64::X0, RegState::Define)
1491  .addMBB(TargetMBB);
1492  BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADDXri))
1493  .addReg(AArch64::X0, RegState::Define)
1494  .addReg(AArch64::X0)
1495  .addMBB(TargetMBB)
1496  .addImm(0);
1497  return true;
1498  }
1499 
1500  unsigned Reg = MI.getOperand(0).getReg();
1501  const GlobalValue *GV =
1502  cast<GlobalValue>((*MI.memoperands_begin())->getValue());
1503  const TargetMachine &TM = MBB.getParent()->getTarget();
1504  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
1505  const unsigned char MO_NC = AArch64II::MO_NC;
1506 
1507  if ((OpFlags & AArch64II::MO_GOT) != 0) {
1508  BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
1509  .addGlobalAddress(GV, 0, OpFlags);
1510  BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1511  .addReg(Reg, RegState::Kill)
1512  .addImm(0)
1513         .addMemOperand(*MI.memoperands_begin());
1514   } else if (TM.getCodeModel() == CodeModel::Large) {
1515  BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
1516  .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC)
1517  .addImm(0);
1518  BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1519  .addReg(Reg, RegState::Kill)
1520  .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC)
1521  .addImm(16);
1522  BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1523  .addReg(Reg, RegState::Kill)
1524  .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC)
1525  .addImm(32);
1526  BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1527  .addReg(Reg, RegState::Kill)
1528         .addGlobalAddress(GV, 0, AArch64II::MO_G3)
1529         .addImm(48);
1530  BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1531  .addReg(Reg, RegState::Kill)
1532  .addImm(0)
1533         .addMemOperand(*MI.memoperands_begin());
1534   } else if (TM.getCodeModel() == CodeModel::Tiny) {
1535  BuildMI(MBB, MI, DL, get(AArch64::ADR), Reg)
1536  .addGlobalAddress(GV, 0, OpFlags);
1537  } else {
1538  BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
1539  .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
1540  unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
1541  BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1542  .addReg(Reg, RegState::Kill)
1543  .addGlobalAddress(GV, 0, LoFlags)
1544         .addMemOperand(*MI.memoperands_begin());
1545   }
1546 
1547  MBB.erase(MI);
1548 
1549  return true;
1550 }
1551 
1552 // Return true if this instruction simply sets its single destination register
1553 // to zero. This is equivalent to a register rename of the zero-register.
1554 bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) {
1555   switch (MI.getOpcode()) {
1556  default:
1557  break;
1558  case AArch64::MOVZWi:
1559  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
1560  if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
1561  assert(MI.getDesc().getNumOperands() == 3 &&
1562  MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
1563  return true;
1564  }
1565  break;
1566  case AArch64::ANDWri: // and Rd, Rzr, #imm
1567  return MI.getOperand(1).getReg() == AArch64::WZR;
1568  case AArch64::ANDXri:
1569  return MI.getOperand(1).getReg() == AArch64::XZR;
1570  case TargetOpcode::COPY:
1571  return MI.getOperand(1).getReg() == AArch64::WZR;
1572  }
1573  return false;
1574 }
1575 
1576 // Return true if this instruction simply renames a general register without
1577 // modifying bits.
1578 bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) {
1579   switch (MI.getOpcode()) {
1580  default:
1581  break;
1582  case TargetOpcode::COPY: {
1583     // GPR32 copies will be lowered to ORRXrs
1584  unsigned DstReg = MI.getOperand(0).getReg();
1585  return (AArch64::GPR32RegClass.contains(DstReg) ||
1586  AArch64::GPR64RegClass.contains(DstReg));
1587  }
1588  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
1589  if (MI.getOperand(1).getReg() == AArch64::XZR) {
1590  assert(MI.getDesc().getNumOperands() == 4 &&
1591  MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
1592  return true;
1593  }
1594  break;
1595  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
1596  if (MI.getOperand(2).getImm() == 0) {
1597  assert(MI.getDesc().getNumOperands() == 4 &&
1598  MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
1599  return true;
1600  }
1601  break;
1602  }
1603  return false;
1604 }
1605 
1606 // Return true if this instruction simply renames an FPR register without
1607 // modifying bits.
1608 bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) {
1609   switch (MI.getOpcode()) {
1610  default:
1611  break;
1612  case TargetOpcode::COPY: {
1613     // FPR64 copies will be lowered to ORR.16b
1614  unsigned DstReg = MI.getOperand(0).getReg();
1615  return (AArch64::FPR64RegClass.contains(DstReg) ||
1616  AArch64::FPR128RegClass.contains(DstReg));
1617  }
1618  case AArch64::ORRv16i8:
1619  if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
1620  assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
1621  "invalid ORRv16i8 operands");
1622  return true;
1623  }
1624  break;
1625  }
1626  return false;
1627 }
1628 
1629 unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
1630                                                int &FrameIndex) const {
1631  switch (MI.getOpcode()) {
1632  default:
1633  break;
1634  case AArch64::LDRWui:
1635  case AArch64::LDRXui:
1636  case AArch64::LDRBui:
1637  case AArch64::LDRHui:
1638  case AArch64::LDRSui:
1639  case AArch64::LDRDui:
1640  case AArch64::LDRQui:
1641  if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
1642  MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
1643  FrameIndex = MI.getOperand(1).getIndex();
1644  return MI.getOperand(0).getReg();
1645  }
1646  break;
1647  }
1648 
1649  return 0;
1650 }
1651 
1652 unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
1653                                               int &FrameIndex) const {
1654  switch (MI.getOpcode()) {
1655  default:
1656  break;
1657  case AArch64::STRWui:
1658  case AArch64::STRXui:
1659  case AArch64::STRBui:
1660  case AArch64::STRHui:
1661  case AArch64::STRSui:
1662  case AArch64::STRDui:
1663  case AArch64::STRQui:
1664  if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
1665  MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
1666  FrameIndex = MI.getOperand(1).getIndex();
1667  return MI.getOperand(0).getReg();
1668  }
1669  break;
1670  }
1671  return 0;
1672 }
1673 
1674 /// Check all MachineMemOperands for a hint to suppress pairing.
1675 bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) {
1676   return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
1677  return MMO->getFlags() & MOSuppressPair;
1678  });
1679 }
1680 
1681 /// Set a flag on the first MachineMemOperand to suppress pairing.
1682 void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) {
1683   if (MI.memoperands_empty())
1684  return;
1685  (*MI.memoperands_begin())->setFlags(MOSuppressPair);
1686 }
1687 
1688 /// Check all MachineMemOperands for a hint that the load/store is strided.
1689 bool AArch64InstrInfo::isStridedAccess(const MachineInstr &MI) {
1690   return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
1691  return MMO->getFlags() & MOStridedAccess;
1692  });
1693 }
1694 
1695 bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) {
1696   switch (Opc) {
1697  default:
1698  return false;
1699  case AArch64::STURSi:
1700  case AArch64::STURDi:
1701  case AArch64::STURQi:
1702  case AArch64::STURBBi:
1703  case AArch64::STURHHi:
1704  case AArch64::STURWi:
1705  case AArch64::STURXi:
1706  case AArch64::LDURSi:
1707  case AArch64::LDURDi:
1708  case AArch64::LDURQi:
1709  case AArch64::LDURWi:
1710  case AArch64::LDURXi:
1711  case AArch64::LDURSWi:
1712  case AArch64::LDURHHi:
1713  case AArch64::LDURBBi:
1714  case AArch64::LDURSBWi:
1715  case AArch64::LDURSHWi:
1716  return true;
1717  }
1718 }
1719 
1720 Optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) {
1721   switch (Opc) {
1722  default: return {};
1723  case AArch64::PRFMui: return AArch64::PRFUMi;
1724  case AArch64::LDRXui: return AArch64::LDURXi;
1725  case AArch64::LDRWui: return AArch64::LDURWi;
1726  case AArch64::LDRBui: return AArch64::LDURBi;
1727  case AArch64::LDRHui: return AArch64::LDURHi;
1728  case AArch64::LDRSui: return AArch64::LDURSi;
1729  case AArch64::LDRDui: return AArch64::LDURDi;
1730  case AArch64::LDRQui: return AArch64::LDURQi;
1731  case AArch64::LDRBBui: return AArch64::LDURBBi;
1732  case AArch64::LDRHHui: return AArch64::LDURHHi;
1733  case AArch64::LDRSBXui: return AArch64::LDURSBXi;
1734  case AArch64::LDRSBWui: return AArch64::LDURSBWi;
1735  case AArch64::LDRSHXui: return AArch64::LDURSHXi;
1736  case AArch64::LDRSHWui: return AArch64::LDURSHWi;
1737  case AArch64::LDRSWui: return AArch64::LDURSWi;
1738  case AArch64::STRXui: return AArch64::STURXi;
1739  case AArch64::STRWui: return AArch64::STURWi;
1740  case AArch64::STRBui: return AArch64::STURBi;
1741  case AArch64::STRHui: return AArch64::STURHi;
1742  case AArch64::STRSui: return AArch64::STURSi;
1743  case AArch64::STRDui: return AArch64::STURDi;
1744  case AArch64::STRQui: return AArch64::STURQi;
1745  case AArch64::STRBBui: return AArch64::STURBBi;
1746  case AArch64::STRHHui: return AArch64::STURHHi;
1747  }
1748 }
1749 
1750 unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {
1751   switch (Opc) {
1752  default:
1753  return 2;
1754  case AArch64::LDPXi:
1755  case AArch64::LDPDi:
1756  case AArch64::STPXi:
1757  case AArch64::STPDi:
1758  case AArch64::LDNPXi:
1759  case AArch64::LDNPDi:
1760  case AArch64::STNPXi:
1761  case AArch64::STNPDi:
1762  case AArch64::LDPQi:
1763  case AArch64::STPQi:
1764  case AArch64::LDNPQi:
1765  case AArch64::STNPQi:
1766  case AArch64::LDPWi:
1767  case AArch64::LDPSi:
1768  case AArch64::STPWi:
1769  case AArch64::STPSi:
1770  case AArch64::LDNPWi:
1771  case AArch64::LDNPSi:
1772  case AArch64::STNPWi:
1773  case AArch64::STNPSi:
1774  case AArch64::LDG:
1775  return 3;
1776  case AArch64::ADDG:
1777  case AArch64::STGOffset:
1778  return 2;
1779  }
1780 }
1781 
1782 bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) {
1783   switch (MI.getOpcode()) {
1784  default:
1785  return false;
1786  // Scaled instructions.
1787  case AArch64::STRSui:
1788  case AArch64::STRDui:
1789  case AArch64::STRQui:
1790  case AArch64::STRXui:
1791  case AArch64::STRWui:
1792  case AArch64::LDRSui:
1793  case AArch64::LDRDui:
1794  case AArch64::LDRQui:
1795  case AArch64::LDRXui:
1796  case AArch64::LDRWui:
1797  case AArch64::LDRSWui:
1798  // Unscaled instructions.
1799  case AArch64::STURSi:
1800  case AArch64::STURDi:
1801  case AArch64::STURQi:
1802  case AArch64::STURWi:
1803  case AArch64::STURXi:
1804  case AArch64::LDURSi:
1805  case AArch64::LDURDi:
1806  case AArch64::LDURQi:
1807  case AArch64::LDURWi:
1808  case AArch64::LDURXi:
1809  case AArch64::LDURSWi:
1810  return true;
1811  }
1812 }
1813 
1814 unsigned AArch64InstrInfo::convertToFlagSettingOpc(unsigned Opc,
1815                                                    bool &Is64Bit) {
1816  switch (Opc) {
1817  default:
1818  llvm_unreachable("Opcode has no flag setting equivalent!");
1819  // 32-bit cases:
1820  case AArch64::ADDWri:
1821  Is64Bit = false;
1822  return AArch64::ADDSWri;
1823  case AArch64::ADDWrr:
1824  Is64Bit = false;
1825  return AArch64::ADDSWrr;
1826  case AArch64::ADDWrs:
1827  Is64Bit = false;
1828  return AArch64::ADDSWrs;
1829  case AArch64::ADDWrx:
1830  Is64Bit = false;
1831  return AArch64::ADDSWrx;
1832  case AArch64::ANDWri:
1833  Is64Bit = false;
1834  return AArch64::ANDSWri;
1835  case AArch64::ANDWrr:
1836  Is64Bit = false;
1837  return AArch64::ANDSWrr;
1838  case AArch64::ANDWrs:
1839  Is64Bit = false;
1840  return AArch64::ANDSWrs;
1841  case AArch64::BICWrr:
1842  Is64Bit = false;
1843  return AArch64::BICSWrr;
1844  case AArch64::BICWrs:
1845  Is64Bit = false;
1846  return AArch64::BICSWrs;
1847  case AArch64::SUBWri:
1848  Is64Bit = false;
1849  return AArch64::SUBSWri;
1850  case AArch64::SUBWrr:
1851  Is64Bit = false;
1852  return AArch64::SUBSWrr;
1853  case AArch64::SUBWrs:
1854  Is64Bit = false;
1855  return AArch64::SUBSWrs;
1856  case AArch64::SUBWrx:
1857  Is64Bit = false;
1858  return AArch64::SUBSWrx;
1859  // 64-bit cases:
1860  case AArch64::ADDXri:
1861  Is64Bit = true;
1862  return AArch64::ADDSXri;
1863  case AArch64::ADDXrr:
1864  Is64Bit = true;
1865  return AArch64::ADDSXrr;
1866  case AArch64::ADDXrs:
1867  Is64Bit = true;
1868  return AArch64::ADDSXrs;
1869  case AArch64::ADDXrx:
1870  Is64Bit = true;
1871  return AArch64::ADDSXrx;
1872  case AArch64::ANDXri:
1873  Is64Bit = true;
1874  return AArch64::ANDSXri;
1875  case AArch64::ANDXrr:
1876  Is64Bit = true;
1877  return AArch64::ANDSXrr;
1878  case AArch64::ANDXrs:
1879  Is64Bit = true;
1880  return AArch64::ANDSXrs;
1881  case AArch64::BICXrr:
1882  Is64Bit = true;
1883  return AArch64::BICSXrr;
1884  case AArch64::BICXrs:
1885  Is64Bit = true;
1886  return AArch64::BICSXrs;
1887  case AArch64::SUBXri:
1888  Is64Bit = true;
1889  return AArch64::SUBSXri;
1890  case AArch64::SUBXrr:
1891  Is64Bit = true;
1892  return AArch64::SUBSXrr;
1893  case AArch64::SUBXrs:
1894  Is64Bit = true;
1895  return AArch64::SUBSXrs;
1896  case AArch64::SUBXrx:
1897  Is64Bit = true;
1898  return AArch64::SUBSXrx;
1899  }
1900 }
1901 
1902 // Is this a candidate for ld/st merging or pairing? For example, we don't
1903 // touch volatiles or load/stores that have a hint to avoid pair formation.
1904 bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
1905  // If this is a volatile load/store, don't mess with it.
1906  if (MI.hasOrderedMemoryRef())
1907  return false;
1908 
1909  // Make sure this is a reg/fi+imm (as opposed to an address reloc).
1910  assert((MI.getOperand(1).isReg() || MI.getOperand(1).isFI()) &&
1911  "Expected a reg or frame index operand.");
1912  if (!MI.getOperand(2).isImm())
1913  return false;
1914 
1915  // Can't merge/pair if the instruction modifies the base register.
1916  // e.g., ldr x0, [x0]
1917  // This case will never occur with an FI base.
1918  if (MI.getOperand(1).isReg()) {
1919  unsigned BaseReg = MI.getOperand(1).getReg();
1920  const TargetRegisterInfo *TRI = &getRegisterInfo();
1921  if (MI.modifiesRegister(BaseReg, TRI))
1922  return false;
1923  }
1924 
1925  // Check if this load/store has a hint to avoid pair formation.
1926  // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
1927  if (isLdStPairSuppressed(MI))
1928  return false;
1929 
1930  // On some CPUs quad load/store pairs are slower than two single load/stores.
1931  if (Subtarget.isPaired128Slow()) {
1932  switch (MI.getOpcode()) {
1933  default:
1934  break;
1935  case AArch64::LDURQi:
1936  case AArch64::STURQi:
1937  case AArch64::LDRQui:
1938  case AArch64::STRQui:
1939  return false;
1940  }
1941  }
1942 
1943  return true;
1944 }
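// Illustrative example (not part of the original source): a base-modifying
// access such as
//   ldr x0, [x0]    ; x0 is both the loaded value and the base register
// is rejected by the base-register check above, while a plain
// "ldr x1, [x0, #8]" remains a merge/pair candidate.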
1945 
1946 bool AArch64InstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
1947  const MachineOperand *&BaseOp,
1948  int64_t &Offset,
1949  const TargetRegisterInfo *TRI) const {
1950  unsigned Width;
1951  return getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI);
1952 }
1953 
1954 bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
1955  const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
1956  unsigned &Width, const TargetRegisterInfo *TRI) const {
1957  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
1958  // Handle only loads/stores with base register followed by immediate offset.
1959  if (LdSt.getNumExplicitOperands() == 3) {
1960  // Non-paired instruction (e.g., ldr x1, [x0, #8]).
1961  if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) ||
1962  !LdSt.getOperand(2).isImm())
1963  return false;
1964  } else if (LdSt.getNumExplicitOperands() == 4) {
1965  // Paired instruction (e.g., ldp x1, x2, [x0, #8]).
1966  if (!LdSt.getOperand(1).isReg() ||
1967  (!LdSt.getOperand(2).isReg() && !LdSt.getOperand(2).isFI()) ||
1968  !LdSt.getOperand(3).isImm())
1969  return false;
1970  } else
1971  return false;
1972 
1973  // Get the scaling factor for the instruction and set the width for the
1974  // instruction.
1975  unsigned Scale = 0;
1976  int64_t Dummy1, Dummy2;
1977 
1978  // If this returns false, then it's an instruction we don't want to handle.
1979  if (!getMemOpInfo(LdSt.getOpcode(), Scale, Width, Dummy1, Dummy2))
1980  return false;
1981 
1982  // Compute the offset. Offset is calculated as the immediate operand
1983  // multiplied by the scaling factor. Unscaled instructions have scaling factor
1984  // set to 1.
1985  if (LdSt.getNumExplicitOperands() == 3) {
1986  BaseOp = &LdSt.getOperand(1);
1987  Offset = LdSt.getOperand(2).getImm() * Scale;
1988  } else {
1989  assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
1990  BaseOp = &LdSt.getOperand(2);
1991  Offset = LdSt.getOperand(3).getImm() * Scale;
1992  }
1993 
1994  assert((BaseOp->isReg() || BaseOp->isFI()) &&
1995  "getMemOperandWithOffset only supports base "
1996  "operands of type register or frame index.");
1997 
1998  return true;
1999 }
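// Worked example (illustrative): for a paired access "ldp x1, x2, [x0, #16]"
// (AArch64::LDPXi) the immediate operand holds the scaled value 2; with
// Scale = 8 from getMemOpInfo, BaseOp points at the x0 operand and
// Offset = 2 * 8 = 16 bytes.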
2000 
2003  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
2004  MachineOperand &OfsOp = LdSt.getOperand(LdSt.getNumExplicitOperands() - 1);
2005  assert(OfsOp.isImm() && "Offset operand wasn't immediate.");
2006  return OfsOp;
2007 }
2008 
2009 bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, unsigned &Scale,
2010  unsigned &Width, int64_t &MinOffset,
2011  int64_t &MaxOffset) {
2012  switch (Opcode) {
2013  // Not a memory operation or something we want to handle.
2014  default:
2015  Scale = Width = 0;
2016  MinOffset = MaxOffset = 0;
2017  return false;
2018  case AArch64::STRWpost:
2019  case AArch64::LDRWpost:
2020  Width = 32;
2021  Scale = 4;
2022  MinOffset = -256;
2023  MaxOffset = 255;
2024  break;
2025  case AArch64::LDURQi:
2026  case AArch64::STURQi:
2027  Width = 16;
2028  Scale = 1;
2029  MinOffset = -256;
2030  MaxOffset = 255;
2031  break;
2032  case AArch64::PRFUMi:
2033  case AArch64::LDURXi:
2034  case AArch64::LDURDi:
2035  case AArch64::STURXi:
2036  case AArch64::STURDi:
2037  Width = 8;
2038  Scale = 1;
2039  MinOffset = -256;
2040  MaxOffset = 255;
2041  break;
2042  case AArch64::LDURWi:
2043  case AArch64::LDURSi:
2044  case AArch64::LDURSWi:
2045  case AArch64::STURWi:
2046  case AArch64::STURSi:
2047  Width = 4;
2048  Scale = 1;
2049  MinOffset = -256;
2050  MaxOffset = 255;
2051  break;
2052  case AArch64::LDURHi:
2053  case AArch64::LDURHHi:
2054  case AArch64::LDURSHXi:
2055  case AArch64::LDURSHWi:
2056  case AArch64::STURHi:
2057  case AArch64::STURHHi:
2058  Width = 2;
2059  Scale = 1;
2060  MinOffset = -256;
2061  MaxOffset = 255;
2062  break;
2063  case AArch64::LDURBi:
2064  case AArch64::LDURBBi:
2065  case AArch64::LDURSBXi:
2066  case AArch64::LDURSBWi:
2067  case AArch64::STURBi:
2068  case AArch64::STURBBi:
2069  Width = 1;
2070  Scale = 1;
2071  MinOffset = -256;
2072  MaxOffset = 255;
2073  break;
2074  case AArch64::LDPQi:
2075  case AArch64::LDNPQi:
2076  case AArch64::STPQi:
2077  case AArch64::STNPQi:
2078  Scale = 16;
2079  Width = 32;
2080  MinOffset = -64;
2081  MaxOffset = 63;
2082  break;
2083  case AArch64::LDRQui:
2084  case AArch64::STRQui:
2085  Scale = Width = 16;
2086  MinOffset = 0;
2087  MaxOffset = 4095;
2088  break;
2089  case AArch64::LDPXi:
2090  case AArch64::LDPDi:
2091  case AArch64::LDNPXi:
2092  case AArch64::LDNPDi:
2093  case AArch64::STPXi:
2094  case AArch64::STPDi:
2095  case AArch64::STNPXi:
2096  case AArch64::STNPDi:
2097  Scale = 8;
2098  Width = 16;
2099  MinOffset = -64;
2100  MaxOffset = 63;
2101  break;
2102  case AArch64::PRFMui:
2103  case AArch64::LDRXui:
2104  case AArch64::LDRDui:
2105  case AArch64::STRXui:
2106  case AArch64::STRDui:
2107  Scale = Width = 8;
2108  MinOffset = 0;
2109  MaxOffset = 4095;
2110  break;
2111  case AArch64::LDPWi:
2112  case AArch64::LDPSi:
2113  case AArch64::LDNPWi:
2114  case AArch64::LDNPSi:
2115  case AArch64::STPWi:
2116  case AArch64::STPSi:
2117  case AArch64::STNPWi:
2118  case AArch64::STNPSi:
2119  Scale = 4;
2120  Width = 8;
2121  MinOffset = -64;
2122  MaxOffset = 63;
2123  break;
2124  case AArch64::LDRWui:
2125  case AArch64::LDRSui:
2126  case AArch64::LDRSWui:
2127  case AArch64::STRWui:
2128  case AArch64::STRSui:
2129  Scale = Width = 4;
2130  MinOffset = 0;
2131  MaxOffset = 4095;
2132  break;
2133  case AArch64::LDRHui:
2134  case AArch64::LDRHHui:
2135  case AArch64::LDRSHWui:
2136  case AArch64::LDRSHXui:
2137  case AArch64::STRHui:
2138  case AArch64::STRHHui:
2139  Scale = Width = 2;
2140  MinOffset = 0;
2141  MaxOffset = 4095;
2142  break;
2143  case AArch64::LDRBui:
2144  case AArch64::LDRBBui:
2145  case AArch64::LDRSBWui:
2146  case AArch64::LDRSBXui:
2147  case AArch64::STRBui:
2148  case AArch64::STRBBui:
2149  Scale = Width = 1;
2150  MinOffset = 0;
2151  MaxOffset = 4095;
2152  break;
2153  case AArch64::ADDG:
2154  Scale = 16;
2155  Width = 0;
2156  MinOffset = 0;
2157  MaxOffset = 63;
2158  break;
2159  case AArch64::LDG:
2160  case AArch64::STGOffset:
2161  Scale = Width = 16;
2162  MinOffset = -256;
2163  MaxOffset = 255;
2164  break;
2165  }
2166 
2167  return true;
2168 }
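// Minimal usage sketch (assumes only the declarations above): querying the
// addressing properties of a scaled 64-bit load.
//
//   unsigned Scale, Width;
//   int64_t MinOff, MaxOff;
//   if (AArch64InstrInfo::getMemOpInfo(AArch64::LDRXui, Scale, Width,
//                                      MinOff, MaxOff)) {
//     // Scale == Width == 8, MinOff == 0, MaxOff == 4095, i.e. byte
//     // offsets 0..32760 in steps of 8.
//   }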
2169 
2170 static unsigned getOffsetStride(unsigned Opc) {
2171  switch (Opc) {
2172  default:
2173  return 0;
2174  case AArch64::LDURQi:
2175  case AArch64::STURQi:
2176  return 16;
2177  case AArch64::LDURXi:
2178  case AArch64::LDURDi:
2179  case AArch64::STURXi:
2180  case AArch64::STURDi:
2181  return 8;
2182  case AArch64::LDURWi:
2183  case AArch64::LDURSi:
2184  case AArch64::LDURSWi:
2185  case AArch64::STURWi:
2186  case AArch64::STURSi:
2187  return 4;
2188  }
2189 }
2190 
2191 // Scale the unscaled offsets. Returns false if the unscaled offset can't be
2192 // scaled.
2193 static bool scaleOffset(unsigned Opc, int64_t &Offset) {
2194  unsigned OffsetStride = getOffsetStride(Opc);
2195  if (OffsetStride == 0)
2196  return false;
2197  // If the byte-offset isn't a multiple of the stride, we can't scale this
2198  // offset.
2199  if (Offset % OffsetStride != 0)
2200  return false;
2201 
2202  // Convert the byte-offset used by unscaled into an "element" offset used
2203  // by the scaled pair load/store instructions.
2204  Offset /= OffsetStride;
2205  return true;
2206 }
2207 
2208 // Unscale the scaled offsets. Returns false if the scaled offset can't be
2209 // unscaled.
2210 static bool unscaleOffset(unsigned Opc, int64_t &Offset) {
2211  unsigned OffsetStride = getOffsetStride(Opc);
2212  if (OffsetStride == 0)
2213  return false;
2214 
2215  // Convert the "element" offset used by scaled pair load/store instructions
2216  // into the byte-offset used by unscaled.
2217  Offset *= OffsetStride;
2218  return true;
2219 }
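// Example (illustrative): scaleOffset(AArch64::STURXi, Offset) turns the
// byte offset 24 into the element offset 3 (stride 8), and
// unscaleOffset(AArch64::STURXi, Offset) performs the inverse; a byte
// offset such as 20, which is not a multiple of the stride, cannot be
// scaled and makes scaleOffset return false.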
2220 
2221 static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
2222  if (FirstOpc == SecondOpc)
2223  return true;
2224  // We can also pair sign-ext and zero-ext instructions.
2225  switch (FirstOpc) {
2226  default:
2227  return false;
2228  case AArch64::LDRWui:
2229  case AArch64::LDURWi:
2230  return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
2231  case AArch64::LDRSWui:
2232  case AArch64::LDURSWi:
2233  return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
2234  }
2235  // These instructions can't be paired based on their opcodes.
2236  return false;
2237 }
2238 
2239 static bool shouldClusterFI(const MachineFrameInfo &MFI, int FI1,
2240  int64_t Offset1, unsigned Opcode1, int FI2,
2241  int64_t Offset2, unsigned Opcode2) {
2242  // Accesses through fixed stack object frame indices may access a different
2243  // fixed stack slot. Check that the object offsets + offsets match.
2244  if (MFI.isFixedObjectIndex(FI1) && MFI.isFixedObjectIndex(FI2)) {
2245  int64_t ObjectOffset1 = MFI.getObjectOffset(FI1);
2246  int64_t ObjectOffset2 = MFI.getObjectOffset(FI2);
2247  assert(ObjectOffset1 <= ObjectOffset2 && "Object offsets are not ordered.");
2248  // Get the byte-offset from the object offset.
2249  if (!unscaleOffset(Opcode1, Offset1) || !unscaleOffset(Opcode2, Offset2))
2250  return false;
2251  ObjectOffset1 += Offset1;
2252  ObjectOffset2 += Offset2;
2253  // Get the "element" index in the object.
2254  if (!scaleOffset(Opcode1, ObjectOffset1) ||
2255  !scaleOffset(Opcode2, ObjectOffset2))
2256  return false;
2257  return ObjectOffset1 + 1 == ObjectOffset2;
2258  }
2259 
2260  return FI1 == FI2;
2261 }
2262 
2263 /// Detect opportunities for ldp/stp formation.
2264 ///
2265 /// Only called for LdSt for which getMemOperandWithOffset returns true.
2266 bool AArch64InstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
2267  const MachineOperand &BaseOp2,
2268  unsigned NumLoads) const {
2269  const MachineInstr &FirstLdSt = *BaseOp1.getParent();
2270  const MachineInstr &SecondLdSt = *BaseOp2.getParent();
2271  if (BaseOp1.getType() != BaseOp2.getType())
2272  return false;
2273 
2274  assert((BaseOp1.isReg() || BaseOp1.isFI()) &&
2275  "Only base registers and frame indices are supported.");
2276 
2277  // Check for both base regs and base FI.
2278  if (BaseOp1.isReg() && BaseOp1.getReg() != BaseOp2.getReg())
2279  return false;
2280 
2281  // Only cluster up to a single pair.
2282  if (NumLoads > 1)
2283  return false;
2284 
2285  if (!isPairableLdStInst(FirstLdSt) || !isPairableLdStInst(SecondLdSt))
2286  return false;
2287 
2288  // Can we pair these instructions based on their opcodes?
2289  unsigned FirstOpc = FirstLdSt.getOpcode();
2290  unsigned SecondOpc = SecondLdSt.getOpcode();
2291  if (!canPairLdStOpc(FirstOpc, SecondOpc))
2292  return false;
2293 
2294  // Can't merge volatiles or load/stores that have a hint to avoid pair
2295  // formation, for example.
2296  if (!isCandidateToMergeOrPair(FirstLdSt) ||
2297  !isCandidateToMergeOrPair(SecondLdSt))
2298  return false;
2299 
2300  // isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
2301  int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
2302  if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
2303  return false;
2304 
2305  int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
2306  if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
2307  return false;
2308 
2309  // Pairwise instructions have a 7-bit signed offset field.
2310  if (Offset1 > 63 || Offset1 < -64)
2311  return false;
2312 
2313  // The caller should already have ordered First/SecondLdSt by offset.
2314  // Note: except for non-equal frame index bases
2315  if (BaseOp1.isFI()) {
2316  assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 >= Offset2) &&
2317  "Caller should have ordered offsets.");
2318 
2319  const MachineFrameInfo &MFI =
2320  FirstLdSt.getParent()->getParent()->getFrameInfo();
2321  return shouldClusterFI(MFI, BaseOp1.getIndex(), Offset1, FirstOpc,
2322  BaseOp2.getIndex(), Offset2, SecondOpc);
2323  }
2324 
2325  assert((!BaseOp1.isIdenticalTo(BaseOp2) || Offset1 <= Offset2) &&
2326  "Caller should have ordered offsets.");
2327 
2328  return Offset1 + 1 == Offset2;
2329 }
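// Worked example (illustrative): two loads "ldr x1, [x0]" and
// "ldr x2, [x0, #8]" share the base x0 and, after scaling, have element
// offsets 0 and 1, so the final "Offset1 + 1 == Offset2" check allows them
// to be clustered and later merged into "ldp x1, x2, [x0]".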
2330 
2331 static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
2332  unsigned Reg, unsigned SubIdx,
2333  unsigned State,
2334  const TargetRegisterInfo *TRI) {
2335  if (!SubIdx)
2336  return MIB.addReg(Reg, State);
2337 
2338  if (TargetRegisterInfo::isPhysicalRegister(Reg))
2339  return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
2340  return MIB.addReg(Reg, State, SubIdx);
2341 }
2342 
2343 static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
2344  unsigned NumRegs) {
2345  // We really want the positive remainder mod 32 here, that happens to be
2346  // easily obtainable with a mask.
2347  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
2348 }
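// Example (illustrative): copying the D-register triple starting at d1 into
// the triple starting at d2 gives (2 - 1) & 0x1f == 1 < 3, so a forward
// (low-to-high) sub-register copy would overwrite d2/d3 before they are
// read; copyPhysRegTuple below therefore copies from the highest
// sub-register down in that case.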
2349 
2350 void AArch64InstrInfo::copyPhysRegTuple(MachineBasicBlock &MBB,
2351  MachineBasicBlock::iterator I,
2352  const DebugLoc &DL, unsigned DestReg,
2353  unsigned SrcReg, bool KillSrc,
2354  unsigned Opcode,
2355  ArrayRef<unsigned> Indices) const {
2356  assert(Subtarget.hasNEON() && "Unexpected register copy without NEON");
2357  const TargetRegisterInfo *TRI = &getRegisterInfo();
2358  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
2359  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
2360  unsigned NumRegs = Indices.size();
2361 
2362  int SubReg = 0, End = NumRegs, Incr = 1;
2363  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
2364  SubReg = NumRegs - 1;
2365  End = -1;
2366  Incr = -1;
2367  }
2368 
2369  for (; SubReg != End; SubReg += Incr) {
2370  const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
2371  AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
2372  AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
2373  AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
2374  }
2375 }
2376 
2377 void AArch64InstrInfo::copyGPRRegTuple(MachineBasicBlock &MBB,
2378  MachineBasicBlock::iterator I,
2379  DebugLoc DL, unsigned DestReg,
2380  unsigned SrcReg, bool KillSrc,
2381  unsigned Opcode, unsigned ZeroReg,
2382  llvm::ArrayRef<unsigned> Indices) const {
2383  const TargetRegisterInfo *TRI = &getRegisterInfo();
2384  unsigned NumRegs = Indices.size();
2385 
2386 #ifndef NDEBUG
2387  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
2388  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
2389  assert(DestEncoding % NumRegs == 0 && SrcEncoding % NumRegs == 0 &&
2390  "GPR reg sequences should not be able to overlap");
2391 #endif
2392 
2393  for (unsigned SubReg = 0; SubReg != NumRegs; ++SubReg) {
2394  const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
2395  AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
2396  MIB.addReg(ZeroReg);
2397  AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
2398  MIB.addImm(0);
2399  }
2400 }
2401 
2402 void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
2403  MachineBasicBlock::iterator I,
2404  const DebugLoc &DL, unsigned DestReg,
2405  unsigned SrcReg, bool KillSrc) const {
2406  if (AArch64::GPR32spRegClass.contains(DestReg) &&
2407  (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
2408  const TargetRegisterInfo *TRI = &getRegisterInfo();
2409 
2410  if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
2411  // If either operand is WSP, expand to ADD #0.
2412  if (Subtarget.hasZeroCycleRegMove()) {
2413  // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
2414  unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
2415  &AArch64::GPR64spRegClass);
2416  unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
2417  &AArch64::GPR64spRegClass);
2418  // This instruction is reading and writing X registers. This may upset
2419  // the register scavenger and machine verifier, so we need to indicate
2420  // that we are reading an undefined value from SrcRegX, but a proper
2421  // value from SrcReg.
2422  BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
2423  .addReg(SrcRegX, RegState::Undef)
2424  .addImm(0)
2425  .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
2426  .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
2427  } else {
2428  BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
2429  .addReg(SrcReg, getKillRegState(KillSrc))
2430  .addImm(0)
2431  .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2432  }
2433  } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroingGP()) {
2434  BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg)
2435  .addImm(0)
2436  .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2437  } else {
2438  if (Subtarget.hasZeroCycleRegMove()) {
2439  // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
2440  unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
2441  &AArch64::GPR64spRegClass);
2442  unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
2443  &AArch64::GPR64spRegClass);
2444  // This instruction is reading and writing X registers. This may upset
2445  // the register scavenger and machine verifier, so we need to indicate
2446  // that we are reading an undefined value from SrcRegX, but a proper
2447  // value from SrcReg.
2448  BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
2449  .addReg(AArch64::XZR)
2450  .addReg(SrcRegX, RegState::Undef)
2451  .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
2452  } else {
2453  // Otherwise, expand to ORR WZR.
2454  BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
2455  .addReg(AArch64::WZR)
2456  .addReg(SrcReg, getKillRegState(KillSrc));
2457  }
2458  }
2459  return;
2460  }
2461 
2462  if (AArch64::GPR64spRegClass.contains(DestReg) &&
2463  (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
2464  if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
2465  // If either operand is SP, expand to ADD #0.
2466  BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
2467  .addReg(SrcReg, getKillRegState(KillSrc))
2468  .addImm(0)
2469  .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2470  } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroingGP()) {
2471  BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg)
2472  .addImm(0)
2473  .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
2474  } else {
2475  // Otherwise, expand to ORR XZR.
2476  BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
2477  .addReg(AArch64::XZR)
2478  .addReg(SrcReg, getKillRegState(KillSrc));
2479  }
2480  return;
2481  }
2482 
2483  // Copy a DDDD register quad by copying the individual sub-registers.
2484  if (AArch64::DDDDRegClass.contains(DestReg) &&
2485  AArch64::DDDDRegClass.contains(SrcReg)) {
2486  static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1,
2487  AArch64::dsub2, AArch64::dsub3};
2488  copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
2489  Indices);
2490  return;
2491  }
2492 
2493  // Copy a DDD register triple by copying the individual sub-registers.
2494  if (AArch64::DDDRegClass.contains(DestReg) &&
2495  AArch64::DDDRegClass.contains(SrcReg)) {
2496  static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1,
2497  AArch64::dsub2};
2498  copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
2499  Indices);
2500  return;
2501  }
2502 
2503  // Copy a DD register pair by copying the individual sub-registers.
2504  if (AArch64::DDRegClass.contains(DestReg) &&
2505  AArch64::DDRegClass.contains(SrcReg)) {
2506  static const unsigned Indices[] = {AArch64::dsub0, AArch64::dsub1};
2507  copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
2508  Indices);
2509  return;
2510  }
2511 
2512  // Copy a QQQQ register quad by copying the individual sub-registers.
2513  if (AArch64::QQQQRegClass.contains(DestReg) &&
2514  AArch64::QQQQRegClass.contains(SrcReg)) {
2515  static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1,
2516  AArch64::qsub2, AArch64::qsub3};
2517  copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
2518  Indices);
2519  return;
2520  }
2521 
2522  // Copy a QQQ register triple by copying the individual sub-registers.
2523  if (AArch64::QQQRegClass.contains(DestReg) &&
2524  AArch64::QQQRegClass.contains(SrcReg)) {
2525  static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1,
2526  AArch64::qsub2};
2527  copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
2528  Indices);
2529  return;
2530  }
2531 
2532  // Copy a QQ register pair by copying the individual sub-registers.
2533  if (AArch64::QQRegClass.contains(DestReg) &&
2534  AArch64::QQRegClass.contains(SrcReg)) {
2535  static const unsigned Indices[] = {AArch64::qsub0, AArch64::qsub1};
2536  copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
2537  Indices);
2538  return;
2539  }
2540 
2541  if (AArch64::XSeqPairsClassRegClass.contains(DestReg) &&
2542  AArch64::XSeqPairsClassRegClass.contains(SrcReg)) {
2543  static const unsigned Indices[] = {AArch64::sube64, AArch64::subo64};
2544  copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRXrs,
2545  AArch64::XZR, Indices);
2546  return;
2547  }
2548 
2549  if (AArch64::WSeqPairsClassRegClass.contains(DestReg) &&
2550  AArch64::WSeqPairsClassRegClass.contains(SrcReg)) {
2551  static const unsigned Indices[] = {AArch64::sube32, AArch64::subo32};
2552  copyGPRRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRWrs,
2553  AArch64::WZR, Indices);
2554  return;
2555  }
2556 
2557  if (AArch64::FPR128RegClass.contains(DestReg) &&
2558  AArch64::FPR128RegClass.contains(SrcReg)) {
2559  if (Subtarget.hasNEON()) {
2560  BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2561  .addReg(SrcReg)
2562  .addReg(SrcReg, getKillRegState(KillSrc));
2563  } else {
2564  BuildMI(MBB, I, DL, get(AArch64::STRQpre))
2565  .addReg(AArch64::SP, RegState::Define)
2566  .addReg(SrcReg, getKillRegState(KillSrc))
2567  .addReg(AArch64::SP)
2568  .addImm(-16);
2569  BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
2570  .addReg(AArch64::SP, RegState::Define)
2571  .addReg(DestReg, RegState::Define)
2572  .addReg(AArch64::SP)
2573  .addImm(16);
2574  }
2575  return;
2576  }
2577 
2578  if (AArch64::FPR64RegClass.contains(DestReg) &&
2579  AArch64::FPR64RegClass.contains(SrcReg)) {
2580  if (Subtarget.hasNEON()) {
2581  DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
2582  &AArch64::FPR128RegClass);
2583  SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
2584  &AArch64::FPR128RegClass);
2585  BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2586  .addReg(SrcReg)
2587  .addReg(SrcReg, getKillRegState(KillSrc));
2588  } else {
2589  BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
2590  .addReg(SrcReg, getKillRegState(KillSrc));
2591  }
2592  return;
2593  }
2594 
2595  if (AArch64::FPR32RegClass.contains(DestReg) &&
2596  AArch64::FPR32RegClass.contains(SrcReg)) {
2597  if (Subtarget.hasNEON()) {
2598  DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
2599  &AArch64::FPR128RegClass);
2600  SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
2601  &AArch64::FPR128RegClass);
2602  BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2603  .addReg(SrcReg)
2604  .addReg(SrcReg, getKillRegState(KillSrc));
2605  } else {
2606  BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2607  .addReg(SrcReg, getKillRegState(KillSrc));
2608  }
2609  return;
2610  }
2611 
2612  if (AArch64::FPR16RegClass.contains(DestReg) &&
2613  AArch64::FPR16RegClass.contains(SrcReg)) {
2614  if (Subtarget.hasNEON()) {
2615  DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
2616  &AArch64::FPR128RegClass);
2617  SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
2618  &AArch64::FPR128RegClass);
2619  BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2620  .addReg(SrcReg)
2621  .addReg(SrcReg, getKillRegState(KillSrc));
2622  } else {
2623  DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
2624  &AArch64::FPR32RegClass);
2625  SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
2626  &AArch64::FPR32RegClass);
2627  BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2628  .addReg(SrcReg, getKillRegState(KillSrc));
2629  }
2630  return;
2631  }
2632 
2633  if (AArch64::FPR8RegClass.contains(DestReg) &&
2634  AArch64::FPR8RegClass.contains(SrcReg)) {
2635  if (Subtarget.hasNEON()) {
2636  DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
2637  &AArch64::FPR128RegClass);
2638  SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
2639  &AArch64::FPR128RegClass);
2640  BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2641  .addReg(SrcReg)
2642  .addReg(SrcReg, getKillRegState(KillSrc));
2643  } else {
2644  DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
2645  &AArch64::FPR32RegClass);
2646  SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
2647  &AArch64::FPR32RegClass);
2648  BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2649  .addReg(SrcReg, getKillRegState(KillSrc));
2650  }
2651  return;
2652  }
2653 
2654  // Copies between GPR64 and FPR64.
2655  if (AArch64::FPR64RegClass.contains(DestReg) &&
2656  AArch64::GPR64RegClass.contains(SrcReg)) {
2657  BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
2658  .addReg(SrcReg, getKillRegState(KillSrc));
2659  return;
2660  }
2661  if (AArch64::GPR64RegClass.contains(DestReg) &&
2662  AArch64::FPR64RegClass.contains(SrcReg)) {
2663  BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
2664  .addReg(SrcReg, getKillRegState(KillSrc));
2665  return;
2666  }
2667  // Copies between GPR32 and FPR32.
2668  if (AArch64::FPR32RegClass.contains(DestReg) &&
2669  AArch64::GPR32RegClass.contains(SrcReg)) {
2670  BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
2671  .addReg(SrcReg, getKillRegState(KillSrc));
2672  return;
2673  }
2674  if (AArch64::GPR32RegClass.contains(DestReg) &&
2675  AArch64::FPR32RegClass.contains(SrcReg)) {
2676  BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
2677  .addReg(SrcReg, getKillRegState(KillSrc));
2678  return;
2679  }
2680 
2681  if (DestReg == AArch64::NZCV) {
2682  assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
2683  BuildMI(MBB, I, DL, get(AArch64::MSR))
2684  .addImm(AArch64SysReg::NZCV)
2685  .addReg(SrcReg, getKillRegState(KillSrc))
2686  .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
2687  return;
2688  }
2689 
2690  if (SrcReg == AArch64::NZCV) {
2691  assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
2692  BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
2693  .addImm(AArch64SysReg::NZCV)
2694  .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
2695  return;
2696  }
2697 
2698  llvm_unreachable("unimplemented reg-to-reg copy");
2699 }
2700 
2701 static void storeRegPairToStackSlot(const TargetRegisterInfo &TRI,
2702  MachineBasicBlock &MBB,
2703  MachineBasicBlock::iterator InsertBefore,
2704  const MCInstrDesc &MCID,
2705  unsigned SrcReg, bool IsKill,
2706  unsigned SubIdx0, unsigned SubIdx1, int FI,
2707  MachineMemOperand *MMO) {
2708  unsigned SrcReg0 = SrcReg;
2709  unsigned SrcReg1 = SrcReg;
2710  if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
2711  SrcReg0 = TRI.getSubReg(SrcReg, SubIdx0);
2712  SubIdx0 = 0;
2713  SrcReg1 = TRI.getSubReg(SrcReg, SubIdx1);
2714  SubIdx1 = 0;
2715  }
2716  BuildMI(MBB, InsertBefore, DebugLoc(), MCID)
2717  .addReg(SrcReg0, getKillRegState(IsKill), SubIdx0)
2718  .addReg(SrcReg1, getKillRegState(IsKill), SubIdx1)
2719  .addFrameIndex(FI)
2720  .addImm(0)
2721  .addMemOperand(MMO);
2722 }
2723 
2724 void AArch64InstrInfo::storeRegToStackSlot(
2725  MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
2726  bool isKill, int FI, const TargetRegisterClass *RC,
2727  const TargetRegisterInfo *TRI) const {
2728  MachineFunction &MF = *MBB.getParent();
2729  MachineFrameInfo &MFI = MF.getFrameInfo();
2730  unsigned Align = MFI.getObjectAlignment(FI);
2731 
2732  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
2733  MachineMemOperand *MMO = MF.getMachineMemOperand(
2734  PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
2735  unsigned Opc = 0;
2736  bool Offset = true;
2737  switch (TRI->getSpillSize(*RC)) {
2738  case 1:
2739  if (AArch64::FPR8RegClass.hasSubClassEq(RC))
2740  Opc = AArch64::STRBui;
2741  break;
2742  case 2:
2743  if (AArch64::FPR16RegClass.hasSubClassEq(RC))
2744  Opc = AArch64::STRHui;
2745  break;
2746  case 4:
2747  if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
2748  Opc = AArch64::STRWui;
2750  MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
2751  else
2752  assert(SrcReg != AArch64::WSP);
2753  } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
2754  Opc = AArch64::STRSui;
2755  break;
2756  case 8:
2757  if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
2758  Opc = AArch64::STRXui;
2760  MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
2761  else
2762  assert(SrcReg != AArch64::SP);
2763  } else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) {
2764  Opc = AArch64::STRDui;
2765  } else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) {
2767  get(AArch64::STPWi), SrcReg, isKill,
2768  AArch64::sube32, AArch64::subo32, FI, MMO);
2769  return;
2770  }
2771  break;
2772  case 16:
2773  if (AArch64::FPR128RegClass.hasSubClassEq(RC))
2774  Opc = AArch64::STRQui;
2775  else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
2776  assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
2777  Opc = AArch64::ST1Twov1d;
2778  Offset = false;
2779  } else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) {
2781  get(AArch64::STPXi), SrcReg, isKill,
2782  AArch64::sube64, AArch64::subo64, FI, MMO);
2783  return;
2784  }
2785  break;
2786  case 24:
2787  if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
2788  assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
2789  Opc = AArch64::ST1Threev1d;
2790  Offset = false;
2791  }
2792  break;
2793  case 32:
2794  if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
2795  assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
2796  Opc = AArch64::ST1Fourv1d;
2797  Offset = false;
2798  } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
2799  assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
2800  Opc = AArch64::ST1Twov2d;
2801  Offset = false;
2802  }
2803  break;
2804  case 48:
2805  if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
2806  assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
2807  Opc = AArch64::ST1Threev2d;
2808  Offset = false;
2809  }
2810  break;
2811  case 64:
2812  if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
2813  assert(Subtarget.hasNEON() && "Unexpected register store without NEON");
2814  Opc = AArch64::ST1Fourv2d;
2815  Offset = false;
2816  }
2817  break;
2818  }
2819  assert(Opc && "Unknown register class");
2820 
2821  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc))
2822  .addReg(SrcReg, getKillRegState(isKill))
2823  .addFrameIndex(FI);
2824 
2825  if (Offset)
2826  MI.addImm(0);
2827  MI.addMemOperand(MMO);
2828 }
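// Example (illustrative): spilling an FPR128 value to a 16-byte spill slot
// selects AArch64::STRQui above and emits roughly
//   str qN, [<frame index>, #0]
// with the frame index rewritten to a real base register and offset later,
// during frame index elimination.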
2829 
2830 static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI,
2831  MachineBasicBlock &MBB,
2832  MachineBasicBlock::iterator InsertBefore,
2833  const MCInstrDesc &MCID,
2834  unsigned DestReg, unsigned SubIdx0,
2835  unsigned SubIdx1, int FI,
2836  MachineMemOperand *MMO) {
2837  unsigned DestReg0 = DestReg;
2838  unsigned DestReg1 = DestReg;
2839  bool IsUndef = true;
2840  if (TargetRegisterInfo::isPhysicalRegister(DestReg)) {
2841  DestReg0 = TRI.getSubReg(DestReg, SubIdx0);
2842  SubIdx0 = 0;
2843  DestReg1 = TRI.getSubReg(DestReg, SubIdx1);
2844  SubIdx1 = 0;
2845  IsUndef = false;
2846  }
2847  BuildMI(MBB, InsertBefore, DebugLoc(), MCID)
2848  .addReg(DestReg0, RegState::Define | getUndefRegState(IsUndef), SubIdx0)
2849  .addReg(DestReg1, RegState::Define | getUndefRegState(IsUndef), SubIdx1)
2850  .addFrameIndex(FI)
2851  .addImm(0)
2852  .addMemOperand(MMO);
2853 }
2854 
2855 void AArch64InstrInfo::loadRegFromStackSlot(
2856  MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
2857  int FI, const TargetRegisterClass *RC,
2858  const TargetRegisterInfo *TRI) const {
2859  MachineFunction &MF = *MBB.getParent();
2860  MachineFrameInfo &MFI = MF.getFrameInfo();
2861  unsigned Align = MFI.getObjectAlignment(FI);
2862  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
2863  MachineMemOperand *MMO = MF.getMachineMemOperand(
2864  PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);
2865 
2866  unsigned Opc = 0;
2867  bool Offset = true;
2868  switch (TRI->getSpillSize(*RC)) {
2869  case 1:
2870  if (AArch64::FPR8RegClass.hasSubClassEq(RC))
2871  Opc = AArch64::LDRBui;
2872  break;
2873  case 2:
2874  if (AArch64::FPR16RegClass.hasSubClassEq(RC))
2875  Opc = AArch64::LDRHui;
2876  break;
2877  case 4:
2878  if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
2879  Opc = AArch64::LDRWui;
2881  MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
2882  else
2883  assert(DestReg != AArch64::WSP);
2884  } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
2885  Opc = AArch64::LDRSui;
2886  break;
2887  case 8:
2888  if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
2889  Opc = AArch64::LDRXui;
2891  MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
2892  else
2893  assert(DestReg != AArch64::SP);
2894  } else if (AArch64::FPR64RegClass.hasSubClassEq(RC)) {
2895  Opc = AArch64::LDRDui;
2896  } else if (AArch64::WSeqPairsClassRegClass.hasSubClassEq(RC)) {
2898  get(AArch64::LDPWi), DestReg, AArch64::sube32,
2899  AArch64::subo32, FI, MMO);
2900  return;
2901  }
2902  break;
2903  case 16:
2904  if (AArch64::FPR128RegClass.hasSubClassEq(RC))
2905  Opc = AArch64::LDRQui;
2906  else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
2907  assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
2908  Opc = AArch64::LD1Twov1d;
2909  Offset = false;
2910  } else if (AArch64::XSeqPairsClassRegClass.hasSubClassEq(RC)) {
2912  get(AArch64::LDPXi), DestReg, AArch64::sube64,
2913  AArch64::subo64, FI, MMO);
2914  return;
2915  }
2916  break;
2917  case 24:
2918  if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
2919  assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
2920  Opc = AArch64::LD1Threev1d;
2921  Offset = false;
2922  }
2923  break;
2924  case 32:
2925  if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
2926  assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
2927  Opc = AArch64::LD1Fourv1d;
2928  Offset = false;
2929  } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
2930  assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
2931  Opc = AArch64::LD1Twov2d;
2932  Offset = false;
2933  }
2934  break;
2935  case 48:
2936  if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
2937  assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
2938  Opc = AArch64::LD1Threev2d;
2939  Offset = false;
2940  }
2941  break;
2942  case 64:
2943  if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
2944  assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
2945  Opc = AArch64::LD1Fourv2d;
2946  Offset = false;
2947  }
2948  break;
2949  }
2950  assert(Opc && "Unknown register class");
2951 
2952  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DebugLoc(), get(Opc))
2953  .addReg(DestReg, getDefRegState(true))
2954  .addFrameIndex(FI);
2955  if (Offset)
2956  MI.addImm(0);
2957  MI.addMemOperand(MMO);
2958 }
2959 
2960 void llvm::emitFrameOffset(MachineBasicBlock &MBB,
2961  MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
2962  unsigned DestReg, unsigned SrcReg, int Offset,
2963  const TargetInstrInfo *TII,
2964  MachineInstr::MIFlag Flag, bool SetNZCV,
2965  bool NeedsWinCFI, bool *HasWinCFI) {
2966  if (DestReg == SrcReg && Offset == 0)
2967  return;
2968 
2969  assert((DestReg != AArch64::SP || Offset % 16 == 0) &&
2970  "SP increment/decrement not 16-byte aligned");
2971 
2972  bool isSub = Offset < 0;
2973  if (isSub)
2974  Offset = -Offset;
2975 
2976  // FIXME: If the offset won't fit in 24-bits, compute the offset into a
2977  // scratch register. If DestReg is a virtual register, use it as the
2978  // scratch register; otherwise, create a new virtual register (to be
2979  // replaced by the scavenger at the end of PEI). That case can be optimized
2980  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
2981  // register can be loaded with offset%8 and the add/sub can use an extending
2982  // instruction with LSL#3.
2983  // Currently the function handles any offsets but generates a poor sequence
2984  // of code.
2985  // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");
2986 
2987  unsigned Opc;
2988  if (SetNZCV)
2989  Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
2990  else
2991  Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
2992  const unsigned MaxEncoding = 0xfff;
2993  const unsigned ShiftSize = 12;
2994  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
2995  while (((unsigned)Offset) >= (1 << ShiftSize)) {
2996  unsigned ThisVal;
2997  if (((unsigned)Offset) > MaxEncodableValue) {
2998  ThisVal = MaxEncodableValue;
2999  } else {
3000  ThisVal = Offset & MaxEncodableValue;
3001  }
3002  assert((ThisVal >> ShiftSize) <= MaxEncoding &&
3003  "Encoding cannot handle value that big");
3004  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
3005  .addReg(SrcReg)
3006  .addImm(ThisVal >> ShiftSize)
3008  .setMIFlag(Flag);
3009 
3010  if (NeedsWinCFI && SrcReg == AArch64::SP && DestReg == AArch64::SP) {
3011  if (HasWinCFI)
3012  *HasWinCFI = true;
3013  BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
3014  .addImm(ThisVal)
3015  .setMIFlag(Flag);
3016  }
3017 
3018  SrcReg = DestReg;
3019  Offset -= ThisVal;
3020  if (Offset == 0)
3021  return;
3022  }
3023  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
3024  .addReg(SrcReg)
3025  .addImm(Offset)
3027  .setMIFlag(Flag);
3028 
3029  if (NeedsWinCFI) {
3030  if ((DestReg == AArch64::FP && SrcReg == AArch64::SP) ||
3031  (SrcReg == AArch64::FP && DestReg == AArch64::SP)) {
3032  if (HasWinCFI)
3033  *HasWinCFI = true;
3034  if (Offset == 0)
3035  BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_SetFP)).
3036  setMIFlag(Flag);
3037  else
3038  BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_AddFP)).
3039  addImm(Offset).setMIFlag(Flag);
3040  } else if (DestReg == AArch64::SP) {
3041  if (HasWinCFI)
3042  *HasWinCFI = true;
3043  BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc)).
3044  addImm(Offset).setMIFlag(Flag);
3045  }
3046  }
3047 }
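// Worked example (illustrative): emitting "SP = SP + 4660" (0x1234) with
// this routine produces two instructions, because 4660 does not fit in a
// single 12-bit immediate:
//   add sp, sp, #1, lsl #12   ; adds 4096
//   add sp, sp, #564          ; adds the remaining 564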
3048 
3049 MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
3050  MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
3051  MachineBasicBlock::iterator InsertPt, int FrameIndex,
3052  LiveIntervals *LIS, VirtRegMap *VRM) const {
3053  // This is a bit of a hack. Consider this instruction:
3054  //
3055  // %0 = COPY %sp; GPR64all:%0
3056  //
3057  // We explicitly chose GPR64all for the virtual register so such a copy might
3058  // be eliminated by RegisterCoalescer. However, that may not be possible, and
3059  // %0 may even spill. We can't spill %sp, and since it is in the GPR64all
3060  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
3061  //
3062  // To prevent that, we are going to constrain the %0 register class here.
3063  //
3064  // <rdar://problem/11522048>
3065  //
3066  if (MI.isFullCopy()) {
3067  unsigned DstReg = MI.getOperand(0).getReg();
3068  unsigned SrcReg = MI.getOperand(1).getReg();
3069  if (SrcReg == AArch64::SP &&
3070  TargetRegisterInfo::isVirtualRegister(DstReg)) {
3071  MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
3072  return nullptr;
3073  }
3074  if (DstReg == AArch64::SP &&
3075  TargetRegisterInfo::isVirtualRegister(SrcReg)) {
3076  MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
3077  return nullptr;
3078  }
3079  }
3080 
3081  // Handle the case where a copy is being spilled or filled but the source
3082  // and destination register class don't match. For example:
3083  //
3084  // %0 = COPY %xzr; GPR64common:%0
3085  //
3086  // In this case we can still safely fold away the COPY and generate the
3087  // following spill code:
3088  //
3089  // STRXui %xzr, %stack.0
3090  //
3091  // This also eliminates spilled cross register class COPYs (e.g. between x and
3092  // d regs) of the same size. For example:
3093  //
3094  // %0 = COPY %1; GPR64:%0, FPR64:%1
3095  //
3096  // will be filled as
3097  //
3098  // LDRDui %0, fi<#0>
3099  //
3100  // instead of
3101  //
3102  // LDRXui %Temp, fi<#0>
3103  // %0 = FMOV %Temp
3104  //
3105  if (MI.isCopy() && Ops.size() == 1 &&
3106  // Make sure we're only folding the explicit COPY defs/uses.
3107  (Ops[0] == 0 || Ops[0] == 1)) {
3108  bool IsSpill = Ops[0] == 0;
3109  bool IsFill = !IsSpill;
3110  const TargetRegisterInfo &TRI = getRegisterInfo();
3111  const MachineRegisterInfo &MRI = MF.getRegInfo();
3112  MachineBasicBlock &MBB = *MI.getParent();
3113  const MachineOperand &DstMO = MI.getOperand(0);
3114  const MachineOperand &SrcMO = MI.getOperand(1);
3115  unsigned DstReg = DstMO.getReg();
3116  unsigned SrcReg = SrcMO.getReg();
3117  // This is slightly expensive to compute for physical regs since
3118  // getMinimalPhysRegClass is slow.
3119  auto getRegClass = [&](unsigned Reg) {
3120  return TargetRegisterInfo::isVirtualRegister(Reg)
3121  ? MRI.getRegClass(Reg)
3122  : TRI.getMinimalPhysRegClass(Reg);
3123  };
3124 
3125  if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
3126  assert(TRI.getRegSizeInBits(*getRegClass(DstReg)) ==
3127  TRI.getRegSizeInBits(*getRegClass(SrcReg)) &&
3128  "Mismatched register size in non subreg COPY");
3129  if (IsSpill)
3130  storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
3131  getRegClass(SrcReg), &TRI);
3132  else
3133  loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex,
3134  getRegClass(DstReg), &TRI);
3135  return &*--InsertPt;
3136  }
3137 
3138  // Handle cases like spilling def of:
3139  //
3140  // %0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%0
3141  //
3142  // where the physical register source can be widened and stored to the full
3143  // virtual reg destination stack slot, in this case producing:
3144  //
3145  // STRXui %xzr, %stack.0
3146  //
3147  if (IsSpill && DstMO.isUndef() &&
3148  TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
3149  assert(SrcMO.getSubReg() == 0 &&
3150  "Unexpected subreg on physical register");
3151  const TargetRegisterClass *SpillRC;
3152  unsigned SpillSubreg;
3153  switch (DstMO.getSubReg()) {
3154  default:
3155  SpillRC = nullptr;
3156  break;
3157  case AArch64::sub_32:
3158  case AArch64::ssub:
3159  if (AArch64::GPR32RegClass.contains(SrcReg)) {
3160  SpillRC = &AArch64::GPR64RegClass;
3161  SpillSubreg = AArch64::sub_32;
3162  } else if (AArch64::FPR32RegClass.contains(SrcReg)) {
3163  SpillRC = &AArch64::FPR64RegClass;
3164  SpillSubreg = AArch64::ssub;
3165  } else
3166  SpillRC = nullptr;
3167  break;
3168  case AArch64::dsub:
3169  if (AArch64::FPR64RegClass.contains(SrcReg)) {
3170  SpillRC = &AArch64::FPR128RegClass;
3171  SpillSubreg = AArch64::dsub;
3172  } else
3173  SpillRC = nullptr;
3174  break;
3175  }
3176 
3177  if (SpillRC)
3178  if (unsigned WidenedSrcReg =
3179  TRI.getMatchingSuperReg(SrcReg, SpillSubreg, SpillRC)) {
3180  storeRegToStackSlot(MBB, InsertPt, WidenedSrcReg, SrcMO.isKill(),
3181  FrameIndex, SpillRC, &TRI);
3182  return &*--InsertPt;
3183  }
3184  }
3185 
3186  // Handle cases like filling use of:
3187  //
3188  // %0:sub_32<def,read-undef> = COPY %1; GPR64:%0, GPR32:%1
3189  //
3190  // where we can load the full virtual reg source stack slot, into the subreg
3191  // destination, in this case producing:
3192  //
3193  // LDRWui %0:sub_32<def,read-undef>, %stack.0
3194  //
3195  if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
3196  const TargetRegisterClass *FillRC;
3197  switch (DstMO.getSubReg()) {
3198  default:
3199  FillRC = nullptr;
3200  break;
3201  case AArch64::sub_32:
3202  FillRC = &AArch64::GPR32RegClass;
3203  break;
3204  case AArch64::ssub:
3205  FillRC = &AArch64::FPR32RegClass;
3206  break;
3207  case AArch64::dsub:
3208  FillRC = &AArch64::FPR64RegClass;
3209  break;
3210  }
3211 
3212  if (FillRC) {
3213  assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) ==
3214  TRI.getRegSizeInBits(*FillRC) &&
3215  "Mismatched regclass size on folded subreg COPY");
3216  loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI);
3217  MachineInstr &LoadMI = *--InsertPt;
3218  MachineOperand &LoadDst = LoadMI.getOperand(0);
3219  assert(LoadDst.getSubReg() == 0 && "unexpected subreg on fill load");
3220  LoadDst.setSubReg(DstMO.getSubReg());
3221  LoadDst.setIsUndef();
3222  return &LoadMI;
3223  }
3224  }
3225  }
3226 
3227  // Cannot fold.
3228  return nullptr;
3229 }
3230 
3231 int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
3232  bool *OutUseUnscaledOp,
3233  unsigned *OutUnscaledOp,
3234  int *EmittableOffset) {
3235  // Set output values in case of early exit.
3236  if (EmittableOffset)
3237  *EmittableOffset = 0;
3238  if (OutUseUnscaledOp)
3239  *OutUseUnscaledOp = false;
3240  if (OutUnscaledOp)
3241  *OutUnscaledOp = 0;
3242 
3243  // Exit early for structured vector spills/fills as they can't take an
3244  // immediate offset.
3245  switch (MI.getOpcode()) {
3246  default:
3247  break;
3248  case AArch64::LD1Twov2d:
3249  case AArch64::LD1Threev2d:
3250  case AArch64::LD1Fourv2d:
3251  case AArch64::LD1Twov1d:
3252  case AArch64::LD1Threev1d:
3253  case AArch64::LD1Fourv1d:
3254  case AArch64::ST1Twov2d:
3255  case AArch64::ST1Threev2d:
3256  case AArch64::ST1Fourv2d:
3257  case AArch64::ST1Twov1d:
3258  case AArch64::ST1Threev1d:
3259  case AArch64::ST1Fourv1d:
3260  return AArch64FrameOffsetCannotUpdate;
3261  }
3262 
3263  // Get the min/max offset and the scale.
3264  unsigned Scale, Width;
3265  int64_t MinOff, MaxOff;
3266  if (!AArch64InstrInfo::getMemOpInfo(MI.getOpcode(), Scale, Width, MinOff,
3267  MaxOff))
3268  llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
3269 
3270  // Construct the complete offset.
3271  const MachineOperand &ImmOpnd =
3272  MI.getOperand(AArch64InstrInfo::getLoadStoreImmIdx(MI.getOpcode()));
3273  Offset += ImmOpnd.getImm() * Scale;
3274 
3275  // If the offset doesn't match the scale, we rewrite the instruction to
3276  // use the unscaled instruction instead. Likewise, if we have a negative
3277  // offset and there is an unscaled op to use.
3278  Optional<unsigned> UnscaledOp =
3279  AArch64InstrInfo::getUnscaledLdSt(MI.getOpcode());
3280  bool useUnscaledOp = UnscaledOp && (Offset % Scale || Offset < 0);
3281  if (useUnscaledOp &&
3282  !AArch64InstrInfo::getMemOpInfo(*UnscaledOp, Scale, Width, MinOff, MaxOff))
3283  llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
3284 
3285  int64_t Remainder = Offset % Scale;
3286  assert(!(Remainder && useUnscaledOp) &&
3287  "Cannot have remainder when using unscaled op");
3288 
3289  assert(MinOff < MaxOff && "Unexpected Min/Max offsets");
3290  int NewOffset = Offset / Scale;
3291  if (MinOff <= NewOffset && NewOffset <= MaxOff)
3292  Offset = Remainder;
3293  else {
3294  NewOffset = NewOffset < 0 ? MinOff : MaxOff;
3295  Offset = Offset - NewOffset * Scale + Remainder;
3296  }
3297 
3298  if (EmittableOffset)
3299  *EmittableOffset = NewOffset;
3300  if (OutUseUnscaledOp)
3301  *OutUseUnscaledOp = useUnscaledOp;
3302  if (OutUnscaledOp && UnscaledOp)
3303  *OutUnscaledOp = *UnscaledOp;
3304 
3305  return AArch64FrameOffsetCanUpdate |
3306  (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
3307 }
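// Worked example (illustrative): for a scaled 64-bit load (AArch64::LDRXui,
// Scale 8) whose combined frame offset works out to 20 bytes, the offset is
// not a multiple of the scale, so the unscaled form LDURXi (Scale 1, range
// -256..255) is selected instead: the emittable offset becomes 20, the
// remaining offset 0, and the result is
// AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal.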
3308 
3309 bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
3310  unsigned FrameReg, int &Offset,
3311  const AArch64InstrInfo *TII) {
3312  unsigned Opcode = MI.getOpcode();
3313  unsigned ImmIdx = FrameRegIdx + 1;
3314 
3315  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
3316  Offset += MI.getOperand(ImmIdx).getImm();
3317  emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
3318  MI.getOperand(0).getReg(), FrameReg, Offset, TII,
3319  MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
3320  MI.eraseFromParent();
3321  Offset = 0;
3322  return true;
3323  }
3324 
3325  int NewOffset;
3326  unsigned UnscaledOp;
3327  bool UseUnscaledOp;
3328  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
3329  &UnscaledOp, &NewOffset);
3330  if (Status & AArch64FrameOffsetCanUpdate) {
3331  if (Status & AArch64FrameOffsetIsLegal)
3332  // Replace the FrameIndex with FrameReg.
3333  MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
3334  if (UseUnscaledOp)
3335  MI.setDesc(TII->get(UnscaledOp));
3336 
3337  MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
3338  return Offset == 0;
3339  }
3340 
3341  return false;
3342 }
3343 
3344 void AArch64InstrInfo::getNoop(MCInst &NopInst) const {
3345  NopInst.setOpcode(AArch64::HINT);
3346  NopInst.addOperand(MCOperand::createImm(0));
3347 }
3348 
3349 // AArch64 supports MachineCombiner.
3350 bool AArch64InstrInfo::useMachineCombiner() const { return true; }
3351 
3352 // True when Opc sets flag
3353 static bool isCombineInstrSettingFlag(unsigned Opc) {
3354  switch (Opc) {
3355  case AArch64::ADDSWrr:
3356  case AArch64::ADDSWri:
3357  case AArch64::ADDSXrr:
3358  case AArch64::ADDSXri:
3359  case AArch64::SUBSWrr:
3360  case AArch64::SUBSXrr:
3361  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3362  case AArch64::SUBSWri:
3363  case AArch64::SUBSXri:
3364  return true;
3365  default:
3366  break;
3367  }
3368  return false;
3369 }
3370 
3371 // 32b Opcodes that can be combined with a MUL
3372 static bool isCombineInstrCandidate32(unsigned Opc) {
3373  switch (Opc) {
3374  case AArch64::ADDWrr:
3375  case AArch64::ADDWri:
3376  case AArch64::SUBWrr:
3377  case AArch64::ADDSWrr:
3378  case AArch64::ADDSWri:
3379  case AArch64::SUBSWrr:
3380  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3381  case AArch64::SUBWri:
3382  case AArch64::SUBSWri:
3383  return true;
3384  default:
3385  break;
3386  }
3387  return false;
3388 }
3389 
3390 // 64b Opcodes that can be combined with a MUL
3391 static bool isCombineInstrCandidate64(unsigned Opc) {
3392  switch (Opc) {
3393  case AArch64::ADDXrr:
3394  case AArch64::ADDXri:
3395  case AArch64::SUBXrr:
3396  case AArch64::ADDSXrr:
3397  case AArch64::ADDSXri:
3398  case AArch64::SUBSXrr:
3399  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
3400  case AArch64::SUBXri:
3401  case AArch64::SUBSXri:
3402  return true;
3403  default:
3404  break;
3405  }
3406  return false;
3407 }
3408 
3409 // FP Opcodes that can be combined with a FMUL
3410 static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {
3411  switch (Inst.getOpcode()) {
3412  default:
3413  break;
3414  case AArch64::FADDSrr:
3415  case AArch64::FADDDrr:
3416  case AArch64::FADDv2f32:
3417  case AArch64::FADDv2f64:
3418  case AArch64::FADDv4f32:
3419  case AArch64::FSUBSrr:
3420  case AArch64::FSUBDrr:
3421  case AArch64::FSUBv2f32:
3422  case AArch64::FSUBv2f64:
3423  case AArch64::FSUBv4f32:
3424  TargetOptions Options = Inst.getParent()->getParent()->getTarget().Options;
3425  return (Options.UnsafeFPMath ||
3426  Options.AllowFPOpFusion == FPOpFusion::Fast);
3427  }
3428  return false;
3429 }
3430 
3431 // Opcodes that can be combined with a MUL
3432 static bool isCombineInstrCandidate(unsigned Opc) {
3433  return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
3434 }
3435 
3436 //
3437 // Utility routine that checks if \param MO is defined by an
3438 // \param CombineOpc instruction in the basic block \param MBB
3439 static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
3440  unsigned CombineOpc, unsigned ZeroReg = 0,
3441  bool CheckZeroReg = false) {
3442  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3443  MachineInstr *MI = nullptr;
3444 
3445  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
3446  MI = MRI.getUniqueVRegDef(MO.getReg());
3447  // And it needs to be in the trace (otherwise, it won't have a depth).
3448  if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
3449  return false;
3450  // Must only used by the user we combine with.
3451  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
3452  return false;
3453 
3454  if (CheckZeroReg) {
3455  assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
3456  MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
3457  MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");
3458  // The third input reg must be zero.
3459  if (MI->getOperand(3).getReg() != ZeroReg)
3460  return false;
3461  }
3462 
3463  return true;
3464 }
3465 
3466 //
3467 // Is \param MO defined by an integer multiply and can be combined?
3468 static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
3469  unsigned MulOpc, unsigned ZeroReg) {
3470  return canCombine(MBB, MO, MulOpc, ZeroReg, true);
3471 }
3472 
3473 //
3474 // Is \param MO defined by a floating-point multiply and can be combined?
3475 static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
3476  unsigned MulOpc) {
3477  return canCombine(MBB, MO, MulOpc);
3478 }
3479 
3480 // TODO: There are many more machine instruction opcodes to match:
3481 // 1. Other data types (integer, vectors)
3482 // 2. Other math / logic operations (xor, or)
3483 // 3. Other forms of the same operation (intrinsics and other variants)
3484 bool AArch64InstrInfo::isAssociativeAndCommutative(
3485  const MachineInstr &Inst) const {
3486  switch (Inst.getOpcode()) {
3487  case AArch64::FADDDrr:
3488  case AArch64::FADDSrr:
3489  case AArch64::FADDv2f32:
3490  case AArch64::FADDv2f64:
3491  case AArch64::FADDv4f32:
3492  case AArch64::FMULDrr:
3493  case AArch64::FMULSrr:
3494  case AArch64::FMULX32:
3495  case AArch64::FMULX64:
3496  case AArch64::FMULXv2f32:
3497  case AArch64::FMULXv2f64:
3498  case AArch64::FMULXv4f32:
3499  case AArch64::FMULv2f32:
3500  case AArch64::FMULv2f64:
3501  case AArch64::FMULv4f32:
3502  return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
3503  default:
3504  return false;
3505  }
3506 }
3507 
3508 /// Find instructions that can be turned into madd.
3509 static bool getMaddPatterns(MachineInstr &Root,
3510  SmallVectorImpl<MachineCombinerPattern> &Patterns) {
3511  unsigned Opc = Root.getOpcode();
3512  MachineBasicBlock &MBB = *Root.getParent();
3513  bool Found = false;
3514 
3515  if (!isCombineInstrCandidate(Opc))
3516  return false;
3517  if (isCombineInstrSettingFlag(Opc)) {
3518  int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
3519  // When NZCV is live bail out.
3520  if (Cmp_NZCV == -1)
3521  return false;
3522  unsigned NewOpc = convertToNonFlagSettingOpc(Root);
3523  // When opcode can't change bail out.
3524  // CHECKME: do we miss any cases for opcode conversion?
3525  if (NewOpc == Opc)
3526  return false;
3527  Opc = NewOpc;
3528  }
3529 
3530  switch (Opc) {
3531  default:
3532  break;
3533  case AArch64::ADDWrr:
3534  assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
3535  "ADDWrr does not have register operands");
3536  if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3537  AArch64::WZR)) {
3539  Found = true;
3540  }
3541  if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
3542  AArch64::WZR)) {
3544  Found = true;
3545  }
3546  break;
3547  case AArch64::ADDXrr:
3548  if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3549  AArch64::XZR)) {
3551  Found = true;
3552  }
3553  if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
3554  AArch64::XZR)) {
3556  Found = true;
3557  }
3558  break;
3559  case AArch64::SUBWrr:
3560  if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3561  AArch64::WZR)) {
3563  Found = true;
3564  }
3565  if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
3566  AArch64::WZR)) {
3568  Found = true;
3569  }
3570  break;
3571  case AArch64::SUBXrr:
3572  if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3573  AArch64::XZR)) {
3575  Found = true;
3576  }
3577  if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
3578  AArch64::XZR)) {
3580  Found = true;
3581  }
3582  break;
3583  case AArch64::ADDWri:
3584  if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3585  AArch64::WZR)) {
3587  Found = true;
3588  }
3589  break;
3590  case AArch64::ADDXri:
3591  if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3592  AArch64::XZR)) {
3594  Found = true;
3595  }
3596  break;
3597  case AArch64::SUBWri:
3598  if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
3599  AArch64::WZR)) {
3601  Found = true;
3602  }
3603  break;
3604  case AArch64::SUBXri:
3605  if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
3606  AArch64::XZR)) {
3608  Found = true;
3609  }
3610  break;
3611  }
3612  return Found;
3613 }
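// For illustration (registers arbitrary), the kind of sequence matched above:
//   mul  w8, w0, w1   // selected as MADDWrrr w8, w0, w1, wzr, which is what
//   add  w9, w8, w2   // canCombineWithMUL looks for in operand 1
// is recorded as a MULADDW_OP1 candidate and can later be rewritten as
//   madd w9, w0, w1, w2   // w9 = w0*w1 + w2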
3614 /// Floating-Point Support
3615 
3616 /// Find instructions that can be turned into madd.
3617 static bool getFMAPatterns(MachineInstr &Root,
3618  SmallVectorImpl<MachineCombinerPattern> &Patterns) {
3619 
3620  if (!isCombineInstrCandidateFP(Root))
3621  return false;
3622 
3623  MachineBasicBlock &MBB = *Root.getParent();
3624  bool Found = false;
3625 
3626  switch (Root.getOpcode()) {
3627  default:
3628  assert(false && "Unsupported FP instruction in combiner\n");
3629  break;
3630  case AArch64::FADDSrr:
3631  assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
3632  "FADDSrr does not have register operands");
3633  if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
3635  Found = true;
3636  } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3637  AArch64::FMULv1i32_indexed)) {
3639  Found = true;
3640  }
3641  if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
3643  Found = true;
3644  } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3645  AArch64::FMULv1i32_indexed)) {
3647  Found = true;
3648  }
3649  break;
3650  case AArch64::FADDDrr:
3651  if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
3653  Found = true;
3654  } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3655  AArch64::FMULv1i64_indexed)) {
3657  Found = true;
3658  }
3659  if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
3661  Found = true;
3662  } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3663  AArch64::FMULv1i64_indexed)) {
3665  Found = true;
3666  }
3667  break;
3668  case AArch64::FADDv2f32:
3669  if (canCombineWithFMUL(MBB, Root.getOperand(1),
3670  AArch64::FMULv2i32_indexed)) {
3672  Found = true;
3673  } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3674  AArch64::FMULv2f32)) {
3676  Found = true;
3677  }
3678  if (canCombineWithFMUL(MBB, Root.getOperand(2),
3679  AArch64::FMULv2i32_indexed)) {
3681  Found = true;
3682  } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3683  AArch64::FMULv2f32)) {
3685  Found = true;
3686  }
3687  break;
3688  case AArch64::FADDv2f64:
3689  if (canCombineWithFMUL(MBB, Root.getOperand(1),
3690  AArch64::FMULv2i64_indexed)) {
3692  Found = true;
3693  } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3694  AArch64::FMULv2f64)) {
3696  Found = true;
3697  }
3698  if (canCombineWithFMUL(MBB, Root.getOperand(2),
3699  AArch64::FMULv2i64_indexed)) {
3701  Found = true;
3702  } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3703  AArch64::FMULv2f64)) {
3705  Found = true;
3706  }
3707  break;
3708  case AArch64::FADDv4f32:
3709  if (canCombineWithFMUL(MBB, Root.getOperand(1),
3710  AArch64::FMULv4i32_indexed)) {
3712  Found = true;
3713  } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3714  AArch64::FMULv4f32)) {
3716  Found = true;
3717  }
3718  if (canCombineWithFMUL(MBB, Root.getOperand(2),
3719  AArch64::FMULv4i32_indexed)) {
3721  Found = true;
3722  } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3723  AArch64::FMULv4f32)) {
3725  Found = true;
3726  }
3727  break;
3728 
3729  case AArch64::FSUBSrr:
3730  if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
3732  Found = true;
3733  }
3734  if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
3736  Found = true;
3737  } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3738  AArch64::FMULv1i32_indexed)) {
3740  Found = true;
3741  }
3742  if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FNMULSrr)) {
3744  Found = true;
3745  }
3746  break;
3747  case AArch64::FSUBDrr:
3748  if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
3750  Found = true;
3751  }
3752  if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
3754  Found = true;
3755  } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3756  AArch64::FMULv1i64_indexed)) {
3758  Found = true;
3759  }
3760  if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FNMULDrr)) {
3762  Found = true;
3763  }
3764  break;
3765  case AArch64::FSUBv2f32:
3766  if (canCombineWithFMUL(MBB, Root.getOperand(2),
3767  AArch64::FMULv2i32_indexed)) {
3769  Found = true;
3770  } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3771  AArch64::FMULv2f32)) {
3773  Found = true;
3774  }
3775  if (canCombineWithFMUL(MBB, Root.getOperand(1),
3776  AArch64::FMULv2i32_indexed)) {
3778  Found = true;
3779  } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3780  AArch64::FMULv2f32)) {
3782  Found = true;
3783  }
3784  break;
3785  case AArch64::FSUBv2f64:
3786  if (canCombineWithFMUL(MBB, Root.getOperand(2),
3787  AArch64::FMULv2i64_indexed)) {
3789  Found = true;
3790  } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3791  AArch64::FMULv2f64)) {
3793  Found = true;
3794  }
3795  if (canCombineWithFMUL(MBB, Root.getOperand(1),
3796  AArch64::FMULv2i64_indexed)) {
3798  Found = true;
3799  } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3800  AArch64::FMULv2f64)) {
3802  Found = true;
3803  }
3804  break;
3805  case AArch64::FSUBv4f32:
3806  if (canCombineWithFMUL(MBB, Root.getOperand(2),
3807  AArch64::FMULv4i32_indexed)) {
3809  Found = true;
3810  } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
3811  AArch64::FMULv4f32)) {
3813  Found = true;
3814  }
3815  if (canCombineWithFMUL(MBB, Root.getOperand(1),
3816  AArch64::FMULv4i32_indexed)) {
3818  Found = true;
3819  } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
3820  AArch64::FMULv4f32)) {
3822  Found = true;
3823  }
3824  break;
3825  }
3826  return Found;
3827 }
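// For illustration (registers arbitrary), a vector case matched above:
//   fmul v0.2s, v1.2s, v2.2s
//   fadd v3.2s, v3.2s, v0.2s   // FADDv2f32 with the FMUL as operand 2
// can later be rewritten into the accumulator form
//   fmla v3.2s, v1.2s, v2.2s   // v3 += v1*v2, lane-wise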
3828 
3829 /// Return true when a code sequence can improve throughput. It
3830 /// should be called only for instructions in loops.
3831 /// \param Pattern - combiner pattern
3832 bool AArch64InstrInfo::isThroughputPattern(
3833  MachineCombinerPattern Pattern) const {
3834  switch (Pattern) {
3835  default:
3836  break;
3871  return true;
3872  } // end switch (Pattern)
3873  return false;
3874 }
3875 /// Return true when there is potentially a faster code sequence for an
3876 /// instruction chain ending in \p Root. All potential patterns are listed in
3877 /// the \p Patterns vector. Patterns should be sorted in priority order since the
3878 /// pattern evaluator stops checking as soon as it finds a faster sequence.
3879 
3880 bool AArch64InstrInfo::getMachineCombinerPatterns(
3881  MachineInstr &Root,
3882  SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
3883  // Integer patterns
3884  if (getMaddPatterns(Root, Patterns))
3885  return true;
3886  // Floating point patterns
3887  if (getFMAPatterns(Root, Patterns))
3888  return true;
3889 
3890  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
3891 }
3892 
3893 enum class FMAInstKind { Default, Indexed, Accumulator };
3894 /// genFusedMultiply - Generate fused multiply instructions.
3895 /// This function supports both integer and floating point instructions.
3896 /// A typical example:
3897 /// F|MUL I=A,B,0
3898 /// F|ADD R,I,C
3899 /// ==> F|MADD R,A,B,C
3900 /// \param MF Containing MachineFunction
3901 /// \param MRI Register information
3902 /// \param TII Target information
3903 /// \param Root is the F|ADD instruction
3904 /// \param [out] InsInstrs is a vector of machine instructions and will
3905 /// contain the generated madd instruction
3906 /// \param IdxMulOpd is index of operand in Root that is the result of
3907 /// the F|MUL. In the example above IdxMulOpd is 1.
3908 /// \param MaddOpc the opcode of the f|madd instruction
3909 /// \param RC Register class of operands
3910 /// \param kind The kind of FMA instruction (addressing mode) to be generated
3911 /// \param ReplacedAddend is the result register from the instruction
3912 /// replacing the non-combined operand, if any.
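///
/// As a rough sketch of the operand order built below (registers arbitrary):
///   Default:     fmadd s0, s1, s2, s3        // mul operands, then addend
///   Indexed:     fmla  v0.2s, v1.2s, v2.s[1] // addend/dest first, then the
///                                            // mul operands and lane index
///   Accumulator: fmla  v0.2s, v1.2s, v2.2s   // addend/dest first, then the
///                                            // mul operands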
3913 static MachineInstr *
3914 genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
3915  const TargetInstrInfo *TII, MachineInstr &Root,
3916  SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
3917  unsigned MaddOpc, const TargetRegisterClass *RC,
3918  FMAInstKind kind = FMAInstKind::Default,
3919  const unsigned *ReplacedAddend = nullptr) {
3920  assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3921 
3922  unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
3923  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
3924  unsigned ResultReg = Root.getOperand(0).getReg();
3925  unsigned SrcReg0 = MUL->getOperand(1).getReg();
3926  bool Src0IsKill = MUL->getOperand(1).isKill();
3927  unsigned SrcReg1 = MUL->getOperand(2).getReg();
3928  bool Src1IsKill = MUL->getOperand(2).isKill();
3929 
3930  unsigned SrcReg2;
3931  bool Src2IsKill;
3932  if (ReplacedAddend) {
3933  // If we just generated a new addend, we must be its only use.
3934  SrcReg2 = *ReplacedAddend;
3935  Src2IsKill = true;
3936  } else {
3937  SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
3938  Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();
3939  }
3940 
3942  MRI.constrainRegClass(ResultReg, RC);
3944  MRI.constrainRegClass(SrcReg0, RC);
3946  MRI.constrainRegClass(SrcReg1, RC);
3948  MRI.constrainRegClass(SrcReg2, RC);
3949 
3950  MachineInstrBuilder MIB;
3951  if (kind == FMAInstKind::Default)
3952  MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3953  .addReg(SrcReg0, getKillRegState(Src0IsKill))
3954  .addReg(SrcReg1, getKillRegState(Src1IsKill))
3955  .addReg(SrcReg2, getKillRegState(Src2IsKill));
3956  else if (kind == FMAInstKind::Indexed)
3957  MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3958  .addReg(SrcReg2, getKillRegState(Src2IsKill))
3959  .addReg(SrcReg0, getKillRegState(Src0IsKill))
3960  .addReg(SrcReg1, getKillRegState(Src1IsKill))
3961  .addImm(MUL->getOperand(3).getImm());
3962  else if (kind == FMAInstKind::Accumulator)
3963  MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
3964  .addReg(SrcReg2, getKillRegState(Src2IsKill))
3965  .addReg(SrcReg0, getKillRegState(Src0IsKill))
3966  .addReg(SrcReg1, getKillRegState(Src1IsKill));
3967  else
3968  assert(false && "Invalid FMA instruction kind \n");
3969  // Insert the MADD (MADD, FMA, FMS, FMLA, FMLS)
3970  InsInstrs.push_back(MIB);
3971  return MUL;
3972 }
3973 
3974 /// genMaddR - Generate madd instruction and combine mul and add using
3975 /// an extra virtual register
3976 /// Example - an ADD intermediate needs to be stored in a register:
3977 /// MUL I=A,B,0
3978 /// ADD R,I,Imm
3979 /// ==> ORR V, ZR, Imm
3980 /// ==> MADD R,A,B,V
3981 /// \param MF Containing MachineFunction
3982 /// \param MRI Register information
3983 /// \param TII Target information
3984 /// \param Root is the ADD instruction
3985 /// \param [out] InsInstrs is a vector of machine instructions and will
3986 /// contain the generated madd instruction
3987 /// \param IdxMulOpd is index of operand in Root that is the result of
3988 /// the MUL. In the example above IdxMulOpd is 1.
3989 /// \param MaddOpc the opcode of the madd instruction
3990 /// \param VR is a virtual register that holds the value of an ADD operand
3991 /// (V in the example above).
3992 /// \param RC Register class of operands
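///
/// For illustration (registers and the immediate are arbitrary):
///   mul w8, w0, w1 ; add w9, w8, #1
/// becomes
///   orr w10, wzr, #1 ; madd w9, w0, w1, w10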
3993 static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
3994  const TargetInstrInfo *TII, MachineInstr &Root,
3995  SmallVectorImpl<MachineInstr *> &InsInstrs,
3996  unsigned IdxMulOpd, unsigned MaddOpc, unsigned VR,
3997  const TargetRegisterClass *RC) {
3998  assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3999 
4000  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
4001  unsigned ResultReg = Root.getOperand(0).getReg();
4002  unsigned SrcReg0 = MUL->getOperand(1).getReg();
4003  bool Src0IsKill = MUL->getOperand(1).isKill();
4004  unsigned SrcReg1 = MUL->getOperand(2).getReg();
4005  bool Src1IsKill = MUL->getOperand(2).isKill();
4006 
4008  MRI.constrainRegClass(ResultReg, RC);
4010  MRI.constrainRegClass(SrcReg0, RC);
4012  MRI.constrainRegClass(SrcReg1, RC);
4014  MRI.constrainRegClass(VR, RC);
4015 
4016  MachineInstrBuilder MIB =
4017  BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
4018  .addReg(SrcReg0, getKillRegState(Src0IsKill))
4019  .addReg(SrcReg1, getKillRegState(Src1IsKill))
4020  .addReg(VR);
4021  // Insert the MADD
4022  InsInstrs.push_back(MIB);
4023  return MUL;
4024 }
4025 
4026 /// When getMachineCombinerPatterns() finds potential patterns,
4027 /// this function generates the instructions that could replace the
4028 /// original code sequence
4029 void AArch64InstrInfo::genAlternativeCodeSequence(
4030  MachineInstr &Root, MachineCombinerPattern Pattern,
4031  SmallVectorImpl<MachineInstr *> &InsInstrs,
4032  SmallVectorImpl<MachineInstr *> &DelInstrs,
4033  DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
4034  MachineBasicBlock &MBB = *Root.getParent();
4035  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4036  MachineFunction &MF = *MBB.getParent();
4037  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
4038 
4039  MachineInstr *MUL;
4040  const TargetRegisterClass *RC;
4041  unsigned Opc;
4042  switch (Pattern) {
4043  default:
4044  // Reassociate instructions.
4045  TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
4046  DelInstrs, InstrIdxForVirtReg);
4047  return;
4050  // MUL I=A,B,0
4051  // ADD R,I,C
4052  // ==> MADD R,A,B,C
4053  // --- Create(MADD);
4054  if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
4055  Opc = AArch64::MADDWrrr;
4056  RC = &AArch64::GPR32RegClass;
4057  } else {
4058  Opc = AArch64::MADDXrrr;
4059  RC = &AArch64::GPR64RegClass;
4060  }
4061  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
4062  break;
4065  // MUL I=A,B,0
4066  // ADD R,C,I
4067  // ==> MADD R,A,B,C
4068  // --- Create(MADD);
4069  if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
4070  Opc = AArch64::MADDWrrr;
4071  RC = &AArch64::GPR32RegClass;
4072  } else {
4073  Opc = AArch64::MADDXrrr;
4074  RC = &AArch64::GPR64RegClass;
4075  }
4076  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
4077  break;
4080  // MUL I=A,B,0
4081  // ADD R,I,Imm
4082  // ==> ORR V, ZR, Imm
4083  // ==> MADD R,A,B,V
4084  // --- Create(MADD);
4085  const TargetRegisterClass *OrrRC;
4086  unsigned BitSize, OrrOpc, ZeroReg;
4087  if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
4088  OrrOpc = AArch64::ORRWri;
4089  OrrRC = &AArch64::GPR32spRegClass;
4090  BitSize = 32;
4091  ZeroReg = AArch64::WZR;
4092  Opc = AArch64::MADDWrrr;
4093  RC = &AArch64::GPR32RegClass;
4094  } else {
4095  OrrOpc = AArch64::ORRXri;
4096  OrrRC = &AArch64::GPR64spRegClass;
4097  BitSize = 64;
4098  ZeroReg = AArch64::XZR;
4099  Opc = AArch64::MADDXrrr;
4100  RC = &AArch64::GPR64RegClass;
4101  }
4102  unsigned NewVR = MRI.createVirtualRegister(OrrRC);
4103  uint64_t Imm = Root.getOperand(2).getImm();
4104 
4105  if (Root.getOperand(3).isImm()) {
4106  unsigned Val = Root.getOperand(3).getImm();
4107  Imm = Imm << Val;
4108  }
4109  uint64_t UImm = SignExtend64(Imm, BitSize);
4110  uint64_t Encoding;
4111  if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
4112  MachineInstrBuilder MIB1 =
4113  BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
4114  .addReg(ZeroReg)
4115  .addImm(Encoding);
4116  InsInstrs.push_back(MIB1);
4117  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
4118  MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
4119  }
4120  break;
4121  }
4124  // MUL I=A,B,0
4125  // SUB R,I, C
4126  // ==> SUB V, 0, C
4127  // ==> MADD R,A,B,V // = -C + A*B
4128  // --- Create(MADD);
4129  const TargetRegisterClass *SubRC;
4130  unsigned SubOpc, ZeroReg;
4131  if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
4132  SubOpc = AArch64::SUBWrr;
4133  SubRC = &AArch64::GPR32spRegClass;
4134  ZeroReg = AArch64::WZR;
4135  Opc = AArch64::MADDWrrr;
4136  RC = &AArch64::GPR32RegClass;
4137  } else {
4138  SubOpc = AArch64::SUBXrr;
4139  SubRC = &AArch64::GPR64spRegClass;
4140  ZeroReg = AArch64::XZR;
4141  Opc = AArch64::MADDXrrr;
4142  RC = &AArch64::GPR64RegClass;
4143  }
4144  unsigned NewVR = MRI.createVirtualRegister(SubRC);
4145  // SUB NewVR, 0, C
4146  MachineInstrBuilder MIB1 =
4147  BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
4148  .addReg(ZeroReg)
4149  .add(Root.getOperand(2));
4150  InsInstrs.push_back(MIB1);
4151  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
4152  MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
4153  break;
4154  }
4157  // MUL I=A,B,0
4158  // SUB R,C,I
4159  // ==> MSUB R,A,B,C (computes C - A*B)
4160  // --- Create(MSUB);
4161  if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
4162  Opc = AArch64::MSUBWrrr;
4163  RC = &AArch64::GPR32RegClass;
4164  } else {
4165  Opc = AArch64::MSUBXrrr;
4166  RC = &AArch64::GPR64RegClass;
4167  }
4168  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
4169  break;
4172  // MUL I=A,B,0
4173  // SUB R,I, Imm
4174  // ==> ORR V, ZR, -Imm
4175  // ==> MADD R,A,B,V // = -Imm + A*B
4176  // --- Create(MADD);
4177  const TargetRegisterClass *OrrRC;
4178  unsigned BitSize, OrrOpc, ZeroReg;
4179  if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
4180  OrrOpc = AArch64::ORRWri;
4181  OrrRC = &AArch64::GPR32spRegClass;
4182  BitSize = 32;
4183  ZeroReg = AArch64::WZR;
4184  Opc = AArch64::MADDWrrr;
4185  RC = &AArch64::GPR32RegClass;
4186  } else {
4187  OrrOpc = AArch64::ORRXri;
4188  OrrRC = &AArch64::GPR64spRegClass;
4189  BitSize = 64;
4190  ZeroReg = AArch64::XZR;
4191  Opc = AArch64::MADDXrrr;
4192  RC = &AArch64::GPR64RegClass;
4193  }
4194  unsigned NewVR = MRI.createVirtualRegister(OrrRC);
4195  uint64_t Imm = Root.getOperand(2).getImm();
4196  if (Root.getOperand(3).isImm()) {
4197  unsigned Val = Root.getOperand(3).getImm();
4198  Imm = Imm << Val;
4199  }
4200  uint64_t UImm = SignExtend64(-Imm, BitSize);
4201  uint64_t Encoding;
4202  if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
4203  MachineInstrBuilder MIB1 =
4204  BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
4205  .addReg(ZeroReg)
4206  .addImm(Encoding);
4207  InsInstrs.push_back(MIB1);
4208  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
4209  MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
4210  }
4211  break;
4212  }
4213  // Floating Point Support
4216  // MUL I=A,B,0
4217  // ADD R,I,C
4218  // ==> MADD R,A,B,C
4219  // --- Create(MADD);
4220  if (Pattern == MachineCombinerPattern::FMULADDS_OP1) {
4221  Opc = AArch64::FMADDSrrr;
4222  RC = &AArch64::FPR32RegClass;
4223  } else {
4224  Opc = AArch64::FMADDDrrr;
4225  RC = &AArch64::FPR64RegClass;
4226  }
4227  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
4228  break;
4231  // FMUL I=A,B,0
4232  // FADD R,C,I
4233  // ==> FMADD R,A,B,C
4234  // --- Create(FMADD);
4235  if (Pattern == MachineCombinerPattern::FMULADDS_OP2) {
4236  Opc = AArch64::FMADDSrrr;
4237  RC = &AArch64::FPR32RegClass;
4238  } else {
4239  Opc = AArch64::FMADDDrrr;
4240  RC = &AArch64::FPR64RegClass;
4241  }
4242  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
4243  break;
4244 
4246  Opc = AArch64::FMLAv1i32_indexed;
4247  RC = &AArch64::FPR32RegClass;
4248  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4250  break;
4252  Opc = AArch64::FMLAv1i32_indexed;
4253  RC = &AArch64::FPR32RegClass;
4254  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4256  break;
4257 
4259  Opc = AArch64::FMLAv1i64_indexed;
4260  RC = &AArch64::FPR64RegClass;
4261  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4263  break;
4265  Opc = AArch64::FMLAv1i64_indexed;
4266  RC = &AArch64::FPR64RegClass;
4267  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4269  break;
4270 
4273  RC = &AArch64::FPR64RegClass;
4275  Opc = AArch64::FMLAv2i32_indexed;
4276  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4278  } else {
4279  Opc = AArch64::FMLAv2f32;
4280  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4282  }
4283  break;
4286  RC = &AArch64::FPR64RegClass;
4288  Opc = AArch64::FMLAv2i32_indexed;
4289  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4291  } else {
4292  Opc = AArch64::FMLAv2f32;
4293  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4295  }
4296  break;
4297 
4300  RC = &AArch64::FPR128RegClass;
4302  Opc = AArch64::FMLAv2i64_indexed;
4303  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4305  } else {
4306  Opc = AArch64::FMLAv2f64;
4307  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4309  }
4310  break;
4313  RC = &AArch64::FPR128RegClass;
4315  Opc = AArch64::FMLAv2i64_indexed;
4316  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4318  } else {
4319  Opc = AArch64::FMLAv2f64;
4320  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4322  }
4323  break;
4324 
4327  RC = &AArch64::FPR128RegClass;
4329  Opc = AArch64::FMLAv4i32_indexed;
4330  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4332  } else {
4333  Opc = AArch64::FMLAv4f32;
4334  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4336  }
4337  break;
4338 
4341  RC = &AArch64::FPR128RegClass;
4343  Opc = AArch64::FMLAv4i32_indexed;
4344  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4346  } else {
4347  Opc = AArch64::FMLAv4f32;
4348  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4350  }
4351  break;
4352 
4355  // FMUL I=A,B,0
4356  // FSUB R,I,C
4357  // ==> FNMSUB R,A,B,C // = -C + A*B
4358  // --- Create(FNMSUB);
4359  if (Pattern == MachineCombinerPattern::FMULSUBS_OP1) {
4360  Opc = AArch64::FNMSUBSrrr;
4361  RC = &AArch64::FPR32RegClass;
4362  } else {
4363  Opc = AArch64::FNMSUBDrrr;
4364  RC = &AArch64::FPR64RegClass;
4365  }
4366  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
4367  break;
4368  }
4369 
4372  // FNMUL I=A,B,0
4373  // FSUB R,I,C
4374  // ==> FNMADD R,A,B,C // = -A*B - C
4375  // --- Create(FNMADD);
4376  if (Pattern == MachineCombinerPattern::FNMULSUBS_OP1) {
4377  Opc = AArch64::FNMADDSrrr;
4378  RC = &AArch64::FPR32RegClass;
4379  } else {
4380  Opc = AArch64::FNMADDDrrr;
4381  RC = &AArch64::FPR64RegClass;
4382  }
4383  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
4384  break;
4385  }
4386 
4389  // FMUL I=A,B,0
4390  // FSUB R,C,I
4391  // ==> FMSUB R,A,B,C (computes C - A*B)
4392  // --- Create(FMSUB);
4393  if (Pattern == MachineCombinerPattern::FMULSUBS_OP2) {
4394  Opc = AArch64::FMSUBSrrr;
4395  RC = &AArch64::FPR32RegClass;
4396  } else {
4397  Opc = AArch64::FMSUBDrrr;
4398  RC = &AArch64::FPR64RegClass;
4399  }
4400  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
4401  break;
4402  }
4403 
4405  Opc = AArch64::FMLSv1i32_indexed;
4406  RC = &AArch64::FPR32RegClass;
4407  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4409  break;
4410 
4412  Opc = AArch64::FMLSv1i64_indexed;
4413  RC = &AArch64::FPR64RegClass;
4414  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4416  break;
4417 
4420  RC = &AArch64::FPR64RegClass;
4422  Opc = AArch64::FMLSv2i32_indexed;
4423  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4425  } else {
4426  Opc = AArch64::FMLSv2f32;
4427  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4429  }
4430  break;
4431 
4434  RC = &AArch64::FPR128RegClass;
4436  Opc = AArch64::FMLSv2i64_indexed;
4437  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4439  } else {
4440  Opc = AArch64::FMLSv2f64;
4441  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4443  }
4444  break;
4445 
4448  RC = &AArch64::FPR128RegClass;
4450  Opc = AArch64::FMLSv4i32_indexed;
4451  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4453  } else {
4454  Opc = AArch64::FMLSv4f32;
4455  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
4457  }
4458  break;
4461  RC = &AArch64::FPR64RegClass;
4462  unsigned NewVR = MRI.createVirtualRegister(RC);
4463  MachineInstrBuilder MIB1 =
4464  BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f32), NewVR)
4465  .add(Root.getOperand(2));
4466  InsInstrs.push_back(MIB1);
4467  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
4469  Opc = AArch64::FMLAv2i32_indexed;
4470  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4471  FMAInstKind::Indexed, &NewVR);
4472  } else {
4473  Opc = AArch64::FMLAv2f32;
4474  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4475  FMAInstKind::Accumulator, &NewVR);
4476  }
4477  break;
4478  }
4481  RC = &AArch64::FPR128RegClass;
4482  unsigned NewVR = MRI.createVirtualRegister(RC);
4483  MachineInstrBuilder MIB1 =
4484  BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv4f32), NewVR)
4485  .add(Root.getOperand(2));
4486  InsInstrs.push_back(MIB1);
4487  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
4489  Opc = AArch64::FMLAv4i32_indexed;
4490  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4491  FMAInstKind::Indexed, &NewVR);
4492  } else {
4493  Opc = AArch64::FMLAv4f32;
4494  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4495  FMAInstKind::Accumulator, &NewVR);
4496  }
4497  break;
4498  }
4501  RC = &AArch64::FPR128RegClass;
4502  unsigned NewVR = MRI.createVirtualRegister(RC);
4503  MachineInstrBuilder MIB1 =
4504  BuildMI(MF, Root.getDebugLoc(), TII->get(AArch64::FNEGv2f64), NewVR)
4505  .add(Root.getOperand(2));
4506  InsInstrs.push_back(MIB1);
4507  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
4509  Opc = AArch64::FMLAv2i64_indexed;
4510  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4511  FMAInstKind::Indexed, &NewVR);
4512  } else {
4513  Opc = AArch64::FMLAv2f64;
4514  MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
4515  FMAInstKind::Accumulator, &NewVR);
4516  }
4517  break;
4518  }
4519  } // end switch (Pattern)
4520  // Record MUL and ADD/SUB for deletion
4521  DelInstrs.push_back(MUL);
4522  DelInstrs.push_back(&Root);
4523 }
4524 
4525 /// Replace csincr-branch sequence by simple conditional branch
4526 ///
4527 /// Examples:
4528 /// 1. \code
4529 /// csinc w9, wzr, wzr, <condition code>
4530 /// tbnz w9, #0, 0x44
4531 /// \endcode
4532 /// to
4533 /// \code
4534 /// b.<inverted condition code>
4535 /// \endcode
4536 ///
4537 /// 2. \code
4538 /// csinc w9, wzr, wzr, <condition code>
4539 /// tbz w9, #0, 0x44
4540 /// \endcode
4541 /// to
4542 /// \code
4543 /// b.<condition code>
4544 /// \endcode
4545 ///
4546 /// Replace compare and branch sequence by TBZ/TBNZ instruction when the
4547 /// compare's constant operand is power of 2.
4548 ///
4549 /// Examples:
4550 /// \code
4551 /// and w8, w8, #0x400
4552 /// cbnz w8, L1
4553 /// \endcode
4554 /// to
4555 /// \code
4556 /// tbnz w8, #10, L1
4557 /// \endcode
4558 ///
4559 /// \param MI Conditional Branch
4560 /// \return True when the simple conditional branch is generated
4561 ///
4562 bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
4563  bool IsNegativeBranch = false;
4564  bool IsTestAndBranch = false;
4565  unsigned TargetBBInMI = 0;
4566  switch (MI.getOpcode()) {
4567  default:
4568  llvm_unreachable("Unknown branch instruction?");
4569  case AArch64::Bcc:
4570  return false;
4571  case AArch64::CBZW:
4572  case AArch64::CBZX:
4573  TargetBBInMI = 1;
4574  break;
4575  case AArch64::CBNZW:
4576  case AArch64::CBNZX:
4577  TargetBBInMI = 1;
4578  IsNegativeBranch = true;
4579  break;
4580  case AArch64::TBZW:
4581  case AArch64::TBZX:
4582  TargetBBInMI = 2;
4583  IsTestAndBranch = true;
4584  break;
4585  case AArch64::TBNZW:
4586  case AArch64::TBNZX:
4587  TargetBBInMI = 2;
4588  IsNegativeBranch = true;
4589  IsTestAndBranch = true;
4590  break;
4591  }
4592  // So we increment a zero register and test for bits other
4593  // than bit 0? Conservatively bail out in case the verifier
4594  // missed this case.
4595  if (IsTestAndBranch && MI.getOperand(1).getImm())
4596  return false;
4597 
4598  // Find Definition.
4599  assert(MI.getParent() && "Incomplete machine instruction\n");
4600  MachineBasicBlock *MBB = MI.getParent();
4601  MachineFunction *MF = MBB->getParent();
4602  MachineRegisterInfo *MRI = &MF->getRegInfo();
4603  unsigned VReg = MI.getOperand(0).getReg();
4604  if (!TargetRegisterInfo::isVirtualRegister(VReg))
4605  return false;
4606 
4607  MachineInstr *DefMI = MRI->getVRegDef(VReg);
4608 
4609  // Look through COPY instructions to find definition.
4610  while (DefMI->isCopy()) {
4611  unsigned CopyVReg = DefMI->getOperand(1).getReg();
4612  if (!MRI->hasOneNonDBGUse(CopyVReg))
4613  return false;
4614  if (!MRI->hasOneDef(CopyVReg))
4615  return false;
4616  DefMI = MRI->getVRegDef(CopyVReg);
4617  }
4618 
4619  switch (DefMI->getOpcode()) {
4620  default:
4621  return false;
4622  // Fold AND into a TBZ/TBNZ if constant operand is power of 2.
4623  case AArch64::ANDWri:
4624  case AArch64::ANDXri: {
4625  if (IsTestAndBranch)
4626  return false;
4627  if (DefMI->getParent() != MBB)
4628  return false;
4629  if (!MRI->hasOneNonDBGUse(VReg))
4630  return false;
4631 
4632  bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
4633  uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
4634  DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64);
4635  if (!isPowerOf2_64(Mask))
4636  return false;
4637 
4638  MachineOperand &MO = DefMI->getOperand(1);
4639  unsigned NewReg = MO.getReg();
4640  if (!TargetRegisterInfo::isVirtualRegister(NewReg))
4641  return false;
4642 
4643  assert(!MRI->def_empty(NewReg) && "Register must be defined.");
4644 
4645  MachineBasicBlock &RefToMBB = *MBB;
4646  MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
4647  DebugLoc DL = MI.getDebugLoc();
4648  unsigned Imm = Log2_64(Mask);
4649  unsigned Opc = (Imm < 32)
4650  ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
4651  : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
4652  MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc))
4653  .addReg(NewReg)
4654  .addImm(Imm)
4655  .addMBB(TBB);
4656  // The register now lives on into the new TBZ/TBNZ, so don't mark it killed.
4657  MO.setIsKill(false);
4658 
4659  // For immediates smaller than 32, we need to use the 32-bit
4660  // variant (W) in all cases; the 64-bit variant cannot
4661  // encode them.
4662  // Therefore, if the input register is 64-bit, we need to take its
4663  // 32-bit sub-register.
4664  if (!Is32Bit && Imm < 32)
4665  NewMI->getOperand(0).setSubReg(AArch64::sub_32);
4666  MI.eraseFromParent();
4667  return true;
4668  }
4669  // Look for CSINC
4670  case AArch64::CSINCWr:
4671  case AArch64::CSINCXr: {
4672  if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
4673  DefMI->getOperand(2).getReg() == AArch64::WZR) &&
4674  !(DefMI->getOperand(1).getReg() == AArch64::XZR &&
4675  DefMI->getOperand(2).getReg() == AArch64::XZR))
4676  return false;
4677 
4678  if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
4679  return false;
4680 
4682  // Convert only when the condition code is not modified between
4683  // the CSINC and the branch. The CC may be used by other
4684  // instructions in between.
4686  return false;
4687  MachineBasicBlock &RefToMBB = *MBB;
4688  MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
4689  DebugLoc DL = MI.getDebugLoc();
4690  if (IsNegativeBranch)
4691  CC = AArch64CC::getInvertedCondCode(CC);
4692  BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
4693  MI.eraseFromParent();
4694  return true;
4695  }
4696  }
4697 }
4698 
4699 std::pair<unsigned, unsigned>
4700 AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
4701  const unsigned Mask = AArch64II::MO_FRAGMENT;
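 // For example (illustrative), a flag word of (MO_PAGEOFF | MO_NC) splits
 // into the direct fragment part MO_PAGEOFF and the bitmask part MO_NC.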
4702  return std::make_pair(TF & Mask, TF & ~Mask);
4703 }
4704 
4707  using namespace AArch64II;
4708 
4709  static const std::pair<unsigned, const char *> TargetFlags[] = {
4710  {MO_PAGE, "aarch64-page"}, {MO_PAGEOFF, "aarch64-pageoff"},
4711  {MO_G3, "aarch64-g3"}, {MO_G2, "aarch64-g2"},
4712  {MO_G1, "aarch64-g1"}, {MO_G0, "aarch64-g0"},
4713  {MO_HI12, "aarch64-hi12"}};
4714  return makeArrayRef(TargetFlags);
4715 }
4716 
4719  using namespace AArch64II;
4720 
4721  static const std::pair<unsigned, const char *> TargetFlags[] = {
4722  {MO_COFFSTUB, "aarch64-coffstub"},
4723  {MO_GOT, "aarch64-got"}, {MO_NC, "aarch64-nc"},
4724  {MO_S, "aarch64-s"}, {MO_TLS, "aarch64-tls"},
4725  {MO_DLLIMPORT, "aarch64-dllimport"}};
4726  return makeArrayRef(TargetFlags);
4727 }
4728 
4731  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
4732  {{MOSuppressPair, "aarch64-suppress-pair"},
4733  {MOStridedAccess, "aarch64-strided-access"}};
4734  return makeArrayRef(TargetFlags);
4735 }
4736 
4737 /// Constants defining how certain sequences should be outlined.
4738 /// This encompasses how an outlined function should be called, and what kind of
4739 /// frame should be emitted for that outlined function.
4740 ///
4741 /// \p MachineOutlinerDefault implies that the function should be called with
4742 /// a save and restore of LR to the stack.
4743 ///
4744 /// That is,
4745 ///
4746 /// I1 Save LR OUTLINED_FUNCTION:
4747 /// I2 --> BL OUTLINED_FUNCTION I1
4748 /// I3 Restore LR I2
4749 /// I3
4750 /// RET
4751 ///
4752 /// * Call construction overhead: 3 (save + BL + restore)
4753 /// * Frame construction overhead: 1 (ret)
4754 /// * Requires stack fixups? Yes
4755 ///
4756 /// \p MachineOutlinerTailCall implies that the function is being created from
4757 /// a sequence of instructions ending in a return.
4758 ///
4759 /// That is,
4760 ///
4761 /// I1 OUTLINED_FUNCTION:
4762 /// I2 --> B OUTLINED_FUNCTION I1
4763 /// RET I2
4764 /// RET
4765 ///
4766 /// * Call construction overhead: 1 (B)
4767 /// * Frame construction overhead: 0 (Return included in sequence)
4768 /// * Requires stack fixups? No
4769 ///
4770 /// \p MachineOutlinerNoLRSave implies that the function should be called using
4771 /// a BL instruction, but doesn't require LR to be saved and restored. This
4772 /// happens when LR is known to be dead.
4773 ///
4774 /// That is,
4775 ///
4776 /// I1 OUTLINED_FUNCTION:
4777 /// I2 --> BL OUTLINED_FUNCTION I1
4778 /// I3 I2
4779 /// I3
4780 /// RET
4781 ///
4782 /// * Call construction overhead: 1 (BL)
4783 /// * Frame construction overhead: 1 (RET)
4784 /// * Requires stack fixups? No
4785 ///
4786 /// \p MachineOutlinerThunk implies that the function is being created from
4787 /// a sequence of instructions ending in a call. The outlined function is
4788 /// called with a BL instruction, and the outlined function tail-calls the
4789 /// original call destination.
4790 ///
4791 /// That is,
4792 ///
4793 /// I1 OUTLINED_FUNCTION:
4794 /// I2 --> BL OUTLINED_FUNCTION I1
4795 /// BL f I2
4796 /// B f
4797 /// * Call construction overhead: 1 (BL)
4798 /// * Frame construction overhead: 0
4799 /// * Requires stack fixups? No
4800 ///
4801 /// \p MachineOutlinerRegSave implies that the function should be called with a
4802 /// save and restore of LR to an available register. This allows us to avoid
4803 /// stack fixups. Note that this outlining variant is compatible with the
4804 /// NoLRSave case.
4805 ///
4806 /// That is,
4807 ///
4808 /// I1 Save LR OUTLINED_FUNCTION:
4809 /// I2 --> BL OUTLINED_FUNCTION I1
4810 /// I3 Restore LR I2
4811 /// I3
4812 /// RET
4813 ///
4814 /// * Call construction overhead: 3 (save + BL + restore)
4815 /// * Frame construction overhead: 1 (ret)
4816 /// * Requires stack fixups? No
4817 enum MachineOutlinerClass {
4818  MachineOutlinerDefault, /// Emit a save, restore, call, and return.
4819  MachineOutlinerTailCall, /// Only emit a branch.
4820  MachineOutlinerNoLRSave, /// Emit a call and return.
4821  MachineOutlinerThunk, /// Emit a call and tail-call.
4822  MachineOutlinerRegSave /// Same as default, but save to a register.
4823 };
4824 
4825 enum MachineOutlinerMBBFlags {
4826  LRUnavailableSomewhere = 0x2,
4827  HasCalls = 0x4,
4828  UnsafeRegsDead = 0x8
4829 };
4830 
4831 unsigned
4832 AArch64InstrInfo::findRegisterToSaveLRTo(const outliner::Candidate &C) const {
4833  assert(C.LRUWasSet && "LRU wasn't set?");
4834  MachineFunction *MF = C.getMF();
4835  const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>(
4836  MF->getSubtarget().getRegisterInfo());
4837 
4838  // Check if there is an available register across the sequence that we can
4839  // use.
4840  for (unsigned Reg : AArch64::GPR64RegClass) {
4841  if (!ARI->isReservedReg(*MF, Reg) &&
4842  Reg != AArch64::LR && // LR is not reserved, but don't use it.
4843  Reg != AArch64::X16 && // X16 is not guaranteed to be preserved.
4844  Reg != AArch64::X17 && // Ditto for X17.
4846  return Reg;
4847  }
4848 
4849  // No suitable register. Return 0.
4850  return 0u;
4851 }
4852 
4854 outliner::OutlinedFunction AArch64InstrInfo::getOutliningCandidateInfo(
4855  std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
4856  outliner::Candidate &FirstCand = RepeatedSequenceLocs[0];
4857  unsigned SequenceSize =
4858  std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0,
4859  [this](unsigned Sum, const MachineInstr &MI) {
4860  return Sum + getInstSizeInBytes(MI);
4861  });
4862 
4863  // Properties about candidate MBBs that hold for all of them.
4864  unsigned FlagsSetInAll = 0xF;
4865 
4866  // Compute liveness information for each candidate, and set FlagsSetInAll.
4867  const TargetRegisterInfo &TRI = getRegisterInfo();
4868  std::for_each(RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
4869  [&FlagsSetInAll](outliner::Candidate &C) {
4870  FlagsSetInAll &= C.Flags;
4871  });
4872 
4873  // According to the AArch64 Procedure Call Standard, the following are
4874  // undefined on entry/exit from a function call:
4875  //
4876  // * Registers x16, x17, (and thus w16, w17)
4877  // * Condition codes (and thus the NZCV register)
4878  //
4879  // Because of this, we can't outline any sequence of instructions where
4880  // one of these registers is live into/across it. Thus, we need to
4881  // delete those candidates.
4884  auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) {
4885  // If the unsafe registers in this block are all dead, then we don't need
4886  // to compute liveness here.
4887  if (C.Flags & UnsafeRegsDead)
4888  return false;
4889  C.initLRU(TRI);
4890  LiveRegUnits LRU = C.LRU;
4891  return (!LRU.available(AArch64::W16) || !LRU.available(AArch64::W17) ||
4892  !LRU.available(AArch64::NZCV));
4893  };
4894 
4895  // Are there any candidates where those registers are live?
4896  if (!(FlagsSetInAll & UnsafeRegsDead)) {
4897  // Erase every candidate that violates the restrictions above. (It could be
4898  // true that we have viable candidates, so it's not worth bailing out in
4899  // the case that, say, 1 out of 20 candidates violates the restrictions.)
4900  RepeatedSequenceLocs.erase(std::remove_if(RepeatedSequenceLocs.begin(),
4901  RepeatedSequenceLocs.end(),
4902  CantGuaranteeValueAcrossCall),
4903  RepeatedSequenceLocs.end());
4904 
4905  // If the sequence doesn't have enough candidates left, then we're done.
4906  if (RepeatedSequenceLocs.size() < 2)
4907  return outliner::OutlinedFunction();
4908  }
4909 
4910  // At this point, we have only "safe" candidates to outline. Figure out
4911  // frame + call instruction information.
4912 
4913  unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode();
4914 
4915  // Helper lambda which sets call information for every candidate.
4916  auto SetCandidateCallInfo =
4917  [&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) {
4918  for (outliner::Candidate &C : RepeatedSequenceLocs)
4919  C.setCallInfo(CallID, NumBytesForCall);
4920  };
4921 
4922  unsigned FrameID = MachineOutlinerDefault;
4923  unsigned NumBytesToCreateFrame = 4;
4924 
4925  bool HasBTI = any_of(RepeatedSequenceLocs, [](outliner::Candidate &C) {
4926  return C.getMF()->getFunction().hasFnAttribute("branch-target-enforcement");
4927  });
4928 
4929  // Returns true if an instruction is safe to fix up, false otherwise.
4930  auto IsSafeToFixup = [this, &TRI](MachineInstr &MI) {
4931  if (MI.isCall())
4932  return true;
4933 
4934  if (!MI.modifiesRegister(AArch64::SP, &TRI) &&
4935  !MI.readsRegister(AArch64::SP, &TRI))
4936  return true;
4937 
4938  // Any modification of SP will break our code to save/restore LR.
4939  // FIXME: We could handle some instructions which add a constant
4940  // offset to SP, with a bit more work.
4941  if (MI.modifiesRegister(AArch64::SP, &TRI))
4942  return false;
4943 
4944  // At this point, we have a stack instruction that we might need to
4945  // fix up. We'll handle it if it's a load or store.
4946  if (MI.mayLoadOrStore()) {
4947  const MachineOperand *Base; // Filled with the base operand of MI.
4948  int64_t Offset; // Filled with the offset of MI.
4949 
4950  // Does it allow us to offset the base operand and is the base the
4951  // register SP?
4952  if (!getMemOperandWithOffset(MI, Base, Offset, &TRI) || !Base->isReg() ||
4953  Base->getReg() != AArch64::SP)
4954  return false;
4955 
4956  // Find the minimum/maximum offset for this instruction and check
4957  // if fixing it up would be in range.
4958  int64_t MinOffset,
4959  MaxOffset; // Unscaled offsets for the instruction.
4960  unsigned Scale; // The scale to multiply the offsets by.
4961  unsigned DummyWidth;
4962  getMemOpInfo(MI.getOpcode(), Scale, DummyWidth, MinOffset, MaxOffset);
4963 
4964  Offset += 16; // Update the offset to what it would be if we outlined.
4965  if (Offset < MinOffset * Scale || Offset > MaxOffset * Scale)
4966  return false;
4967 
4968  // It's in range, so we can outline it.
4969  return true;
4970  }
4971 
4972  // FIXME: Add handling for instructions like "add x0, sp, #8".
4973 
4974  // We can't fix it up, so don't outline it.
4975  return false;
4976  };
4977 
4978  // True if it's possible to fix up each stack instruction in this sequence.
4979  // Important for frames/call variants that modify the stack.
4980  bool AllStackInstrsSafe = std::all_of(
4981  FirstCand.front(), std::next(FirstCand.back()), IsSafeToFixup);
4982 
4983  // If the last instruction in any candidate is a terminator, then we should
4984  // tail call all of the candidates.
4985  if (RepeatedSequenceLocs[0].back()->isTerminator()) {
4986  FrameID = MachineOutlinerTailCall;
4987  NumBytesToCreateFrame = 0;
4988  SetCandidateCallInfo(MachineOutlinerTailCall, 4);
4989  }
4990 
4991  else if (LastInstrOpcode == AArch64::BL ||
4992  (LastInstrOpcode == AArch64::BLR && !HasBTI)) {
4993  // FIXME: Do we need to check if the code after this uses the value of LR?
4994  FrameID = MachineOutlinerThunk;
4995  NumBytesToCreateFrame = 0;
4996  SetCandidateCallInfo(MachineOutlinerThunk, 4);
4997  }
4998 
4999  else {
5000  // We need to decide how to emit calls + frames. We can always emit the same
5001  // frame if we don't need to save to the stack. If we have to save to the
5002  // stack, then we need a different frame.
5003  unsigned NumBytesNoStackCalls = 0;
5004  std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
5005 
5006  for (outliner::Candidate &C : RepeatedSequenceLocs) {
5007  C.initLRU(TRI);
5008 
5009  // Is LR available? If so, we don't need a save.
5010  if (C.LRU.available(AArch64::LR)) {
5011  NumBytesNoStackCalls += 4;
5013  CandidatesWithoutStackFixups.push_back(C);
5014  }
5015 
5016  // Is an unused register available? If so, we won't modify the stack, so
5017  // we can outline with the same frame type as those that don't save LR.
5018  else if (findRegisterToSaveLRTo(C)) {
5019  NumBytesNoStackCalls += 12;
5021  CandidatesWithoutStackFixups.push_back(C);
5022  }
5023 
5024  // Is SP used in the sequence at all? If not, we don't have to modify
5025  // the stack, so we are guaranteed to get the same frame.
5026  else if (C.UsedInSequence.available(AArch64::SP)) {
5027  NumBytesNoStackCalls += 12;
5029  CandidatesWithoutStackFixups.push_back(C);
5030  }
5031 
5032  // If we outline this, we need to modify the stack. Pretend we don't
5033  // outline this by saving all of its bytes.
5034  else {
5035  NumBytesNoStackCalls += SequenceSize;
5036  }
5037  }
5038 
5039  // If there are no places where we have to save LR, then note that we
5040  // don't have to update the stack. Otherwise, give every candidate the
5041  // default call type, as long as it's safe to do so.
5042  if (!AllStackInstrsSafe ||
5043  NumBytesNoStackCalls <= RepeatedSequenceLocs.size() * 12) {
5044  RepeatedSequenceLocs = CandidatesWithoutStackFixups;
5045  FrameID = MachineOutlinerNoLRSave;
5046  } else {
5047  SetCandidateCallInfo(MachineOutlinerDefault, 12);
5048  }
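 // Worked example of the decision above (numbers illustrative): with three
 // candidates where LR is free in two (4 bytes of call each) and the third
 // can save LR to a spare register (12 bytes), NumBytesNoStackCalls is
 // 4 + 4 + 12 = 20 <= 3 * 12 = 36, so the no-stack-fixup route is taken and
 // FrameID becomes MachineOutlinerNoLRSave.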
5049 
5050  // If we dropped all of the candidates, bail out here.
5051  if (RepeatedSequenceLocs.size() < 2) {
5052  RepeatedSequenceLocs.clear();
5053  return outliner::OutlinedFunction();
5054  }
5055  }
5056 
5057  // Does every candidate's MBB contain a call? If so, then we might have a call
5058  // in the range.
5059  if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
5060  // Check if the range contains a call. These require a save + restore of the
5061  // link register.
5062  bool ModStackToSaveLR = false;
5063  if (std::any_of(FirstCand.front(), FirstCand.back(),
5064  [](const MachineInstr &MI) { return MI.isCall(); }))
5065  ModStackToSaveLR = true;
5066 
5067  // Handle the last instruction separately. If this is a tail call, then the
5068  // last instruction is a call. We don't want to save + restore in this case.
5069  // However, it could be possible that the last instruction is a call without
5070  // it being valid to tail call this sequence. We should consider this as
5071  // well.
5072  else if (FrameID != MachineOutlinerThunk &&
5073  FrameID != MachineOutlinerTailCall && FirstCand.back()->isCall())
5074  ModStackToSaveLR = true;
5075 
5076  if (ModStackToSaveLR) {
5077  // We can't fix up the stack. Bail out.
5078  if (!AllStackInstrsSafe) {
5079  RepeatedSequenceLocs.clear();
5080  return outliner::OutlinedFunction();
5081  }
5082 
5083  // Save + restore LR.
5084  NumBytesToCreateFrame += 8;
5085  }
5086  }
5087 
5088  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
5089  NumBytesToCreateFrame, FrameID);
5090 }
5091 
5092 bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(
5093  MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
5094  const Function &F = MF.getFunction();
5095 
5096  // Can F be deduplicated by the linker? If it can, don't outline from it.
5097  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
5098  return false;
5099 
5100  // Don't outline from functions with section markings; the program could
5101  // expect that all the code is in the named section.
5102  // FIXME: Allow outlining from multiple functions with the same section
5103  // marking.
5104  if (F.hasSection())
5105  return false;
5106 
5107  // Outlining from functions with redzones is unsafe since the outliner may
5108  // modify the stack. Check if hasRedZone is true or unknown; if yes, don't
5109  // outline from it.
5111  if (!AFI || AFI->hasRedZone().getValueOr(true))
5112  return false;
5113 
5114  // It's safe to outline from MF.
5115  return true;
5116 }
5117 
5118 bool AArch64InstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
5119  unsigned &Flags) const {
5120  // Check if LR is available through all of the MBB. If it's not, then set
5121  // a flag.
5122  assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
5123  "Suitable Machine Function for outlining must track liveness");
5124  LiveRegUnits LRU(getRegisterInfo());
5125 
5126  std::for_each(MBB.rbegin(), MBB.rend(),
5127  [&LRU](MachineInstr &MI) { LRU.accumulate(MI); });
5128 
5129  // Check if each of the unsafe registers are available...
5130  bool W16AvailableInBlock = LRU.available(AArch64::W16);
5131  bool W17AvailableInBlock = LRU.available(AArch64::W17);
5132  bool NZCVAvailableInBlock = LRU.available(AArch64::NZCV);
5133 
5134  // If all of these are dead (and not live out), we know we don't have to check
5135  // them later.
5136  if (W16AvailableInBlock && W17AvailableInBlock && NZCVAvailableInBlock)
5137  Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
5138 
5139  // Now, add the live outs to the set.
5140  LRU.addLiveOuts(MBB);
5141 
5142  // If any of these registers is available in the MBB, but also a live out of
5143  // the block, then we know outlining is unsafe.
5144  if (W16AvailableInBlock && !LRU.available(AArch64::W16))
5145  return false;
5146  if (W17AvailableInBlock && !LRU.available(AArch64::W17))
5147  return false;
5148  if (NZCVAvailableInBlock && !LRU.available(AArch64::NZCV))
5149  return false;
5150 
5151  // Check if there's a call inside this MachineBasicBlock. If there is, then
5152  // set a flag.
5153  if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); }))
5154  Flags |= MachineOutlinerMBBFlags::HasCalls;
5155 
5156  MachineFunction *MF = MBB.getParent();
5157 
5158  // In the event that we outline, we may have to save LR. If there is an
5159  // available register in the MBB, then we'll always save LR there. Check if
5160  // this is true.
5161  bool CanSaveLR = false;
5162  const AArch64RegisterInfo *ARI = static_cast<const AArch64RegisterInfo *>(
5163  MF->getSubtarget().getRegisterInfo());
5164 
5165  // Check if there is an available register across the sequence that we can
5166  // use.
5167  for (unsigned Reg : AArch64::GPR64RegClass) {
5168  if (!ARI->isReservedReg(*MF, Reg) && Reg != AArch64::LR &&
5169  Reg != AArch64::X16 && Reg != AArch64::X17 && LRU.available(Reg)) {
5170  CanSaveLR = true;
5171  break;
5172  }
5173  }
5174 
5175  // Check if we have a register we can save LR to, and if LR was used
5176  // somewhere. If both of those things are true, then we need to evaluate the
5177  // safety of outlining stack instructions later.
5178  if (!CanSaveLR && !LRU.available(AArch64::LR))
5179  Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
5180 
5181  return true;
5182 }
5183 
5184 outliner::InstrType
5185 AArch64InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
5186  unsigned Flags) const {
5187  MachineInstr &MI = *MIT;
5188  MachineBasicBlock *MBB = MI.getParent();
5189  MachineFunction *MF = MBB->getParent();
5190  AArch64FunctionInfo *FuncInfo = MF->getInfo<AArch64FunctionInfo>();
5191 
5192  // Don't outline LOHs.
5193  if (FuncInfo->getLOHRelated().count(&MI))
5195 
5196  // Don't allow debug values to impact outlining type.
5197  if (MI.isDebugInstr() || MI.isIndirectDebugValue())
5199 
5200  // At this point, KILL instructions don't really tell us much so we can go
5201  // ahead and skip over them.
5202  if (MI.isKill())
5204 
5205  // Is this a terminator for a basic block?
5206  if (MI.isTerminator()) {
5207 
5208  // Is this the end of a function?
5209  if (MI.getParent()->succ_empty())
5211 
5212  // It's not, so don't outline it.
5214  }
5215 
5216  // Make sure none of the operands are un-outlinable.
5217  for (const MachineOperand &MOP : MI.operands()) {
5218  if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
5219  MOP.isTargetIndex())
5221 
5222  // If it uses LR or W30 explicitly, then don't touch it.
5223  if (MOP.isReg() && !MOP.isImplicit() &&
5224  (MOP.getReg() == AArch64::LR || MOP.getReg() == AArch64::W30))
5226  }
5227 
5228  // Special cases for instructions that can always be outlined, but will fail
5229  // the later tests. e.g., ADRPs, which are PC-relative, use LR, but can always
5230  // be outlined because they don't require a *specific* value to be in LR.
5231  if (MI.getOpcode() == AArch64::ADRP)
5233 
5234  // If MI is a call we might be able to outline it. We don't want to outline
5235  // any calls that rely on the position of items on the stack. When we outline
5236  // something containing a call, we have to emit a save and restore of LR in
5237  // the outlined function. Currently, this always happens by saving LR to the
5238  // stack. Thus, if we outline, say, half the parameters for a function call
5239  // plus the call, then we'll break the callee's expectations for the layout
5240  // of the stack.
5241  //
5242  // FIXME: Allow calls to functions which construct a stack frame, as long
5243  // as they don't access arguments on the stack.
5244  // FIXME: Figure out some way to analyze functions defined in other modules.
5245  // We should be able to compute the memory usage based on the IR calling
5246  // convention, even if we can't see the definition.
5247  if (MI.isCall()) {
5248  // Get the function associated with the call. Look at each operand and find
5249  // the one that represents the callee and get its name.
5250  const Function *Callee = nullptr;
5251  for (const MachineOperand &MOP : MI.operands()) {
5252  if (MOP.isGlobal()) {
5253  Callee = dyn_cast<Function>(MOP.getGlobal());
5254  break;
5255  }
5256  }
5257 
5258  // Never outline calls to mcount. There isn't any rule that would require
5259  // this, but the Linux kernel's "ftrace" feature depends on it.
5260  if (Callee && Callee->getName() == "\01_mcount")
5262 
5263  // If we don't know anything about the callee, assume it depends on the
5264  // stack layout of the caller. In that case, it's only legal to outline
5265  // as a tail-call. Whitelist the call instructions we know about so we
5266  // don't get unexpected results with call pseudo-instructions.
5267  auto UnknownCallOutlineType = outliner::InstrType::Illegal;
5268  if (MI.getOpcode() == AArch64::BLR || MI.getOpcode() == AArch64::BL)
5269  UnknownCallOutlineType = outliner::InstrType::LegalTerminator;
5270 
5271  if (!Callee)
5272  return UnknownCallOutlineType;
5273 
5274  // We have a function we have information about. Check if it's something
5275  // we can safely outline.
5276  MachineFunction *CalleeMF = MF->getMMI().getMachineFunction(*Callee);
5277 
5278  // We don't know what's going on with the callee at all. Don't touch it.
5279  if (!CalleeMF)
5280  return UnknownCallOutlineType;
5281 
5282  // Check if we know anything about the callee saves on the function. If we
5283  // don't, then don't touch it, since that implies that we haven't
5284  // computed anything about its stack frame yet.
5285  MachineFrameInfo &MFI = CalleeMF->getFrameInfo();
5286  if (!MFI.isCalleeSavedInfoValid() || MFI.getStackSize() > 0 ||
5287  MFI.getNumObjects() > 0)
5288  return UnknownCallOutlineType;
5289 
5290  // At this point, we can say that CalleeMF ought to not pass anything on the
5291  // stack. Therefore, we can outline it.
5293  }
5294 
5295  // Don't outline positions.
5296  if (MI.isPosition())
5298 
5299  // Don't touch the link register or W30.
5300  if (MI.readsRegister(AArch64::W30, &getRegisterInfo()) ||
5301  MI.modifiesRegister(AArch64::W30, &getRegisterInfo()))
5303 
5304  // Don't outline BTI instructions, because that will prevent the outlining
5305  // site from being indirectly callable.
5306  if (MI.getOpcode() == AArch64::HINT) {
5307  int64_t Imm = MI.getOperand(0).getImm();
5308  if (Imm == 32 || Imm == 34 || Imm == 36 || Imm == 38)
5310  }
5311 
5313 }
5314 
5315 void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
5316  for (MachineInstr &MI : MBB) {
5317  const MachineOperand *Base;
5318  unsigned Width;
5319  int64_t Offset;
5320 
5321  // Is this a load or store with an immediate offset with SP as the base?
5322  if (!MI.mayLoadOrStore() ||
5323  !getMemOperandWithOffsetWidth(MI, Base, Offset, Width, &RI) ||
5324  (Base->isReg() && Base->getReg() != AArch64::SP))
5325  continue;
5326 
5327  // It is, so we have to fix it up.
5328  unsigned Scale;
5329  int64_t Dummy1, Dummy2;
5330 
5331  MachineOperand &StackOffsetOperand = getMemOpBaseRegImmOfsOffsetOperand(MI);
5332  assert(StackOffsetOperand.isImm() && "Stack offset wasn't immediate!");
5333  getMemOpInfo(MI.getOpcode(), Scale, Width, Dummy1, Dummy2);
5334  assert(Scale != 0 && "Unexpected opcode!");
5335 
5336  // We've pushed the return address to the stack, so add 16 to the offset.
5337  // This is safe, since we already checked if it would overflow when we
5338  // checked if this instruction was legal to outline.
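 // Worked example (illustrative): "ldr x0, [sp, #8]" has Scale 8 and a byte
 // Offset of 8; after the 16-byte LR spill it must read [sp, #24], so
 // NewImm = (8 + 16) / 8 = 3 scaled units.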
5339  int64_t NewImm = (Offset + 16) / Scale;
5340  StackOffsetOperand.setImm(NewImm);
5341  }
5342 }
5343 
5344 void AArch64InstrInfo::buildOutlinedFrame(
5345  MachineBasicBlock &MBB, MachineFunction &MF,
5346  const outliner::OutlinedFunction &OF) const {
5347  // For thunk outlining, rewrite the last instruction from a call to a
5348  // tail-call.
5350  MachineInstr *Call = &*--MBB.instr_end();
5351  unsigned TailOpcode;
5352  if (Call->getOpcode() == AArch64::BL) {
5353  TailOpcode = AArch64::TCRETURNdi;
5354  } else {
5355  assert(Call->getOpcode() == AArch64::BLR);
5356  TailOpcode = AArch64::TCRETURNriALL;
5357  }
5358  MachineInstr *TC = BuildMI(MF, DebugLoc(), get(TailOpcode))
5359  .add(Call->getOperand(0))
5360  .addImm(0);
5361  MBB.insert(MBB.end(), TC);
5362  Call->eraseFromParent();
5363  }
5364 
5365  // Is there a call in the outlined range?
5366  auto IsNonTailCall = [](MachineInstr &MI) {
5367  return MI.isCall() && !MI.isReturn();
5368  };
5369  if (std::any_of(MBB.instr_begin(), MBB.instr_end(), IsNonTailCall)) {
5370  // Fix up the instructions in the range, since we're going to modify the
5371  // stack.
5373  "Can only fix up stack references once");
5374  fixupPostOutline(MBB);
5375 
5376  // LR has to be a live in so that we can save it.
5377  MBB.addLiveIn(AArch64::LR);
5378 
5380  MachineBasicBlock::iterator Et = MBB.end();
5381 
5384  Et = std::prev(MBB.end());
5385 
5386  // Insert a save before the outlined region
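 // (Concretely, this is the pre-indexed store "str x30, [sp, #-16]!"; the
 // matching post-indexed "ldr x30, [sp], #16" is inserted before the
 // terminator further down.)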
5387  MachineInstr *STRXpre = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
5388  .addReg(AArch64::SP, RegState::Define)
5389  .addReg(AArch64::LR)
5390  .addReg(AArch64::SP)
5391  .addImm(-16);
5392  It = MBB.insert(It, STRXpre);
5393 
5394  const TargetSubtargetInfo &STI = MF.getSubtarget();
5395  const MCRegisterInfo *MRI = STI.getRegisterInfo();
5396  unsigned DwarfReg = MRI->getDwarfRegNum(AArch64::LR, true);
5397 
5398  // Add a CFI saying the stack was moved 16 B down.
5399  int64_t StackPosEntry =
5401  BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION))
5402  .addCFIIndex(StackPosEntry)
5404 
5405  // Add a CFI saying that the LR that we want to find is now 16 B higher than
5406  // before.
5407  int64_t LRPosEntry =
5408  MF.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg, 16));
5409  BuildMI(MBB, It, DebugLoc(), get(AArch64::CFI_INSTRUCTION))
5410  .addCFIIndex(LRPosEntry)
5412 
5413  // Insert a restore before the terminator for the function.
5414  MachineInstr *LDRXpost = BuildMI(MF, DebugLoc(), get(AArch64::LDRXpost))
5415  .addReg(AArch64::SP, RegState::Define)
5416  .addReg(AArch64::LR, RegState::Define)
5417  .addReg(AArch64::SP)
5418  .addImm(16);
5419  Et = MBB.insert(Et, LDRXpost);
5420  }
5421 
5422  // If this is a tail call outlined function, then there's already a return.
5425  return;
5426 
5427  // It's not a tail call, so we have to insert the return ourselves.
5428  MachineInstr *ret = BuildMI(MF, DebugLoc(), get(AArch64::RET))
5429  .addReg(AArch64::LR, RegState::Undef);
5430  MBB.insert(MBB.end(), ret);
5431 
5432  // Did we have to modify the stack by saving the link register?
5434  return;
5435 
5436  // We modified the stack.
5437  // Walk over the basic block and fix up all the stack accesses.
5438  fixupPostOutline(MBB);
5439 }
5440 
5441 MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall(
5442  Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
5443  MachineFunction &MF, const outliner::Candidate &C) const {
5444 
5445  // Are we tail calling?
5447  // If yes, then we can just branch to the label.
5448  It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::TCRETURNdi))
5449  .addGlobalAddress(M.getNamedValue(MF.getName()))
5450  .addImm(0));
5451  return It;
5452  }
5453 
5454  // Are we saving the link register?
5457  // No, so just insert the call.
5458  It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(AArch64::BL))
5459  .addGlobalAddress(M.getNamedValue(MF.getName())));
5460  return It;
5461  }
5462 
5463  // We want to return the spot where we inserted the call.
5465 
5466  // Instructions for saving and restoring LR around the call instruction we're
5467  // going to insert.
5468  MachineInstr *Save;
5469  MachineInstr *Restore;
5470  // Can we save to a register?
5472  // FIXME: This logic should be sunk into a target-specific interface so that
5473  // we don't have to recompute the register.
5474  unsigned Reg = findRegisterToSaveLRTo(C);
5475  assert(Reg != 0 && "No callee-saved register available?");
5476 
5477  // Save and restore LR from that register.
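 // (ORRXrs with XZR is the canonical register move, so this emits
 // "mov x<Reg>, x30" before the call and "mov x30, x<Reg>" after it,
 // where <Reg> is whatever findRegisterToSaveLRTo picked.)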
5478  Save = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), Reg)
5479  .addReg(AArch64::XZR)
5480  .addReg(AArch64::LR)
5481  .addImm(0);
5482  Restore = BuildMI(MF, DebugLoc(), get(AArch64::ORRXrs), AArch64::LR)
5483  .addReg(AArch64::XZR)
5484  .addReg(Reg)
5485  .addImm(0);
5486  } else {
5487  // We have the default case. Save and restore from SP.
5488  Save = BuildMI(MF, DebugLoc(), get(AArch64::STRXpre))
5489  .addReg(AArch64::SP, RegState::Define)
5490  .addReg(AArch64::LR)
5491  .addReg(AArch64::SP)
5492  .