LLVM  14.0.0git
AArch64ExpandPseudoInsts.cpp
Go to the documentation of this file.
1 //===- AArch64ExpandPseudoInsts.cpp - Expand pseudo instructions ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass that expands pseudo instructions into target
10 // instructions to allow proper scheduling and other late optimizations. This
11 // pass should be run after register allocation but before the post-regalloc
12 // scheduling pass.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "AArch64ExpandImm.h"
17 #include "AArch64InstrInfo.h"
19 #include "AArch64Subtarget.h"
21 #include "Utils/AArch64BaseInfo.h"
22 #include "llvm/ADT/DenseMap.h"
23 #include "llvm/ADT/Triple.h"
33 #include "llvm/IR/DebugLoc.h"
34 #include "llvm/MC/MCInstrDesc.h"
35 #include "llvm/Pass.h"
36 #include "llvm/Support/CodeGen.h"
39 #include <cassert>
40 #include <cstdint>
41 #include <iterator>
42 #include <limits>
43 #include <utility>
44 
45 using namespace llvm;
46 
47 #define AARCH64_EXPAND_PSEUDO_NAME "AArch64 pseudo instruction expansion pass"
48 
49 namespace {
50 
// Machine-function pass that rewrites AArch64 pseudo instructions into real
// target instructions after register allocation (see the file header).
// Declared in an anonymous namespace so it is local to this translation unit.
// NOTE(review): several declaration lines are missing from this extraction
// (doxygen line-number gaps), so some member-function signatures below are
// visibly incomplete.
51 class AArch64ExpandPseudo : public MachineFunctionPass {
52 public:
// Target instruction info used by all expansion helpers; set elsewhere
// (its initialization is not visible in this chunk — presumably in
// runOnMachineFunction; confirm against the full file).
53  const AArch64InstrInfo *TII;
54 
// Pass identification, replacement for typeid.
55  static char ID;
56 
57  AArch64ExpandPseudo() : MachineFunctionPass(ID) {
59  }
60 
61  bool runOnMachineFunction(MachineFunction &Fn) override;
62 
63  StringRef getPassName() const override { return AARCH64_EXPAND_PSEUDO_NAME; }
64 
65 private:
// Expansion helpers; by convention each returns true if it changed the code.
// NOTE(review): the leading lines of several declarations below are absent
// from this view, so parameter lists appear truncated.
66  bool expandMBB(MachineBasicBlock &MBB);
68  MachineBasicBlock::iterator &NextMBBI);
70  unsigned BitSize);
71 
72  bool expand_DestructiveOp(MachineInstr &MI, MachineBasicBlock &MBB,
75  unsigned LdarOp, unsigned StlrOp, unsigned CmpOp,
76  unsigned ExtendImm, unsigned ZeroReg,
77  MachineBasicBlock::iterator &NextMBBI);
78  bool expandCMP_SWAP_128(MachineBasicBlock &MBB,
80  MachineBasicBlock::iterator &NextMBBI);
81  bool expandSetTagLoop(MachineBasicBlock &MBB,
83  MachineBasicBlock::iterator &NextMBBI);
84  bool expandSVESpillFill(MachineBasicBlock &MBB,
85  MachineBasicBlock::iterator MBBI, unsigned Opc,
86  unsigned N);
87  bool expandCALL_RVMARKER(MachineBasicBlock &MBB,
89  bool expandStoreSwiftAsyncContext(MachineBasicBlock &MBB,
91 };
92 
93 } // end anonymous namespace
94 
96 
// Register the pass with LLVM's pass registry under the command-line name
// "aarch64-expand-pseudo"; the trailing booleans mark it as neither
// CFG-only nor an analysis pass.
97 INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo",
98  AARCH64_EXPAND_PSEUDO_NAME, false, false)
99 
100 /// Transfer implicit operands on the pseudo instruction to the
101 /// instructions created from the expansion.
///
/// Operands beyond Desc.getNumOperands() are the implicit register operands
/// appended to the pseudo: each implicit use is copied onto \p UseMI and each
/// implicit def onto \p DefMI.
/// NOTE(review): the second line of the declaration (carrying the \p DefMI
/// parameter, doxygen line 103) is missing from this extraction.
102 static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
104  const MCInstrDesc &Desc = OldMI.getDesc();
// Skip the explicit operands; only the trailing implicit ones are moved.
105  for (const MachineOperand &MO :
106  llvm::drop_begin(OldMI.operands(), Desc.getNumOperands())) {
// The trailing operands are expected to be registers (not regmasks etc.).
107  assert(MO.isReg() && MO.getReg());
108  if (MO.isUse())
109  UseMI.add(MO);
110  else
111  DefMI.add(MO);
112  }
113 }
114 
115 /// Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
116 /// real move-immediate instructions to synthesize the immediate.
/// NOTE(review): the first lines of the definition (the function name and the
/// MBB/MBBI parameters, doxygen lines 117-118) are missing from this
/// extraction.
119  unsigned BitSize) {
120  MachineInstr &MI = *MBBI;
121  Register DstReg = MI.getOperand(0).getReg();
// Preserve the renamable bit of the pseudo's destination on every
// expanded instruction.
122  uint64_t RenamableState =
123  MI.getOperand(0).isRenamable() ? RegState::Renamable : 0;
124  uint64_t Imm = MI.getOperand(1).getImm();
125 
126  if (DstReg == AArch64::XZR || DstReg == AArch64::WZR) {
127  // Useless def, and we don't want to risk creating an invalid ORR (which
128  // would really write to sp).
129  MI.eraseFromParent();
130  return true;
131  }
132 
// Let the shared helper pick the ORR/MOVZ/MOVN/MOVK sequence for Imm.
// NOTE(review): the declaration of the Insn container (doxygen line 133)
// is missing from this extraction.
134  AArch64_IMM::expandMOVImm(Imm, BitSize, Insn);
135  assert(Insn.size() != 0);
136 
// Materialize each instruction of the sequence.
// NOTE(review): the declaration of MIBS (doxygen line 137) is missing here.
138  for (auto I = Insn.begin(), E = Insn.end(); I != E; ++I) {
// Only the final instruction of the sequence may carry the pseudo's
// dead-def state; intermediate results are read by later instructions.
139  bool LastItem = std::next(I) == E;
140  switch (I->Opcode)
141  {
142  default: llvm_unreachable("unhandled!"); break;
143 
// ORR with the zero register as the first source synthesizes a logical
// immediate into Dst.
144  case AArch64::ORRWri:
145  case AArch64::ORRXri:
146  MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
147  .add(MI.getOperand(0))
148  .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
149  .addImm(I->Op2));
150  break;
151  case AArch64::MOVNWi:
152  case AArch64::MOVNXi:
153  case AArch64::MOVZWi:
154  case AArch64::MOVZXi: {
155  bool DstIsDead = MI.getOperand(0).isDead();
156  MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
157  .addReg(DstReg, RegState::Define |
158  getDeadRegState(DstIsDead && LastItem) |
159  RenamableState)
160  .addImm(I->Op1)
161  .addImm(I->Op2));
162  } break;
// MOVK both reads and writes DstReg (it inserts 16 bits into the
// existing value), hence the extra .addReg(DstReg) source operand.
163  case AArch64::MOVKWi:
164  case AArch64::MOVKXi: {
165  Register DstReg = MI.getOperand(0).getReg();
166  bool DstIsDead = MI.getOperand(0).isDead();
167  MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
168  .addReg(DstReg,
170  getDeadRegState(DstIsDead && LastItem) |
171  RenamableState)
172  .addReg(DstReg)
173  .addImm(I->Op1)
174  .addImm(I->Op2));
175  } break;
176  }
177  }
// Implicit uses go onto the first generated instruction, implicit defs
// onto the last (see transferImpOps).
178  transferImpOps(MI, MIBS.front(), MIBS.back());
179  MI.eraseFromParent();
180  return true;
181 }
182 
/// Expand a CMP_SWAP pseudo into a load-exclusive / compare / store-exclusive
/// loop built from the caller-supplied opcodes (load-acquire, store-release,
/// compare) so one routine serves all operand widths.
/// NOTE(review): one line of the parameter list (doxygen line 184, which
/// should carry the MBB/MBBI/LdarOp parameters) is missing from this
/// extraction.
183 bool AArch64ExpandPseudo::expandCMP_SWAP(
185  unsigned StlrOp, unsigned CmpOp, unsigned ExtendImm, unsigned ZeroReg,
186  MachineBasicBlock::iterator &NextMBBI) {
187  MachineInstr &MI = *MBBI;
188  DebugLoc DL = MI.getDebugLoc();
189  const MachineOperand &Dest = MI.getOperand(0);
190  Register StatusReg = MI.getOperand(1).getReg();
191  bool StatusDead = MI.getOperand(1).isDead();
192  // Duplicating undef operands into 2 instructions does not guarantee the same
193  // value on both; However undef should be replaced by xzr anyway.
194  assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
195  Register AddrReg = MI.getOperand(2).getReg();
196  Register DesiredReg = MI.getOperand(3).getReg();
197  Register NewReg = MI.getOperand(4).getReg();
198 
// Create the three blocks of the CAS loop and chain them after MBB.
199  MachineFunction *MF = MBB.getParent();
200  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
201  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
202  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
203 
204  MF->insert(++MBB.getIterator(), LoadCmpBB);
205  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
206  MF->insert(++StoreBB->getIterator(), DoneBB);
207 
208  // .Lloadcmp:
209  // mov wStatus, 0
210  // ldaxr xDest, [xAddr]
211  // cmp xDest, xDesired
212  // b.ne .Ldone
// Pre-set the status register to 0 ("success") unless nobody reads it.
213  if (!StatusDead)
214  BuildMI(LoadCmpBB, DL, TII->get(AArch64::MOVZWi), StatusReg)
215  .addImm(0).addImm(0);
216  BuildMI(LoadCmpBB, DL, TII->get(LdarOp), Dest.getReg())
217  .addReg(AddrReg);
218  BuildMI(LoadCmpBB, DL, TII->get(CmpOp), ZeroReg)
219  .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
220  .addReg(DesiredReg)
221  .addImm(ExtendImm);
// Branch out on mismatch.  NOTE(review): the condition-code operand line
// (doxygen line 223, presumably AArch64CC::NE) is missing from this view.
222  BuildMI(LoadCmpBB, DL, TII->get(AArch64::Bcc))
224  .addMBB(DoneBB)
225  .addReg(AArch64::NZCV, RegState::Implicit | RegState::Kill)
226  LoadCmpBB->addSuccessor(DoneBB);
227  LoadCmpBB->addSuccessor(StoreBB);
228 
229  // .Lstore:
230  // stlxr wStatus, xNew, [xAddr]
231  // cbnz wStatus, .Lloadcmp
232  BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg)
233  .addReg(NewReg)
234  .addReg(AddrReg);
235  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
236  .addReg(StatusReg, getKillRegState(StatusDead))
237  .addMBB(LoadCmpBB);
238  StoreBB->addSuccessor(LoadCmpBB);
239  StoreBB->addSuccessor(DoneBB);
240 
// Move everything after the pseudo into DoneBB and reroute successors.
241  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
242  DoneBB->transferSuccessors(&MBB);
243 
244  MBB.addSuccessor(LoadCmpBB);
245 
246  NextMBBI = MBB.end();
247  MI.eraseFromParent();
248 
249  // Recompute livein lists.
250  LivePhysRegs LiveRegs;
251  computeAndAddLiveIns(LiveRegs, *DoneBB);
252  computeAndAddLiveIns(LiveRegs, *StoreBB);
253  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
254  // Do an extra pass around the loop to get loop carried registers right.
255  StoreBB->clearLiveIns();
256  computeAndAddLiveIns(LiveRegs, *StoreBB);
257  LoadCmpBB->clearLiveIns();
258  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
259 
260  return true;
261 }
262 
/// Expand a 128-bit CMP_SWAP pseudo into an LDXP/STXP exclusive loop,
/// choosing acquire/release flavours from the pseudo's opcode.  Unlike the
/// narrower variant this needs a dedicated FailBB: on mismatch the original
/// value is stored back so the exclusive monitor is released cleanly.
/// NOTE(review): the first line of the parameter list (doxygen line 264,
/// carrying MBB/MBBI) is missing from this extraction.
263 bool AArch64ExpandPseudo::expandCMP_SWAP_128(
265  MachineBasicBlock::iterator &NextMBBI) {
266  MachineInstr &MI = *MBBI;
267  DebugLoc DL = MI.getDebugLoc();
268  MachineOperand &DestLo = MI.getOperand(0);
269  MachineOperand &DestHi = MI.getOperand(1);
270  Register StatusReg = MI.getOperand(2).getReg();
271  bool StatusDead = MI.getOperand(2).isDead();
272  // Duplicating undef operands into 2 instructions does not guarantee the same
273  // value on both; However undef should be replaced by xzr anyway.
274  assert(!MI.getOperand(3).isUndef() && "cannot handle undef");
275  Register AddrReg = MI.getOperand(3).getReg();
276  Register DesiredLoReg = MI.getOperand(4).getReg();
277  Register DesiredHiReg = MI.getOperand(5).getReg();
278  Register NewLoReg = MI.getOperand(6).getReg();
279  Register NewHiReg = MI.getOperand(7).getReg();
280 
// Select acquire semantics on the load and/or release semantics on the
// store according to the memory ordering encoded in the pseudo's opcode.
281  unsigned LdxpOp, StxpOp;
282 
283  switch (MI.getOpcode()) {
284  case AArch64::CMP_SWAP_128_MONOTONIC:
285  LdxpOp = AArch64::LDXPX;
286  StxpOp = AArch64::STXPX;
287  break;
288  case AArch64::CMP_SWAP_128_RELEASE:
289  LdxpOp = AArch64::LDXPX;
290  StxpOp = AArch64::STLXPX;
291  break;
292  case AArch64::CMP_SWAP_128_ACQUIRE:
293  LdxpOp = AArch64::LDAXPX;
294  StxpOp = AArch64::STXPX;
295  break;
296  case AArch64::CMP_SWAP_128:
297  LdxpOp = AArch64::LDAXPX;
298  StxpOp = AArch64::STLXPX;
299  break;
300  default:
301  llvm_unreachable("Unexpected opcode");
302  }
303 
// Create the four blocks of the loop and chain them after MBB.
304  MachineFunction *MF = MBB.getParent();
305  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
306  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
307  auto FailBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
308  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
309 
310  MF->insert(++MBB.getIterator(), LoadCmpBB);
311  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
312  MF->insert(++StoreBB->getIterator(), FailBB);
313  MF->insert(++FailBB->getIterator(), DoneBB);
314 
315  // .Lloadcmp:
316  // ldaxp xDestLo, xDestHi, [xAddr]
317  // cmp xDestLo, xDesiredLo
318  // sbcs xDestHi, xDesiredHi
319  // b.ne .Ldone
320  BuildMI(LoadCmpBB, DL, TII->get(LdxpOp))
321  .addReg(DestLo.getReg(), RegState::Define)
322  .addReg(DestHi.getReg(), RegState::Define)
323  .addReg(AddrReg);
// Compare the low halves; result goes to NZCV (dest is XZR).
324  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
325  .addReg(DestLo.getReg(), getKillRegState(DestLo.isDead()))
326  .addReg(DesiredLoReg)
327  .addImm(0);
// CSINC materializes the low-half comparison result into StatusReg.
// NOTE(review): the condition-code operand line (doxygen line 331) is
// missing from this extraction.
328  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
329  .addUse(AArch64::WZR)
330  .addUse(AArch64::WZR)
332  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
333  .addReg(DestHi.getReg(), getKillRegState(DestHi.isDead()))
334  .addReg(DesiredHiReg)
335  .addImm(0);
// Merge the high-half comparison into StatusReg.
// NOTE(review): the condition-code operand line (doxygen line 339) is
// missing from this extraction.
336  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
337  .addUse(StatusReg, RegState::Kill)
338  .addUse(StatusReg, RegState::Kill)
340  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CBNZW))
341  .addUse(StatusReg, getKillRegState(StatusDead))
342  .addMBB(FailBB);
343  LoadCmpBB->addSuccessor(FailBB);
344  LoadCmpBB->addSuccessor(StoreBB);
345 
346  // .Lstore:
347  // stlxp wStatus, xNewLo, xNewHi, [xAddr]
348  // cbnz wStatus, .Lloadcmp
349  BuildMI(StoreBB, DL, TII->get(StxpOp), StatusReg)
350  .addReg(NewLoReg)
351  .addReg(NewHiReg)
352  .addReg(AddrReg);
353  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
354  .addReg(StatusReg, getKillRegState(StatusDead))
355  .addMBB(LoadCmpBB);
356  BuildMI(StoreBB, DL, TII->get(AArch64::B)).addMBB(DoneBB);
357  StoreBB->addSuccessor(LoadCmpBB);
358  StoreBB->addSuccessor(DoneBB);
359 
360  // .Lfail:
361  // stlxp wStatus, xDestLo, xDestHi, [xAddr]
362  // cbnz wStatus, .Lloadcmp
// On mismatch, store back the value just loaded to release the exclusive
// monitor; retry if the store-exclusive fails.
363  BuildMI(FailBB, DL, TII->get(StxpOp), StatusReg)
364  .addReg(DestLo.getReg())
365  .addReg(DestHi.getReg())
366  .addReg(AddrReg);
367  BuildMI(FailBB, DL, TII->get(AArch64::CBNZW))
368  .addReg(StatusReg, getKillRegState(StatusDead))
369  .addMBB(LoadCmpBB);
370  FailBB->addSuccessor(LoadCmpBB);
371  FailBB->addSuccessor(DoneBB);
372 
373  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
374  DoneBB->transferSuccessors(&MBB);
375 
376  MBB.addSuccessor(LoadCmpBB);
377 
378  NextMBBI = MBB.end();
379  MI.eraseFromParent();
380 
381  // Recompute liveness bottom up.
382  LivePhysRegs LiveRegs;
383  computeAndAddLiveIns(LiveRegs, *DoneBB);
384  computeAndAddLiveIns(LiveRegs, *FailBB);
385  computeAndAddLiveIns(LiveRegs, *StoreBB);
386  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
387 
388  // Do an extra pass in the loop to get the loop carried dependencies right.
389  FailBB->clearLiveIns();
390  computeAndAddLiveIns(LiveRegs, *FailBB);
391  StoreBB->clearLiveIns();
392  computeAndAddLiveIns(LiveRegs, *StoreBB);
393  LoadCmpBB->clearLiveIns();
394  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
395 
396  return true;
397 }
398 
399 /// \brief Expand Pseudos to Instructions with destructive operands.
400 ///
401 /// This mechanism uses MOVPRFX instructions for zeroing the false lanes
402 /// or for fixing relaxed register allocation conditions to comply with
403 /// the instructions register constraints. The latter case may be cheaper
404 /// than setting the register constraints in the register allocator,
405 /// since that will insert regular MOV instructions rather than MOVPRFX.
406 ///
407 /// Example (after register allocation):
408 ///
409 /// FSUB_ZPZZ_ZERO_B Z0, Pg, Z1, Z0
410 ///
411 /// * The Pseudo FSUB_ZPZZ_ZERO_B maps to FSUB_ZPmZ_B.
412 /// * We cannot map directly to FSUB_ZPmZ_B because the register
413 /// constraints of the instruction are not met.
414 /// * Also the _ZERO specifies the false lanes need to be zeroed.
415 ///
416 /// We first try to see if the destructive operand == result operand,
417 /// if not, we try to swap the operands, e.g.
418 ///
419 /// FSUB_ZPmZ_B Z0, Pg/m, Z0, Z1
420 ///
421 /// But because FSUB_ZPmZ is not commutative, this is semantically
422 /// different, so we need a reverse instruction:
423 ///
424 /// FSUBR_ZPmZ_B Z0, Pg/m, Z0, Z1
425 ///
426 /// Then we implement the zeroing of the false lanes of Z0 by adding
427 /// a zeroing MOVPRFX instruction:
428 ///
429 /// MOVPRFX_ZPzZ_B Z0, Pg/z, Z0
430 /// FSUBR_ZPmZ_B Z0, Pg/m, Z0, Z1
431 ///
432 /// Note that this can only be done for _ZERO or _UNDEF variants where
433 /// we can guarantee the false lanes to be zeroed (by implementing this)
434 /// or that they are undef (don't care / not used), otherwise the
435 /// swapping of operands is illegal because the operation is not
436 /// (or cannot be emulated to be) fully commutative.
// NOTE(review): this extraction has doxygen line-number gaps throughout the
// function below; in particular the tail of the parameter list (lines
// 439-440) and most `case` labels of the switch statements are missing.
437 bool AArch64ExpandPseudo::expand_DestructiveOp(
438  MachineInstr &MI,
// Map the pseudo to its real SVE instruction and read its destructive-type
// and false-lanes policy from the target's TSFlags.
441  unsigned Opcode = AArch64::getSVEPseudoMap(MI.getOpcode());
442  uint64_t DType = TII->get(Opcode).TSFlags & AArch64::DestructiveInstTypeMask;
443  uint64_t FalseLanes = MI.getDesc().TSFlags & AArch64::FalseLanesMask;
444  bool FalseZero = FalseLanes == AArch64::FalseLanesZero;
445 
446  unsigned DstReg = MI.getOperand(0).getReg();
447  bool DstIsDead = MI.getOperand(0).isDead();
448 
449  if (DType == AArch64::DestructiveBinary)
450  assert(DstReg != MI.getOperand(3).getReg());
451 
// Work out which operands are the predicate, the destructive operand and
// the source(s), possibly swapping to the reverse instruction form.
452  bool UseRev = false;
453  unsigned PredIdx, DOPIdx, SrcIdx, Src2Idx;
// NOTE(review): the case labels for this switch (doxygen lines 455-456,
// 463-465, 468, 471) are missing from this extraction.
454  switch (DType) {
457  if (DstReg == MI.getOperand(3).getReg()) {
458  // FSUB Zd, Pg, Zs1, Zd ==> FSUBR Zd, Pg/m, Zd, Zs1
459  std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(1, 3, 2);
460  UseRev = true;
461  break;
462  }
466  std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(1, 2, 3);
467  break;
469  std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(2, 3, 3);
470  break;
472  std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 2, 3, 4);
473  if (DstReg == MI.getOperand(3).getReg()) {
474  // FMLA Zd, Pg, Za, Zd, Zm ==> FMAD Zdn, Pg, Zm, Za
475  std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 3, 4, 2);
476  UseRev = true;
477  } else if (DstReg == MI.getOperand(4).getReg()) {
478  // FMLA Zd, Pg, Za, Zm, Zd ==> FMAD Zdn, Pg, Zm, Za
479  std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 4, 3, 2);
480  UseRev = true;
481  }
482  break;
483  default:
484  llvm_unreachable("Unsupported Destructive Operand type");
485  }
486 
487 #ifndef NDEBUG
488  // MOVPRFX can only be used if the destination operand
489  // is the destructive operand, not as any other operand,
490  // so the Destructive Operand must be unique.
// NOTE(review): the case labels for this debug-only switch (doxygen lines
// 493-494, 499-500, 503) are also missing from this extraction.
491  bool DOPRegIsUnique = false;
492  switch (DType) {
495  DOPRegIsUnique =
496  DstReg != MI.getOperand(DOPIdx).getReg() ||
497  MI.getOperand(DOPIdx).getReg() != MI.getOperand(SrcIdx).getReg();
498  break;
501  DOPRegIsUnique = true;
502  break;
504  DOPRegIsUnique =
505  DstReg != MI.getOperand(DOPIdx).getReg() ||
506  (MI.getOperand(DOPIdx).getReg() != MI.getOperand(SrcIdx).getReg() &&
507  MI.getOperand(DOPIdx).getReg() != MI.getOperand(Src2Idx).getReg());
508  break;
509  }
510 #endif
511 
512  // Resolve the reverse opcode
513  if (UseRev) {
514  int NewOpcode;
515  // e.g. DIV -> DIVR
516  if ((NewOpcode = AArch64::getSVERevInstr(Opcode)) != -1)
517  Opcode = NewOpcode;
518  // e.g. DIVR -> DIV
519  else if ((NewOpcode = AArch64::getSVENonRevInstr(Opcode)) != -1)
520  Opcode = NewOpcode;
521  }
522 
523  // Get the right MOVPRFX
// NOTE(review): the case labels for the element sizes (doxygen lines
// 527-528, 532, 536, 540) are missing from this extraction; only the
// per-size MOVPRFX selections remain visible.
524  uint64_t ElementSize = TII->getElementSizeForOpcode(Opcode);
525  unsigned MovPrfx, MovPrfxZero;
526  switch (ElementSize) {
529  MovPrfx = AArch64::MOVPRFX_ZZ;
530  MovPrfxZero = AArch64::MOVPRFX_ZPzZ_B;
531  break;
533  MovPrfx = AArch64::MOVPRFX_ZZ;
534  MovPrfxZero = AArch64::MOVPRFX_ZPzZ_H;
535  break;
537  MovPrfx = AArch64::MOVPRFX_ZZ;
538  MovPrfxZero = AArch64::MOVPRFX_ZPzZ_S;
539  break;
541  MovPrfx = AArch64::MOVPRFX_ZZ;
542  MovPrfxZero = AArch64::MOVPRFX_ZPzZ_D;
543  break;
544  default:
545  llvm_unreachable("Unsupported ElementSize");
546  }
547 
548  //
549  // Create the destructive operation (if required)
550  //
551  MachineInstrBuilder PRFX, DOP;
552  if (FalseZero) {
553 #ifndef NDEBUG
554  assert(DOPRegIsUnique && "The destructive operand should be unique");
555 #endif
556  assert(ElementSize != AArch64::ElementSizeNone &&
557  "This instruction is unpredicated");
558 
559  // Merge source operand into destination register
560  PRFX = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(MovPrfxZero))
561  .addReg(DstReg, RegState::Define)
562  .addReg(MI.getOperand(PredIdx).getReg())
563  .addReg(MI.getOperand(DOPIdx).getReg());
564 
565  // After the movprfx, the destructive operand is same as Dst
566  DOPIdx = 0;
567  } else if (DstReg != MI.getOperand(DOPIdx).getReg()) {
568 #ifndef NDEBUG
569  assert(DOPRegIsUnique && "The destructive operand should be unique");
570 #endif
// Plain (unpredicated) MOVPRFX just satisfies the tied-register
// constraint by copying the destructive operand into Dst.
571  PRFX = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(MovPrfx))
572  .addReg(DstReg, RegState::Define)
573  .addReg(MI.getOperand(DOPIdx).getReg());
574  DOPIdx = 0;
575  }
576 
577  //
578  // Create the destructive operation
579  //
// NOTE(review): the case labels for this final switch (doxygen lines 584,
// 589-591, 596) are missing from this extraction; the three visible arms
// differ only in operand order.
580  DOP = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode))
581  .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead));
582 
583  switch (DType) {
585  DOP.addReg(MI.getOperand(DOPIdx).getReg(), RegState::Kill)
586  .add(MI.getOperand(PredIdx))
587  .add(MI.getOperand(SrcIdx));
588  break;
592  DOP.add(MI.getOperand(PredIdx))
593  .addReg(MI.getOperand(DOPIdx).getReg(), RegState::Kill)
594  .add(MI.getOperand(SrcIdx));
595  break;
597  DOP.add(MI.getOperand(PredIdx))
598  .addReg(MI.getOperand(DOPIdx).getReg(), RegState::Kill)
599  .add(MI.getOperand(SrcIdx))
600  .add(MI.getOperand(Src2Idx));
601  break;
602  }
603 
// If a MOVPRFX was emitted, split the pseudo's implicit operands between
// it and the real operation.  NOTE(review): doxygen line 605 is missing
// here, so this branch's body may be incompletely shown.
604  if (PRFX) {
606  transferImpOps(MI, PRFX, DOP);
607  } else
608  transferImpOps(MI, DOP, DOP);
609 
610  MI.eraseFromParent();
611  return true;
612 }
613 
/// Expand an STGloop_wback/STZGloop_wback-style pseudo into an unrolled
/// head (for odd multiples of 16 bytes) plus a loop of paired tag-store
/// instructions that decrements SizeReg by 32 per iteration.
/// NOTE(review): one line of the parameter list (doxygen line 615, carrying
/// MBB/MBBI) is missing from this extraction.
614 bool AArch64ExpandPseudo::expandSetTagLoop(
616  MachineBasicBlock::iterator &NextMBBI) {
617  MachineInstr &MI = *MBBI;
618  DebugLoc DL = MI.getDebugLoc();
619  Register SizeReg = MI.getOperand(0).getReg();
620  Register AddressReg = MI.getOperand(1).getReg();
621 
622  MachineFunction *MF = MBB.getParent();
623 
// STZG variants additionally zero the tagged memory.
624  bool ZeroData = MI.getOpcode() == AArch64::STZGloop_wback;
625  const unsigned OpCode1 =
626  ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex;
627  const unsigned OpCode2 =
628  ZeroData ? AArch64::STZ2GPostIndex : AArch64::ST2GPostIndex;
629 
630  unsigned Size = MI.getOperand(2).getImm();
631  assert(Size > 0 && Size % 16 == 0);
// If the total is an odd multiple of 16, peel one single-granule store so
// the loop can use the paired (32-byte) instruction.
632  if (Size % (16 * 2) != 0) {
633  BuildMI(MBB, MBBI, DL, TII->get(OpCode1), AddressReg)
634  .addReg(AddressReg)
635  .addReg(AddressReg)
636  .addImm(1);
637  Size -= 16;
638  }
// Materialize the (possibly adjusted) byte count and immediately expand
// the MOVi64imm pseudo in place.  NOTE(review): doxygen line 639, which
// presumably defines the iterator `I` used below, is missing from this
// extraction.
640  BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), SizeReg)
641  .addImm(Size);
642  expandMOVImm(MBB, I, 64);
643 
644  auto LoopBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
645  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
646 
647  MF->insert(++MBB.getIterator(), LoopBB);
648  MF->insert(++LoopBB->getIterator(), DoneBB);
649 
// Loop body: paired post-indexed tag store, decrement the remaining size
// by 32, loop while non-zero.
650  BuildMI(LoopBB, DL, TII->get(OpCode2))
651  .addDef(AddressReg)
652  .addReg(AddressReg)
653  .addReg(AddressReg)
654  .addImm(2)
655  .cloneMemRefs(MI)
656  .setMIFlags(MI.getFlags());
657  BuildMI(LoopBB, DL, TII->get(AArch64::SUBXri))
658  .addDef(SizeReg)
659  .addReg(SizeReg)
660  .addImm(16 * 2)
661  .addImm(0);
662  BuildMI(LoopBB, DL, TII->get(AArch64::CBNZX)).addUse(SizeReg).addMBB(LoopBB);
663 
664  LoopBB->addSuccessor(LoopBB);
665  LoopBB->addSuccessor(DoneBB);
666 
667  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
668  DoneBB->transferSuccessors(&MBB);
669 
670  MBB.addSuccessor(LoopBB);
671 
672  NextMBBI = MBB.end();
673  MI.eraseFromParent();
674  // Recompute liveness bottom up.
675  LivePhysRegs LiveRegs;
676  computeAndAddLiveIns(LiveRegs, *DoneBB);
677  computeAndAddLiveIns(LiveRegs, *LoopBB);
678  // Do an extra pass in the loop to get the loop carried dependencies right.
679  // FIXME: is this necessary?
680  LoopBB->clearLiveIns();
681  computeAndAddLiveIns(LiveRegs, *LoopBB);
682  DoneBB->clearLiveIns();
683  computeAndAddLiveIns(LiveRegs, *DoneBB);
684 
685  return true;
686 }
687 
/// Expand a multi-vector SVE spill/fill pseudo into \p N consecutive
/// single-vector loads or stores (\p Opc), one per zsub0+Offset sub-register
/// of the tuple register in operand 0.
/// NOTE(review): doxygen line 689 (a parameter-list line, presumably the MBBI
/// parameter) and line 692 (the initializer of TRI) are missing from this
/// extraction.
688 bool AArch64ExpandPseudo::expandSVESpillFill(MachineBasicBlock &MBB,
690  unsigned Opc, unsigned N) {
691  const TargetRegisterInfo *TRI =
693  MachineInstr &MI = *MBBI;
694  for (unsigned Offset = 0; Offset < N; ++Offset) {
695  int ImmOffset = MI.getOperand(2).getImm() + Offset;
// Only the last generated access may carry the base register's kill flag.
696  bool Kill = (Offset + 1 == N) ? MI.getOperand(1).isKill() : false;
697  assert(ImmOffset >= -256 && ImmOffset < 256 &&
698  "Immediate spill offset out of range");
699  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
700  .addReg(
701  TRI->getSubReg(MI.getOperand(0).getReg(), AArch64::zsub0 + Offset),
// Loads define the sub-register; stores read it.
702  Opc == AArch64::LDR_ZXI ? RegState::Define : 0)
703  .addReg(MI.getOperand(1).getReg(), getKillRegState(Kill))
704  .addImm(ImmOffset);
705  }
706  MI.eraseFromParent();
707  return true;
708 }
709 
/// Expand the CALL_RVMARKER pseudo (used for Objective-C retainable-value
/// returns) into the real call followed by the `mov x29, x29` marker, and
/// bundle the pair so nothing is scheduled between them.
/// NOTE(review): one line of the parameter list (doxygen line 711, carrying
/// MBB/MBBI) is missing from this extraction.
710 bool AArch64ExpandPseudo::expandCALL_RVMARKER(
712  // Expand CALL_RVMARKER pseudo to a branch, followed by the special `mov x29,
713  // x29` marker. Mark the sequence as bundle, to avoid passes moving other code
714  // in between.
715  MachineInstr &MI = *MBBI;
716 
717  MachineInstr *OriginalCall;
718  MachineOperand &CallTarget = MI.getOperand(0);
719  assert((CallTarget.isGlobal() || CallTarget.isReg()) &&
720  "invalid operand for regular call");
// Direct calls (global target) use BL; indirect calls (register) use BLR.
721  unsigned Opc = CallTarget.isGlobal() ? AArch64::BL : AArch64::BLR;
722  OriginalCall = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc)).getInstr();
723  OriginalCall->addOperand(CallTarget);
724 
725  unsigned RegMaskStartIdx = 1;
726  // Skip register arguments. Those are added during ISel, but are not
727  // needed for the concrete branch.
728  while (!MI.getOperand(RegMaskStartIdx).isRegMask()) {
729  auto MOP = MI.getOperand(RegMaskStartIdx);
730  assert(MOP.isReg() && "can only add register operands");
// Re-attach the argument register as an implicit use of the real call.
// NOTE(review): doxygen line 731 (presumably
// `OriginalCall->addOperand(MachineOperand::CreateReg(`) is missing from
// this extraction.
732  MOP.getReg(), /*Def=*/false, /*Implicit=*/true));
733  RegMaskStartIdx++;
734  }
// Copy the regmask and everything after it verbatim onto the real call.
735  for (const MachineOperand &MO :
736  llvm::drop_begin(MI.operands(), RegMaskStartIdx))
737  OriginalCall->addOperand(MO);
738 
// The marker instruction: ORRXrs fp, xzr, fp, #0 — i.e. `mov x29, x29`.
739  auto *Marker = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXrs))
740  .addReg(AArch64::FP, RegState::Define)
741  .addReg(AArch64::XZR)
742  .addReg(AArch64::FP)
743  .addImm(0)
744  .getInstr();
745  if (MI.shouldUpdateCallSiteInfo())
746  MBB.getParent()->moveCallSiteInfo(&MI, Marker);
747  MI.eraseFromParent();
// Bundle [call, marker] so later passes treat them as one unit.
748  finalizeBundle(MBB, OriginalCall->getIterator(),
749  std::next(Marker->getIterator()));
750  return true;
751 }
752 
/// Expand the StoreSwiftAsyncContext pseudo: a plain STRXui on most targets,
/// or (on arm64e) a PACDB-signed store using the fixed ABI discriminator
/// 0xc31a mixed with the address.
/// NOTE(review): the remainder of the signature (doxygen line 754, carrying
/// MBB/MBBI) and several builder-terminator lines (doxygen 766-767, 783,
/// 788, 795, 799, 804, 806 — presumably memoperand/flag chaining) are
/// missing from this extraction.
753 bool AArch64ExpandPseudo::expandStoreSwiftAsyncContext(
755  Register CtxReg = MBBI->getOperand(0).getReg();
756  Register BaseReg = MBBI->getOperand(1).getReg();
757  int Offset = MBBI->getOperand(2).getImm();
758  DebugLoc DL(MBBI->getDebugLoc());
759  auto &STI = MBB.getParent()->getSubtarget<AArch64Subtarget>();
760 
// Non-arm64e targets store the context unsigned.
761  if (STI.getTargetTriple().getArchName() != "arm64e") {
762  BuildMI(MBB, MBBI, DL, TII->get(AArch64::STRXui))
763  .addUse(CtxReg)
764  .addUse(BaseReg)
765  .addImm(Offset / 8)
768  return true;
769  }
770 
771  // We need to sign the context in an address-discriminated way. 0xc31a is a
772  // fixed random value, chosen as part of the ABI.
773  // add x16, xBase, #Offset
774  // movk x16, #0xc31a, lsl #48
775  // mov x17, x22/xzr
776  // pacdb x17, x16
777  // str x17, [xBase, #Offset]
// Build the effective address in x16; SUBXri handles negative offsets.
778  unsigned Opc = Offset >= 0 ? AArch64::ADDXri : AArch64::SUBXri;
779  BuildMI(MBB, MBBI, DL, TII->get(Opc), AArch64::X16)
780  .addUse(BaseReg)
781  .addImm(abs(Offset))
782  .addImm(0)
// Fold the fixed ABI discriminator into the top 16 bits of the address.
784  BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X16)
785  .addUse(AArch64::X16)
786  .addImm(0xc31a)
787  .addImm(48)
789  // We're not allowed to clobber X22 (and couldn't clobber XZR if we tried), so
790  // move it somewhere before signing.
791  BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXrs), AArch64::X17)
792  .addUse(AArch64::XZR)
793  .addUse(CtxReg)
794  .addImm(0)
// Sign x17 with the B data key, using x16 as the discriminator.
796  BuildMI(MBB, MBBI, DL, TII->get(AArch64::PACDB), AArch64::X17)
797  .addUse(AArch64::X17)
798  .addUse(AArch64::X16)
800  BuildMI(MBB, MBBI, DL, TII->get(AArch64::STRXui))
801  .addUse(AArch64::X17)
802  .addUse(BaseReg)
803  .addImm(Offset / 8)
805 
807  return true;
808 }
809 
810 /// If MBBI references a pseudo instruction that should be expanded here,
811 /// do the expansion and return true. Otherwise return false.
812 bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
814  MachineBasicBlock::iterator &NextMBBI) {
815  MachineInstr &MI = *MBBI;
816  unsigned Opcode = MI.getOpcode();
817 
818  // Check if we can expand the destructive op
819  int OrigInstr = AArch64::getSVEPseudoMap(MI.getOpcode());
820  if (OrigInstr != -1) {
821  auto &Orig = TII->get(OrigInstr);
822  if ((Orig.TSFlags & AArch64::DestructiveInstTypeMask)
824  return expand_DestructiveOp(MI, MBB, MBBI);
825  }
826  }
827 
828  switch (Opcode) {
829  default:
830  break;
831 
832  case AArch64::BSPv8i8:
833  case AArch64::BSPv16i8: {
834  Register DstReg = MI.getOperand(0).getReg();
835  if (DstReg == MI.getOperand(3).getReg()) {
836  // Expand to BIT
837  BuildMI(MBB, MBBI, MI.getDebugLoc(),
838  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BITv8i8
839  : AArch64::BITv16i8))
840  .add(MI.getOperand(0))
841  .add(MI.getOperand(3))
842  .add(MI.getOperand(2))
843  .add(MI.getOperand(1));
844  } else if (DstReg == MI.getOperand(2).getReg()) {
845  // Expand to BIF
846  BuildMI(MBB, MBBI, MI.getDebugLoc(),
847  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BIFv8i8
848  : AArch64::BIFv16i8))
849  .add(MI.getOperand(0))
850  .add(MI.getOperand(2))
851  .add(MI.getOperand(3))
852  .add(MI.getOperand(1));
853  } else {
854  // Expand to BSL, use additional move if required
855  if (DstReg == MI.getOperand(1).getReg()) {
856  BuildMI(MBB, MBBI, MI.getDebugLoc(),
857  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8
858  : AArch64::BSLv16i8))
859  .add(MI.getOperand(0))
860  .add(MI.getOperand(1))
861  .add(MI.getOperand(2))
862  .add(MI.getOperand(3));
863  } else {
864  BuildMI(MBB, MBBI, MI.getDebugLoc(),
865  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::ORRv8i8
866  : AArch64::ORRv16i8))
867  .addReg(DstReg,
869  getRenamableRegState(MI.getOperand(0).isRenamable()))
870  .add(MI.getOperand(1))
871  .add(MI.getOperand(1));
872  BuildMI(MBB, MBBI, MI.getDebugLoc(),
873  TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8
874  : AArch64::BSLv16i8))
875  .add(MI.getOperand(0))
876  .addReg(DstReg,
878  getRenamableRegState(MI.getOperand(0).isRenamable()))
879  .add(MI.getOperand(2))
880  .add(MI.getOperand(3));
881  }
882  }
883  MI.eraseFromParent();
884  return true;
885  }
886 
887  case AArch64::ADDWrr:
888  case AArch64::SUBWrr:
889  case AArch64::ADDXrr:
890  case AArch64::SUBXrr:
891  case AArch64::ADDSWrr:
892  case AArch64::SUBSWrr:
893  case AArch64::ADDSXrr:
894  case AArch64::SUBSXrr:
895  case AArch64::ANDWrr:
896  case AArch64::ANDXrr:
897  case AArch64::BICWrr:
898  case AArch64::BICXrr:
899  case AArch64::ANDSWrr:
900  case AArch64::ANDSXrr:
901  case AArch64::BICSWrr:
902  case AArch64::BICSXrr:
903  case AArch64::EONWrr:
904  case AArch64::EONXrr:
905  case AArch64::EORWrr:
906  case AArch64::EORXrr:
907  case AArch64::ORNWrr:
908  case AArch64::ORNXrr:
909  case AArch64::ORRWrr:
910  case AArch64::ORRXrr: {
911  unsigned Opcode;
912  switch (MI.getOpcode()) {
913  default:
914  return false;
915  case AArch64::ADDWrr: Opcode = AArch64::ADDWrs; break;
916  case AArch64::SUBWrr: Opcode = AArch64::SUBWrs; break;
917  case AArch64::ADDXrr: Opcode = AArch64::ADDXrs; break;
918  case AArch64::SUBXrr: Opcode = AArch64::SUBXrs; break;
919  case AArch64::ADDSWrr: Opcode = AArch64::ADDSWrs; break;
920  case AArch64::SUBSWrr: Opcode = AArch64::SUBSWrs; break;
921  case AArch64::ADDSXrr: Opcode = AArch64::ADDSXrs; break;
922  case AArch64::SUBSXrr: Opcode = AArch64::SUBSXrs; break;
923  case AArch64::ANDWrr: Opcode = AArch64::ANDWrs; break;
924  case AArch64::ANDXrr: Opcode = AArch64::ANDXrs; break;
925  case AArch64::BICWrr: Opcode = AArch64::BICWrs; break;
926  case AArch64::BICXrr: Opcode = AArch64::BICXrs; break;
927  case AArch64::ANDSWrr: Opcode = AArch64::ANDSWrs; break;
928  case AArch64::ANDSXrr: Opcode = AArch64::ANDSXrs; break;
929  case AArch64::BICSWrr: Opcode = AArch64::BICSWrs; break;
930  case AArch64::BICSXrr: Opcode = AArch64::BICSXrs; break;
931  case AArch64::EONWrr: Opcode = AArch64::EONWrs; break;
932  case AArch64::EONXrr: Opcode = AArch64::EONXrs; break;
933  case AArch64::EORWrr: Opcode = AArch64::EORWrs; break;
934  case AArch64::EORXrr: Opcode = AArch64::EORXrs; break;
935  case AArch64::ORNWrr: Opcode = AArch64::ORNWrs; break;
936  case AArch64::ORNXrr: Opcode = AArch64::ORNXrs; break;
937  case AArch64::ORRWrr: Opcode = AArch64::ORRWrs; break;
938  case AArch64::ORRXrr: Opcode = AArch64::ORRXrs; break;
939  }
940  MachineFunction &MF = *MBB.getParent();
941  // Try to create new inst without implicit operands added.
942  MachineInstr *NewMI = MF.CreateMachineInstr(
943  TII->get(Opcode), MI.getDebugLoc(), /*NoImplicit=*/true);
944  MBB.insert(MBBI, NewMI);
945  MachineInstrBuilder MIB1(MF, NewMI);
946  MIB1.addReg(MI.getOperand(0).getReg(), RegState::Define)
947  .add(MI.getOperand(1))
948  .add(MI.getOperand(2))
950  transferImpOps(MI, MIB1, MIB1);
951  MI.eraseFromParent();
952  return true;
953  }
954 
955  case AArch64::LOADgot: {
956  MachineFunction *MF = MBB.getParent();
957  Register DstReg = MI.getOperand(0).getReg();
958  const MachineOperand &MO1 = MI.getOperand(1);
959  unsigned Flags = MO1.getTargetFlags();
960 
961  if (MF->getTarget().getCodeModel() == CodeModel::Tiny) {
962  // Tiny codemodel expand to LDR
963  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
964  TII->get(AArch64::LDRXl), DstReg);
965 
966  if (MO1.isGlobal()) {
967  MIB.addGlobalAddress(MO1.getGlobal(), 0, Flags);
968  } else if (MO1.isSymbol()) {
969  MIB.addExternalSymbol(MO1.getSymbolName(), Flags);
970  } else {
971  assert(MO1.isCPI() &&
972  "Only expect globals, externalsymbols, or constant pools");
973  MIB.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(), Flags);
974  }
975  } else {
976  // Small codemodel expand into ADRP + LDR.
977  MachineFunction &MF = *MI.getParent()->getParent();
978  DebugLoc DL = MI.getDebugLoc();
979  MachineInstrBuilder MIB1 =
980  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
981 
982  MachineInstrBuilder MIB2;
985  unsigned Reg32 = TRI->getSubReg(DstReg, AArch64::sub_32);
986  unsigned DstFlags = MI.getOperand(0).getTargetFlags();
987  MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRWui))
988  .addDef(Reg32)
989  .addReg(DstReg, RegState::Kill)
990  .addReg(DstReg, DstFlags | RegState::Implicit);
991  } else {
992  unsigned DstReg = MI.getOperand(0).getReg();
993  MIB2 = BuildMI(MBB, MBBI, DL, TII->get(AArch64::LDRXui))
994  .add(MI.getOperand(0))
995  .addUse(DstReg, RegState::Kill);
996  }
997 
998  if (MO1.isGlobal()) {
999  MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
1000  MIB2.addGlobalAddress(MO1.getGlobal(), 0,
1002  } else if (MO1.isSymbol()) {
1003  MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE);
1004  MIB2.addExternalSymbol(MO1.getSymbolName(), Flags |
1007  } else {
1008  assert(MO1.isCPI() &&
1009  "Only expect globals, externalsymbols, or constant pools");
1010  MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
1011  Flags | AArch64II::MO_PAGE);
1012  MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
1013  Flags | AArch64II::MO_PAGEOFF |
1015  }
1016 
1017  transferImpOps(MI, MIB1, MIB2);
1018  }
1019  MI.eraseFromParent();
1020  return true;
1021  }
1022  case AArch64::MOVaddrBA: {
1023  MachineFunction &MF = *MI.getParent()->getParent();
1025  // blockaddress expressions have to come from a constant pool because the
1026  // largest addend (and hence offset within a function) allowed for ADRP is
1027  // only 8MB.
1028  const BlockAddress *BA = MI.getOperand(1).getBlockAddress();
1029  assert(MI.getOperand(1).getOffset() == 0 && "unexpected offset");
1030 
1032  unsigned CPIdx = MCP->getConstantPoolIndex(BA, Align(8));
1033 
1034  Register DstReg = MI.getOperand(0).getReg();
1035  auto MIB1 =
1036  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
1038  auto MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
1039  TII->get(AArch64::LDRXui), DstReg)
1040  .addUse(DstReg)
1043  transferImpOps(MI, MIB1, MIB2);
1044  MI.eraseFromParent();
1045  return true;
1046  }
1047  }
1049  case AArch64::MOVaddr:
1050  case AArch64::MOVaddrJT:
1051  case AArch64::MOVaddrCP:
1052  case AArch64::MOVaddrTLS:
1053  case AArch64::MOVaddrEXT: {
1054  // Expand into ADRP + ADD.
1055  Register DstReg = MI.getOperand(0).getReg();
1056  assert(DstReg != AArch64::XZR);
1057  MachineInstrBuilder MIB1 =
1058  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
1059  .add(MI.getOperand(1));
1060 
1061  if (MI.getOperand(1).getTargetFlags() & AArch64II::MO_TAGGED) {
1062  // MO_TAGGED on the page indicates a tagged address. Set the tag now.
1063  // We do so by creating a MOVK that sets bits 48-63 of the register to
1064  // (global address + 0x100000000 - PC) >> 48. This assumes that we're in
1065  // the small code model so we can assume a binary size of <= 4GB, which
1066  // makes the untagged PC relative offset positive. The binary must also be
1067  // loaded into address range [0, 2^48). Both of these properties need to
1068  // be ensured at runtime when using tagged addresses.
1069  auto Tag = MI.getOperand(1);
1070  Tag.setTargetFlags(AArch64II::MO_PREL | AArch64II::MO_G3);
1071  Tag.setOffset(0x100000000);
1072  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi), DstReg)
1073  .addReg(DstReg)
1074  .add(Tag)
1075  .addImm(48);
1076  }
1077 
1078  MachineInstrBuilder MIB2 =
1079  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
1080  .add(MI.getOperand(0))
1081  .addReg(DstReg)
1082  .add(MI.getOperand(2))
1083  .addImm(0);
1084 
1085  transferImpOps(MI, MIB1, MIB2);
1086  MI.eraseFromParent();
1087  return true;
1088  }
1089  case AArch64::ADDlowTLS:
1090  // Produce a plain ADD
1091  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
1092  .add(MI.getOperand(0))
1093  .add(MI.getOperand(1))
1094  .add(MI.getOperand(2))
1095  .addImm(0);
1096  MI.eraseFromParent();
1097  return true;
1098 
1099  case AArch64::MOVbaseTLS: {
1100  Register DstReg = MI.getOperand(0).getReg();
1101  auto SysReg = AArch64SysReg::TPIDR_EL0;
1102  MachineFunction *MF = MBB.getParent();
1104  SysReg = AArch64SysReg::TPIDR_EL3;
1105  else if (MF->getSubtarget<AArch64Subtarget>().useEL2ForTP())
1106  SysReg = AArch64SysReg::TPIDR_EL2;
1107  else if (MF->getSubtarget<AArch64Subtarget>().useEL1ForTP())
1108  SysReg = AArch64SysReg::TPIDR_EL1;
1109  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MRS), DstReg)
1110  .addImm(SysReg);
1111  MI.eraseFromParent();
1112  return true;
1113  }
1114 
1115  case AArch64::MOVi32imm:
1116  return expandMOVImm(MBB, MBBI, 32);
1117  case AArch64::MOVi64imm:
1118  return expandMOVImm(MBB, MBBI, 64);
1119  case AArch64::RET_ReallyLR: {
1120  // Hiding the LR use with RET_ReallyLR may lead to extra kills in the
1121  // function and missing live-ins. We are fine in practice because callee
1122  // saved register handling ensures the register value is restored before
1123  // RET, but we need the undef flag here to appease the MachineVerifier
1124  // liveness checks.
1125  MachineInstrBuilder MIB =
1126  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::RET))
1127  .addReg(AArch64::LR, RegState::Undef);
1128  transferImpOps(MI, MIB, MIB);
1129  MI.eraseFromParent();
1130  return true;
1131  }
1132  case AArch64::CMP_SWAP_8:
1133  return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRB, AArch64::STLXRB,
1134  AArch64::SUBSWrx,
1136  AArch64::WZR, NextMBBI);
1137  case AArch64::CMP_SWAP_16:
1138  return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRH, AArch64::STLXRH,
1139  AArch64::SUBSWrx,
1141  AArch64::WZR, NextMBBI);
1142  case AArch64::CMP_SWAP_32:
1143  return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRW, AArch64::STLXRW,
1144  AArch64::SUBSWrs,
1146  AArch64::WZR, NextMBBI);
1147  case AArch64::CMP_SWAP_64:
1148  return expandCMP_SWAP(MBB, MBBI,
1149  AArch64::LDAXRX, AArch64::STLXRX, AArch64::SUBSXrs,
1151  AArch64::XZR, NextMBBI);
1152  case AArch64::CMP_SWAP_128:
1153  case AArch64::CMP_SWAP_128_RELEASE:
1154  case AArch64::CMP_SWAP_128_ACQUIRE:
1155  case AArch64::CMP_SWAP_128_MONOTONIC:
1156  return expandCMP_SWAP_128(MBB, MBBI, NextMBBI);
1157 
1158  case AArch64::AESMCrrTied:
1159  case AArch64::AESIMCrrTied: {
1160  MachineInstrBuilder MIB =
1161  BuildMI(MBB, MBBI, MI.getDebugLoc(),
1162  TII->get(Opcode == AArch64::AESMCrrTied ? AArch64::AESMCrr :
1163  AArch64::AESIMCrr))
1164  .add(MI.getOperand(0))
1165  .add(MI.getOperand(1));
1166  transferImpOps(MI, MIB, MIB);
1167  MI.eraseFromParent();
1168  return true;
1169  }
1170  case AArch64::IRGstack: {
1171  MachineFunction &MF = *MBB.getParent();
1172  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
1173  const AArch64FrameLowering *TFI =
1174  MF.getSubtarget<AArch64Subtarget>().getFrameLowering();
1175 
1176  // IRG does not allow immediate offset. getTaggedBasePointerOffset should
1177  // almost always point to SP-after-prologue; if not, emit a longer
1178  // instruction sequence.
1179  int BaseOffset = -AFI->getTaggedBasePointerOffset();
1180  Register FrameReg;
1181  StackOffset FrameRegOffset = TFI->resolveFrameOffsetReference(
1182  MF, BaseOffset, false /*isFixed*/, false /*isSVE*/, FrameReg,
1183  /*PreferFP=*/false,
1184  /*ForSimm=*/true);
1185  Register SrcReg = FrameReg;
1186  if (FrameRegOffset) {
1187  // Use output register as temporary.
1188  SrcReg = MI.getOperand(0).getReg();
1189  emitFrameOffset(MBB, &MI, MI.getDebugLoc(), SrcReg, FrameReg,
1190  FrameRegOffset, TII);
1191  }
1192  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::IRG))
1193  .add(MI.getOperand(0))
1194  .addUse(SrcReg)
1195  .add(MI.getOperand(2));
1196  MI.eraseFromParent();
1197  return true;
1198  }
1199  case AArch64::TAGPstack: {
1200  int64_t Offset = MI.getOperand(2).getImm();
1201  BuildMI(MBB, MBBI, MI.getDebugLoc(),
1202  TII->get(Offset >= 0 ? AArch64::ADDG : AArch64::SUBG))
1203  .add(MI.getOperand(0))
1204  .add(MI.getOperand(1))
1206  .add(MI.getOperand(4));
1207  MI.eraseFromParent();
1208  return true;
1209  }
1210  case AArch64::STGloop_wback:
1211  case AArch64::STZGloop_wback:
1212  return expandSetTagLoop(MBB, MBBI, NextMBBI);
1213  case AArch64::STGloop:
1214  case AArch64::STZGloop:
1216  "Non-writeback variants of STGloop / STZGloop should not "
1217  "survive past PrologEpilogInserter.");
1218  case AArch64::STR_ZZZZXI:
1219  return expandSVESpillFill(MBB, MBBI, AArch64::STR_ZXI, 4);
1220  case AArch64::STR_ZZZXI:
1221  return expandSVESpillFill(MBB, MBBI, AArch64::STR_ZXI, 3);
1222  case AArch64::STR_ZZXI:
1223  return expandSVESpillFill(MBB, MBBI, AArch64::STR_ZXI, 2);
1224  case AArch64::LDR_ZZZZXI:
1225  return expandSVESpillFill(MBB, MBBI, AArch64::LDR_ZXI, 4);
1226  case AArch64::LDR_ZZZXI:
1227  return expandSVESpillFill(MBB, MBBI, AArch64::LDR_ZXI, 3);
1228  case AArch64::LDR_ZZXI:
1229  return expandSVESpillFill(MBB, MBBI, AArch64::LDR_ZXI, 2);
1230  case AArch64::BLR_RVMARKER:
1231  return expandCALL_RVMARKER(MBB, MBBI);
1232  case AArch64::StoreSwiftAsyncContext:
1233  return expandStoreSwiftAsyncContext(MBB, MBBI);
1234  }
1235  return false;
1236 }
1237 
1238 /// Iterate over the instructions in basic block MBB and expand any
1239 /// pseudo instructions. Return true if anything was modified.
1240 bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
1241  bool Modified = false;
1242 
1244  while (MBBI != E) {
1245  MachineBasicBlock::iterator NMBBI = std::next(MBBI);
1246  Modified |= expandMI(MBB, MBBI, NMBBI);
1247  MBBI = NMBBI;
1248  }
1249 
1250  return Modified;
1251 }
1252 
1253 bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
1254  TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
1255 
1256  bool Modified = false;
1257  for (auto &MBB : MF)
1258  Modified |= expandMBB(MBB);
1259  return Modified;
1260 }
1261 
1262 /// Returns an instance of the pseudo instruction expansion pass.
1264  return new AArch64ExpandPseudo();
1265 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
llvm::AArch64II::MO_G3
@ MO_G3
MO_G3 - A symbol operand with this flag (granule 3) represents the high 16-bits of a 64-bit address,...
Definition: AArch64BaseInfo.h:689
llvm::AArch64ISD::LOADgot
@ LOADgot
Definition: AArch64ISelLowering.h:64
llvm::AArch64::ElementSizeNone
@ ElementSizeNone
Definition: AArch64InstrInfo.h:466
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:105
MachineInstr.h
MathExtras.h
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm::MachineConstantPool::getConstantPoolIndex
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
Definition: MachineFunction.cpp:1467
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AllocatorList.h:23
AArch64MachineFunctionInfo.h
UseMI
MachineInstrBuilder & UseMI
Definition: AArch64ExpandPseudoInsts.cpp:102
llvm::drop_begin
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:321
llvm::AArch64CC::NE
@ NE
Definition: AArch64BaseInfo.h:256
llvm::MachineOperand::CreateReg
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
Definition: MachineOperand.h:791
llvm::MachineBasicBlock::getBasicBlock
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
Definition: MachineBasicBlock.h:206
llvm::AArch64_AM::LSL
@ LSL
Definition: AArch64AddressingModes.h:35
AARCH64_EXPAND_PSEUDO_NAME
#define AARCH64_EXPAND_PSEUDO_NAME
Definition: AArch64ExpandPseudoInsts.cpp:47
MCInstrDesc.h
llvm::MachineOperand::getGlobal
const GlobalValue * getGlobal() const
Definition: MachineOperand.h:563
llvm::MachineInstrBuilder::add
const MachineInstrBuilder & add(const MachineOperand &MO) const
Definition: MachineInstrBuilder.h:224
llvm::AArch64::FalseLanesZero
@ FalseLanesZero
Definition: AArch64InstrInfo.h:489
Pass.h
llvm::TargetSubtargetInfo::getInstrInfo
virtual const TargetInstrInfo * getInstrInfo() const
Definition: TargetSubtargetInfo.h:92
llvm::AArch64::ElementSizeS
@ ElementSizeS
Definition: AArch64InstrInfo.h:469
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1177
llvm::AArch64Subtarget::useEL3ForTP
bool useEL3ForTP() const
Definition: AArch64Subtarget.h:451
AArch64BaseInfo.h
llvm::MachineFunctionPass
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Definition: MachineFunctionPass.h:30
MachineBasicBlock.h
llvm::LivePhysRegs
A set of physical registers with utility functions to track liveness when walking backward/forward th...
Definition: LivePhysRegs.h:48
llvm::TargetSubtargetInfo::getRegisterInfo
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Definition: TargetSubtargetInfo.h:124
llvm::AArch64II::MO_PREL
@ MO_PREL
MO_PREL - Indicates that the bits of the symbol operand represented by MO_G0 etc are PC relative.
Definition: AArch64BaseInfo.h:740
llvm::TargetRegisterInfo
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Definition: TargetRegisterInfo.h:233
llvm::MachineFunction::moveCallSiteInfo
void moveCallSiteInfo(const MachineInstr *Old, const MachineInstr *New)
Move the call site info from Old to New call site info.
Definition: MachineFunction.cpp:952
llvm::MachineOperand::isSymbol
bool isSymbol() const
isSymbol - Tests if this is a MO_ExternalSymbol operand.
Definition: MachineOperand.h:341
llvm::MachineFunction::CreateMachineInstr
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, DebugLoc DL, bool NoImplicit=false)
CreateMachineInstr - Allocate a new MachineInstr.
Definition: MachineFunction.cpp:354
DenseMap.h
llvm::MachineFunction::insert
void insert(iterator MBBI, MachineBasicBlock *MBB)
Definition: MachineFunction.h:842
llvm::getDeadRegState
unsigned getDeadRegState(bool B)
Definition: MachineInstrBuilder.h:511
llvm::AArch64::ElementSizeB
@ ElementSizeB
Definition: AArch64InstrInfo.h:467
llvm::AArch64::DestructiveTernaryCommWithRev
@ DestructiveTernaryCommWithRev
Definition: AArch64InstrInfo.h:483
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:80
llvm::RegState::Kill
@ Kill
The last use of a register.
Definition: MachineInstrBuilder.h:48
llvm::RegState::Undef
@ Undef
Value of the register doesn't matter.
Definition: MachineInstrBuilder.h:52
llvm::dwarf::Tag
Tag
Definition: Dwarf.h:104
llvm::MachineOperand::getOffset
int64_t getOffset() const
Return the offset from the symbol in this operand.
Definition: MachineOperand.h:600
TRI
unsigned const TargetRegisterInfo * TRI
Definition: MachineSink.cpp:1559
llvm::ARCISD::BL
@ BL
Definition: ARCISelLowering.h:34
llvm::AArch64FrameLowering
Definition: AArch64FrameLowering.h:23
llvm::MachineBasicBlock::addSuccessor
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
Definition: MachineBasicBlock.cpp:750
llvm::AArch64::getSVERevInstr
int getSVERevInstr(uint16_t Opcode)
llvm::MachineInstrBuilder::addDef
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Definition: MachineInstrBuilder.h:116
llvm::finalizeBundle
void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
finalizeBundle - Finalize a machine instruction bundle which includes a sequence of instructions star...
Definition: MachineInstrBundle.cpp:123
AArch64InstrInfo.h
llvm::getRenamableRegState
unsigned getRenamableRegState(bool B)
Definition: MachineInstrBuilder.h:523
TargetMachine.h
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
llvm::MachineInstrBuilder::addMBB
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:146
llvm::AArch64InstrInfo
Definition: AArch64InstrInfo.h:37
llvm::AArch64ISD::MRS
@ MRS
Definition: AArch64ISelLowering.h:291
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::initializeAArch64ExpandPseudoPass
void initializeAArch64ExpandPseudoPass(PassRegistry &)
llvm::MachineFunction::getInfo
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Definition: MachineFunction.h:739
llvm::AArch64Subtarget::isTargetILP32
bool isTargetILP32() const
Definition: AArch64Subtarget.h:549
llvm::MachineBasicBlock::eraseFromParent
void eraseFromParent()
This method unlinks 'this' from the containing function and deletes it.
Definition: MachineBasicBlock.cpp:1331
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:127
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:195
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:49
llvm::MachineInstr::FrameSetup
@ FrameSetup
Definition: MachineInstr.h:82
llvm::AArch64::DestructiveUnaryPassthru
@ DestructiveUnaryPassthru
Definition: AArch64InstrInfo.h:484
llvm::AArch64_AM::getShifterImm
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
Definition: AArch64AddressingModes.h:99
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:143
LoopDeletionResult::Modified
@ Modified
llvm::computeAndAddLiveIns
void computeAndAddLiveIns(LivePhysRegs &LiveRegs, MachineBasicBlock &MBB)
Convenience function combining computeLiveIns() and addLiveIns().
Definition: LivePhysRegs.cpp:341
DebugLoc.h
Align
uint64_t Align
Definition: ELFObjHandler.cpp:82
llvm::emitFrameOffset
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
Definition: AArch64InstrInfo.cpp:4095
llvm::MachineInstrBuilder::addExternalSymbol
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:184
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:95
llvm::MachineInstrBuilder::cloneMemRefs
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
Definition: MachineInstrBuilder.h:213
AArch64AddressingModes.h
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:641
llvm::AArch64::NotDestructive
@ NotDestructive
Definition: AArch64InstrInfo.h:475
llvm::MachineInstrBuilder::setMIFlag
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
Definition: MachineInstrBuilder.h:278
llvm::MachineOperand::getTargetFlags
unsigned getTargetFlags() const
Definition: MachineOperand.h:221
llvm::MachineOperand::isReg
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Definition: MachineOperand.h:321
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:64
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
uint64_t
llvm::AArch64FunctionInfo::getTaggedBasePointerOffset
unsigned getTaggedBasePointerOffset() const
Definition: AArch64MachineFunctionInfo.h:380
AArch64ExpandImm.h
llvm::createAArch64ExpandPseudoPass
FunctionPass * createAArch64ExpandPseudoPass()
Returns an instance of the pseudo instruction expansion pass.
Definition: AArch64ExpandPseudoInsts.cpp:1263
llvm::AArch64II::MO_NC
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
Definition: AArch64BaseInfo.h:721
llvm::AArch64II::MO_PAGEOFF
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
Definition: AArch64BaseInfo.h:685
llvm::MachineConstantPool
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
Definition: MachineConstantPool.h:117
llvm::MachineOperand::isDead
bool isDead() const
Definition: MachineOperand.h:385
llvm::AArch64FunctionInfo
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
Definition: AArch64MachineFunctionInfo.h:37
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::AArch64Subtarget::useEL2ForTP
bool useEL2ForTP() const
Definition: AArch64Subtarget.h:450
MachineConstantPool.h
llvm::MachineOperand::isCPI
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
Definition: MachineOperand.h:333
llvm::MachineFunction::CreateMachineBasicBlock
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
Definition: MachineFunction.cpp:420
MachineFunctionPass.h
llvm::MachineFunction::getConstantPool
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
Definition: MachineFunction.h:673
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:229
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::MachineInstrBuilder::addUse
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
Definition: MachineInstrBuilder.h:123
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:360
llvm::MachineFunction
Definition: MachineFunction.h:241
Triple.h
llvm::BlockAddress
The address of a basic block.
Definition: Constants.h:848
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:57
MBBI
MachineBasicBlock MachineBasicBlock::iterator MBBI
Definition: AArch64SLSHardening.cpp:75
llvm::AArch64::FalseLanesMask
@ FalseLanesMask
Definition: AArch64InstrInfo.h:488
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::StackOffset
StackOffset is a class to represent an offset with 2 dimensions, named fixed and scalable,...
Definition: TypeSize.h:134
llvm::ilist_node_impl::getIterator
self_iterator getIterator()
Definition: ilist_node.h:81
TargetSubtargetInfo.h
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::RegState::Define
@ Define
Register definition.
Definition: MachineInstrBuilder.h:44
llvm::RegState::Renamable
@ Renamable
Register that may be renamed.
Definition: MachineInstrBuilder.h:61
llvm::AArch64Subtarget::useEL1ForTP
bool useEL1ForTP() const
Definition: AArch64Subtarget.h:449
LLVM_FALLTHROUGH
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:290
llvm::CodeModel::Tiny
@ Tiny
Definition: CodeGen.h:28
llvm::AArch64CC::EQ
@ EQ
Definition: AArch64BaseInfo.h:255
llvm::MachineInstrBuilder::getInstr
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Definition: MachineInstrBuilder.h:89
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::AArch64_AM::getArithExtendImm
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
Definition: AArch64AddressingModes.h:171
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
llvm::AArch64::ElementSizeD
@ ElementSizeD
Definition: AArch64InstrInfo.h:470
Insn
SmallVector< AArch64_IMM::ImmInsnModel, 4 > Insn
Definition: AArch64MIPeepholeOpt.cpp:86
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:637
llvm::RegState::Implicit
@ Implicit
Not emitted register (e.g. carry, or temporary result).
Definition: MachineInstrBuilder.h:46
llvm::AArch64_IMM::expandMOVImm
void expandMOVImm(uint64_t Imm, unsigned BitSize, SmallVectorImpl< ImmInsnModel > &Insn)
Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more real move-immediate instructions to...
Definition: AArch64ExpandImm.cpp:303
llvm::MachineBasicBlock::insert
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
Definition: MachineBasicBlock.cpp:1311
llvm::ARCISD::RET
@ RET
Definition: ARCISelLowering.h:52
llvm::MachineOperand::getIndex
int getIndex() const
Definition: MachineOperand.h:557
llvm::MachineInstrBuilder::addConstantPoolIndex
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:158
llvm::AArch64::DestructiveInstTypeMask
@ DestructiveInstTypeMask
Definition: AArch64InstrInfo.h:474
llvm::TargetMachine::getCodeModel
CodeModel::Model getCodeModel() const
Returns the code model.
Definition: TargetMachine.cpp:74
llvm::AArch64ISD::ADRP
@ ADRP
Definition: AArch64ISelLowering.h:61
llvm::AArch64::DestructiveBinaryComm
@ DestructiveBinaryComm
Definition: AArch64InstrInfo.h:481
llvm::MachineInstrBuilder::addGlobalAddress
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
Definition: MachineInstrBuilder.h:177
CodeGen.h
llvm::AArch64Subtarget::isTargetMachO
bool isTargetMachO() const
Definition: AArch64Subtarget.h:547
INITIALIZE_PASS
INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo", AARCH64_EXPAND_PSEUDO_NAME, false, false) static void transferImpOps(MachineInstr &OldMI
Transfer implicit operands on the pseudo instruction to the instructions created from the expansion.
llvm::getKillRegState
unsigned getKillRegState(bool B)
Definition: MachineInstrBuilder.h:508
AArch64Subtarget.h
llvm::AArch64::DestructiveBinary
@ DestructiveBinary
Definition: AArch64InstrInfo.h:480
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:272
MachineInstrBuilder.h
llvm::MachineOperand::getSymbolName
const char * getSymbolName() const
Definition: MachineOperand.h:608
llvm::MachineInstrBuilder::setMIFlags
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
Definition: MachineInstrBuilder.h:273
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:328
N
#define N
DefMI
MachineInstrBuilder MachineInstrBuilder & DefMI
Definition: AArch64ExpandPseudoInsts.cpp:103
llvm::MachineInstr::addOperand
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
Definition: MachineInstr.cpp:207
llvm::AArch64::getSVEPseudoMap
int getSVEPseudoMap(uint16_t Opcode)
llvm::AArch64_AM::UXTB
@ UXTB
Definition: AArch64AddressingModes.h:41
MachineOperand.h
llvm::TargetRegisterInfo::getSubReg
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo.
Definition: TargetRegisterInfo.h:1094
llvm::AArch64::DestructiveBinaryImm
@ DestructiveBinaryImm
Definition: AArch64InstrInfo.h:478
llvm::AArch64::DestructiveBinaryCommWithRev
@ DestructiveBinaryCommWithRev
Definition: AArch64InstrInfo.h:482
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
llvm::AArch64_AM::UXTH
@ UXTH
Definition: AArch64AddressingModes.h:42
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::AArch64II::MO_TAGGED
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
Definition: AArch64BaseInfo.h:748
llvm::AArch64Subtarget
Definition: AArch64Subtarget.h:38
MachineFunction.h
llvm::MachineInstrBundleIterator< MachineInstr >
llvm::abs
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Definition: APFloat.h:1282
llvm::AArch64::getSVENonRevInstr
int getSVENonRevInstr(uint16_t Opcode)
llvm::MCInstrDesc::getNumOperands
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:228
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:274
llvm::AArch64II::MO_PAGE
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
Definition: AArch64BaseInfo.h:680
llvm::MachineOperand::isGlobal
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
Definition: MachineOperand.h:339
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38
LivePhysRegs.h
llvm::AArch64::ElementSizeH
@ ElementSizeH
Definition: AArch64InstrInfo.h:468