LLVM  15.0.0git
X86ExpandPseudo.cpp
1 //===------- X86ExpandPseudo.cpp - Expand pseudo instructions -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass that expands pseudo instructions into target
10 // instructions to allow proper scheduling, if-conversion, other late
11 // optimizations, or simply the encoding of the instructions.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "X86.h"
16 #include "X86FrameLowering.h"
17 #include "X86InstrBuilder.h"
18 #include "X86InstrInfo.h"
19 #include "X86MachineFunctionInfo.h"
20 #include "X86Subtarget.h"
21 #include "llvm/Analysis/EHPersonalities.h"
22 #include "llvm/CodeGen/LivePhysRegs.h"
23 #include "llvm/CodeGen/MachineFunctionPass.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/Passes.h" // For IDs of passes that are preserved.
26 #include "llvm/IR/GlobalValue.h"
27 #include "llvm/Target/TargetMachine.h"
28 using namespace llvm;
29 
30 #define DEBUG_TYPE "x86-pseudo"
31 #define X86_EXPAND_PSEUDO_NAME "X86 pseudo instruction expansion pass"
32 
33 namespace {
34 class X86ExpandPseudo : public MachineFunctionPass {
35 public:
36  static char ID;
37  X86ExpandPseudo() : MachineFunctionPass(ID) {}
38 
39  void getAnalysisUsage(AnalysisUsage &AU) const override {
40  AU.setPreservesCFG();
41  AU.addPreservedID(MachineLoopInfoID);
42  AU.addPreservedID(MachineDominatorsID);
43  MachineFunctionPass::getAnalysisUsage(AU);
44  }
45 
46  const X86Subtarget *STI = nullptr;
47  const X86InstrInfo *TII = nullptr;
48  const X86RegisterInfo *TRI = nullptr;
49  const X86MachineFunctionInfo *X86FI = nullptr;
50  const X86FrameLowering *X86FL = nullptr;
51 
52  bool runOnMachineFunction(MachineFunction &Fn) override;
53 
54  MachineFunctionProperties getRequiredProperties() const override {
55  return MachineFunctionProperties().set(
56  MachineFunctionProperties::Property::NoVRegs);
57  }
58 
59  StringRef getPassName() const override {
60  return "X86 pseudo instruction expansion pass";
61  }
62 
63 private:
64  void ExpandICallBranchFunnel(MachineBasicBlock *MBB,
65  MachineBasicBlock::iterator MBBI);
66  void expandCALL_RVMARKER(MachineBasicBlock &MBB,
67  MachineBasicBlock::iterator MBBI);
68  bool ExpandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
69  bool ExpandMBB(MachineBasicBlock &MBB);
70 
71  /// This function expands pseudos which affect control flow.
72  /// It is done in a separate pass to simplify block navigation in the main
73  /// pass (which calls ExpandMBB).
74  bool ExpandPseudosWhichAffectControlFlow(MachineFunction &MF);
75 
76  /// Expand X86::VASTART_SAVE_XMM_REGS into a set of xmm copying instructions,
77  /// placed in a separate block guarded by a check of the al register (for the
78  /// SystemV ABI).
79  void ExpandVastartSaveXmmRegs(
80  MachineBasicBlock *EntryBlk,
81  MachineBasicBlock::iterator VAStartPseudoInstr) const;
82 };
83 char X86ExpandPseudo::ID = 0;
84 
85 } // End anonymous namespace.
86 
87 INITIALIZE_PASS(X86ExpandPseudo, DEBUG_TYPE, X86_EXPAND_PSEUDO_NAME, false,
88  false)
89 
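// Lowers ICALL_BRANCH_FUNNEL by emitting what is essentially a binary search
// over the jump-table targets: each step LEAs the candidate entry's address
// (RIP-relative) into %r11, compares it against the selector, and branches on
// below/equal, recursing on each half until every leaf block ends in a
// TAILJMPd64 to the selected target.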
90 void X86ExpandPseudo::ExpandICallBranchFunnel(
91  MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI) {
92  MachineBasicBlock *JTMBB = MBB;
93  MachineInstr *JTInst = &*MBBI;
94  MachineFunction *MF = MBB->getParent();
95  const BasicBlock *BB = MBB->getBasicBlock();
96  auto InsPt = MachineFunction::iterator(MBB);
97  ++InsPt;
98 
99  std::vector<std::pair<MachineBasicBlock *, unsigned>> TargetMBBs;
100  const DebugLoc &DL = JTInst->getDebugLoc();
101  MachineOperand Selector = JTInst->getOperand(0);
102  const GlobalValue *CombinedGlobal = JTInst->getOperand(1).getGlobal();
103 
104  auto CmpTarget = [&](unsigned Target) {
105  if (Selector.isReg())
106  MBB->addLiveIn(Selector.getReg());
107  BuildMI(*MBB, MBBI, DL, TII->get(X86::LEA64r), X86::R11)
108  .addReg(X86::RIP)
109  .addImm(1)
110  .addReg(0)
111  .addGlobalAddress(CombinedGlobal,
112  JTInst->getOperand(2 + 2 * Target).getImm())
113  .addReg(0);
114  BuildMI(*MBB, MBBI, DL, TII->get(X86::CMP64rr))
115  .add(Selector)
116  .addReg(X86::R11);
117  };
118 
119  auto CreateMBB = [&]() {
120  auto *NewMBB = MF->CreateMachineBasicBlock(BB);
121  MBB->addSuccessor(NewMBB);
122  if (!MBB->isLiveIn(X86::EFLAGS))
123  MBB->addLiveIn(X86::EFLAGS);
124  return NewMBB;
125  };
126 
127  auto EmitCondJump = [&](unsigned CC, MachineBasicBlock *ThenMBB) {
128  BuildMI(*MBB, MBBI, DL, TII->get(X86::JCC_1)).addMBB(ThenMBB).addImm(CC);
129 
130  auto *ElseMBB = CreateMBB();
131  MF->insert(InsPt, ElseMBB);
132  MBB = ElseMBB;
133  MBBI = MBB->end();
134  };
135 
136  auto EmitCondJumpTarget = [&](unsigned CC, unsigned Target) {
137  auto *ThenMBB = CreateMBB();
138  TargetMBBs.push_back({ThenMBB, Target});
139  EmitCondJump(CC, ThenMBB);
140  };
141 
142  auto EmitTailCall = [&](unsigned Target) {
143  BuildMI(*MBB, MBBI, DL, TII->get(X86::TAILJMPd64))
144  .add(JTInst->getOperand(3 + 2 * Target));
145  };
146 
147  std::function<void(unsigned, unsigned)> EmitBranchFunnel =
148  [&](unsigned FirstTarget, unsigned NumTargets) {
149  if (NumTargets == 1) {
150  EmitTailCall(FirstTarget);
151  return;
152  }
153 
154  if (NumTargets == 2) {
155  CmpTarget(FirstTarget + 1);
156  EmitCondJumpTarget(X86::COND_B, FirstTarget);
157  EmitTailCall(FirstTarget + 1);
158  return;
159  }
160 
161  if (NumTargets < 6) {
162  CmpTarget(FirstTarget + 1);
163  EmitCondJumpTarget(X86::COND_B, FirstTarget);
164  EmitCondJumpTarget(X86::COND_E, FirstTarget + 1);
165  EmitBranchFunnel(FirstTarget + 2, NumTargets - 2);
166  return;
167  }
168 
169  auto *ThenMBB = CreateMBB();
170  CmpTarget(FirstTarget + (NumTargets / 2));
171  EmitCondJump(X86::COND_B, ThenMBB);
172  EmitCondJumpTarget(X86::COND_E, FirstTarget + (NumTargets / 2));
173  EmitBranchFunnel(FirstTarget + (NumTargets / 2) + 1,
174  NumTargets - (NumTargets / 2) - 1);
175 
176  MF->insert(InsPt, ThenMBB);
177  MBB = ThenMBB;
178  MBBI = MBB->end();
179  EmitBranchFunnel(FirstTarget, NumTargets / 2);
180  };
181 
182  EmitBranchFunnel(0, (JTInst->getNumOperands() - 2) / 2);
183  for (auto P : TargetMBBs) {
184  MF->insert(InsPt, P.first);
185  BuildMI(P.first, DL, TII->get(X86::TAILJMPd64))
186  .add(JTInst->getOperand(3 + 2 * P.second));
187  }
188  JTMBB->erase(JTInst);
189 }
190 
191 void X86ExpandPseudo::expandCALL_RVMARKER(MachineBasicBlock &MBB,
192  MachineBasicBlock::iterator MBBI) {
193  // Expand CALL_RVMARKER pseudo to a call instruction, followed by the special
194  // "movq %rax, %rdi" marker.
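  // For example, a pseudo such as
  //   CALL64pcrel32_RVMARKER @rtfn, @foo, ...
  // is expanded to approximately:
  //   callq foo
  //   movq  %rax, %rdi   ## marker expected by the runtime
  //   callq rtfn         ## e.g. objc_retainAutoreleasedReturnValue
  // (@rtfn and @foo are illustrative names; on Darwin the sequence is also
  // bundled so later passes cannot break it up).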
195  MachineInstr &MI = *MBBI;
196 
197  MachineInstr *OriginalCall;
198  assert((MI.getOperand(1).isGlobal() || MI.getOperand(1).isReg()) &&
199  "invalid operand for regular call");
200  unsigned Opc = -1;
201  if (MI.getOpcode() == X86::CALL64m_RVMARKER)
202  Opc = X86::CALL64m;
203  else if (MI.getOpcode() == X86::CALL64r_RVMARKER)
204  Opc = X86::CALL64r;
205  else if (MI.getOpcode() == X86::CALL64pcrel32_RVMARKER)
206  Opc = X86::CALL64pcrel32;
207  else
208  llvm_unreachable("unexpected opcode");
209 
210  OriginalCall = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc)).getInstr();
211  bool RAXImplicitDead = false;
212  for (MachineOperand &Op : llvm::drop_begin(MI.operands())) {
213  // RAX may be 'implicit dead', if there are no other users of the return
214  // value. We introduce a new use, so change it to 'implicit def'.
215  if (Op.isReg() && Op.isImplicit() && Op.isDead() &&
216  TRI->regsOverlap(Op.getReg(), X86::RAX)) {
217  Op.setIsDead(false);
218  Op.setIsDef(true);
219  RAXImplicitDead = true;
220  }
221  OriginalCall->addOperand(Op);
222  }
223 
224  // Emit marker "movq %rax, %rdi". %rdi is not callee-saved, so it cannot be
225  // live across the earlier call. The call to the ObjC runtime function returns
226  // the first argument, so the value of %rax is unchanged after the ObjC
227  // runtime call.
228  auto *Marker = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(X86::MOV64rr))
229  .addReg(X86::RDI, RegState::Define)
230  .addReg(X86::RAX)
231  .getInstr();
232  if (MI.shouldUpdateCallSiteInfo())
233  MBB.getParent()->moveCallSiteInfo(&MI, Marker);
234 
235  // Emit call to ObjC runtime.
236  const uint32_t *RegMask =
237  TRI->getCallPreservedMask(*MBB.getParent(), CallingConv::C);
238  MachineInstr *RtCall =
239  BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(X86::CALL64pcrel32))
240  .addGlobalAddress(MI.getOperand(0).getGlobal(), 0, 0)
241  .addRegMask(RegMask)
242  .addReg(X86::RAX,
243  RegState::Implicit |
244  (RAXImplicitDead ? (RegState::Dead | RegState::Define)
245  : RegState::Define))
246  .getInstr();
247  MI.eraseFromParent();
248 
249  auto &TM = MBB.getParent()->getTarget();
250  // On Darwin platforms, wrap the expanded sequence in a bundle to prevent
251  // later optimizations from breaking up the sequence.
252  if (TM.getTargetTriple().isOSDarwin())
253  finalizeBundle(MBB, OriginalCall->getIterator(),
254  std::next(RtCall->getIterator()));
255 }
256 
257 /// If \p MBBI is a pseudo instruction, this method expands
258 /// it to the corresponding (sequence of) actual instruction(s).
259 /// \returns true if \p MBBI has been expanded.
260 bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
261  MachineBasicBlock::iterator MBBI) {
262  MachineInstr &MI = *MBBI;
263  unsigned Opcode = MI.getOpcode();
264  const DebugLoc &DL = MBBI->getDebugLoc();
265  switch (Opcode) {
266  default:
267  return false;
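  // The TCRETURN* pseudos carry a jump target plus a stack adjustment. The
  // expansion below materializes any non-zero adjustment (folding it into a
  // preceding SP update when possible) and then rewrites the pseudo into the
  // matching TAILJMP{d,d_CC,m,r}[64] instruction.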
268  case X86::TCRETURNdi:
269  case X86::TCRETURNdicc:
270  case X86::TCRETURNri:
271  case X86::TCRETURNmi:
272  case X86::TCRETURNdi64:
273  case X86::TCRETURNdi64cc:
274  case X86::TCRETURNri64:
275  case X86::TCRETURNmi64: {
276  bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64;
277  MachineOperand &JumpTarget = MBBI->getOperand(0);
278  MachineOperand &StackAdjust = MBBI->getOperand(isMem ? X86::AddrNumOperands
279  : 1);
280  assert(StackAdjust.isImm() && "Expecting immediate value.");
281 
282  // Adjust stack pointer.
283  int StackAdj = StackAdjust.getImm();
284  int MaxTCDelta = X86FI->getTCReturnAddrDelta();
285  int Offset = 0;
286  assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
287 
288  // Incorporate the retaddr area.
289  Offset = StackAdj - MaxTCDelta;
290  assert(Offset >= 0 && "Offset should never be negative");
291 
292  if (Opcode == X86::TCRETURNdicc || Opcode == X86::TCRETURNdi64cc) {
293  assert(Offset == 0 && "Conditional tail call cannot adjust the stack.");
294  }
295 
296  if (Offset) {
297  // Check for possible merge with preceding ADD instruction.
298  Offset += X86FL->mergeSPUpdates(MBB, MBBI, true);
299  X86FL->emitSPUpdate(MBB, MBBI, DL, Offset, /*InEpilogue=*/true);
300  }
301 
302  // Jump to label or value in register.
303  bool IsWin64 = STI->isTargetWin64();
304  if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdicc ||
305  Opcode == X86::TCRETURNdi64 || Opcode == X86::TCRETURNdi64cc) {
306  unsigned Op;
307  switch (Opcode) {
308  case X86::TCRETURNdi:
309  Op = X86::TAILJMPd;
310  break;
311  case X86::TCRETURNdicc:
312  Op = X86::TAILJMPd_CC;
313  break;
314  case X86::TCRETURNdi64cc:
315  assert(!MBB.getParent()->hasWinCFI() &&
316  "Conditional tail calls confuse "
317  "the Win64 unwinder.");
318  Op = X86::TAILJMPd64_CC;
319  break;
320  default:
321  // Note: Win64 uses REX prefixes for indirect jumps out of functions, but
322  // not for direct ones.
323  Op = X86::TAILJMPd64;
324  break;
325  }
326  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
327  if (JumpTarget.isGlobal()) {
328  MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
329  JumpTarget.getTargetFlags());
330  } else {
331  assert(JumpTarget.isSymbol());
332  MIB.addExternalSymbol(JumpTarget.getSymbolName(),
333  JumpTarget.getTargetFlags());
334  }
335  if (Op == X86::TAILJMPd_CC || Op == X86::TAILJMPd64_CC) {
336  MIB.addImm(MBBI->getOperand(2).getImm());
337  }
338 
339  } else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64) {
340  unsigned Op = (Opcode == X86::TCRETURNmi)
341  ? X86::TAILJMPm
342  : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
343  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
344  for (unsigned i = 0; i != X86::AddrNumOperands; ++i)
345  MIB.add(MBBI->getOperand(i));
346  } else if (Opcode == X86::TCRETURNri64) {
347  JumpTarget.setIsKill();
348  BuildMI(MBB, MBBI, DL,
349  TII->get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
350  .add(JumpTarget);
351  } else {
352  JumpTarget.setIsKill();
353  BuildMI(MBB, MBBI, DL, TII->get(X86::TAILJMPr))
354  .add(JumpTarget);
355  }
356 
357  MachineInstr &NewMI = *std::prev(MBBI);
358  NewMI.copyImplicitOps(*MBBI->getParent()->getParent(), *MBBI);
359 
360  // Update the call site info.
361  if (MBBI->isCandidateForCallSiteEntry())
362  MBB.getParent()->moveCallSiteInfo(&*MBBI, &NewMI);
363 
364  // Delete the pseudo instruction TCRETURN.
365  MBB.erase(MBBI);
366 
367  return true;
368  }
369  case X86::EH_RETURN:
370  case X86::EH_RETURN64: {
371  MachineOperand &DestAddr = MBBI->getOperand(0);
372  assert(DestAddr.isReg() && "Offset should be in register!");
373  const bool Uses64BitFramePtr =
374  STI->isTarget64BitLP64() || STI->isTargetNaCl64();
375  Register StackPtr = TRI->getStackRegister();
376  BuildMI(MBB, MBBI, DL,
377  TII->get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), StackPtr)
378  .addReg(DestAddr.getReg());
379  // The EH_RETURN pseudo is really removed during the MC Lowering.
380  return true;
381  }
382  case X86::IRET: {
383  // Adjust stack to erase error code
384  int64_t StackAdj = MBBI->getOperand(0).getImm();
385  X86FL->emitSPUpdate(MBB, MBBI, DL, StackAdj, true);
386  // Replace pseudo with machine iret
387  unsigned RetOp = STI->is64Bit() ? X86::IRET64 : X86::IRET32;
388  // Use UIRET if UINTR is present (except when building the kernel)
389  if (STI->is64Bit() && STI->hasUINTR() &&
390  MBB.getParent()->getTarget().getCodeModel() != CodeModel::Kernel)
391  RetOp = X86::UIRET;
392  BuildMI(MBB, MBBI, DL, TII->get(RetOp));
393  MBB.erase(MBBI);
394  return true;
395  }
396  case X86::RET: {
397  // Adjust stack to erase error code
398  int64_t StackAdj = MBBI->getOperand(0).getImm();
399  MachineInstrBuilder MIB;
400  if (StackAdj == 0) {
401  MIB = BuildMI(MBB, MBBI, DL,
402  TII->get(STI->is64Bit() ? X86::RET64 : X86::RET32));
403  } else if (isUInt<16>(StackAdj)) {
404  MIB = BuildMI(MBB, MBBI, DL,
405  TII->get(STI->is64Bit() ? X86::RETI64 : X86::RETI32))
406  .addImm(StackAdj);
407  } else {
408  assert(!STI->is64Bit() &&
409  "shouldn't need to do this for x86_64 targets!");
410  // A ret can only handle immediates as big as 2**16-1. If we need to pop
411  // off bytes before the return address, we must do it manually.
412  BuildMI(MBB, MBBI, DL, TII->get(X86::POP32r)).addReg(X86::ECX, RegState::Define);
413  X86FL->emitSPUpdate(MBB, MBBI, DL, StackAdj, /*InEpilogue=*/true);
414  BuildMI(MBB, MBBI, DL, TII->get(X86::PUSH32r)).addReg(X86::ECX);
415  MIB = BuildMI(MBB, MBBI, DL, TII->get(X86::RET32));
416  }
417  for (unsigned I = 1, E = MBBI->getNumOperands(); I != E; ++I)
418  MIB.add(MBBI->getOperand(I));
419  MBB.erase(MBBI);
420  return true;
421  }
422  case X86::LCMPXCHG16B_SAVE_RBX: {
423  // Perform the following transformation.
424  // SaveRbx = pseudocmpxchg Addr, <4 opds for the address>, InArg, SaveRbx
425  // =>
426  // RBX = InArg
427  // actualcmpxchg Addr
428  // RBX = SaveRbx
429  const MachineOperand &InArg = MBBI->getOperand(6);
430  Register SaveRbx = MBBI->getOperand(7).getReg();
431 
432  // Copy the input argument of the pseudo into the argument of the
433  // actual instruction.
434  // NOTE: We don't copy the kill flag since the input might be the same reg
435  // as one of the other operands of LCMPXCHG16B.
436  TII->copyPhysReg(MBB, MBBI, DL, X86::RBX, InArg.getReg(), false);
437  // Create the actual instruction.
438  MachineInstr *NewInstr = BuildMI(MBB, MBBI, DL, TII->get(X86::LCMPXCHG16B));
439  // Copy the operands related to the address.
440  for (unsigned Idx = 1; Idx < 6; ++Idx)
441  NewInstr->addOperand(MBBI->getOperand(Idx));
442  // Finally, restore the value of RBX.
443  TII->copyPhysReg(MBB, MBBI, DL, X86::RBX, SaveRbx,
444  /*SrcIsKill*/ true);
445 
446  // Delete the pseudo.
447  MBBI->eraseFromParent();
448  return true;
449  }
450  // Loading/storing mask pairs requires two kmov operations. The second one of
451  // these needs a 2 byte displacement relative to the specified address (with
452  // 32-bit spill size). The pairs of 1-bit masks up to 16-bit masks all use the
453  // same spill size; they are all stored using MASKPAIR16STORE and loaded using
454  // MASKPAIR16LOAD.
455  //
456  // The displacement value might wrap around in theory, thus the asserts in
457  // both cases.
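  // For instance, a MASKPAIR16LOAD of the pair register holding %k0/%k1 with
  // displacement disp becomes, roughly:
  //   KMOVWkm %k0, [base + disp]
  //   KMOVWkm %k1, [base + disp + 2]
  // and MASKPAIR16STORE is split the same way using KMOVWmk.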
458  case X86::MASKPAIR16LOAD: {
459  int64_t Disp = MBBI->getOperand(1 + X86::AddrDisp).getImm();
460  assert(Disp >= 0 && Disp <= INT32_MAX - 2 && "Unexpected displacement");
461  Register Reg = MBBI->getOperand(0).getReg();
462  bool DstIsDead = MBBI->getOperand(0).isDead();
463  Register Reg0 = TRI->getSubReg(Reg, X86::sub_mask_0);
464  Register Reg1 = TRI->getSubReg(Reg, X86::sub_mask_1);
465 
466  auto MIBLo = BuildMI(MBB, MBBI, DL, TII->get(X86::KMOVWkm))
467  .addReg(Reg0, RegState::Define | getDeadRegState(DstIsDead));
468  auto MIBHi = BuildMI(MBB, MBBI, DL, TII->get(X86::KMOVWkm))
469  .addReg(Reg1, RegState::Define | getDeadRegState(DstIsDead));
470 
471  for (int i = 0; i < X86::AddrNumOperands; ++i) {
472  MIBLo.add(MBBI->getOperand(1 + i));
473  if (i == X86::AddrDisp)
474  MIBHi.addImm(Disp + 2);
475  else
476  MIBHi.add(MBBI->getOperand(1 + i));
477  }
478 
479  // Split the memory operand, adjusting the offset and size for the halves.
480  MachineMemOperand *OldMMO = MBBI->memoperands().front();
481  MachineFunction *MF = MBB.getParent();
482  MachineMemOperand *MMOLo = MF->getMachineMemOperand(OldMMO, 0, 2);
483  MachineMemOperand *MMOHi = MF->getMachineMemOperand(OldMMO, 2, 2);
484 
485  MIBLo.setMemRefs(MMOLo);
486  MIBHi.setMemRefs(MMOHi);
487 
488  // Delete the pseudo.
489  MBB.erase(MBBI);
490  return true;
491  }
492  case X86::MASKPAIR16STORE: {
493  int64_t Disp = MBBI->getOperand(X86::AddrDisp).getImm();
494  assert(Disp >= 0 && Disp <= INT32_MAX - 2 && "Unexpected displacement");
495  Register Reg = MBBI->getOperand(X86::AddrNumOperands).getReg();
496  bool SrcIsKill = MBBI->getOperand(X86::AddrNumOperands).isKill();
497  Register Reg0 = TRI->getSubReg(Reg, X86::sub_mask_0);
498  Register Reg1 = TRI->getSubReg(Reg, X86::sub_mask_1);
499 
500  auto MIBLo = BuildMI(MBB, MBBI, DL, TII->get(X86::KMOVWmk));
501  auto MIBHi = BuildMI(MBB, MBBI, DL, TII->get(X86::KMOVWmk));
502 
503  for (int i = 0; i < X86::AddrNumOperands; ++i) {
504  MIBLo.add(MBBI->getOperand(i));
505  if (i == X86::AddrDisp)
506  MIBHi.addImm(Disp + 2);
507  else
508  MIBHi.add(MBBI->getOperand(i));
509  }
510  MIBLo.addReg(Reg0, getKillRegState(SrcIsKill));
511  MIBHi.addReg(Reg1, getKillRegState(SrcIsKill));
512 
513  // Split the memory operand, adjusting the offset and size for the halves.
514  MachineMemOperand *OldMMO = MBBI->memoperands().front();
515  MachineFunction *MF = MBB.getParent();
516  MachineMemOperand *MMOLo = MF->getMachineMemOperand(OldMMO, 0, 2);
517  MachineMemOperand *MMOHi = MF->getMachineMemOperand(OldMMO, 2, 2);
518 
519  MIBLo.setMemRefs(MMOLo);
520  MIBHi.setMemRefs(MMOHi);
521 
522  // Delete the pseudo.
523  MBB.erase(MBBI);
524  return true;
525  }
526  case X86::MWAITX_SAVE_RBX: {
527  // Perform the following transformation.
528  // SaveRbx = pseudomwaitx InArg, SaveRbx
529  // =>
530  // [E|R]BX = InArg
531  // actualmwaitx
532  // [E|R]BX = SaveRbx
533  const MachineOperand &InArg = MBBI->getOperand(1);
534  // Copy the input argument of the pseudo into the argument of the
535  // actual instruction.
536  TII->copyPhysReg(MBB, MBBI, DL, X86::EBX, InArg.getReg(), InArg.isKill());
537  // Create the actual instruction.
538  BuildMI(MBB, MBBI, DL, TII->get(X86::MWAITXrrr));
539  // Finally, restore the value of RBX.
540  Register SaveRbx = MBBI->getOperand(2).getReg();
541  TII->copyPhysReg(MBB, MBBI, DL, X86::RBX, SaveRbx, /*SrcIsKill*/ true);
542  // Delete the pseudo.
543  MBBI->eraseFromParent();
544  return true;
545  }
546  case TargetOpcode::ICALL_BRANCH_FUNNEL:
547  ExpandICallBranchFunnel(&MBB, MBBI);
548  return true;
549  case X86::PLDTILECFGV: {
550  MI.setDesc(TII->get(X86::LDTILECFG));
551  return true;
552  }
553  case X86::PTILELOADDV:
554  case X86::PTILELOADDT1V: {
555  for (unsigned i = 2; i > 0; --i)
556  MI.removeOperand(i);
557  unsigned Opc =
558  Opcode == X86::PTILELOADDV ? X86::TILELOADD : X86::TILELOADDT1;
559  MI.setDesc(TII->get(Opc));
560  return true;
561  }
562  case X86::PTDPBSSDV:
563  case X86::PTDPBSUDV:
564  case X86::PTDPBUSDV:
565  case X86::PTDPBUUDV:
566  case X86::PTDPBF16PSV: {
567  MI.untieRegOperand(4);
568  for (unsigned i = 3; i > 0; --i)
569  MI.removeOperand(i);
570  unsigned Opc;
571  switch (Opcode) {
572  case X86::PTDPBSSDV: Opc = X86::TDPBSSD; break;
573  case X86::PTDPBSUDV: Opc = X86::TDPBSUD; break;
574  case X86::PTDPBUSDV: Opc = X86::TDPBUSD; break;
575  case X86::PTDPBUUDV: Opc = X86::TDPBUUD; break;
576  case X86::PTDPBF16PSV: Opc = X86::TDPBF16PS; break;
577  default: llvm_unreachable("Impossible Opcode!");
578  }
579  MI.setDesc(TII->get(Opc));
580  MI.tieOperands(0, 1);
581  return true;
582  }
583  case X86::PTILESTOREDV: {
584  for (int i = 1; i >= 0; --i)
585  MI.removeOperand(i);
586  MI.setDesc(TII->get(X86::TILESTORED));
587  return true;
588  }
589  case X86::PTILEZEROV: {
590  for (int i = 2; i > 0; --i) // Remove row, col
591  MI.removeOperand(i);
592  MI.setDesc(TII->get(X86::TILEZERO));
593  return true;
594  }
595  case X86::CALL64pcrel32_RVMARKER:
596  case X86::CALL64r_RVMARKER:
597  case X86::CALL64m_RVMARKER:
598  expandCALL_RVMARKER(MBB, MBBI);
599  return true;
600  }
601  llvm_unreachable("Previous switch has a fallthrough?");
602 }
603 
604 // This function creates an additional block for storing varargs guarded
605 // registers. It adds a check of %al to the entry block, to skip
606 // GuardedRegsBlk if the xmm registers should not be stored.
607 //
608 //     EntryBlk[VAStartPseudoInstr]     EntryBlk
609 //        |                              |     .
610 //        |                              |        .
611 //        |                              |  GuardedRegsBlk
612 //        |                      =>      |        .
613 //        |                              |     .
614 //        |                             TailBlk
615 //        |                              |
616 //        |                              |
617 //
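// For the non-Win64 (SysV) case, the guard appended to EntryBlk is roughly:
//   testb %al, %al     ## %al holds the number of vector arguments
//   je    TailBlk      ## skip GuardedRegsBlk when it is zero
// and GuardedRegsBlk then holds one (V)MOVAPS store per XMM argument register.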
618 void X86ExpandPseudo::ExpandVastartSaveXmmRegs(
619  MachineBasicBlock *EntryBlk,
620  MachineBasicBlock::iterator VAStartPseudoInstr) const {
621  assert(VAStartPseudoInstr->getOpcode() == X86::VASTART_SAVE_XMM_REGS);
622 
623  MachineFunction *Func = EntryBlk->getParent();
624  const TargetInstrInfo *TII = STI->getInstrInfo();
625  const DebugLoc &DL = VAStartPseudoInstr->getDebugLoc();
626  Register CountReg = VAStartPseudoInstr->getOperand(0).getReg();
627 
628  // Calculate liveins for newly created blocks.
629  LivePhysRegs LiveRegs(*STI->getRegisterInfo());
630  SmallVector<std::pair<MCPhysReg, const MachineInstr *>, 8> Clobbers;
631 
632  LiveRegs.addLiveIns(*EntryBlk);
633  for (MachineInstr &MI : EntryBlk->instrs()) {
634  if (MI.getOpcode() == VAStartPseudoInstr->getOpcode())
635  break;
636 
637  LiveRegs.stepForward(MI, Clobbers);
638  }
639 
640  // Create the new basic blocks. One block contains all the XMM stores,
641  // and another block is the final destination regardless of whether any
642  // stores were performed.
643  const BasicBlock *LLVMBlk = EntryBlk->getBasicBlock();
644  MachineFunction::iterator EntryBlkIter = ++EntryBlk->getIterator();
645  MachineBasicBlock *GuardedRegsBlk = Func->CreateMachineBasicBlock(LLVMBlk);
646  MachineBasicBlock *TailBlk = Func->CreateMachineBasicBlock(LLVMBlk);
647  Func->insert(EntryBlkIter, GuardedRegsBlk);
648  Func->insert(EntryBlkIter, TailBlk);
649 
650  // Transfer the remainder of EntryBlk and its successor edges to TailBlk.
651  TailBlk->splice(TailBlk->begin(), EntryBlk,
652  std::next(MachineBasicBlock::iterator(VAStartPseudoInstr)),
653  EntryBlk->end());
654  TailBlk->transferSuccessorsAndUpdatePHIs(EntryBlk);
655 
656  uint64_t FrameOffset = VAStartPseudoInstr->getOperand(4).getImm();
657  uint64_t VarArgsRegsOffset = VAStartPseudoInstr->getOperand(6).getImm();
658 
659  // TODO: add support for YMM and ZMM here.
660  unsigned MOVOpc = STI->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
661 
662  // In the XMM save block, save all the XMM argument registers.
663  for (int64_t OpndIdx = 7, RegIdx = 0;
664  OpndIdx < VAStartPseudoInstr->getNumOperands() - 1;
665  OpndIdx++, RegIdx++) {
666  auto NewMI = BuildMI(GuardedRegsBlk, DL, TII->get(MOVOpc));
667  for (int i = 0; i < X86::AddrNumOperands; ++i) {
668  if (i == X86::AddrDisp)
669  NewMI.addImm(FrameOffset + VarArgsRegsOffset + RegIdx * 16);
670  else
671  NewMI.add(VAStartPseudoInstr->getOperand(i + 1));
672  }
673  NewMI.addReg(VAStartPseudoInstr->getOperand(OpndIdx).getReg());
674  assert(Register::isPhysicalRegister(
675  VAStartPseudoInstr->getOperand(OpndIdx).getReg()));
676  }
677 
678  // The original block will now fall through to the GuardedRegsBlk.
679  EntryBlk->addSuccessor(GuardedRegsBlk);
680  // The GuardedRegsBlk will fall through to the TailBlk.
681  GuardedRegsBlk->addSuccessor(TailBlk);
682 
683  if (!STI->isCallingConvWin64(Func->getFunction().getCallingConv())) {
684  // If %al is 0, branch around the XMM save block.
685  BuildMI(EntryBlk, DL, TII->get(X86::TEST8rr))
686  .addReg(CountReg)
687  .addReg(CountReg);
688  BuildMI(EntryBlk, DL, TII->get(X86::JCC_1))
689  .addMBB(TailBlk)
690  .addImm(X86::COND_E);
691  EntryBlk->addSuccessor(TailBlk);
692  }
693 
694  // Add liveins to the created block.
695  addLiveIns(*GuardedRegsBlk, LiveRegs);
696  addLiveIns(*TailBlk, LiveRegs);
697 
698  // Delete the pseudo.
699  VAStartPseudoInstr->eraseFromParent();
700 }
701 
702 /// Expand all pseudo instructions contained in \p MBB.
703 /// \returns true if any expansion occurred for \p MBB.
704 bool X86ExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
705  bool Modified = false;
706 
707  // MBBI may be invalidated by the expansion.
708  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
709  while (MBBI != E) {
710  MachineBasicBlock::iterator NMBBI = std::next(MBBI);
711  Modified |= ExpandMI(MBB, MBBI);
712  MBBI = NMBBI;
713  }
714 
715  return Modified;
716 }
717 
718 bool X86ExpandPseudo::ExpandPseudosWhichAffectControlFlow(MachineFunction &MF) {
719  // Currently the only pseudo which affects control flow is
720  // X86::VASTART_SAVE_XMM_REGS, which is located in the entry block,
721  // so we do not need to evaluate other blocks.
722  for (MachineInstr &Instr : MF.front().instrs()) {
723  if (Instr.getOpcode() == X86::VASTART_SAVE_XMM_REGS) {
724  ExpandVastartSaveXmmRegs(&(MF.front()), Instr);
725  return true;
726  }
727  }
728 
729  return false;
730 }
731 
732 bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
733  STI = &MF.getSubtarget<X86Subtarget>();
734  TII = STI->getInstrInfo();
735  TRI = STI->getRegisterInfo();
736  X86FI = MF.getInfo<X86MachineFunctionInfo>();
737  X86FL = STI->getFrameLowering();
738 
739  bool Modified = ExpandPseudosWhichAffectControlFlow(MF);
740 
741  for (MachineBasicBlock &MBB : MF)
742  Modified |= ExpandMBB(MBB);
743  return Modified;
744 }
745 
746 /// Returns an instance of the pseudo instruction expansion pass.
747 FunctionPass *llvm::createX86ExpandPseudoPass() {
748  return new X86ExpandPseudo();
749 }