RISCVExpandPseudoInsts.cpp
//===-- RISCVExpandPseudoInsts.cpp - Expand pseudo instructions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions. This pass should be run after register allocation but before
// the post-regalloc scheduling pass.
//
//===----------------------------------------------------------------------===//

#include "RISCV.h"
#include "RISCVInstrInfo.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

#define RISCV_EXPAND_PSEUDO_NAME "RISCV pseudo instruction expansion pass"

namespace {

class RISCVExpandPseudo : public MachineFunctionPass {
public:
  const RISCVInstrInfo *TII;
  static char ID;

  RISCVExpandPseudo() : MachineFunctionPass(ID) {
    initializeRISCVExpandPseudoPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return RISCV_EXPAND_PSEUDO_NAME; }

private:
  bool expandMBB(MachineBasicBlock &MBB);
  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicBinOp(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI, AtomicRMWInst::BinOp,
                         bool IsMasked, int Width,
                         MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicMinMaxOp(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            AtomicRMWInst::BinOp, bool IsMasked, int Width,
                            MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, bool IsMasked,
                           int Width, MachineBasicBlock::iterator &NextMBBI);
  bool expandAuipcInstPair(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI,
                           MachineBasicBlock::iterator &NextMBBI,
                           unsigned FlagsHi, unsigned SecondOpcode);
  bool expandLoadLocalAddress(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI,
                              MachineBasicBlock::iterator &NextMBBI);
  bool expandLoadAddress(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI,
                         MachineBasicBlock::iterator &NextMBBI);
  bool expandLoadTLSIEAddress(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI,
                              MachineBasicBlock::iterator &NextMBBI);
  bool expandLoadTLSGDAddress(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI,
                              MachineBasicBlock::iterator &NextMBBI);
};

char RISCVExpandPseudo::ID = 0;

bool RISCVExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const RISCVInstrInfo *>(MF.getSubtarget().getInstrInfo());
  bool Modified = false;
  for (auto &MBB : MF)
    Modified |= expandMBB(MBB);
  return Modified;
}

bool RISCVExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= expandMI(MBB, MBBI, NMBBI);
    MBBI = NMBBI;
  }

  return Modified;
}

bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 MachineBasicBlock::iterator &NextMBBI) {
  switch (MBBI->getOpcode()) {
  case RISCV::PseudoAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadNand64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicSwap32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadAdd32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadSub32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32,
                                NextMBBI);
  case RISCV::PseudoCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
  case RISCV::PseudoCmpXchg64:
    return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
  case RISCV::PseudoMaskedCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
  case RISCV::PseudoLLA:
    return expandLoadLocalAddress(MBB, MBBI, NextMBBI);
  case RISCV::PseudoLA:
    return expandLoadAddress(MBB, MBBI, NextMBBI);
  case RISCV::PseudoLA_TLS_IE:
    return expandLoadTLSIEAddress(MBB, MBBI, NextMBBI);
  case RISCV::PseudoLA_TLS_GD:
    return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI);
  }

  return false;
}

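// The LR/SC opcode chosen for an atomic RMW sequence encodes the requested
// memory ordering in the .aq/.rl bits: acquire-or-stronger orderings set .aq
// on the LR, release-or-stronger orderings set .rl on the SC, and
// sequentially consistent operations use the .aqrl form of both.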
static unsigned getLRForRMW32(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::LR_W;
  case AtomicOrdering::Acquire:
    return RISCV::LR_W_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_W;
  case AtomicOrdering::AcquireRelease:
    return RISCV::LR_W_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_W_AQ_RL;
  }
}

static unsigned getSCForRMW32(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::SC_W;
  case AtomicOrdering::Acquire:
    return RISCV::SC_W;
  case AtomicOrdering::Release:
    return RISCV::SC_W_RL;
  case AtomicOrdering::AcquireRelease:
    return RISCV::SC_W_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_W_AQ_RL;
  }
}

static unsigned getLRForRMW64(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::LR_D;
  case AtomicOrdering::Acquire:
    return RISCV::LR_D_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_D;
  case AtomicOrdering::AcquireRelease:
    return RISCV::LR_D_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_D_AQ_RL;
  }
}

static unsigned getSCForRMW64(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::SC_D;
  case AtomicOrdering::Acquire:
    return RISCV::SC_D;
  case AtomicOrdering::Release:
    return RISCV::SC_D_RL;
  case AtomicOrdering::AcquireRelease:
    return RISCV::SC_D_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_D_AQ_RL;
  }
}

static unsigned getLRForRMW(AtomicOrdering Ordering, int Width) {
  if (Width == 32)
    return getLRForRMW32(Ordering);
  if (Width == 64)
    return getLRForRMW64(Ordering);
  llvm_unreachable("Unexpected LR width\n");
}

static unsigned getSCForRMW(AtomicOrdering Ordering, int Width) {
  if (Width == 32)
    return getSCForRMW32(Ordering);
  if (Width == 64)
    return getSCForRMW64(Ordering);
  llvm_unreachable("Unexpected SC width\n");
}

static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
                                   DebugLoc DL, MachineBasicBlock *ThisMBB,
                                   MachineBasicBlock *LoopMBB,
                                   MachineBasicBlock *DoneMBB,
                                   AtomicRMWInst::BinOp BinOp, int Width) {
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register IncrReg = MI.getOperand(3).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(4).getImm());

  // .loop:
  //   lr.[w|d] dest, (addr)
  //   binop scratch, dest, val
  //   sc.[w|d] scratch, scratch, (addr)
  //   bnez scratch, loop
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
      .addReg(AddrReg);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Nand:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
        .addReg(ScratchReg)
        .addImm(-1);
    break;
  }
  BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopMBB);
}
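
// For example, PseudoAtomicLoadNand32 with seq_cst ordering becomes:
//   .loop:
//     lr.w.aqrl dest, (addr)
//     and scratch, dest, incr
//     xori scratch, scratch, -1
//     sc.w.aqrl scratch, scratch, (addr)
//     bnez scratch, .loop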

static void insertMaskedMerge(const RISCVInstrInfo *TII, DebugLoc DL,
                              MachineBasicBlock *MBB, Register DestReg,
                              Register OldValReg, Register NewValReg,
                              Register MaskReg, Register ScratchReg) {
  assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
  assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
  assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");

  // We select bits from newval and oldval using:
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
  // r = oldval ^ ((oldval ^ newval) & masktargetdata);
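  // Where a mask bit is 1, the AND keeps the (oldval ^ newval) bit and the
  // outer XOR flips oldval's bit to newval's; where it is 0, the AND clears
  // the bit and oldval's bit passes through unchanged. So r takes newval
  // inside the mask and oldval outside it, using a single scratch register.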
  BuildMI(MBB, DL, TII->get(RISCV::XOR), ScratchReg)
      .addReg(OldValReg)
      .addReg(NewValReg);
  BuildMI(MBB, DL, TII->get(RISCV::AND), ScratchReg)
      .addReg(ScratchReg)
      .addReg(MaskReg);
  BuildMI(MBB, DL, TII->get(RISCV::XOR), DestReg)
      .addReg(OldValReg)
      .addReg(ScratchReg);
}

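// The masked forms exist because the RISC-V A extension only provides word-
// and doubleword-sized LR/SC. Atomics on i8/i16 values are therefore lowered
// to operate on the aligned word containing them, with a mask selecting the
// bytes of interest; the merge above writes back only those bytes.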
static void doMaskedAtomicBinOpExpansion(
    const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
    MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
    MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register IncrReg = MI.getOperand(3).getReg();
  Register MaskReg = MI.getOperand(4).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(5).getImm());

  // .loop:
  //   lr.w destreg, (alignedaddr)
  //   binop scratch, destreg, incr
  //   xor scratch, destreg, scratch
  //   and scratch, scratch, masktargetdata
  //   xor scratch, destreg, scratch
  //   sc.w scratch, scratch, (alignedaddr)
  //   bnez scratch, loop
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
      .addReg(AddrReg);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Xchg:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(RISCV::X0)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Add:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Sub:
    BuildMI(LoopMBB, DL, TII->get(RISCV::SUB), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Nand:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
        .addReg(ScratchReg)
        .addImm(-1);
    break;
  }

  insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,
                    ScratchReg);

  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopMBB);
}

bool RISCVExpandPseudo::expandAtomicBinOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  auto LoopMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopMBB);
  MF->insert(++LoopMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopMBB);

  if (!IsMasked)
    doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width);
  else
    doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
                                 Width);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

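// Sign-extend ValReg in place: shifting left by ShamtReg bits and then
// arithmetic-shifting right by the same amount replicates the masked field's
// sign bit across the upper bits, which the signed min/max comparisons need.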
static void insertSext(const RISCVInstrInfo *TII, DebugLoc DL,
                       MachineBasicBlock *MBB, Register ValReg,
                       Register ShamtReg) {
  BuildMI(MBB, DL, TII->get(RISCV::SLL), ValReg)
      .addReg(ValReg)
      .addReg(ShamtReg);
  BuildMI(MBB, DL, TII->get(RISCV::SRA), ValReg)
      .addReg(ValReg)
      .addReg(ShamtReg);
}

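// Min/max cannot be expressed as a single ALU operation, so this expansion
// uses a separate if-body block: the loop head compares the (masked, and for
// signed ops sign-extended) current value against the increment and branches
// straight to the loop tail when no change is needed, storing back a copy of
// the original word; only otherwise does the if-body merge in the new value.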
bool RISCVExpandPseudo::expandAtomicMinMaxOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  assert(IsMasked == true &&
         "Should only need to expand masked atomic max/min");
  assert(Width == 32 && "Should never need to expand masked 64-bit operations");

  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB.getParent();
  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopIfBodyMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MF->insert(++LoopHeadMBB->getIterator(), LoopIfBodyMBB);
  MF->insert(++LoopIfBodyMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopHeadMBB->addSuccessor(LoopIfBodyMBB);
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopIfBodyMBB->addSuccessor(LoopTailMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  LoopTailMBB->addSuccessor(DoneMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopHeadMBB);

  Register DestReg = MI.getOperand(0).getReg();
  Register Scratch1Reg = MI.getOperand(1).getReg();
  Register Scratch2Reg = MI.getOperand(2).getReg();
  Register AddrReg = MI.getOperand(3).getReg();
  Register IncrReg = MI.getOperand(4).getReg();
  Register MaskReg = MI.getOperand(5).getReg();
  bool IsSigned = BinOp == AtomicRMWInst::Min || BinOp == AtomicRMWInst::Max;
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(IsSigned ? 7 : 6).getImm());

  //
  // .loophead:
  //   lr.w destreg, (alignedaddr)
  //   and scratch2, destreg, mask
  //   mv scratch1, destreg
  //   [sext scratch2 if signed min/max]
  //   ifnochangeneeded scratch2, incr, .looptail
  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
      .addReg(AddrReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)
      .addReg(DestReg)
      .addReg(MaskReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)
      .addReg(DestReg)
      .addImm(0);

  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Max: {
    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(Scratch2Reg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::Min: {
    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(IncrReg)
        .addReg(Scratch2Reg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::UMax:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(Scratch2Reg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  case AtomicRMWInst::UMin:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(IncrReg)
        .addReg(Scratch2Reg)
        .addMBB(LoopTailMBB);
    break;
  }

  // .loopifbody:
  //   xor scratch1, destreg, incr
  //   and scratch1, scratch1, mask
  //   xor scratch1, destreg, scratch1
  insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
                    MaskReg, Scratch1Reg);

  // .looptail:
  //   sc.w scratch1, scratch1, (addr)
  //   bnez scratch1, loop
  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), Scratch1Reg)
      .addReg(AddrReg)
      .addReg(Scratch1Reg);
  BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
      .addReg(Scratch1Reg)
      .addReg(RISCV::X0)
      .addMBB(LoopHeadMBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
  computeAndAddLiveIns(LiveRegs, *LoopIfBodyMBB);
  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

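// Compare-and-swap expands to a two-block loop: the head loads and compares,
// exiting early to DoneMBB on a mismatch, and the tail attempts the store,
// retrying from the head if the SC fails (i.e. the reservation was lost).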
bool RISCVExpandPseudo::expandAtomicCmpXchg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
    int Width, MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB.getParent();
  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MF->insert(++LoopHeadMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopHeadMBB->addSuccessor(DoneMBB);
  LoopTailMBB->addSuccessor(DoneMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopHeadMBB);

  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register CmpValReg = MI.getOperand(3).getReg();
  Register NewValReg = MI.getOperand(4).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(IsMasked ? 6 : 5).getImm());

  if (!IsMasked) {
    // .loophead:
    //   lr.[w|d] dest, (addr)
    //   bne dest, cmpval, done
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(DestReg)
        .addReg(CmpValReg)
        .addMBB(DoneMBB);
    // .looptail:
    //   sc.[w|d] scratch, newval, (addr)
    //   bnez scratch, loophead
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
        .addReg(AddrReg)
        .addReg(NewValReg);
    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(RISCV::X0)
        .addMBB(LoopHeadMBB);
  } else {
    // .loophead:
    //   lr.w dest, (addr)
    //   and scratch, dest, mask
    //   bne scratch, cmpval, done
    Register MaskReg = MI.getOperand(5).getReg();
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(MaskReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(CmpValReg)
        .addMBB(DoneMBB);

    // .looptail:
    //   xor scratch, dest, newval
    //   and scratch, scratch, mask
    //   xor scratch, dest, scratch
    //   sc.w scratch, scratch, (addr)
    //   bnez scratch, loophead
    insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
                      MaskReg, ScratchReg);
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
        .addReg(AddrReg)
        .addReg(ScratchReg);
    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(RISCV::X0)
        .addMBB(LoopHeadMBB);
  }

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

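// Expands an AUIPC/second-instruction pair used for PC-relative addressing.
// The AUIPC is placed at the head of a fresh basic block whose label the
// second instruction's %pcrel_lo references:
//   .Lpcrel_hi:
//     auipc dest, %<hi-reloc>(symbol)
//     <SecondOpcode> dest, ... %pcrel_lo(.Lpcrel_hi) ...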
bool RISCVExpandPseudo::expandAuipcInstPair(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI, unsigned FlagsHi,
    unsigned SecondOpcode) {
  MachineFunction *MF = MBB.getParent();
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();

  Register DestReg = MI.getOperand(0).getReg();
  const MachineOperand &Symbol = MI.getOperand(1);

  MachineBasicBlock *NewMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Tell AsmPrinter that we unconditionally want the symbol of this label to be
  // emitted.
  NewMBB->setLabelMustBeEmitted();

  MF->insert(++MBB.getIterator(), NewMBB);

  BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg)
      .addDisp(Symbol, 0, FlagsHi);
  BuildMI(NewMBB, DL, TII->get(SecondOpcode), DestReg)
      .addReg(DestReg)
      .addMBB(NewMBB, RISCVII::MO_PCREL_LO);

  // Move all the rest of the instructions to NewMBB.
  NewMBB->splice(NewMBB->end(), &MBB, std::next(MBBI), MBB.end());
  // Update machine-CFG edges.
  NewMBB->transferSuccessorsAndUpdatePHIs(&MBB);
  // Make the original basic block fall-through to the new.
  MBB.addSuccessor(NewMBB);

  // Make sure live-ins are correctly attached to this new basic block.
  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *NewMBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();
  return true;
}

bool RISCVExpandPseudo::expandLoadLocalAddress(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_PCREL_HI,
                             RISCV::ADDI);
}

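// PseudoLA computes the address PC-relatively in non-PIC code (auipc+addi),
// but in PIC code it becomes a GOT access: auipc with %got_pcrel_hi followed
// by an XLEN-sized load of the address from the GOT entry.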
bool RISCVExpandPseudo::expandLoadAddress(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineFunction *MF = MBB.getParent();

  unsigned SecondOpcode;
  unsigned FlagsHi;
  if (MF->getTarget().isPositionIndependent()) {
    const auto &STI = MF->getSubtarget<RISCVSubtarget>();
    SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
    FlagsHi = RISCVII::MO_GOT_HI;
  } else {
    SecondOpcode = RISCV::ADDI;
    FlagsHi = RISCVII::MO_PCREL_HI;
  }
  return expandAuipcInstPair(MBB, MBBI, NextMBBI, FlagsHi, SecondOpcode);
}

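// Initial-exec TLS: load the variable's thread-pointer-relative offset from
// the GOT via %tls_ie_pcrel_hi; the final add of the thread pointer is
// generated separately during instruction selection.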
bool RISCVExpandPseudo::expandLoadTLSIEAddress(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineFunction *MF = MBB.getParent();

  const auto &STI = MF->getSubtarget<RISCVSubtarget>();
  unsigned SecondOpcode = STI.is64Bit() ? RISCV::LD : RISCV::LW;
  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_TLS_GOT_HI,
                             SecondOpcode);
}

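// General-dynamic TLS: compute the address of the variable's GOT entry via
// %tls_gd_pcrel_hi; that address is then passed to a __tls_get_addr call,
// which the lowering code emits separately.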
bool RISCVExpandPseudo::expandLoadTLSGDAddress(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  return expandAuipcInstPair(MBB, MBBI, NextMBBI, RISCVII::MO_TLS_GD_HI,
                             RISCV::ADDI);
}

} // end of anonymous namespace

INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",
                RISCV_EXPAND_PSEUDO_NAME, false, false)
namespace llvm {

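// Factory used by the RISCV target to add this pass to the backend pipeline
// after register allocation (see the file header comment).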
FunctionPass *createRISCVExpandPseudoPass() { return new RISCVExpandPseudo(); }

} // end of namespace llvm