LLVM  8.0.0svn
RISCVISelLowering.cpp
Go to the documentation of this file.
1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the interfaces that RISCV uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "RISCVISelLowering.h"
16 #include "RISCV.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/Statistic.h"
30 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/Support/Debug.h"
35 
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "riscv-lower"
39 
40 STATISTIC(NumTailCalls, "Number of tail calls");
41 
// RISCVTargetLowering constructor.
// NOTE(review): the first line of the signature (orig line 42) and many
// interior lines are missing from this excerpt. The visible code registers
// the legal register classes (GPR always; FPR32/FPR64 when the F/D standard
// extensions are enabled), promotes i1 extending loads, configures
// extension-dependent operation actions, and sets function alignments.
43  const RISCVSubtarget &STI)
44  : TargetLowering(TM), Subtarget(STI) {
45 
46  MVT XLenVT = Subtarget.getXLenVT();
47 
48  // Set up the register classes.
49  addRegisterClass(XLenVT, &RISCV::GPRRegClass);
50 
51  if (Subtarget.hasStdExtF())
52  addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
53  if (Subtarget.hasStdExtD())
54  addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
55 
56  // Compute derived properties from the register classes.
58 
60 
61  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
62  setLoadExtAction(N, XLenVT, MVT::i1, Promote)
64  // TODO: add all necessary setOperationAction calls.
66 
71 
74 
79 
80  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
82 
83  if (!Subtarget.hasStdExtM()) {
91  }
92 
97 
101 
108 
109  ISD::CondCode FPCCToExtend[] = {
113 
114  if (Subtarget.hasStdExtF()) {
117  for (auto CC : FPCCToExtend)
122  }
123 
124  if (Subtarget.hasStdExtD()) {
127  for (auto CC : FPCCToExtend)
134  }
135 
139 
140  if (Subtarget.hasStdExtA()) {
143  } else {
145  }
146 
148 
149  // Function alignments (log2).
150  unsigned FunctionAlignment = Subtarget.hasStdExtC() ? 1 : 2;
151  setMinFunctionAlignment(FunctionAlignment);
152  setPrefFunctionAlignment(FunctionAlignment);
153 
154  // Effectively disable jump table generation.
156 }
157 
// getSetCCResultType: for scalar types, a SETCC result is pointer-sized
// (XLen). NOTE(review): the signature start (orig 158) and the vector-type
// return path (orig 162) are missing from this excerpt.
159  EVT VT) const {
160  if (!VT.isVector())
161  return getPointerTy(DL);
163 }
164 
// getTgtMemIntrinsicInfo: describe the memory behaviour of the RISC-V
// masked-atomicrmw intrinsics so the backend can build correct memory
// operands. Returns true for intrinsics that touch memory, false otherwise.
// NOTE(review): the signature start (orig 165) and the lines setting
// Info.opc and Info.flags (orig 181, 186-187) are missing from this excerpt.
166  const CallInst &I,
167  MachineFunction &MF,
168  unsigned Intrinsic) const {
169  switch (Intrinsic) {
170  default:
171  return false;
172  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
173  case Intrinsic::riscv_masked_atomicrmw_add_i32:
174  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
175  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
176  case Intrinsic::riscv_masked_atomicrmw_max_i32:
177  case Intrinsic::riscv_masked_atomicrmw_min_i32:
178  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
179  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
180  PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
// The access is through the pointer operand (arg 0), 4-byte aligned i32.
182  Info.memVT = MVT::getVT(PtrTy->getElementType());
183  Info.ptrVal = I.getArgOperand(0);
184  Info.offset = 0;
185  Info.align = 4;
188  return true;
189  }
190 }
191 
// isLegalAddressingMode: RISC-V load/store addressing is reg + signed
// 12-bit immediate only — no global base, no scaled index ("r+r").
// NOTE(review): the signature start (orig 192) is missing from this excerpt.
193  const AddrMode &AM, Type *Ty,
194  unsigned AS,
195  Instruction *I) const {
196  // No global is ever allowed as a base.
197  if (AM.BaseGV)
198  return false;
199 
200  // Require a 12-bit signed offset.
201  if (!isInt<12>(AM.BaseOffs))
202  return false;
203 
204  switch (AM.Scale) {
205  case 0: // "r+i" or just "i", depending on HasBaseReg.
206  break;
207  case 1:
208  if (!AM.HasBaseReg) // allow "r+i".
209  break;
210  return false; // disallow "r+r" or "r+r+i".
211  default:
212  return false;
213  }
214 
215  return true;
216 }
217 
// isLegalICmpImmediate (signature line orig 218 missing from excerpt):
// compare immediates must fit an I-type 12-bit signed field.
219  return isInt<12>(Imm);
220 }
221 
// isLegalAddImmediate (signature line orig 222 missing from excerpt):
// ADDI takes a 12-bit signed immediate.
223  return isInt<12>(Imm);
224 }
225 
226 // On RV32, 64-bit integers are split into their high and low parts and held
227 // in two different registers, so the trunc is free since the low register can
228 // just be used.
229 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
230  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
231  return false;
232  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
233  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
234  return (SrcBits == 64 && DestBits == 32);
235 }
236 
237 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
238  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
239  !SrcVT.isInteger() || !DstVT.isInteger())
240  return false;
241  unsigned SrcBits = SrcVT.getSizeInBits();
242  unsigned DestBits = DstVT.getSizeInBits();
243  return (SrcBits == 64 && DestBits == 32);
244 }
245 
// isZExtFree (signature line orig 246 missing from excerpt): a zext is free
// when it can fold into a zero-extending load (LBU/LHU, and LWU on RV64).
247  // Zexts are free if they can be combined with a load.
248  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
249  EVT MemVT = LD->getMemoryVT();
// i8/i16 loads always; i32 loads only on RV64 (where they extend to i64).
// A plain (non-extending) load can be lowered as LBU/LHU/LWU, and an
// existing ZEXTLOAD is already zero-extending — both make the zext free.
250  if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
251  (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
252  (LD->getExtensionType() == ISD::NON_EXTLOAD ||
253  LD->getExtensionType() == ISD::ZEXTLOAD))
254  return true;
255  }
256 
257  return TargetLowering::isZExtFree(Val, VT2);
258 }
259 
260 // Changes the condition code and swaps operands if necessary, so the SetCC
261 // operation matches one of the comparisons supported directly in the RISC-V
262 // ISA.
263 static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
264  switch (CC) {
265  default:
266  break;
// GT/LE/UGT/ULE have no direct RISC-V branch; swap the operands so the
// comparison can use the supported LT/GE forms.
// NOTE(review): a line (orig 271) is missing from this excerpt between the
// case labels and the swap — presumably the statement that rewrites CC to
// the swapped-operand condition code. Confirm against upstream before
// relying on this function as shown.
267  case ISD::SETGT:
268  case ISD::SETLE:
269  case ISD::SETUGT:
270  case ISD::SETULE:
272  std::swap(LHS, RHS);
273  break;
274  }
275 }
276 
277 // Return the RISC-V branch opcode that matches the given DAG integer
278 // condition code. The CondCode must be one of those supported by the RISC-V
279 // ISA (see normaliseSetCC).
// getBranchOpcodeForIntCondCode (signature line orig 280 missing from this
// excerpt): map a normalised integer CondCode to the matching RISC-V
// conditional-branch opcode. Unsupported codes are a programming error.
281  switch (CC) {
282  default:
283  llvm_unreachable("Unsupported CondCode");
284  case ISD::SETEQ:
285  return RISCV::BEQ;
286  case ISD::SETNE:
287  return RISCV::BNE;
288  case ISD::SETLT:
289  return RISCV::BLT;
290  case ISD::SETGE:
291  return RISCV::BGE;
292  case ISD::SETULT:
293  return RISCV::BLTU;
294  case ISD::SETUGE:
295  return RISCV::BGEU;
296  }
297 }
298 
// LowerOperation (signature start, orig 299, missing from this excerpt):
// dispatch custom-lowered SDNode opcodes to their lowering helpers; any
// other opcode reaching here is unimplemented and fatal.
300  SelectionDAG &DAG) const {
301  switch (Op.getOpcode()) {
302  default:
303  report_fatal_error("unimplemented operand");
304  case ISD::GlobalAddress:
305  return lowerGlobalAddress(Op, DAG);
306  case ISD::BlockAddress:
307  return lowerBlockAddress(Op, DAG);
308  case ISD::ConstantPool:
309  return lowerConstantPool(Op, DAG);
310  case ISD::SELECT:
311  return lowerSELECT(Op, DAG);
312  case ISD::VASTART:
313  return lowerVASTART(Op, DAG);
314  case ISD::FRAMEADDR:
315  return lowerFRAMEADDR(Op, DAG);
316  case ISD::RETURNADDR:
317  return lowerRETURNADDR(Op, DAG);
318  }
319 }
320 
321 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
322  SelectionDAG &DAG) const {
323  SDLoc DL(Op);
324  EVT Ty = Op.getValueType();
325  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
326  const GlobalValue *GV = N->getGlobal();
327  int64_t Offset = N->getOffset();
328  MVT XLenVT = Subtarget.getXLenVT();
329 
330  if (isPositionIndependent())
331  report_fatal_error("Unable to lowerGlobalAddress");
332  // In order to maximise the opportunity for common subexpression elimination,
333  // emit a separate ADD node for the global address offset instead of folding
334  // it in the global address node. Later peephole optimisations may choose to
335  // fold it back in when profitable.
336  SDValue GAHi = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_HI);
337  SDValue GALo = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_LO);
338  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
339  SDValue MNLo =
340  SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
341  if (Offset != 0)
342  return DAG.getNode(ISD::ADD, DL, Ty, MNLo,
343  DAG.getConstant(Offset, DL, XLenVT));
344  return MNLo;
345 }
346 
347 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
348  SelectionDAG &DAG) const {
349  SDLoc DL(Op);
350  EVT Ty = Op.getValueType();
351  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
352  const BlockAddress *BA = N->getBlockAddress();
353  int64_t Offset = N->getOffset();
354 
355  if (isPositionIndependent())
356  report_fatal_error("Unable to lowerBlockAddress");
357 
358  SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
359  SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO);
360  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0);
361  SDValue MNLo =
362  SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0);
363  return MNLo;
364 }
365 
366 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
367  SelectionDAG &DAG) const {
368  SDLoc DL(Op);
369  EVT Ty = Op.getValueType();
370  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
371  const Constant *CPA = N->getConstVal();
372  int64_t Offset = N->getOffset();
373  unsigned Alignment = N->getAlignment();
374 
375  if (!isPositionIndependent()) {
376  SDValue CPAHi =
377  DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_HI);
378  SDValue CPALo =
379  DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_LO);
380  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, CPAHi), 0);
381  SDValue MNLo =
382  SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, CPALo), 0);
383  return MNLo;
384  } else {
385  report_fatal_error("Unable to lowerConstantPool");
386  }
387 }
388 
389 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
390  SDValue CondV = Op.getOperand(0);
391  SDValue TrueV = Op.getOperand(1);
392  SDValue FalseV = Op.getOperand(2);
393  SDLoc DL(Op);
394  MVT XLenVT = Subtarget.getXLenVT();
395 
396  // If the result type is XLenVT and CondV is the output of a SETCC node
397  // which also operated on XLenVT inputs, then merge the SETCC node into the
398  // lowered RISCVISD::SELECT_CC to take advantage of the integer
399  // compare+branch instructions. i.e.:
400  // (select (setcc lhs, rhs, cc), truev, falsev)
401  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
402  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
403  CondV.getOperand(0).getSimpleValueType() == XLenVT) {
404  SDValue LHS = CondV.getOperand(0);
405  SDValue RHS = CondV.getOperand(1);
406  auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
407  ISD::CondCode CCVal = CC->get();
408 
409  normaliseSetCC(LHS, RHS, CCVal);
410 
411  SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
412  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
413  SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
414  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
415  }
416 
417  // Otherwise:
418  // (select condv, truev, falsev)
419  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
420  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
421  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);
422 
423  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
424  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
425 
426  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
427 }
428 
// lowerVASTART: store the address of the varargs save area (the
// VarArgsFrameIndex slot) into the va_list pointed to by operand 1.
// NOTE(review): the lines fetching MF/FuncInfo (orig 430-431) and the
// second getFrameIndex argument (orig 435) are missing from this excerpt.
429 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
432 
433  SDLoc DL(Op);
434  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
436 
437  // vastart just stores the address of the VarArgsFrameIndex slot into the
438  // memory location argument.
439  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
440  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
441  MachinePointerInfo(SV));
442 }
443 
// lowerFRAMEADDR: return the frame pointer, walking saved frame pointers
// for non-zero depths. NOTE(review): the line obtaining MachineFunction
// (orig 447) is missing from this excerpt.
444 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
445  SelectionDAG &DAG) const {
446  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
448  MachineFrameInfo &MFI = MF.getFrameInfo();
449  MFI.setFrameAddressIsTaken(true);
450  unsigned FrameReg = RI.getFrameRegister(MF);
451  int XLenInBytes = Subtarget.getXLen() / 8;
452 
453  EVT VT = Op.getValueType();
454  SDLoc DL(Op);
455  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
456  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
457  while (Depth--) {
// The previous frame pointer is saved two XLEN slots below the current
// frame address; load through it once per requested level.
458  int Offset = -(XLenInBytes * 2);
459  SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
460  DAG.getIntPtrConstant(Offset, DL));
461  FrameAddr =
462  DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
463  }
464  return FrameAddr;
465 }
466 
// lowerRETURNADDR: return the return address for the requested call-frame
// depth. Depth 0 reads the return-address register as a live-in; deeper
// frames load the saved RA one XLEN slot below the frame address.
// NOTE(review): the lines obtaining MachineFunction (orig 470), the guard
// condition before the early return (orig 476), and the load's pointer-info
// argument (orig 488) are missing from this excerpt.
467 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
468  SelectionDAG &DAG) const {
469  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
471  MachineFrameInfo &MFI = MF.getFrameInfo();
472  MFI.setReturnAddressIsTaken(true);
473  MVT XLenVT = Subtarget.getXLenVT();
474  int XLenInBytes = Subtarget.getXLen() / 8;
475 
477  return SDValue();
478 
479  EVT VT = Op.getValueType();
480  SDLoc DL(Op);
481  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
482  if (Depth) {
483  int Off = -XLenInBytes;
484  SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
485  SDValue Offset = DAG.getConstant(Off, DL, VT);
486  return DAG.getLoad(VT, DL, DAG.getEntryNode(),
487  DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
489  }
490 
491  // Return the value of the return address register, marking it an implicit
492  // live-in.
493  unsigned Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
494  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
495 }
496 
// PerformDAGCombine (signature start, orig 497, missing from this excerpt):
// target-specific DAG combines. Currently only elides a SplitF64 whose
// input is a BuildPairF64, forwarding the pair's operands directly.
498  DAGCombinerInfo &DCI) const {
499  switch (N->getOpcode()) {
500  default:
501  break;
502  case RISCVISD::SplitF64: {
503  // If the input to SplitF64 is just BuildPairF64 then the operation is
504  // redundant. Instead, use BuildPairF64's operands directly.
505  SDValue Op0 = N->getOperand(0);
506  if (Op0->getOpcode() != RISCVISD::BuildPairF64)
507  break;
508  return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
509  }
510  }
511 
512  return SDValue();
513 }
514 
// emitSplitF64Pseudo (signature start, orig 515, missing from this
// excerpt): expand SplitF64Pseudo by spilling the f64 source to a stack
// slot and reloading its halves as two i32 LW loads (lo at offset 0, hi at
// offset 4).
516  MachineBasicBlock *BB) {
517  assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
518 
519  MachineFunction &MF = *BB->getParent();
520  DebugLoc DL = MI.getDebugLoc();
521  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
522  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
523  unsigned LoReg = MI.getOperand(0).getReg();
524  unsigned HiReg = MI.getOperand(1).getReg();
525  unsigned SrcReg = MI.getOperand(2).getReg();
526  const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
527  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
528 
529  TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
530  RI);
// NOTE(review): the tail of the getMachineMemOperand call (orig 533) is
// missing from this excerpt.
531  MachineMemOperand *MMO =
532  MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
534  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
535  .addFrameIndex(FI)
536  .addImm(0)
537  .addMemOperand(MMO);
538  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
539  .addFrameIndex(FI)
540  .addImm(4)
541  .addMemOperand(MMO);
542  MI.eraseFromParent(); // The pseudo instruction is gone now.
543  return BB;
544 }
545 
// emitBuildPairF64Pseudo (signature start, orig 546, missing from this
// excerpt): expand BuildPairF64Pseudo by storing the two i32 halves to a
// stack slot (lo at offset 0, hi at offset 4) and reloading them as one
// f64 — the inverse of emitSplitF64Pseudo.
547  MachineBasicBlock *BB) {
548  assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
549  "Unexpected instruction");
550 
551  MachineFunction &MF = *BB->getParent();
552  DebugLoc DL = MI.getDebugLoc();
553  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
554  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
555  unsigned DstReg = MI.getOperand(0).getReg();
556  unsigned LoReg = MI.getOperand(1).getReg();
557  unsigned HiReg = MI.getOperand(2).getReg();
558  const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
559  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
560 
// NOTE(review): the tail of the getMachineMemOperand call (orig 563) is
// missing from this excerpt.
561  MachineMemOperand *MMO =
562  MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
564  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
565  .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
566  .addFrameIndex(FI)
567  .addImm(0)
568  .addMemOperand(MMO);
569  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
570  .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
571  .addFrameIndex(FI)
572  .addImm(4)
573  .addMemOperand(MMO);
574  TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
575  MI.eraseFromParent(); // The pseudo instruction is gone now.
576  return BB;
577 }
578 
// EmitInstrWithCustomInserter (signature start, orig 579-580, missing from
// this excerpt): expand the custom-inserted pseudos. BuildPairF64/SplitF64
// are delegated; the Select_* pseudos fall through to the triangle
// expansion below. NOTE(review): lines obtaining TII and the insertion
// iterator I (orig 606?, 609-610) are also missing from this excerpt.
581  MachineBasicBlock *BB) const {
582  switch (MI.getOpcode()) {
583  default:
584  llvm_unreachable("Unexpected instr type to insert");
585  case RISCV::Select_GPR_Using_CC_GPR:
586  case RISCV::Select_FPR32_Using_CC_GPR:
587  case RISCV::Select_FPR64_Using_CC_GPR:
588  break;
589  case RISCV::BuildPairF64Pseudo:
590  return emitBuildPairF64Pseudo(MI, BB);
591  case RISCV::SplitF64Pseudo:
592  return emitSplitF64Pseudo(MI, BB);
593  }
594 
595  // To "insert" a SELECT instruction, we actually have to insert the triangle
596  // control-flow pattern. The incoming instruction knows the destination vreg
597  // to set, the condition code register to branch on, the true/false values to
598  // select between, and the condcode to use to select the appropriate branch.
599  //
600  // We produce the following control flow:
601  // HeadMBB
602  // | \
603  // | IfFalseMBB
604  // | /
605  // TailMBB
607  const BasicBlock *LLVM_BB = BB->getBasicBlock();
608  DebugLoc DL = MI.getDebugLoc();
610 
611  MachineBasicBlock *HeadMBB = BB;
612  MachineFunction *F = BB->getParent();
613  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
614  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
615 
616  F->insert(I, IfFalseMBB);
617  F->insert(I, TailMBB);
618  // Move all remaining instructions to TailMBB.
619  TailMBB->splice(TailMBB->begin(), HeadMBB,
620  std::next(MachineBasicBlock::iterator(MI)), HeadMBB->end());
621  // Update machine-CFG edges by transferring all successors of the current
622  // block to the new block which will contain the Phi node for the select.
623  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
624  // Set the successors for HeadMBB.
625  HeadMBB->addSuccessor(IfFalseMBB);
626  HeadMBB->addSuccessor(TailMBB);
627 
628  // Insert appropriate branch.
629  unsigned LHS = MI.getOperand(1).getReg();
630  unsigned RHS = MI.getOperand(2).getReg();
631  auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
632  unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
633 
// Branch to TailMBB when the condition holds (select the true value).
634  BuildMI(HeadMBB, DL, TII.get(Opcode))
635  .addReg(LHS)
636  .addReg(RHS)
637  .addMBB(TailMBB);
638 
639  // IfFalseMBB just falls through to TailMBB.
640  IfFalseMBB->addSuccessor(TailMBB);
641 
642  // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
643  BuildMI(*TailMBB, TailMBB->begin(), DL, TII.get(RISCV::PHI),
644  MI.getOperand(0).getReg())
645  .addReg(MI.getOperand(4).getReg())
646  .addMBB(HeadMBB)
647  .addReg(MI.getOperand(5).getReg())
648  .addMBB(IfFalseMBB);
649 
650  MI.eraseFromParent(); // The pseudo instruction is gone now.
651  return TailMBB;
652 }
653 
654 // Calling Convention Implementation.
655 // The expectations for frontend ABI lowering vary from target to target.
656 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
657 // details, but this is a longer term goal. For now, we simply try to keep the
658 // role of the frontend as simple and well-defined as possible. The rules can
659 // be summarised as:
660 // * Never split up large scalar arguments. We handle them here.
661 // * If a hardfloat calling convention is being used, and the struct may be
662 // passed in a pair of registers (fp+fp, int+fp), and both registers are
663 // available, then pass as two separate arguments. If either the GPRs or FPRs
664 // are exhausted, then pass according to the rule below.
665 // * If a struct could never be passed in registers or directly in a stack
666 // slot (as it is larger than 2*XLEN and the floating point rules don't
667 // apply), then pass it using a pointer with the byval attribute.
668 // * If a struct is less than 2*XLEN, then coerce to either a two-element
669 // word-sized array or a 2*XLEN scalar (depending on alignment).
670 // * The frontend can determine whether a struct is returned by reference or
671 // not based on its size and fields. If it will be returned by reference, the
672 // frontend must modify the prototype so a pointer with the sret annotation is
673 // passed as the first argument. This is not necessary for large scalar
674 // returns.
675 // * Struct return values and varargs should be coerced to structs containing
676 // register-size fields in the same situations they would be for fixed
677 // arguments.
678 
679 static const MCPhysReg ArgGPRs[] = {
680  RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
681  RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
682 };
683 
684 // Pass a 2*XLEN argument that has been split into two XLEN values through
685 // registers or the stack as necessary.
// NOTE(review): several CCValAssign::getMem(...) fragments (orig 699, 702,
// 704, 713-714, 716) are missing from this excerpt; the surviving lines
// show only the allocation calls and trailing arguments.
686 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
687  ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
688  MVT ValVT2, MVT LocVT2,
689  ISD::ArgFlagsTy ArgFlags2) {
690  unsigned XLenInBytes = XLen / 8;
691  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
692  // At least one half can be passed via register.
693  State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
694  VA1.getLocVT(), CCValAssign::Full));
695  } else {
696  // Both halves must be passed on the stack, with proper alignment.
697  unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
698  State.addLoc(
700  State.AllocateStack(XLenInBytes, StackAlign),
701  VA1.getLocVT(), CCValAssign::Full));
703  ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
// Both halves assigned to the stack; nothing more to do.
705  return false;
706  }
707 
708  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
709  // The second half can also be passed via register.
710  State.addLoc(
711  CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
712  } else {
713  // The second half is passed via the stack, without additional alignment.
715  ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
717  }
718 
// Returning false signals a successful assignment to the CC machinery.
719  return false;
720 }
721 
722 // Implements the RISC-V calling convention. Returns true upon failure.
// CC_RISCV: the main RISC-V calling-convention state machine. Assigns a
// location (register or stack) for one value, handling the variadic
// even-register rule, f64-on-RV32 soft-float splitting, and arguments that
// legalisation split across multiple values (passed directly if two pieces,
// indirectly via pointer if more). Returns true on failure. The ordering of
// the checks below is load-bearing — do not reorder.
723 static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT,
724  CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
725  CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
726  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
727  assert(XLen == 32 || XLen == 64);
728  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
// f32 travels in integer registers under the soft-float ABI (bitcast).
729  if (ValVT == MVT::f32) {
730  LocVT = MVT::i32;
731  LocInfo = CCValAssign::BCvt;
732  }
733 
734  // Any return value split in to more than two values can't be returned
735  // directly.
736  if (IsRet && ValNo > 1)
737  return true;
738 
739  // If this is a variadic argument, the RISC-V calling convention requires
740  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
741  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
742  // be used regardless of whether the original argument was split during
743  // legalisation or not. The argument will not be passed by registers if the
744  // original type is larger than 2*XLEN, so the register alignment rule does
745  // not apply.
746  unsigned TwoXLenInBytes = (2 * XLen) / 8;
747  if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
748  DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
749  unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
750  // Skip 'odd' register if necessary.
751  if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
752  State.AllocateReg(ArgGPRs);
753  }
754 
755  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
756  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
757  State.getPendingArgFlags();
758 
759  assert(PendingLocs.size() == PendingArgFlags.size() &&
760  "PendingLocs and PendingArgFlags out of sync");
761 
762  // Handle passing f64 on RV32D with a soft float ABI.
763  if (XLen == 32 && ValVT == MVT::f64) {
764  assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
765  "Can't lower f64 if it is split");
766  // Depending on available argument GPRS, f64 may be passed in a pair of
767  // GPRs, split between a GPR and the stack, or passed completely on the
768  // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
769  // cases.
770  unsigned Reg = State.AllocateReg(ArgGPRs);
771  LocVT = MVT::i32;
772  if (!Reg) {
// No GPR left: the whole f64 goes on the stack, 8-byte aligned.
773  unsigned StackOffset = State.AllocateStack(8, 8);
774  State.addLoc(
775  CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
776  return false;
777  }
// First half got a GPR; reserve the second GPR too, or a 4-byte stack
// slot if the register file is exhausted (split GPR/stack case).
778  if (!State.AllocateReg(ArgGPRs))
779  State.AllocateStack(4, 4);
780  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
781  return false;
782  }
783 
784  // Split arguments might be passed indirectly, so keep track of the pending
785  // values.
786  if (ArgFlags.isSplit() || !PendingLocs.empty()) {
787  LocVT = XLenVT;
788  LocInfo = CCValAssign::Indirect;
789  PendingLocs.push_back(
790  CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
791  PendingArgFlags.push_back(ArgFlags);
792  if (!ArgFlags.isSplitEnd()) {
793  return false;
794  }
795  }
796 
797  // If the split argument only had two elements, it should be passed directly
798  // in registers or on the stack.
799  if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
800  assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
801  // Apply the normal calling convention rules to the first half of the
802  // split argument.
803  CCValAssign VA = PendingLocs[0];
804  ISD::ArgFlagsTy AF = PendingArgFlags[0];
805  PendingLocs.clear();
806  PendingArgFlags.clear();
807  return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
808  ArgFlags);
809  }
810 
811  // Allocate to a register if possible, or else a stack slot.
812  unsigned Reg = State.AllocateReg(ArgGPRs);
813  unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);
814 
815  // If we reach this point and PendingLocs is non-empty, we must be at the
816  // end of a split argument that must be passed indirectly.
817  if (!PendingLocs.empty()) {
818  assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
819  assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
820 
// All pending pieces share one indirect location (register or stack).
821  for (auto &It : PendingLocs) {
822  if (Reg)
823  It.convertToReg(Reg);
824  else
825  It.convertToMem(StackOffset);
826  State.addLoc(It);
827  }
828  PendingLocs.clear();
829  PendingArgFlags.clear();
830  return false;
831  }
832 
833  assert(LocVT == XLenVT && "Expected an XLenVT at this stage");
834 
835  if (Reg) {
836  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
837  return false;
838  }
839 
// When passed on the stack, f32 keeps its FP type (no bitcast needed).
840  if (ValVT == MVT::f32) {
841  LocVT = MVT::f32;
842  LocInfo = CCValAssign::Full;
843  }
844  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
845  return false;
846 }
847 
// analyzeInputArgs: run CC_RISCV over every incoming value (formal
// arguments, or return values when IsRet is true) to populate CCInfo with
// locations. A CC failure here is a fatal internal error.
// FIX(review): the inline label on the literal `true` below previously read
// /*IsRet=*/ — positionally that argument is CC_RISCV's IsFixed parameter
// (the actual IsRet flag is passed right after it); the label is corrected.
848 void RISCVTargetLowering::analyzeInputArgs(
849  MachineFunction &MF, CCState &CCInfo,
850  const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
851  unsigned NumArgs = Ins.size();
852  FunctionType *FType = MF.getFunction().getFunctionType();
853 
854  for (unsigned i = 0; i != NumArgs; ++i) {
855  MVT ArgVT = Ins[i].VT;
856  ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
857 
// Recover the original IR type so CC_RISCV can apply alignment rules.
858  Type *ArgTy = nullptr;
859  if (IsRet)
860  ArgTy = FType->getReturnType();
861  else if (Ins[i].isOrigArg())
862  ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
863 
864  if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
865  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
866  LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
867  << EVT(ArgVT).getEVTString() << '\n');
868  llvm_unreachable(nullptr);
869  }
870  }
871 }
872 
873 void RISCVTargetLowering::analyzeOutputArgs(
874  MachineFunction &MF, CCState &CCInfo,
875  const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
876  CallLoweringInfo *CLI) const {
877  unsigned NumArgs = Outs.size();
878 
879  for (unsigned i = 0; i != NumArgs; i++) {
880  MVT ArgVT = Outs[i].VT;
881  ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
882  Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
883 
884  if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
885  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
886  LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
887  << EVT(ArgVT).getEVTString() << "\n");
888  llvm_unreachable(nullptr);
889  }
890  }
891 }
892 
893 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
894 // values.
// convertLocVTToValVT (signature start, orig 895, missing from this
// excerpt): convert a value from its location type to its value type —
// currently only Full (no-op) and BCvt (bitcast) are expected.
896  const CCValAssign &VA, const SDLoc &DL) {
897  switch (VA.getLocInfo()) {
898  default:
899  llvm_unreachable("Unexpected CCValAssign::LocInfo");
900  case CCValAssign::Full:
901  break;
902  case CCValAssign::BCvt:
903  Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
904  break;
905  }
906  return Val;
907 }
908 
909 // The caller is responsible for loading the full value if the argument is
910 // passed with CCValAssign::Indirect.
// unpackFromRegLoc (signature start, orig 911, and the MachineFunction
// line, orig 913, missing from this excerpt): materialise an incoming
// register argument as a live-in virtual GPR and convert it to its value
// type. Indirect values are returned as the raw pointer; the caller loads.
912  const CCValAssign &VA, const SDLoc &DL) {
914  MachineRegisterInfo &RegInfo = MF.getRegInfo();
915  EVT LocVT = VA.getLocVT();
916  SDValue Val;
917 
918  unsigned VReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
919  RegInfo.addLiveIn(VA.getLocReg(), VReg);
920  Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
921 
922  if (VA.getLocInfo() == CCValAssign::Indirect)
923  return Val;
924 
925  return convertLocVTToValVT(DAG, Val, VA, DL);
926 }
927 
// convertValVTToLocVT (signature start, orig 928, missing from this
// excerpt): inverse of convertLocVTToValVT — convert a value to its
// location type (Full is a no-op, BCvt bitcasts to LocVT).
929  const CCValAssign &VA, const SDLoc &DL) {
930  EVT LocVT = VA.getLocVT();
931 
932  switch (VA.getLocInfo()) {
933  default:
934  llvm_unreachable("Unexpected CCValAssign::LocInfo");
935  case CCValAssign::Full:
936  break;
937  case CCValAssign::BCvt:
938  Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
939  break;
940  }
941  return Val;
942 }
943 
944 // The caller is responsible for loading the full value if the argument is
945 // passed with CCValAssign::Indirect.
// unpackFromMemLoc (signature start, orig 946, missing from this excerpt):
// load an argument passed on the stack from a fixed frame object.
// NOTE(review): the MachineFunction line (orig 948), PtrVT line (orig 952),
// ExtType declaration (orig 958), non-Full switch cases (orig 963), and the
// load's pointer-info argument (orig 969) are also missing here.
947  const CCValAssign &VA, const SDLoc &DL) {
949  MachineFrameInfo &MFI = MF.getFrameInfo();
950  EVT LocVT = VA.getLocVT();
951  EVT ValVT = VA.getValVT();
953  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
954  VA.getLocMemOffset(), /*Immutable=*/true);
955  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
956  SDValue Val;
957 
959  switch (VA.getLocInfo()) {
960  default:
961  llvm_unreachable("Unexpected CCValAssign::LocInfo");
962  case CCValAssign::Full:
964  ExtType = ISD::NON_EXTLOAD;
965  break;
966  }
967  Val = DAG.getExtLoad(
968  ExtType, DL, LocVT, Chain, FIN,
970  return Val;
971 }
972 
// unpackF64OnRV32DSoftABI (signature start, orig 973, missing from this
// excerpt): reassemble an f64 argument on RV32 with the soft-float ABI.
// Three layouts are handled: fully on the stack; split between the last
// argument GPR (a7/X17) and the stack; or in a GPR pair, rebuilt with
// RISCVISD::BuildPairF64. NOTE(review): the MachineFunction line (orig 977)
// and the loads' pointer-info arguments (orig 986, 1000) are also missing.
974  const CCValAssign &VA, const SDLoc &DL) {
975  assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
976  "Unexpected VA");
978  MachineFrameInfo &MFI = MF.getFrameInfo();
979  MachineRegisterInfo &RegInfo = MF.getRegInfo();
980 
981  if (VA.isMemLoc()) {
982  // f64 is passed on the stack.
983  int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
984  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
985  return DAG.getLoad(MVT::f64, DL, Chain, FIN,
987  }
988 
989  assert(VA.isRegLoc() && "Expected register VA assignment");
990 
991  unsigned LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
992  RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
993  SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
994  SDValue Hi;
995  if (VA.getLocReg() == RISCV::X17) {
996  // Second half of f64 is passed on the stack.
997  int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
998  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
999  Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
1001  } else {
1002  // Second half of f64 is passed in another GPR.
1003  unsigned HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1004  RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
1005  Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
1006  }
1007  return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
1008 }
1009 
1010 // Transform physical registers into virtual registers.
1011 SDValue RISCVTargetLowering::LowerFormalArguments(
1012  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1013  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1014  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1015 
1016  switch (CallConv) {
1017  default:
1018  report_fatal_error("Unsupported calling convention");
1019  case CallingConv::C:
1020  case CallingConv::Fast:
1021  break;
1022  }
1023 
1024  MachineFunction &MF = DAG.getMachineFunction();
1025 
1026  const Function &Func = MF.getFunction();
1027  if (Func.hasFnAttribute("interrupt")) {
1028  if (!Func.arg_empty())
1030  "Functions with the interrupt attribute cannot have arguments!");
1031 
1032  StringRef Kind =
1033  MF.getFunction().getFnAttribute("interrupt").getValueAsString();
1034 
1035  if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
1037  "Function interrupt attribute argument not supported!");
1038  }
1039 
1040  EVT PtrVT = getPointerTy(DAG.getDataLayout());
1041  MVT XLenVT = Subtarget.getXLenVT();
1042  unsigned XLenInBytes = Subtarget.getXLen() / 8;
1043  // Used with vargs to acumulate store chains.
1044  std::vector<SDValue> OutChains;
1045 
1046  // Assign locations to all of the incoming arguments.
1048  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1049  analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
1050 
1051  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1052  CCValAssign &VA = ArgLocs[i];
1053  SDValue ArgValue;
1054  // Passing f64 on RV32D with a soft float ABI must be handled as a special
1055  // case.
1056  if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
1057  ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
1058  else if (VA.isRegLoc())
1059  ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
1060  else
1061  ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
1062 
1063  if (VA.getLocInfo() == CCValAssign::Indirect) {
1064  // If the original argument was split and passed by reference (e.g. i128
1065  // on RV32), we need to load all parts of it here (using the same
1066  // address).
1067  InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
1068  MachinePointerInfo()));
1069  unsigned ArgIndex = Ins[i].OrigArgIndex;
1070  assert(Ins[i].PartOffset == 0);
1071  while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
1072  CCValAssign &PartVA = ArgLocs[i + 1];
1073  unsigned PartOffset = Ins[i + 1].PartOffset;
1074  SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
1075  DAG.getIntPtrConstant(PartOffset, DL));
1076  InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
1077  MachinePointerInfo()));
1078  ++i;
1079  }
1080  continue;
1081  }
1082  InVals.push_back(ArgValue);
1083  }
1084 
1085  if (IsVarArg) {
1087  unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
1088  const TargetRegisterClass *RC = &RISCV::GPRRegClass;
1089  MachineFrameInfo &MFI = MF.getFrameInfo();
1090  MachineRegisterInfo &RegInfo = MF.getRegInfo();
1092 
1093  // Offset of the first variable argument from stack pointer, and size of
1094  // the vararg save area. For now, the varargs save area is either zero or
1095  // large enough to hold a0-a7.
1096  int VaArgOffset, VarArgsSaveSize;
1097 
1098  // If all registers are allocated, then all varargs must be passed on the
1099  // stack and we don't need to save any argregs.
1100  if (ArgRegs.size() == Idx) {
1101  VaArgOffset = CCInfo.getNextStackOffset();
1102  VarArgsSaveSize = 0;
1103  } else {
1104  VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
1105  VaArgOffset = -VarArgsSaveSize;
1106  }
1107 
1108  // Record the frame index of the first variable argument
1109  // which is a value necessary to VASTART.
1110  int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
1111  RVFI->setVarArgsFrameIndex(FI);
1112 
1113  // If saving an odd number of registers then create an extra stack slot to
1114  // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
1115  // offsets to even-numbered registered remain 2*XLEN-aligned.
1116  if (Idx % 2) {
1117  FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes,
1118  true);
1119  VarArgsSaveSize += XLenInBytes;
1120  }
1121 
1122  // Copy the integer registers that may have been used for passing varargs
1123  // to the vararg save area.
1124  for (unsigned I = Idx; I < ArgRegs.size();
1125  ++I, VaArgOffset += XLenInBytes) {
1126  const unsigned Reg = RegInfo.createVirtualRegister(RC);
1127  RegInfo.addLiveIn(ArgRegs[I], Reg);
1128  SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
1129  FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
1130  SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
1131  SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
1133  cast<StoreSDNode>(Store.getNode())
1134  ->getMemOperand()
1135  ->setValue((Value *)nullptr);
1136  OutChains.push_back(Store);
1137  }
1138  RVFI->setVarArgsSaveSize(VarArgsSaveSize);
1139  }
1140 
1141  // All stores are grouped in one node to allow the matching between
1142  // the size of Ins and InVals. This only happens for vararg functions.
1143  if (!OutChains.empty()) {
1144  OutChains.push_back(Chain);
1145  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
1146  }
1147 
1148  return Chain;
1149 }
1150 
1151 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
1152 /// for tail call optimization.
1153 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
1154 bool RISCVTargetLowering::IsEligibleForTailCallOptimization(
1155  CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
1156  const SmallVector<CCValAssign, 16> &ArgLocs) const {
1157 
1158  auto &Callee = CLI.Callee;
1159  auto CalleeCC = CLI.CallConv;
1160  auto IsVarArg = CLI.IsVarArg;
1161  auto &Outs = CLI.Outs;
1162  auto &Caller = MF.getFunction();
1163  auto CallerCC = Caller.getCallingConv();
1164 
1165  // Do not tail call opt functions with "disable-tail-calls" attribute.
1166  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
1167  return false;
1168 
1169  // Exception-handling functions need a special set of instructions to
1170  // indicate a return to the hardware. Tail-calling another function would
1171  // probably break this.
1172  // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
1173  // should be expanded as new function attributes are introduced.
1174  if (Caller.hasFnAttribute("interrupt"))
1175  return false;
1176 
1177  // Do not tail call opt functions with varargs.
1178  if (IsVarArg)
1179  return false;
1180 
1181  // Do not tail call opt if the stack is used to pass parameters.
1182  if (CCInfo.getNextStackOffset() != 0)
1183  return false;
1184 
1185  // Do not tail call opt if any parameters need to be passed indirectly.
1186  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
1187  // passed indirectly. So the address of the value will be passed in a
1188  // register, or if not available, then the address is put on the stack. In
1189  // order to pass indirectly, space on the stack often needs to be allocated
1190  // in order to store the value. In this case the CCInfo.getNextStackOffset()
1191  // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
1192  // are passed CCValAssign::Indirect.
1193  for (auto &VA : ArgLocs)
1194  if (VA.getLocInfo() == CCValAssign::Indirect)
1195  return false;
1196 
1197  // Do not tail call opt if either caller or callee uses struct return
1198  // semantics.
1199  auto IsCallerStructRet = Caller.hasStructRetAttr();
1200  auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
1201  if (IsCallerStructRet || IsCalleeStructRet)
1202  return false;
1203 
1204  // Externally-defined functions with weak linkage should not be
1205  // tail-called. The behaviour of branch instructions in this situation (as
1206  // used for tail calls) is implementation-defined, so we cannot rely on the
1207  // linker replacing the tail call with a return.
1208  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1209  const GlobalValue *GV = G->getGlobal();
1210  if (GV->hasExternalWeakLinkage())
1211  return false;
1212  }
1213 
1214  // The callee has to preserve all registers the caller needs to preserve.
1215  const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
1216  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
1217  if (CalleeCC != CallerCC) {
1218  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
1219  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
1220  return false;
1221  }
1222 
1223  // Byval parameters hand the function a pointer directly into the stack area
1224  // we want to reuse during a tail call. Working around this *is* possible
1225  // but less efficient and uglier in LowerCall.
1226  for (auto &Arg : Outs)
1227  if (Arg.Flags.isByVal())
1228  return false;
1229 
1230  return true;
1231 }
1232 
1233 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
1234 // and output parameter nodes.
1235 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
1236  SmallVectorImpl<SDValue> &InVals) const {
1237  SelectionDAG &DAG = CLI.DAG;
1238  SDLoc &DL = CLI.DL;
1239  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1240  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1241  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1242  SDValue Chain = CLI.Chain;
1243  SDValue Callee = CLI.Callee;
1244  bool &IsTailCall = CLI.IsTailCall;
1245  CallingConv::ID CallConv = CLI.CallConv;
1246  bool IsVarArg = CLI.IsVarArg;
1247  EVT PtrVT = getPointerTy(DAG.getDataLayout());
1248  MVT XLenVT = Subtarget.getXLenVT();
1249 
1250  MachineFunction &MF = DAG.getMachineFunction();
1251 
1252  // Analyze the operands of the call, assigning locations to each operand.
1254  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1255  analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
1256 
1257  // Check if it's really possible to do a tail call.
1258  if (IsTailCall)
1259  IsTailCall = IsEligibleForTailCallOptimization(ArgCCInfo, CLI, MF,
1260  ArgLocs);
1261 
1262  if (IsTailCall)
1263  ++NumTailCalls;
1264  else if (CLI.CS && CLI.CS.isMustTailCall())
1265  report_fatal_error("failed to perform tail call elimination on a call "
1266  "site marked musttail");
1267 
1268  // Get a count of how many bytes are to be pushed on the stack.
1269  unsigned NumBytes = ArgCCInfo.getNextStackOffset();
1270 
1271  // Create local copies for byval args
1272  SmallVector<SDValue, 8> ByValArgs;
1273  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1274  ISD::ArgFlagsTy Flags = Outs[i].Flags;
1275  if (!Flags.isByVal())
1276  continue;
1277 
1278  SDValue Arg = OutVals[i];
1279  unsigned Size = Flags.getByValSize();
1280  unsigned Align = Flags.getByValAlign();
1281 
1282  int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false);
1283  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
1284  SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
1285 
1286  Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align,
1287  /*IsVolatile=*/false,
1288  /*AlwaysInline=*/false,
1289  IsTailCall, MachinePointerInfo(),
1290  MachinePointerInfo());
1291  ByValArgs.push_back(FIPtr);
1292  }
1293 
1294  if (!IsTailCall)
1295  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
1296 
1297  // Copy argument values to their designated locations.
1299  SmallVector<SDValue, 8> MemOpChains;
1300  SDValue StackPtr;
1301  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
1302  CCValAssign &VA = ArgLocs[i];
1303  SDValue ArgValue = OutVals[i];
1304  ISD::ArgFlagsTy Flags = Outs[i].Flags;
1305 
1306  // Handle passing f64 on RV32D with a soft float ABI as a special case.
1307  bool IsF64OnRV32DSoftABI =
1308  VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
1309  if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
1310  SDValue SplitF64 = DAG.getNode(
1311  RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
1312  SDValue Lo = SplitF64.getValue(0);
1313  SDValue Hi = SplitF64.getValue(1);
1314 
1315  unsigned RegLo = VA.getLocReg();
1316  RegsToPass.push_back(std::make_pair(RegLo, Lo));
1317 
1318  if (RegLo == RISCV::X17) {
1319  // Second half of f64 is passed on the stack.
1320  // Work out the address of the stack slot.
1321  if (!StackPtr.getNode())
1322  StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
1323  // Emit the store.
1324  MemOpChains.push_back(
1325  DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
1326  } else {
1327  // Second half of f64 is passed in another GPR.
1328  unsigned RegHigh = RegLo + 1;
1329  RegsToPass.push_back(std::make_pair(RegHigh, Hi));
1330  }
1331  continue;
1332  }
1333 
1334  // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
1335  // as any other MemLoc.
1336 
1337  // Promote the value if needed.
1338  // For now, only handle fully promoted and indirect arguments.
1339  if (VA.getLocInfo() == CCValAssign::Indirect) {
1340  // Store the argument in a stack slot and pass its address.
1341  SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
1342  int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
1343  MemOpChains.push_back(
1344  DAG.getStore(Chain, DL, ArgValue, SpillSlot,
1346  // If the original argument was split (e.g. i128), we need
1347  // to store all parts of it here (and pass just one address).
1348  unsigned ArgIndex = Outs[i].OrigArgIndex;
1349  assert(Outs[i].PartOffset == 0);
1350  while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
1351  SDValue PartValue = OutVals[i + 1];
1352  unsigned PartOffset = Outs[i + 1].PartOffset;
1353  SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
1354  DAG.getIntPtrConstant(PartOffset, DL));
1355  MemOpChains.push_back(
1356  DAG.getStore(Chain, DL, PartValue, Address,
1358  ++i;
1359  }
1360  ArgValue = SpillSlot;
1361  } else {
1362  ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
1363  }
1364 
1365  // Use local copy if it is a byval arg.
1366  if (Flags.isByVal())
1367  ArgValue = ByValArgs[j++];
1368 
1369  if (VA.isRegLoc()) {
1370  // Queue up the argument copies and emit them at the end.
1371  RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
1372  } else {
1373  assert(VA.isMemLoc() && "Argument not register or memory");
1374  assert(!IsTailCall && "Tail call not allowed if stack is used "
1375  "for passing parameters");
1376 
1377  // Work out the address of the stack slot.
1378  if (!StackPtr.getNode())
1379  StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
1380  SDValue Address =
1381  DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
1382  DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
1383 
1384  // Emit the store.
1385  MemOpChains.push_back(
1386  DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
1387  }
1388  }
1389 
1390  // Join the stores, which are independent of one another.
1391  if (!MemOpChains.empty())
1392  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1393 
1394  SDValue Glue;
1395 
1396  // Build a sequence of copy-to-reg nodes, chained and glued together.
1397  for (auto &Reg : RegsToPass) {
1398  Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
1399  Glue = Chain.getValue(1);
1400  }
1401 
1402  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
1403  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
1404  // split it and then direct call can be matched by PseudoCALL.
1405  if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
1406  Callee = DAG.getTargetGlobalAddress(S->getGlobal(), DL, PtrVT, 0, 0);
1407  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1408  Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, 0);
1409  }
1410 
1411  // The first call operand is the chain and the second is the target address.
1413  Ops.push_back(Chain);
1414  Ops.push_back(Callee);
1415 
1416  // Add argument registers to the end of the list so that they are
1417  // known live into the call.
1418  for (auto &Reg : RegsToPass)
1419  Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
1420 
1421  if (!IsTailCall) {
1422  // Add a register mask operand representing the call-preserved registers.
1423  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
1424  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
1425  assert(Mask && "Missing call preserved mask for calling convention");
1426  Ops.push_back(DAG.getRegisterMask(Mask));
1427  }
1428 
1429  // Glue the call to the argument copies, if any.
1430  if (Glue.getNode())
1431  Ops.push_back(Glue);
1432 
1433  // Emit the call.
1434  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1435 
1436  if (IsTailCall) {
1438  return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
1439  }
1440 
1441  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
1442  Glue = Chain.getValue(1);
1443 
1444  // Mark the end of the call, which is glued to the call itself.
1445  Chain = DAG.getCALLSEQ_END(Chain,
1446  DAG.getConstant(NumBytes, DL, PtrVT, true),
1447  DAG.getConstant(0, DL, PtrVT, true),
1448  Glue, DL);
1449  Glue = Chain.getValue(1);
1450 
1451  // Assign locations to each value returned by this call.
1453  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
1454  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
1455 
1456  // Copy all of the result registers out of their specified physreg.
1457  for (auto &VA : RVLocs) {
1458  // Copy the value out
1459  SDValue RetValue =
1460  DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
1461  // Glue the RetValue to the end of the call sequence
1462  Chain = RetValue.getValue(1);
1463  Glue = RetValue.getValue(2);
1464 
1465  if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
1466  assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
1467  SDValue RetValue2 =
1468  DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
1469  Chain = RetValue2.getValue(1);
1470  Glue = RetValue2.getValue(2);
1471  RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
1472  RetValue2);
1473  }
1474 
1475  RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);
1476 
1477  InVals.push_back(RetValue);
1478  }
1479 
1480  return Chain;
1481 }
1482 
1483 bool RISCVTargetLowering::CanLowerReturn(
1484  CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
1485  const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
1487  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
1488  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1489  MVT VT = Outs[i].VT;
1490  ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
1491  if (CC_RISCV(MF.getDataLayout(), i, VT, VT, CCValAssign::Full, ArgFlags,
1492  CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
1493  return false;
1494  }
1495  return true;
1496 }
1497 
1498 SDValue
1499 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1500  bool IsVarArg,
1501  const SmallVectorImpl<ISD::OutputArg> &Outs,
1502  const SmallVectorImpl<SDValue> &OutVals,
1503  const SDLoc &DL, SelectionDAG &DAG) const {
1504  // Stores the assignment of the return value to a location.
1506 
1507  // Info about the registers and stack slot.
1508  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
1509  *DAG.getContext());
1510 
1511  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
1512  nullptr);
1513 
1514  SDValue Glue;
1515  SmallVector<SDValue, 4> RetOps(1, Chain);
1516 
1517  // Copy the result values into the output registers.
1518  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
1519  SDValue Val = OutVals[i];
1520  CCValAssign &VA = RVLocs[i];
1521  assert(VA.isRegLoc() && "Can only return in registers!");
1522 
1523  if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
1524  // Handle returning f64 on RV32D with a soft float ABI.
1525  assert(VA.isRegLoc() && "Expected return via registers");
1527  DAG.getVTList(MVT::i32, MVT::i32), Val);
1528  SDValue Lo = SplitF64.getValue(0);
1529  SDValue Hi = SplitF64.getValue(1);
1530  unsigned RegLo = VA.getLocReg();
1531  unsigned RegHi = RegLo + 1;
1532  Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
1533  Glue = Chain.getValue(1);
1534  RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
1535  Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
1536  Glue = Chain.getValue(1);
1537  RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
1538  } else {
1539  // Handle a 'normal' return.
1540  Val = convertValVTToLocVT(DAG, Val, VA, DL);
1541  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
1542 
1543  // Guarantee that all emitted copies are stuck together.
1544  Glue = Chain.getValue(1);
1545  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1546  }
1547  }
1548 
1549  RetOps[0] = Chain; // Update chain.
1550 
1551  // Add the glue node if we have it.
1552  if (Glue.getNode()) {
1553  RetOps.push_back(Glue);
1554  }
1555 
1556  // Interrupt service routines use different return instructions.
1557  const Function &Func = DAG.getMachineFunction().getFunction();
1558  if (Func.hasFnAttribute("interrupt")) {
1559  if (!Func.getReturnType()->isVoidTy())
1561  "Functions with the interrupt attribute must have void return type!");
1562 
1563  MachineFunction &MF = DAG.getMachineFunction();
1564  StringRef Kind =
1565  MF.getFunction().getFnAttribute("interrupt").getValueAsString();
1566 
1567  unsigned RetOpc;
1568  if (Kind == "user")
1569  RetOpc = RISCVISD::URET_FLAG;
1570  else if (Kind == "supervisor")
1571  RetOpc = RISCVISD::SRET_FLAG;
1572  else
1573  RetOpc = RISCVISD::MRET_FLAG;
1574 
1575  return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
1576  }
1577 
1578  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
1579 }
1580 
1581 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
1582  switch ((RISCVISD::NodeType)Opcode) {
1584  break;
1585  case RISCVISD::RET_FLAG:
1586  return "RISCVISD::RET_FLAG";
1587  case RISCVISD::URET_FLAG:
1588  return "RISCVISD::URET_FLAG";
1589  case RISCVISD::SRET_FLAG:
1590  return "RISCVISD::SRET_FLAG";
1591  case RISCVISD::MRET_FLAG:
1592  return "RISCVISD::MRET_FLAG";
1593  case RISCVISD::CALL:
1594  return "RISCVISD::CALL";
1595  case RISCVISD::SELECT_CC:
1596  return "RISCVISD::SELECT_CC";
1598  return "RISCVISD::BuildPairF64";
1599  case RISCVISD::SplitF64:
1600  return "RISCVISD::SplitF64";
1601  case RISCVISD::TAIL:
1602  return "RISCVISD::TAIL";
1603  }
1604  return nullptr;
1605 }
1606 
1607 std::pair<unsigned, const TargetRegisterClass *>
1609  StringRef Constraint,
1610  MVT VT) const {
1611  // First, see if this is a constraint that directly corresponds to a
1612  // RISCV register class.
1613  if (Constraint.size() == 1) {
1614  switch (Constraint[0]) {
1615  case 'r':
1616  return std::make_pair(0U, &RISCV::GPRRegClass);
1617  default:
1618  break;
1619  }
1620  }
1621 
1622  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1623 }
1624 
1626  Instruction *Inst,
1627  AtomicOrdering Ord) const {
1628  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
1629  return Builder.CreateFence(Ord);
1630  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
1631  return Builder.CreateFence(AtomicOrdering::Release);
1632  return nullptr;
1633 }
1634 
1636  Instruction *Inst,
1637  AtomicOrdering Ord) const {
1638  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
1639  return Builder.CreateFence(AtomicOrdering::Acquire);
1640  return nullptr;
1641 }
1642 
1644 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
1645  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
1646  if (Size == 8 || Size == 16)
1649 }
1650 
1651 static Intrinsic::ID
1653  switch (BinOp) {
1654  default:
1655  llvm_unreachable("Unexpected AtomicRMW BinOp");
1656  case AtomicRMWInst::Xchg:
1657  return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
1658  case AtomicRMWInst::Add:
1659  return Intrinsic::riscv_masked_atomicrmw_add_i32;
1660  case AtomicRMWInst::Sub:
1661  return Intrinsic::riscv_masked_atomicrmw_sub_i32;
1662  case AtomicRMWInst::Nand:
1663  return Intrinsic::riscv_masked_atomicrmw_nand_i32;
1664  case AtomicRMWInst::Max:
1665  return Intrinsic::riscv_masked_atomicrmw_max_i32;
1666  case AtomicRMWInst::Min:
1667  return Intrinsic::riscv_masked_atomicrmw_min_i32;
1668  case AtomicRMWInst::UMax:
1669  return Intrinsic::riscv_masked_atomicrmw_umax_i32;
1670  case AtomicRMWInst::UMin:
1671  return Intrinsic::riscv_masked_atomicrmw_umin_i32;
1672  }
1673 }
1674 
1675 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
1676  IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
1677  Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
1678  Value *Ordering = Builder.getInt32(static_cast<uint32_t>(AI->getOrdering()));
1679  Type *Tys[] = {AlignedAddr->getType()};
1680  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
1681  AI->getModule(),
1683 
1684  // Must pass the shift amount needed to sign extend the loaded value prior
1685  // to performing a signed comparison for min/max. ShiftAmt is the number of
1686  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
1687  // is the number of bits to left+right shift the value in order to
1688  // sign-extend.
1689  if (AI->getOperation() == AtomicRMWInst::Min ||
1690  AI->getOperation() == AtomicRMWInst::Max) {
1691  const DataLayout &DL = AI->getModule()->getDataLayout();
1692  unsigned ValWidth =
1694  Value *SextShamt = Builder.CreateSub(
1695  Builder.getInt32(Subtarget.getXLen() - ValWidth), ShiftAmt);
1696  return Builder.CreateCall(LrwOpScwLoop,
1697  {AlignedAddr, Incr, Mask, SextShamt, Ordering});
1698  }
1699 
1700  return Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
1701 }
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
void setFrameAddressIsTaken(bool T)
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:549
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
static MVT getIntegerVT(unsigned BitWidth)
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
uint64_t getTypeStoreSizeInBits(Type *Ty) const
Returns the maximum number of bits that may be overwritten by storing the specified type; always a mu...
Definition: DataLayout.h:419
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:570
EVT getValueType() const
Return the ValueType of the referenced return value.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
const GlobalValue * getGlobal() const
*p = old <signed v ? old : v
Definition: Instructions.h:711
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVMContext & Context
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it&#39;s not CSE&#39;d)...
Definition: SelectionDAG.h:851
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:139
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:619
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:135
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:383
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:668
This class represents a function call, abstracting a target machine&#39;s calling convention.
unsigned Reg
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
const RISCVRegisterInfo * getRegisterInfo() const override
*p = old <unsigned v ? old : v
Definition: Instructions.h:715
*p = old >unsigned v ? old : v
Definition: Instructions.h:713
unsigned getValNo() const
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC)
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:321
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:43
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Definition: DataLayout.h:360
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
STATISTIC(NumFunctions, "Total number of functions")
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:34
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:141
F(f)
bool hasExternalWeakLinkage() const
Definition: GlobalValue.h:437
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:681
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
*p = old >signed v ? old : v
Definition: Instructions.h:709
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:415
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:159
bool isMemLoc() const
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:210
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:436
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
The address of a basic block.
Definition: Constants.h:836
A description of a memory reference used in the backend.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the &#39;usesCustomInserter&#39; fla...
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:364
const HexagonInstrInfo * TII
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:451
unsigned getXLen() const
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
BinOp getOperation() const
Definition: Instructions.h:734
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
void addLoc(const CCValAssign &V)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:197
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:743
static MachineBasicBlock * emitSplitF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB)
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn&#39;t supported on the target and indicate what to d...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:409
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:457
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:398
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:73
const BlockAddress * getBlockAddress() const
LocInfo getLocInfo() const
static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
This represents a list of ValueType&#39;s that has been intern&#39;d by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
AtomicOrdering
Atomic ordering for LLVM&#39;s memory model.
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:664
Class to represent function types.
Definition: DerivedTypes.h:103
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:395
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
SDValue getRegisterMask(const uint32_t *RegMask)
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:693
bool arg_empty() const
Definition: Function.h:699
SmallVectorImpl< CCValAssign > & getPendingLocs()
bool isTruncateFree(Type *SrcTy, Type *DstTy) const override
Return true if it&#39;s free to truncate a value of type FromTy to type ToTy.
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:201
bool hasStdExtA() const
static MachineBasicBlock * emitBuildPairF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB)
SmallVectorImpl< ISD::ArgFlagsTy > & getPendingArgFlags()
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:991
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:928
virtual const TargetInstrInfo * getInstrInfo() const
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
amdgpu Simplify well known AMD library false Value * Callee
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1021
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
Class to represent pointers.
Definition: DerivedTypes.h:467
unsigned getByValSize() const
unsigned getKillRegState(bool B)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
TargetInstrInfo - Interface to description of machine instruction set.
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC)
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:141
The memory access is volatile.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function...
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:169
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
unsigned getOrigAlign() const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:69
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:149
bool hasStdExtF() const
This is an important base class in LLVM.
Definition: Constant.h:42
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
Definition: ISDOpcodes.h:697
Instruction * emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
const SDValue & getOperand(unsigned Num) const
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:903
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
Definition: SelectionDAG.h:839
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
bool isAcquireOrStronger(AtomicOrdering ao)
bool hasStdExtM() const
unsigned getLargestLegalIntTypeSizeInBits() const
Returns the size of largest legal integer type size, or 0 if none are set.
Definition: DataLayout.cpp:764
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
void setPrefFunctionAlignment(unsigned Align)
Set the target&#39;s preferred function alignment.
self_iterator getIterator()
Definition: ilist_node.h:82
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y)...
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:281
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:688
const MachineInstrBuilder & addFrameIndex(int Idx) const
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it&#39;s implicit...
Extended Value Type.
Definition: ValueTypes.h:34
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isPositionIndependent() const
size_t size() const
Definition: SmallVector.h:53
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:763
This class contains a discriminated union of information about pointers in memory operands...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
SDValue CreateStackTemporary(EVT VT, unsigned minAlign=1)
Create a stack temporary, suitable for holding the specified value type.
The memory access writes data.
unsigned getFrameRegister(const MachineFunction &MF) const override
bool isReleaseOrStronger(AtomicOrdering ao)
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:636
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:50
Value * getValOperand()
Definition: Instructions.h:789
static const MCPhysReg ArgGPRs[]
bool hasStdExtD() const
Iterator for intrusive lists based on ilist_node.
CCState - This class holds information needed while lowering arguments and return values...
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:222
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:847
CCValAssign - Represent assignment of one arg/retval to a location.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
Definition: STLExtras.h:900
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition: ValueTypes.h:96
const DataFlowGraph & G
Definition: RDFGraph.cpp:211
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Instruction * emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:393
Type * getReturnType() const
Definition: DerivedTypes.h:124
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:307
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
const Constant * getConstVal() const
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
C - The default llvm calling convention, compatible with C.
Definition: CallingConv.h:35
Represents one node in the SelectionDAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
int64_t getImm() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:676
const Function & getFunction() const
Return the LLVM function that this machine code represents.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:133
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:941
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:164
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:56
unsigned getByValAlign() const
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:400
void setMinFunctionAlignment(unsigned Align)
Set the target&#39;s minimum function alignment (in log2(bytes))
RISCVTargetLowering(const TargetMachine &TM, const RISCVSubtarget &STI)
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Store the specified register of the given register class to the specified stack frame index...
bool is64Bit() const
amdgpu Simplify well known AMD library false Value Value * Arg
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:428
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:607
Representation of each machine instruction.
Definition: MachineInstr.h:64
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Definition: ISDOpcodes.h:693
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:670
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:151
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
FenceInst * CreateFence(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, const Twine &Name="")
Definition: IRBuilder.h:1365
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB &#39;Other&#39; at the position From, and insert it into this MBB right before &#39;...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:702
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
unsigned getLocMemOffset() const
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:206
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:56
StringRef getValueAsString() const
Return the attribute&#39;s value as a string.
Definition: Attributes.cpp:195
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:466
static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy)
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
PointerUnion< const Value *, const PseudoSourceValue * > ptrVal
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2)
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
uint32_t Size
Definition: Profile.cpp:47
static Intrinsic::ID getIntrinsicForMaskedAtomicRMWBinOp32(AtomicRMWInst::BinOp BinOp)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, unsigned Offset, MVT LocVT, LocInfo HTP)
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
unsigned getOpcode() const
SDValue getValue(unsigned R) const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
bool isRegLoc() const
const unsigned Kind
bool hasStdExtC() const
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void setReturnAddressIsTaken(bool s)
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:115
LLVM Value Representation.
Definition: Value.h:73
SDValue getRegister(unsigned Reg, EVT VT)
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E&#39;s largest value.
Definition: BitmaskEnum.h:81
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.h:331
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:59
IRTranslator LLVM IR MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:423
const SDValue & getOperand(unsigned i) const
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
unsigned getLocReg() const
#define LLVM_DEBUG(X)
Definition: Debug.h:123
unsigned AllocateReg(unsigned Reg)
AllocateReg - Attempt to allocate one register.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:414
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
unsigned AllocateStack(unsigned Size, unsigned Align)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
Function Alias Analysis false
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Load the specified register of the given register class from the specified stack frame index...
LLVMContext * getContext() const
Definition: SelectionDAG.h:404
Type * getElementType() const
Definition: DerivedTypes.h:486
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:619
CallInst * CreateCall(Value *Callee, ArrayRef< Value *> Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1883
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:364
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
Definition: ISDOpcodes.h:592