1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the interfaces that RISCV uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "RISCVISelLowering.h"
16 #include "RISCV.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/Statistic.h"
30 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/Support/Debug.h"
35 
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "riscv-lower"
39 
40 STATISTIC(NumTailCalls, "Number of tail calls");
41 
42 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
43                                          const RISCVSubtarget &STI)
44  : TargetLowering(TM), Subtarget(STI) {
45 
46  MVT XLenVT = Subtarget.getXLenVT();
47 
48  // Set up the register classes.
49  addRegisterClass(XLenVT, &RISCV::GPRRegClass);
50 
51  if (Subtarget.hasStdExtF())
52  addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
53  if (Subtarget.hasStdExtD())
54  addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
55 
56  // Compute derived properties from the register classes.
57   computeRegisterProperties(STI.getRegisterInfo());
58 
59   setStackPointerRegisterToSaveRestore(RISCV::X2);
60 
61  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
62  setLoadExtAction(N, XLenVT, MVT::i1, Promote);
63 
64  // TODO: add all necessary setOperationAction calls.
66 
71 
74 
79 
80  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
81     setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
82 
83   if (!Subtarget.hasStdExtM()) {
84     setOperationAction(ISD::MUL, XLenVT, Expand);
85     setOperationAction(ISD::MULHS, XLenVT, Expand);
86     setOperationAction(ISD::MULHU, XLenVT, Expand);
87     setOperationAction(ISD::SDIV, XLenVT, Expand);
88     setOperationAction(ISD::UDIV, XLenVT, Expand);
89     setOperationAction(ISD::SREM, XLenVT, Expand);
90     setOperationAction(ISD::UREM, XLenVT, Expand);
91   }
92 
97 
101 
108 
109   ISD::CondCode FPCCToExtend[] = {
110       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETO,   ISD::SETUEQ,
111       ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE,
112       ISD::SETGT,  ISD::SETGE,  ISD::SETNE};
113 
114   if (Subtarget.hasStdExtF()) {
115     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
116     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
117     for (auto CC : FPCCToExtend)
118       setCondCodeAction(CC, MVT::f32, Expand);
119     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
120     setOperationAction(ISD::SELECT, MVT::f32, Custom);
121     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
122   }
123 
124   if (Subtarget.hasStdExtD()) {
125     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
126     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
127     for (auto CC : FPCCToExtend)
128       setCondCodeAction(CC, MVT::f64, Expand);
129     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
130     setOperationAction(ISD::SELECT, MVT::f64, Custom);
131     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
132     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
133     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
134   }
135 
136   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
137   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
138   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
139 
140   if (Subtarget.hasStdExtA())
141     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
142   else
143     setMaxAtomicSizeInBitsSupported(0);
144 
145   setBooleanContents(ZeroOrOneBooleanContent);
146 
147  // Function alignments (log2).
148  unsigned FunctionAlignment = Subtarget.hasStdExtC() ? 1 : 2;
149  setMinFunctionAlignment(FunctionAlignment);
150  setPrefFunctionAlignment(FunctionAlignment);
151 
152  // Effectively disable jump table generation.
153   setMinimumJumpTableEntries(INT_MAX);
154 }
155 
156 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
157                                             LLVMContext &Context, EVT VT) const {
158  if (!VT.isVector())
159  return getPointerTy(DL);
160   return VT.changeVectorElementTypeToInteger();
161 }
162 
163 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
164                                                 const AddrMode &AM, Type *Ty,
165  unsigned AS,
166  Instruction *I) const {
167  // No global is ever allowed as a base.
168  if (AM.BaseGV)
169  return false;
170 
171  // Require a 12-bit signed offset.
172  if (!isInt<12>(AM.BaseOffs))
173  return false;
174 
175  switch (AM.Scale) {
176  case 0: // "r+i" or just "i", depending on HasBaseReg.
177  break;
178  case 1:
179  if (!AM.HasBaseReg) // allow "r+i".
180  break;
181  return false; // disallow "r+r" or "r+r+i".
182  default:
183  return false;
184  }
185 
186  return true;
187 }
188 
189 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
190   return isInt<12>(Imm);
191 }
192 
193 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
194   return isInt<12>(Imm);
195 }
196 
197 // On RV32, 64-bit integers are split into their high and low parts and held
198 // in two different registers, so the trunc is free since the low register can
199 // just be used.
200 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
201  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
202  return false;
203  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
204  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
205  return (SrcBits == 64 && DestBits == 32);
206 }
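// For example, on RV32 (trunc i64 %x to i32) needs no instruction at all: the
// low half of %x already lives in its own GPR and can be used directly.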
207 
208 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
209  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
210  !SrcVT.isInteger() || !DstVT.isInteger())
211  return false;
212  unsigned SrcBits = SrcVT.getSizeInBits();
213  unsigned DestBits = DstVT.getSizeInBits();
214  return (SrcBits == 64 && DestBits == 32);
215 }
216 
217 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
218   // Zexts are free if they can be combined with a load.
219  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
220  EVT MemVT = LD->getMemoryVT();
221  if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
222  (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
223  (LD->getExtensionType() == ISD::NON_EXTLOAD ||
224  LD->getExtensionType() == ISD::ZEXTLOAD))
225  return true;
226  }
227 
228  return TargetLowering::isZExtFree(Val, VT2);
229 }
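// For example, (zext (i8 (load %p))) is selected to a single LBU, and on RV64
// (zext (i32 (load %p))) to a single LWU, so no separate zero-extension
// instruction is needed.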
230 
231 // Changes the condition code and swaps operands if necessary, so the SetCC
232 // operation matches one of the comparisons supported directly in the RISC-V
233 // ISA.
234 static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
235  switch (CC) {
236  default:
237  break;
238  case ISD::SETGT:
239  case ISD::SETLE:
240  case ISD::SETUGT:
241  case ISD::SETULE:
242     CC = ISD::getSetCCSwappedOperands(CC);
243     std::swap(LHS, RHS);
244  break;
245  }
246 }
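// For example, (setcc a, b, setgt) is rewritten as (setcc b, a, setlt), which
// maps directly onto BLT, and (setcc a, b, setule) becomes (setcc b, a, setuge)
// for BGEU.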
247 
248 // Return the RISC-V branch opcode that matches the given DAG integer
249 // condition code. The CondCode must be one of those supported by the RISC-V
250 // ISA (see normaliseSetCC).
251 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
252   switch (CC) {
253  default:
254  llvm_unreachable("Unsupported CondCode");
255  case ISD::SETEQ:
256  return RISCV::BEQ;
257  case ISD::SETNE:
258  return RISCV::BNE;
259  case ISD::SETLT:
260  return RISCV::BLT;
261  case ISD::SETGE:
262  return RISCV::BGE;
263  case ISD::SETULT:
264  return RISCV::BLTU;
265  case ISD::SETUGE:
266  return RISCV::BGEU;
267  }
268 }
269 
270 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
271                                             SelectionDAG &DAG) const {
272  switch (Op.getOpcode()) {
273  default:
274  report_fatal_error("unimplemented operand");
275  case ISD::GlobalAddress:
276  return lowerGlobalAddress(Op, DAG);
277  case ISD::BlockAddress:
278  return lowerBlockAddress(Op, DAG);
279  case ISD::ConstantPool:
280  return lowerConstantPool(Op, DAG);
281  case ISD::SELECT:
282  return lowerSELECT(Op, DAG);
283  case ISD::VASTART:
284  return lowerVASTART(Op, DAG);
285  case ISD::FRAMEADDR:
286  return LowerFRAMEADDR(Op, DAG);
287  case ISD::RETURNADDR:
288  return LowerRETURNADDR(Op, DAG);
289  }
290 }
291 
292 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
293  SelectionDAG &DAG) const {
294  SDLoc DL(Op);
295  EVT Ty = Op.getValueType();
296  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
297  const GlobalValue *GV = N->getGlobal();
298  int64_t Offset = N->getOffset();
299  MVT XLenVT = Subtarget.getXLenVT();
300 
301  if (isPositionIndependent() || Subtarget.is64Bit())
302  report_fatal_error("Unable to lowerGlobalAddress");
303  // In order to maximise the opportunity for common subexpression elimination,
304  // emit a separate ADD node for the global address offset instead of folding
305  // it in the global address node. Later peephole optimisations may choose to
306  // fold it back in when profitable.
307  SDValue GAHi = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_HI);
308  SDValue GALo = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_LO);
309  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
310  SDValue MNLo =
311  SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
312  if (Offset != 0)
313  return DAG.getNode(ISD::ADD, DL, Ty, MNLo,
314  DAG.getConstant(Offset, DL, XLenVT));
315  return MNLo;
316 }
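// The nodes built above correspond to the usual absolute addressing sequence,
// e.g. for a global @g (register choice illustrative only):
//   lui  a0, %hi(g)
//   addi a0, a0, %lo(g)
// with any non-zero offset added afterwards as a separate ADD node.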
317 
318 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
319  SelectionDAG &DAG) const {
320  SDLoc DL(Op);
321  EVT Ty = Op.getValueType();
322  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
323  const BlockAddress *BA = N->getBlockAddress();
324  int64_t Offset = N->getOffset();
325 
326  if (isPositionIndependent() || Subtarget.is64Bit())
327  report_fatal_error("Unable to lowerBlockAddress");
328 
329  SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
330  SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO);
331  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0);
332  SDValue MNLo =
333  SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0);
334  return MNLo;
335 }
336 
337 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
338  SelectionDAG &DAG) const {
339  SDLoc DL(Op);
340  EVT Ty = Op.getValueType();
341  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
342  const Constant *CPA = N->getConstVal();
343  int64_t Offset = N->getOffset();
344  unsigned Alignment = N->getAlignment();
345 
346  if (!isPositionIndependent()) {
347  SDValue CPAHi =
348  DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_HI);
349  SDValue CPALo =
350  DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_LO);
351  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, CPAHi), 0);
352  SDValue MNLo =
353  SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, CPALo), 0);
354  return MNLo;
355  } else {
356  report_fatal_error("Unable to lowerConstantPool");
357  }
358 }
359 
360 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
361  SDValue CondV = Op.getOperand(0);
362  SDValue TrueV = Op.getOperand(1);
363  SDValue FalseV = Op.getOperand(2);
364  SDLoc DL(Op);
365  MVT XLenVT = Subtarget.getXLenVT();
366 
367  // If the result type is XLenVT and CondV is the output of a SETCC node
368  // which also operated on XLenVT inputs, then merge the SETCC node into the
369  // lowered RISCVISD::SELECT_CC to take advantage of the integer
370  // compare+branch instructions. i.e.:
371  // (select (setcc lhs, rhs, cc), truev, falsev)
372  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
373  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
374  CondV.getOperand(0).getSimpleValueType() == XLenVT) {
375  SDValue LHS = CondV.getOperand(0);
376  SDValue RHS = CondV.getOperand(1);
377  auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
378  ISD::CondCode CCVal = CC->get();
379 
380  normaliseSetCC(LHS, RHS, CCVal);
381 
382  SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
383  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
384  SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
385  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
386  }
387 
388  // Otherwise:
389  // (select condv, truev, falsev)
390  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
391  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
392  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);
393 
394  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
395  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
396 
397  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
398 }
399 
400 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
401   MachineFunction &MF = DAG.getMachineFunction();
402   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
403 
404  SDLoc DL(Op);
405  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
406                                  getPointerTy(MF.getDataLayout()));
407 
408  // vastart just stores the address of the VarArgsFrameIndex slot into the
409  // memory location argument.
410  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
411  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
412  MachinePointerInfo(SV));
413 }
414 
415 SDValue RISCVTargetLowering::LowerFRAMEADDR(SDValue Op,
416  SelectionDAG &DAG) const {
417  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
418   MachineFunction &MF = DAG.getMachineFunction();
419   MachineFrameInfo &MFI = MF.getFrameInfo();
420  MFI.setFrameAddressIsTaken(true);
421  unsigned FrameReg = RI.getFrameRegister(MF);
422  int XLenInBytes = Subtarget.getXLen() / 8;
423 
424  EVT VT = Op.getValueType();
425  SDLoc DL(Op);
426  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
427  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
428  while (Depth--) {
429  int Offset = -(XLenInBytes * 2);
430  SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
431  DAG.getIntPtrConstant(Offset, DL));
432  FrameAddr =
433  DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
434  }
435  return FrameAddr;
436 }
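// The loop above walks parent frames by reloading the saved frame pointer,
// which this lowering assumes is spilled at FP - 2*XLenInBytes (-8 on RV32,
// -16 on RV64), matching the Offset computed above.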
437 
438 SDValue RISCVTargetLowering::LowerRETURNADDR(SDValue Op,
439  SelectionDAG &DAG) const {
440  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
441   MachineFunction &MF = DAG.getMachineFunction();
442   MachineFrameInfo &MFI = MF.getFrameInfo();
443  MFI.setReturnAddressIsTaken(true);
444  MVT XLenVT = Subtarget.getXLenVT();
445  int XLenInBytes = Subtarget.getXLen() / 8;
446 
447   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
448     return SDValue();
449 
450  EVT VT = Op.getValueType();
451  SDLoc DL(Op);
452  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
453  if (Depth) {
454  int Off = -XLenInBytes;
455  SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
456  SDValue Offset = DAG.getConstant(Off, DL, VT);
457  return DAG.getLoad(VT, DL, DAG.getEntryNode(),
458  DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
459                        MachinePointerInfo());
460   }
461 
462  // Return the value of the return address register, marking it an implicit
463  // live-in.
464  unsigned Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
465  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
466 }
467 
468 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
469                                              MachineBasicBlock *BB) {
470  assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
471 
472  MachineFunction &MF = *BB->getParent();
473  DebugLoc DL = MI.getDebugLoc();
474  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
475  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
476  unsigned LoReg = MI.getOperand(0).getReg();
477  unsigned HiReg = MI.getOperand(1).getReg();
478  unsigned SrcReg = MI.getOperand(2).getReg();
479  const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
480  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
481 
482  TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
483  RI);
484  MachineMemOperand *MMO =
485  MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
486                               MachineMemOperand::MOLoad, 8, 8);
487   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
488  .addFrameIndex(FI)
489  .addImm(0)
490  .addMemOperand(MMO);
491  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
492  .addFrameIndex(FI)
493  .addImm(4)
494  .addMemOperand(MMO);
495  MI.eraseFromParent(); // The pseudo instruction is gone now.
496  return BB;
497 }
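// The expansion above amounts to a store/reload through the MoveF64 stack
// slot, e.g. (registers illustrative only):
//   fsd fa0, 0(slot)   ; storeRegToStackSlot of the FPR64 source
//   lw  a0, 0(slot)    ; low 32 bits
//   lw  a1, 4(slot)    ; high 32 bits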
498 
499 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
500                                                  MachineBasicBlock *BB) {
501  assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
502  "Unexpected instruction");
503 
504  MachineFunction &MF = *BB->getParent();
505  DebugLoc DL = MI.getDebugLoc();
506  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
507  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
508  unsigned DstReg = MI.getOperand(0).getReg();
509  unsigned LoReg = MI.getOperand(1).getReg();
510  unsigned HiReg = MI.getOperand(2).getReg();
511  const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
512  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();
513 
514  MachineMemOperand *MMO =
515  MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
516                               MachineMemOperand::MOStore, 8, 8);
517   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
518  .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
519  .addFrameIndex(FI)
520  .addImm(0)
521  .addMemOperand(MMO);
522  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
523  .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
524  .addFrameIndex(FI)
525  .addImm(4)
526  .addMemOperand(MMO);
527  TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
528  MI.eraseFromParent(); // The pseudo instruction is gone now.
529  return BB;
530 }
531 
532 MachineBasicBlock *
533 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
534                                                  MachineBasicBlock *BB) const {
535  switch (MI.getOpcode()) {
536  default:
537  llvm_unreachable("Unexpected instr type to insert");
538  case RISCV::Select_GPR_Using_CC_GPR:
539  case RISCV::Select_FPR32_Using_CC_GPR:
540  case RISCV::Select_FPR64_Using_CC_GPR:
541  break;
542  case RISCV::BuildPairF64Pseudo:
543  return emitBuildPairF64Pseudo(MI, BB);
544  case RISCV::SplitF64Pseudo:
545  return emitSplitF64Pseudo(MI, BB);
546  }
547 
548  // To "insert" a SELECT instruction, we actually have to insert the triangle
549  // control-flow pattern. The incoming instruction knows the destination vreg
550  // to set, the condition code register to branch on, the true/false values to
551  // select between, and the condcode to use to select the appropriate branch.
552  //
553  // We produce the following control flow:
554  // HeadMBB
555  // | \
556  // | IfFalseMBB
557  // | /
558  // TailMBB
559   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
560   const BasicBlock *LLVM_BB = BB->getBasicBlock();
561  DebugLoc DL = MI.getDebugLoc();
562   MachineFunction::iterator I = ++BB->getIterator();
563 
564  MachineBasicBlock *HeadMBB = BB;
565  MachineFunction *F = BB->getParent();
566  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
567  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
568 
569  F->insert(I, IfFalseMBB);
570  F->insert(I, TailMBB);
571  // Move all remaining instructions to TailMBB.
572  TailMBB->splice(TailMBB->begin(), HeadMBB,
573  std::next(MachineBasicBlock::iterator(MI)), HeadMBB->end());
574  // Update machine-CFG edges by transferring all successors of the current
575  // block to the new block which will contain the Phi node for the select.
576  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
577  // Set the successors for HeadMBB.
578  HeadMBB->addSuccessor(IfFalseMBB);
579  HeadMBB->addSuccessor(TailMBB);
580 
581  // Insert appropriate branch.
582  unsigned LHS = MI.getOperand(1).getReg();
583  unsigned RHS = MI.getOperand(2).getReg();
584  auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
585  unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
586 
587  BuildMI(HeadMBB, DL, TII.get(Opcode))
588  .addReg(LHS)
589  .addReg(RHS)
590  .addMBB(TailMBB);
591 
592  // IfFalseMBB just falls through to TailMBB.
593  IfFalseMBB->addSuccessor(TailMBB);
594 
595  // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
596  BuildMI(*TailMBB, TailMBB->begin(), DL, TII.get(RISCV::PHI),
597  MI.getOperand(0).getReg())
598  .addReg(MI.getOperand(4).getReg())
599  .addMBB(HeadMBB)
600  .addReg(MI.getOperand(5).getReg())
601  .addMBB(IfFalseMBB);
602 
603  MI.eraseFromParent(); // The pseudo instruction is gone now.
604  return TailMBB;
605 }
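// As an illustration, a pseudo such as
//   %dst = Select_GPR_Using_CC_GPR %lhs, %rhs, setlt, %tval, %fval
// is expanded roughly into:
//   HeadMBB:    BLT %lhs, %rhs, TailMBB
//   IfFalseMBB: (fallthrough)
//   TailMBB:    %dst = PHI [%tval, HeadMBB], [%fval, IfFalseMBB]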
606 
607 // Calling Convention Implementation.
608 // The expectations for frontend ABI lowering vary from target to target.
609 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
610 // details, but this is a longer term goal. For now, we simply try to keep the
611 // role of the frontend as simple and well-defined as possible. The rules can
612 // be summarised as:
613 // * Never split up large scalar arguments. We handle them here.
614 // * If a hardfloat calling convention is being used, and the struct may be
615 // passed in a pair of registers (fp+fp, int+fp), and both registers are
616 // available, then pass as two separate arguments. If either the GPRs or FPRs
617 // are exhausted, then pass according to the rule below.
618 // * If a struct could never be passed in registers or directly in a stack
619 // slot (as it is larger than 2*XLEN and the floating point rules don't
620 // apply), then pass it using a pointer with the byval attribute.
621 // * If a struct is less than 2*XLEN, then coerce to either a two-element
622 // word-sized array or a 2*XLEN scalar (depending on alignment).
623 // * The frontend can determine whether a struct is returned by reference or
624 // not based on its size and fields. If it will be returned by reference, the
625 // frontend must modify the prototype so a pointer with the sret annotation is
626 // passed as the first argument. This is not necessary for large scalar
627 // returns.
628 // * Struct return values and varargs should be coerced to structs containing
629 // register-size fields in the same situations they would be for fixed
630 // arguments.
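// As a concrete illustration of the rules above: on RV32 a frontend following
// them passes a small struct that fits in two words either as a two-element
// i32 array or as an i64 (depending on its alignment), while e.g. a 24-byte
// struct is larger than 2*XLEN and is therefore passed as a pointer with the
// byval attribute.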
631 
632 static const MCPhysReg ArgGPRs[] = {
633  RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
634  RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
635 };
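// X10-X17 are the integer argument registers, a0-a7 in the ABI mnemonic names.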
636 
637 // Pass a 2*XLEN argument that has been split into two XLEN values through
638 // registers or the stack as necessary.
639 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
640  ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
641  MVT ValVT2, MVT LocVT2,
642  ISD::ArgFlagsTy ArgFlags2) {
643  unsigned XLenInBytes = XLen / 8;
644  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
645  // At least one half can be passed via register.
646  State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
647  VA1.getLocVT(), CCValAssign::Full));
648  } else {
649  // Both halves must be passed on the stack, with proper alignment.
650  unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
651  State.addLoc(
652         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
653                             State.AllocateStack(XLenInBytes, StackAlign),
654                             VA1.getLocVT(), CCValAssign::Full));
655     State.addLoc(CCValAssign::getMem(
656         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
657         CCValAssign::Full));
658     return false;
659  }
660 
661  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
662  // The second half can also be passed via register.
663  State.addLoc(
664  CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
665  } else {
666  // The second half is passed via the stack, without additional alignment.
667     State.addLoc(CCValAssign::getMem(
668         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
669         CCValAssign::Full));
670   }
671 
672  return false;
673 }
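// For example, an i64 on RV32 that was split into two i32 halves may end up in
// a register pair (e.g. a0+a1), split between the last free argument register
// and a 4-byte stack slot, or entirely in an 8-byte-aligned stack object when
// no argument registers remain.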
674 
675 // Implements the RISC-V calling convention. Returns true upon failure.
676 static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT,
677  CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
678  CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
679  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
680  assert(XLen == 32 || XLen == 64);
681  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
682  if (ValVT == MVT::f32) {
683  LocVT = MVT::i32;
684  LocInfo = CCValAssign::BCvt;
685  }
686 
687   // Any return value split into more than two values can't be returned
688  // directly.
689  if (IsRet && ValNo > 1)
690  return true;
691 
692  // If this is a variadic argument, the RISC-V calling convention requires
693  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
694  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
695  // be used regardless of whether the original argument was split during
696  // legalisation or not. The argument will not be passed by registers if the
697  // original type is larger than 2*XLEN, so the register alignment rule does
698  // not apply.
699  unsigned TwoXLenInBytes = (2 * XLen) / 8;
700  if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
701  DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
702  unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
703  // Skip 'odd' register if necessary.
704  if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
705  State.AllocateReg(ArgGPRs);
706  }
707 
708  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
709  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
710  State.getPendingArgFlags();
711 
712  assert(PendingLocs.size() == PendingArgFlags.size() &&
713  "PendingLocs and PendingArgFlags out of sync");
714 
715  // Handle passing f64 on RV32D with a soft float ABI.
716  if (XLen == 32 && ValVT == MVT::f64) {
717  assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
718  "Can't lower f64 if it is split");
719     // Depending on the available argument GPRs, f64 may be passed in a pair of
720  // GPRs, split between a GPR and the stack, or passed completely on the
721  // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
722  // cases.
723  unsigned Reg = State.AllocateReg(ArgGPRs);
724  LocVT = MVT::i32;
725  if (!Reg) {
726  unsigned StackOffset = State.AllocateStack(8, 8);
727  State.addLoc(
728  CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
729  return false;
730  }
731  if (!State.AllocateReg(ArgGPRs))
732  State.AllocateStack(4, 4);
733  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
734  return false;
735  }
736 
737  // Split arguments might be passed indirectly, so keep track of the pending
738  // values.
739  if (ArgFlags.isSplit() || !PendingLocs.empty()) {
740  LocVT = XLenVT;
741  LocInfo = CCValAssign::Indirect;
742  PendingLocs.push_back(
743  CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
744  PendingArgFlags.push_back(ArgFlags);
745  if (!ArgFlags.isSplitEnd()) {
746  return false;
747  }
748  }
749 
750  // If the split argument only had two elements, it should be passed directly
751  // in registers or on the stack.
752  if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
753  assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
754  // Apply the normal calling convention rules to the first half of the
755  // split argument.
756  CCValAssign VA = PendingLocs[0];
757  ISD::ArgFlagsTy AF = PendingArgFlags[0];
758  PendingLocs.clear();
759  PendingArgFlags.clear();
760  return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
761  ArgFlags);
762  }
763 
764  // Allocate to a register if possible, or else a stack slot.
765  unsigned Reg = State.AllocateReg(ArgGPRs);
766  unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);
767 
768  // If we reach this point and PendingLocs is non-empty, we must be at the
769  // end of a split argument that must be passed indirectly.
770  if (!PendingLocs.empty()) {
771  assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
772  assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
773 
774  for (auto &It : PendingLocs) {
775  if (Reg)
776  It.convertToReg(Reg);
777  else
778  It.convertToMem(StackOffset);
779  State.addLoc(It);
780  }
781  PendingLocs.clear();
782  PendingArgFlags.clear();
783  return false;
784  }
785 
786  assert(LocVT == XLenVT && "Expected an XLenVT at this stage");
787 
788  if (Reg) {
789  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
790  } else {
791  State.addLoc(
792  CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
793  }
794  return false;
795 }
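// Worked example for RV32: for a call f(i32 %a, i64 %b, fp128 %c), %a is
// assigned a0, the two halves of %b get a1 and a2, and %c (larger than 2*XLEN)
// is passed indirectly, so only its address is assigned here (a3 if still
// free).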
796 
797 void RISCVTargetLowering::analyzeInputArgs(
798  MachineFunction &MF, CCState &CCInfo,
799  const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
800  unsigned NumArgs = Ins.size();
801  FunctionType *FType = MF.getFunction().getFunctionType();
802 
803  for (unsigned i = 0; i != NumArgs; ++i) {
804  MVT ArgVT = Ins[i].VT;
805  ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
806 
807  Type *ArgTy = nullptr;
808  if (IsRet)
809  ArgTy = FType->getReturnType();
810  else if (Ins[i].isOrigArg())
811  ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
812 
813  if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
814  ArgFlags, CCInfo, /*IsRet=*/true, IsRet, ArgTy)) {
815  LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
816  << EVT(ArgVT).getEVTString() << '\n');
817  llvm_unreachable(nullptr);
818  }
819  }
820 }
821 
822 void RISCVTargetLowering::analyzeOutputArgs(
823  MachineFunction &MF, CCState &CCInfo,
824  const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
825  CallLoweringInfo *CLI) const {
826  unsigned NumArgs = Outs.size();
827 
828  for (unsigned i = 0; i != NumArgs; i++) {
829  MVT ArgVT = Outs[i].VT;
830  ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
831  Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
832 
833  if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
834  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
835  LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
836  << EVT(ArgVT).getEVTString() << "\n");
837  llvm_unreachable(nullptr);
838  }
839  }
840 }
841 
842 // The caller is responsible for loading the full value if the argument is
843 // passed with CCValAssign::Indirect.
844 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
845                                 const CCValAssign &VA, const SDLoc &DL) {
846   MachineFunction &MF = DAG.getMachineFunction();
847   MachineRegisterInfo &RegInfo = MF.getRegInfo();
848  EVT LocVT = VA.getLocVT();
849  EVT ValVT = VA.getValVT();
850  SDValue Val;
851 
852  unsigned VReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
853  RegInfo.addLiveIn(VA.getLocReg(), VReg);
854  Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
855 
856  switch (VA.getLocInfo()) {
857  default:
858  llvm_unreachable("Unexpected CCValAssign::LocInfo");
859  case CCValAssign::Full:
860   case CCValAssign::Indirect:
861     break;
862  case CCValAssign::BCvt:
863  Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
864  break;
865  }
866  return Val;
867 }
868 
869 // The caller is responsible for loading the full value if the argument is
870 // passed with CCValAssign::Indirect.
871 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
872                                 const CCValAssign &VA, const SDLoc &DL) {
873   MachineFunction &MF = DAG.getMachineFunction();
874   MachineFrameInfo &MFI = MF.getFrameInfo();
875  EVT LocVT = VA.getLocVT();
876  EVT ValVT = VA.getValVT();
877   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
878   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
879  VA.getLocMemOffset(), /*Immutable=*/true);
880  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
881  SDValue Val;
882 
883   ISD::LoadExtType ExtType;
884   switch (VA.getLocInfo()) {
885  default:
886  llvm_unreachable("Unexpected CCValAssign::LocInfo");
887  case CCValAssign::Full:
888   case CCValAssign::Indirect:
889     ExtType = ISD::NON_EXTLOAD;
890  break;
891  }
892  Val = DAG.getExtLoad(
893  ExtType, DL, LocVT, Chain, FIN,
894       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
895   return Val;
896 }
897 
898 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
899                                        const CCValAssign &VA, const SDLoc &DL) {
900  assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
901  "Unexpected VA");
902   MachineFunction &MF = DAG.getMachineFunction();
903   MachineFrameInfo &MFI = MF.getFrameInfo();
904  MachineRegisterInfo &RegInfo = MF.getRegInfo();
905 
906  if (VA.isMemLoc()) {
907  // f64 is passed on the stack.
908  int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
909  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
910  return DAG.getLoad(MVT::f64, DL, Chain, FIN,
911                        MachinePointerInfo::getFixedStack(MF, FI));
912   }
913 
914  assert(VA.isRegLoc() && "Expected register VA assignment");
915 
916  unsigned LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
917  RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
918  SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
919  SDValue Hi;
920  if (VA.getLocReg() == RISCV::X17) {
921  // Second half of f64 is passed on the stack.
922  int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
923  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
924  Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
925                      MachinePointerInfo::getFixedStack(MF, FI));
926   } else {
927  // Second half of f64 is passed in another GPR.
928  unsigned HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
929  RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
930  Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
931  }
932  return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
933 }
934 
935 // Transform physical registers into virtual registers.
936 SDValue RISCVTargetLowering::LowerFormalArguments(
937  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
938  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
939  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
940 
941  switch (CallConv) {
942  default:
943  report_fatal_error("Unsupported calling convention");
944  case CallingConv::C:
945  case CallingConv::Fast:
946  break;
947  }
948 
949   MachineFunction &MF = DAG.getMachineFunction();
950 
951  const Function &Func = MF.getFunction();
952  if (Func.hasFnAttribute("interrupt")) {
953  if (!Func.arg_empty())
955  "Functions with the interrupt attribute cannot have arguments!");
956 
957  StringRef Kind =
958  MF.getFunction().getFnAttribute("interrupt").getValueAsString();
959 
960  if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
962  "Function interrupt attribute argument not supported!");
963  }
964 
965  EVT PtrVT = getPointerTy(DAG.getDataLayout());
966  MVT XLenVT = Subtarget.getXLenVT();
967  unsigned XLenInBytes = Subtarget.getXLen() / 8;
968   // Used with varargs to accumulate store chains.
969  std::vector<SDValue> OutChains;
970 
971  // Assign locations to all of the incoming arguments.
972   SmallVector<CCValAssign, 16> ArgLocs;
973   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
974  analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
975 
976  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
977  CCValAssign &VA = ArgLocs[i];
978  assert(VA.getLocVT() == XLenVT && "Unhandled argument type");
979  SDValue ArgValue;
980  // Passing f64 on RV32D with a soft float ABI must be handled as a special
981  // case.
982  if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
983  ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
984  else if (VA.isRegLoc())
985  ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
986  else
987  ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
988 
989  if (VA.getLocInfo() == CCValAssign::Indirect) {
990  // If the original argument was split and passed by reference (e.g. i128
991  // on RV32), we need to load all parts of it here (using the same
992  // address).
993  InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
994  MachinePointerInfo()));
995  unsigned ArgIndex = Ins[i].OrigArgIndex;
996  assert(Ins[i].PartOffset == 0);
997  while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
998  CCValAssign &PartVA = ArgLocs[i + 1];
999  unsigned PartOffset = Ins[i + 1].PartOffset;
1000  SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
1001  DAG.getIntPtrConstant(PartOffset, DL));
1002  InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
1003  MachinePointerInfo()));
1004  ++i;
1005  }
1006  continue;
1007  }
1008  InVals.push_back(ArgValue);
1009  }
1010 
1011  if (IsVarArg) {
1012     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
1013     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
1014  const TargetRegisterClass *RC = &RISCV::GPRRegClass;
1015  MachineFrameInfo &MFI = MF.getFrameInfo();
1016  MachineRegisterInfo &RegInfo = MF.getRegInfo();
1017     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1018 
1019  // Offset of the first variable argument from stack pointer, and size of
1020  // the vararg save area. For now, the varargs save area is either zero or
1021  // large enough to hold a0-a7.
1022  int VaArgOffset, VarArgsSaveSize;
1023 
1024  // If all registers are allocated, then all varargs must be passed on the
1025  // stack and we don't need to save any argregs.
1026  if (ArgRegs.size() == Idx) {
1027  VaArgOffset = CCInfo.getNextStackOffset();
1028  VarArgsSaveSize = 0;
1029  } else {
1030  VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
1031  VaArgOffset = -VarArgsSaveSize;
1032  }
1033 
1034  // Record the frame index of the first variable argument
1035  // which is a value necessary to VASTART.
1036  int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
1037  RVFI->setVarArgsFrameIndex(FI);
1038 
1039  // If saving an odd number of registers then create an extra stack slot to
1040  // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
1041     // offsets to even-numbered registers remain 2*XLEN-aligned.
1042  if (Idx % 2) {
1043  FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes,
1044  true);
1045  VarArgsSaveSize += XLenInBytes;
1046  }
1047 
1048  // Copy the integer registers that may have been used for passing varargs
1049  // to the vararg save area.
1050  for (unsigned I = Idx; I < ArgRegs.size();
1051  ++I, VaArgOffset += XLenInBytes) {
1052  const unsigned Reg = RegInfo.createVirtualRegister(RC);
1053  RegInfo.addLiveIn(ArgRegs[I], Reg);
1054  SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
1055  FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
1056  SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
1057  SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
1058                                  MachinePointerInfo::getFixedStack(MF, FI));
1059     cast<StoreSDNode>(Store.getNode())
1060  ->getMemOperand()
1061  ->setValue((Value *)nullptr);
1062  OutChains.push_back(Store);
1063  }
1064  RVFI->setVarArgsSaveSize(VarArgsSaveSize);
1065  }
1066 
1067  // All stores are grouped in one node to allow the matching between
1068  // the size of Ins and InVals. This only happens for vararg functions.
1069  if (!OutChains.empty()) {
1070  OutChains.push_back(Chain);
1071  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
1072  }
1073 
1074  return Chain;
1075 }
1076 
1077 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
1078 /// for tail call optimization.
1079 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
1080 bool RISCVTargetLowering::IsEligibleForTailCallOptimization(
1081  CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
1082  const SmallVector<CCValAssign, 16> &ArgLocs) const {
1083 
1084  auto &Callee = CLI.Callee;
1085  auto CalleeCC = CLI.CallConv;
1086  auto IsVarArg = CLI.IsVarArg;
1087  auto &Outs = CLI.Outs;
1088  auto &Caller = MF.getFunction();
1089  auto CallerCC = Caller.getCallingConv();
1090 
1091  // Do not tail call opt functions with "disable-tail-calls" attribute.
1092  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
1093  return false;
1094 
1095  // Exception-handling functions need a special set of instructions to
1096  // indicate a return to the hardware. Tail-calling another function would
1097  // probably break this.
1098  // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
1099  // should be expanded as new function attributes are introduced.
1100  if (Caller.hasFnAttribute("interrupt"))
1101  return false;
1102 
1103  // Do not tail call opt functions with varargs.
1104  if (IsVarArg)
1105  return false;
1106 
1107  // Do not tail call opt if the stack is used to pass parameters.
1108  if (CCInfo.getNextStackOffset() != 0)
1109  return false;
1110 
1111  // Do not tail call opt if any parameters need to be passed indirectly.
1112  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
1113  // passed indirectly. So the address of the value will be passed in a
1114  // register, or if not available, then the address is put on the stack. In
1115  // order to pass indirectly, space on the stack often needs to be allocated
1116  // in order to store the value. In this case the CCInfo.getNextStackOffset()
1117  // != 0 check is not enough and we also need to check whether any entry in
1118  // ArgLocs is assigned CCValAssign::Indirect.
1119  for (auto &VA : ArgLocs)
1120  if (VA.getLocInfo() == CCValAssign::Indirect)
1121  return false;
1122 
1123  // Do not tail call opt if either caller or callee uses struct return
1124  // semantics.
1125  auto IsCallerStructRet = Caller.hasStructRetAttr();
1126  auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
1127  if (IsCallerStructRet || IsCalleeStructRet)
1128  return false;
1129 
1130  // Externally-defined functions with weak linkage should not be
1131  // tail-called. The behaviour of branch instructions in this situation (as
1132  // used for tail calls) is implementation-defined, so we cannot rely on the
1133  // linker replacing the tail call with a return.
1134  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1135  const GlobalValue *GV = G->getGlobal();
1136  if (GV->hasExternalWeakLinkage())
1137  return false;
1138  }
1139 
1140  // The callee has to preserve all registers the caller needs to preserve.
1141  const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
1142  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
1143  if (CalleeCC != CallerCC) {
1144  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
1145  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
1146  return false;
1147  }
1148 
1149  // Byval parameters hand the function a pointer directly into the stack area
1150  // we want to reuse during a tail call. Working around this *is* possible
1151  // but less efficient and uglier in LowerCall.
1152  for (auto &Arg : Outs)
1153  if (Arg.Flags.isByVal())
1154  return false;
1155 
1156  return true;
1157 }
1158 
1159 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
1160 // and output parameter nodes.
1161 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
1162  SmallVectorImpl<SDValue> &InVals) const {
1163  SelectionDAG &DAG = CLI.DAG;
1164  SDLoc &DL = CLI.DL;
1165  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1166  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1167  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1168  SDValue Chain = CLI.Chain;
1169  SDValue Callee = CLI.Callee;
1170  bool &IsTailCall = CLI.IsTailCall;
1171  CallingConv::ID CallConv = CLI.CallConv;
1172  bool IsVarArg = CLI.IsVarArg;
1173  EVT PtrVT = getPointerTy(DAG.getDataLayout());
1174  MVT XLenVT = Subtarget.getXLenVT();
1175 
1176  MachineFunction &MF = DAG.getMachineFunction();
1177 
1178  // Analyze the operands of the call, assigning locations to each operand.
1179   SmallVector<CCValAssign, 16> ArgLocs;
1180   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1181  analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
1182 
1183  // Check if it's really possible to do a tail call.
1184  if (IsTailCall)
1185  IsTailCall = IsEligibleForTailCallOptimization(ArgCCInfo, CLI, MF,
1186  ArgLocs);
1187 
1188  if (IsTailCall)
1189  ++NumTailCalls;
1190  else if (CLI.CS && CLI.CS.isMustTailCall())
1191  report_fatal_error("failed to perform tail call elimination on a call "
1192  "site marked musttail");
1193 
1194  // Get a count of how many bytes are to be pushed on the stack.
1195  unsigned NumBytes = ArgCCInfo.getNextStackOffset();
1196 
1197  // Create local copies for byval args
1198  SmallVector<SDValue, 8> ByValArgs;
1199  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1200  ISD::ArgFlagsTy Flags = Outs[i].Flags;
1201  if (!Flags.isByVal())
1202  continue;
1203 
1204  SDValue Arg = OutVals[i];
1205  unsigned Size = Flags.getByValSize();
1206  unsigned Align = Flags.getByValAlign();
1207 
1208  int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false);
1209  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
1210  SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
1211 
1212  Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align,
1213  /*IsVolatile=*/false,
1214  /*AlwaysInline=*/false,
1215  IsTailCall, MachinePointerInfo(),
1216  MachinePointerInfo());
1217  ByValArgs.push_back(FIPtr);
1218  }
1219 
1220  if (!IsTailCall)
1221  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
1222 
1223  // Copy argument values to their designated locations.
1224   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1225   SmallVector<SDValue, 8> MemOpChains;
1226  SDValue StackPtr;
1227  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
1228  CCValAssign &VA = ArgLocs[i];
1229  SDValue ArgValue = OutVals[i];
1230  ISD::ArgFlagsTy Flags = Outs[i].Flags;
1231 
1232  // Handle passing f64 on RV32D with a soft float ABI as a special case.
1233  bool IsF64OnRV32DSoftABI =
1234  VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
1235  if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
1236  SDValue SplitF64 = DAG.getNode(
1237  RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
1238  SDValue Lo = SplitF64.getValue(0);
1239  SDValue Hi = SplitF64.getValue(1);
1240 
1241  unsigned RegLo = VA.getLocReg();
1242  RegsToPass.push_back(std::make_pair(RegLo, Lo));
1243 
1244  if (RegLo == RISCV::X17) {
1245  // Second half of f64 is passed on the stack.
1246  // Work out the address of the stack slot.
1247  if (!StackPtr.getNode())
1248  StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
1249  // Emit the store.
1250  MemOpChains.push_back(
1251  DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
1252  } else {
1253  // Second half of f64 is passed in another GPR.
1254  unsigned RegHigh = RegLo + 1;
1255  RegsToPass.push_back(std::make_pair(RegHigh, Hi));
1256  }
1257  continue;
1258  }
1259 
1260  // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
1261  // as any other MemLoc.
1262 
1263  // Promote the value if needed.
1264  // For now, only handle fully promoted and indirect arguments.
1265  switch (VA.getLocInfo()) {
1266  case CCValAssign::Full:
1267  break;
1268  case CCValAssign::BCvt:
1269  ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), ArgValue);
1270  break;
1271  case CCValAssign::Indirect: {
1272  // Store the argument in a stack slot and pass its address.
1273  SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
1274  int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
1275  MemOpChains.push_back(
1276  DAG.getStore(Chain, DL, ArgValue, SpillSlot,
1277                        MachinePointerInfo::getFixedStack(MF, FI)));
1278       // If the original argument was split (e.g. i128), we need
1279  // to store all parts of it here (and pass just one address).
1280  unsigned ArgIndex = Outs[i].OrigArgIndex;
1281  assert(Outs[i].PartOffset == 0);
1282  while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
1283  SDValue PartValue = OutVals[i + 1];
1284  unsigned PartOffset = Outs[i + 1].PartOffset;
1285  SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
1286  DAG.getIntPtrConstant(PartOffset, DL));
1287  MemOpChains.push_back(
1288  DAG.getStore(Chain, DL, PartValue, Address,
1289                          MachinePointerInfo::getFixedStack(MF, FI)));
1290         ++i;
1291  }
1292  ArgValue = SpillSlot;
1293  break;
1294  }
1295  default:
1296  llvm_unreachable("Unknown loc info!");
1297  }
1298 
1299  // Use local copy if it is a byval arg.
1300  if (Flags.isByVal())
1301  ArgValue = ByValArgs[j++];
1302 
1303  if (VA.isRegLoc()) {
1304  // Queue up the argument copies and emit them at the end.
1305  RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
1306  } else {
1307  assert(VA.isMemLoc() && "Argument not register or memory");
1308  assert(!IsTailCall && "Tail call not allowed if stack is used "
1309  "for passing parameters");
1310 
1311  // Work out the address of the stack slot.
1312  if (!StackPtr.getNode())
1313  StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
1314  SDValue Address =
1315  DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
1316  DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
1317 
1318  // Emit the store.
1319  MemOpChains.push_back(
1320  DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
1321  }
1322  }
1323 
1324  // Join the stores, which are independent of one another.
1325  if (!MemOpChains.empty())
1326  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1327 
1328  SDValue Glue;
1329 
1330  // Build a sequence of copy-to-reg nodes, chained and glued together.
1331  for (auto &Reg : RegsToPass) {
1332  Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
1333  Glue = Chain.getValue(1);
1334  }
1335 
1336  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
1337  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
1338  // split it and then direct call can be matched by PseudoCALL.
1339  if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
1340  Callee = DAG.getTargetGlobalAddress(S->getGlobal(), DL, PtrVT, 0, 0);
1341  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1342  Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, 0);
1343  }
1344 
1345  // The first call operand is the chain and the second is the target address.
1346   SmallVector<SDValue, 8> Ops;
1347   Ops.push_back(Chain);
1348  Ops.push_back(Callee);
1349 
1350  // Add argument registers to the end of the list so that they are
1351  // known live into the call.
1352  for (auto &Reg : RegsToPass)
1353  Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
1354 
1355  if (!IsTailCall) {
1356  // Add a register mask operand representing the call-preserved registers.
1357  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
1358  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
1359  assert(Mask && "Missing call preserved mask for calling convention");
1360  Ops.push_back(DAG.getRegisterMask(Mask));
1361  }
1362 
1363  // Glue the call to the argument copies, if any.
1364  if (Glue.getNode())
1365  Ops.push_back(Glue);
1366 
1367  // Emit the call.
1368  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1369 
1370  if (IsTailCall) {
1371     MF.getFrameInfo().setHasTailCall();
1372     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
1373  }
1374 
1375  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
1376  Glue = Chain.getValue(1);
1377 
1378  // Mark the end of the call, which is glued to the call itself.
1379  Chain = DAG.getCALLSEQ_END(Chain,
1380  DAG.getConstant(NumBytes, DL, PtrVT, true),
1381  DAG.getConstant(0, DL, PtrVT, true),
1382  Glue, DL);
1383  Glue = Chain.getValue(1);
1384 
1385  // Assign locations to each value returned by this call.
1386   SmallVector<CCValAssign, 16> RVLocs;
1387   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
1388  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
1389 
1390  // Copy all of the result registers out of their specified physreg.
1391  for (auto &VA : RVLocs) {
1392  // Copy the value out
1393  SDValue RetValue =
1394  DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
1395  // Glue the RetValue to the end of the call sequence
1396  Chain = RetValue.getValue(1);
1397  Glue = RetValue.getValue(2);
1398  if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
1399  assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
1400  SDValue RetValue2 =
1401  DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
1402  Chain = RetValue2.getValue(1);
1403  Glue = RetValue2.getValue(2);
1404  RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
1405  RetValue2);
1406  }
1407 
1408  switch (VA.getLocInfo()) {
1409  default:
1410  llvm_unreachable("Unknown loc info!");
1411  case CCValAssign::Full:
1412  break;
1413  case CCValAssign::BCvt:
1414  RetValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), RetValue);
1415  break;
1416  }
1417 
1418  InVals.push_back(RetValue);
1419  }
1420 
1421  return Chain;
1422 }
1423 
1424 bool RISCVTargetLowering::CanLowerReturn(
1425  CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
1426  const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
1427   SmallVector<CCValAssign, 16> RVLocs;
1428   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
1429  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1430  MVT VT = Outs[i].VT;
1431  ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
1432  if (CC_RISCV(MF.getDataLayout(), i, VT, VT, CCValAssign::Full, ArgFlags,
1433  CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
1434  return false;
1435  }
1436  return true;
1437 }
1438 
1439 static SDValue packIntoRegLoc(SelectionDAG &DAG, SDValue Val,
1440                               const CCValAssign &VA, const SDLoc &DL) {
1441  EVT LocVT = VA.getLocVT();
1442 
1443  switch (VA.getLocInfo()) {
1444  default:
1445  llvm_unreachable("Unexpected CCValAssign::LocInfo");
1446  case CCValAssign::Full:
1447  break;
1448  case CCValAssign::BCvt:
1449  Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
1450  break;
1451  }
1452  return Val;
1453 }
1454 
1455 SDValue
1456 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1457  bool IsVarArg,
1458  const SmallVectorImpl<ISD::OutputArg> &Outs,
1459  const SmallVectorImpl<SDValue> &OutVals,
1460  const SDLoc &DL, SelectionDAG &DAG) const {
1461  // Stores the assignment of the return value to a location.
1462   SmallVector<CCValAssign, 16> RVLocs;
1463 
1464  // Info about the registers and stack slot.
1465  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
1466  *DAG.getContext());
1467 
1468  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
1469  nullptr);
1470 
1471  SDValue Glue;
1472  SmallVector<SDValue, 4> RetOps(1, Chain);
1473 
1474  // Copy the result values into the output registers.
1475  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
1476  SDValue Val = OutVals[i];
1477  CCValAssign &VA = RVLocs[i];
1478  assert(VA.isRegLoc() && "Can only return in registers!");
1479 
1480  if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
1481  // Handle returning f64 on RV32D with a soft float ABI.
1482  assert(VA.isRegLoc() && "Expected return via registers");
1483       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
1484                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
1485  SDValue Lo = SplitF64.getValue(0);
1486  SDValue Hi = SplitF64.getValue(1);
1487  unsigned RegLo = VA.getLocReg();
1488  unsigned RegHi = RegLo + 1;
1489  Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
1490  Glue = Chain.getValue(1);
1491  RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
1492  Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
1493  Glue = Chain.getValue(1);
1494  RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
1495  } else {
1496  // Handle a 'normal' return.
1497  Val = packIntoRegLoc(DAG, Val, VA, DL);
1498  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
1499 
1500  // Guarantee that all emitted copies are stuck together.
1501  Glue = Chain.getValue(1);
1502  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1503  }
1504  }
1505 
1506  RetOps[0] = Chain; // Update chain.
1507 
1508  // Add the glue node if we have it.
1509  if (Glue.getNode()) {
1510  RetOps.push_back(Glue);
1511  }
1512 
1513  // Interrupt service routines use different return instructions.
1514  const Function &Func = DAG.getMachineFunction().getFunction();
1515  if (Func.hasFnAttribute("interrupt")) {
1516  if (!Func.getReturnType()->isVoidTy())
1518  "Functions with the interrupt attribute must have void return type!");
1519 
1520  MachineFunction &MF = DAG.getMachineFunction();
1521  StringRef Kind =
1522  MF.getFunction().getFnAttribute("interrupt").getValueAsString();
1523 
1524  unsigned RetOpc;
1525  if (Kind == "user")
1526  RetOpc = RISCVISD::URET_FLAG;
1527  else if (Kind == "supervisor")
1528  RetOpc = RISCVISD::SRET_FLAG;
1529  else
1530  RetOpc = RISCVISD::MRET_FLAG;
1531 
1532  return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
1533  }
1534 
1535  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
1536 }
1537 
1538 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
1539  switch ((RISCVISD::NodeType)Opcode) {
1540   case RISCVISD::FIRST_NUMBER:
1541     break;
1542  case RISCVISD::RET_FLAG:
1543  return "RISCVISD::RET_FLAG";
1544  case RISCVISD::URET_FLAG:
1545  return "RISCVISD::URET_FLAG";
1546  case RISCVISD::SRET_FLAG:
1547  return "RISCVISD::SRET_FLAG";
1548  case RISCVISD::MRET_FLAG:
1549  return "RISCVISD::MRET_FLAG";
1550  case RISCVISD::CALL:
1551  return "RISCVISD::CALL";
1552  case RISCVISD::SELECT_CC:
1553  return "RISCVISD::SELECT_CC";
1555  return "RISCVISD::BuildPairF64";
1556  case RISCVISD::SplitF64:
1557  return "RISCVISD::SplitF64";
1558  case RISCVISD::TAIL:
1559  return "RISCVISD::TAIL";
1560  }
1561  return nullptr;
1562 }
1563 
1564 std::pair<unsigned, const TargetRegisterClass *>
1565 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1566                                                   StringRef Constraint,
1567  MVT VT) const {
1568  // First, see if this is a constraint that directly corresponds to a
1569  // RISCV register class.
1570  if (Constraint.size() == 1) {
1571  switch (Constraint[0]) {
1572  case 'r':
1573  return std::make_pair(0U, &RISCV::GPRRegClass);
1574  default:
1575  break;
1576  }
1577  }
1578 
1579  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1580 }
1581 
1582 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
1583                                                    Instruction *Inst,
1584  AtomicOrdering Ord) const {
1585  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
1586  return Builder.CreateFence(Ord);
1587  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
1588  return Builder.CreateFence(AtomicOrdering::Release);
1589  return nullptr;
1590 }
1591 
1592 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
1593                                                     Instruction *Inst,
1594  AtomicOrdering Ord) const {
1595  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
1596  return Builder.CreateFence(AtomicOrdering::Acquire);
1597  return nullptr;
1598 }
Return the ValueType of the result of SETCC operations.
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:407
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
bool isMemLoc() const
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:210
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:428
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
The address of a basic block.
Definition: Constants.h:836
A description of a memory reference used in the backend.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the &#39;usesCustomInserter&#39; fla...
const HexagonInstrInfo * TII
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:451
unsigned getXLen() const
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
void addLoc(const CCValAssign &V)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:197
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:731
static MachineBasicBlock * emitSplitF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB)
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn&#39;t supported on the target and indicate what to d...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:403
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:457
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:398
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:73
const BlockAddress * getBlockAddress() const
LocInfo getLocInfo() const
static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
This represents a list of ValueType&#39;s that has been intern&#39;d by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
AtomicOrdering
Atomic ordering for LLVM&#39;s memory model.
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:658
Class to represent function types.
Definition: DerivedTypes.h:103
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:395
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
SDValue getRegisterMask(const uint32_t *RegMask)
bool arg_empty() const
Definition: Function.h:685
SmallVectorImpl< CCValAssign > & getPendingLocs()
bool isTruncateFree(Type *SrcTy, Type *DstTy) const override
Return true if it&#39;s free to truncate a value of type FromTy to type ToTy.
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:201
bool hasStdExtA() const
static MachineBasicBlock * emitBuildPairF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB)
SmallVectorImpl< ISD::ArgFlagsTy > & getPendingArgFlags()
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:913
virtual const TargetInstrInfo * getInstrInfo() const
amdgpu Simplify well known AMD library false Value * Callee
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
unsigned getByValSize() const
unsigned getKillRegState(bool B)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
TargetInstrInfo - Interface to description of machine instruction set.
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC)
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:141
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function...
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:155
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
unsigned getOrigAlign() const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:69
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:149
bool hasStdExtF() const
This is an important base class in LLVM.
Definition: Constant.h:42
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
Definition: ISDOpcodes.h:691
Instruction * emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:888
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
Definition: SelectionDAG.h:837
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
bool isAcquireOrStronger(AtomicOrdering ao)
bool hasStdExtM() const
unsigned getLargestLegalIntTypeSizeInBits() const
Returns the size of largest legal integer type size, or 0 if none are set.
Definition: DataLayout.cpp:764
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
void setPrefFunctionAlignment(unsigned Align)
Set the target&#39;s preferred function alignment.
self_iterator getIterator()
Definition: ilist_node.h:82
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y)...
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:682
const MachineInstrBuilder & addFrameIndex(int Idx) const
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it&#39;s implicit...
Extended Value Type.
Definition: ValueTypes.h:34
const AMDGPUAS & AS
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isPositionIndependent() const
size_t size() const
Definition: SmallVector.h:53
This class contains a discriminated union of information about pointers in memory operands...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
SDValue CreateStackTemporary(EVT VT, unsigned minAlign=1)
Create a stack temporary, suitable for holding the specified value type.
The memory access writes data.
unsigned getFrameRegister(const MachineFunction &MF) const override
bool isReleaseOrStronger(AtomicOrdering ao)
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:634
C - The default llvm calling convention, compatible with C.
Definition: CallingConv.h:35
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:50
static const MCPhysReg ArgGPRs[]
bool hasStdExtD() const
Iterator for intrusive lists based on ilist_node.
CCState - This class holds information needed while lowering arguments and return values...
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:199
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:222
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:847
CCValAssign - Represent assignment of one arg/retval to a location.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
Definition: STLExtras.h:897
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition: ValueTypes.h:96
const DataFlowGraph & G
Definition: RDFGraph.cpp:211
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Instruction * emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:385
Type * getReturnType() const
Definition: DerivedTypes.h:124
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
const Constant * getConstVal() const
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
int64_t getImm() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:674
const Function & getFunction() const
Return the LLVM function that this machine code represents.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:133
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:924
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:150
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
unsigned getByValAlign() const
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:392
void setMinFunctionAlignment(unsigned Align)
Set the target&#39;s minimum function alignment (in log2(bytes))
RISCVTargetLowering(const TargetMachine &TM, const RISCVSubtarget &STI)
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Store the specified register of the given register class to the specified stack frame index...
bool is64Bit() const
amdgpu Simplify well known AMD library false Value Value * Arg
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:428
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:601
Representation of each machine instruction.
Definition: MachineInstr.h:64
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Definition: ISDOpcodes.h:687
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:668
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:151
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
FenceInst * CreateFence(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, const Twine &Name="")
Definition: IRBuilder.h:1353
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB &#39;Other&#39; at the position From, and insert it into this MBB right before &#39;...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:700
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
unsigned getLocMemOffset() const
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:206
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:56
StringRef getValueAsString() const
Return the attribute&#39;s value as a string.
Definition: Attributes.cpp:195
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:458
static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy)
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:45
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2)
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:43
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
static CCValAssign getMem(unsigned ValNo, MVT ValVT, unsigned Offset, MVT LocVT, LocInfo HTP)
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
unsigned getOpcode() const
SDValue getValue(unsigned R) const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
bool isRegLoc() const
const unsigned Kind
bool hasStdExtC() const
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void setReturnAddressIsTaken(bool s)
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:115
LLVM Value Representation.
Definition: Value.h:73
SDValue getRegister(unsigned Reg, EVT VT)
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
constexpr char Size[]
Key for Kernel::Arg::Metadata::mSize.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E&#39;s largest value.
Definition: BitmaskEnum.h:81
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.h:317
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:59
IRTranslator LLVM IR MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:415
const SDValue & getOperand(unsigned i) const
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
unsigned getLocReg() const
#define LLVM_DEBUG(X)
Definition: Debug.h:123
unsigned AllocateReg(unsigned Reg)
AllocateReg - Attempt to allocate one register.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:408
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
static SDValue packIntoRegLoc(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
unsigned AllocateStack(unsigned Size, unsigned Align)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
Function Alias Analysis false
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
Load the specified register of the given register class from the specified stack frame index...
LLVMContext * getContext() const
Definition: SelectionDAG.h:404
unsigned createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:617
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:356
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
Definition: ISDOpcodes.h:586