//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

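// Assign a single virtual register to the location the calling convention
// chose for it: either a physical register or a stack slot.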
bool MipsCallLowering::MipsHandler::assign(unsigned VReg, const CCValAssign &VA,
                                           const EVT &VT) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA, VT);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<unsigned> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex,
                                                const EVT &VT) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT))
      return false;
  return true;
}

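// G_MERGE_VALUES / G_UNMERGE_VALUES order their pieces least significant
// first, while argument locations on big-endian targets hold the most
// significant piece first, so reverse the order in that case.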
void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<unsigned> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}

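// Walk all arguments. A value that the calling convention splits across
// several registers gets one virtual register per piece and is forwarded to
// handleSplit(); everything else is assigned directly.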
bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<unsigned, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Reg, VT))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Reg, ArgLocs[ArgLocsIndex], VT))
        return false;
    }
  }
  return true;
}

namespace {
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg, const EVT &VT) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(unsigned Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    unsigned Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

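// Like IncomingValueHandler, but the physical registers that carry call
// results are recorded as implicit defs on the call instruction rather than
// as block live-ins.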
class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

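// O32 may pass f64 in a pair of A-registers and f32 in a single A-register;
// rebuild those into FPR values here. Integer locations are copied and, if
// the location type was extended, truncated back to the value type.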
void IncomingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
  unsigned PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::BuildPairF64_64
                                    : Mips::BuildPairF64)
        .addDef(ValVReg)
        .addUse(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(PhysReg + (STI.isLittle() ? 1 : 0))
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
    markPhysRegUsed(PhysReg + 1);
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MTC1)
        .addDef(ValVReg)
        .addUse(PhysReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
  } else {
    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    markPhysRegUsed(PhysReg);
  }
}

unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

  unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}

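// Stack-passed arguments are loaded through a fixed frame object; extended
// locations are loaded as 32-bit values and truncated to the original type.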
void IncomingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}

bool IncomingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

namespace {
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg, const EVT &VT) override;

  unsigned extendRegister(unsigned ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

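// Mirror image of the incoming case: outgoing f64 values are split into a
// pair of A-registers with ExtractElementF64, f32 values are moved with MFC1,
// and integer values are extended as required before being copied.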
void OutgoingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  unsigned PhysReg = VA.getLocReg();
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 1 : 0))
        .addUse(ValVReg)
        .addImm(1)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(ValVReg)
        .addImm(0)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MFC1)
        .addDef(PhysReg)
        .addUse(ValVReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else {
    unsigned ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
}

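// Outgoing stack arguments are addressed as SP plus the location offset,
// computed with a G_GEP off a copy of the stack pointer.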
unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  unsigned SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Mips::SP);

  unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg;
}

void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  unsigned Addr = getStackAddress(VA, MMO);
  unsigned ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

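// Widen a value to its location type according to the extension kind the
// calling convention recorded for it.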
unsigned OutgoingValueHandler::extendRegister(unsigned ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO : handle upper extends
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

bool OutgoingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;

  return true;
}

static bool isSupportedType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  return false;
}

static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // VT being wider than RegisterVT does not mean information is lost because
  // RegisterVT cannot hold VT; it means VT is split into multiple registers
  // of type RegisterVT.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

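// Rewrite each CCValAssign so its LocInfo matches what determineLocInfo
// derives from the corresponding ISD argument's register type and flags.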
template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

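// Lower a return: split the returned value per the calling convention,
// analyze the pieces with MipsCCState and copy each piece into its return
// register before emitting RetRA.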
bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<unsigned> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices);
    }

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

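// Lower incoming formal arguments. Vararg functions and unsupported argument
// types return false so that the SelectionDAG fallback handles them.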
bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                            const Function &F,
                                            ArrayRef<unsigned> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  if (F.isVarArg()) {
    return false;
  }

  for (auto &Arg : F.args()) {
    if (!isSupportedType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  return true;
}

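// Lower an outgoing call. Only the C calling convention is supported and
// byval/sret arguments bail out. The call is bracketed by ADJCALLSTACKDOWN /
// ADJCALLSTACKUP; arguments are placed by OutgoingValueHandler and results
// are read back by CallReturnHandler.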
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallingConv::ID CallConv,
                                 const MachineOperand &Callee,
                                 const ArgInfo &OrigRet,
                                 ArrayRef<ArgInfo> OrigArgs) const {

  if (CallConv != CallingConv::C)
    return false;

  for (auto &Arg : OrigArgs) {
    if (!isSupportedType(Arg.Ty))
      return false;
    if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
      return false;
  }
  if (OrigRet.Reg && !isSupportedType(OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  const bool IsCalleeGlobalPIC =
      Callee.isGlobal() && TM.isPositionIndependent();

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
      Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  if (IsCalleeGlobalPIC) {
    unsigned CalleeReg =
        MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
    MachineInstr *CalleeGlobalValue =
        MIRBuilder.buildGlobalValue(CalleeReg, Callee.getGlobal());
    if (!Callee.getGlobal()->hasLocalLinkage())
      CalleeGlobalValue->getOperand(1).setTargetFlags(MipsII::MO_GOT_CALL);
    MIB.addUse(CalleeReg);
  } else
    MIB.add(Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
  const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  if (IsCalleeGlobalPIC) {
    MIRBuilder.buildCopy(
        Mips::GP,
        MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
    MIB.addDef(Mips::GP, RegState::Implicit);
  }
  MIRBuilder.insertInstr(MIB);
  if (MIB->getOpcode() == Mips::JALRPseudo) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    MIB.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                         *STI.getRegBankInfo());
  }

  if (OrigRet.Reg) {

    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

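// Produce one ISD::InputArg/OutputArg per register the calling convention
// uses for each ArgInfo, so the pieces can be analyzed by MipsCCState.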
template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags;

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(1);

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

void MipsCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  // TODO : perform structure and array split. For now we only deal with
  // types that pass the isSupportedType check.
  SplitArgs.push_back(OrigArg);
  SplitArgsOrigIndices.push_back(OriginalIndex);
}