CallLowering.cpp
//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

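// Translate an IR call site into the target-independent CallLoweringInfo
// representation and delegate to the CallLoweringInfo-based lowerCall hook.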
bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();

  // First step is to marshall all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CS.getFunctionType()->getNumParams();
  for (auto &Arg : CS.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  if (const Function *F = CS.getCalledFunction())
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, CS.getType(), ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CS);

  Info.KnownCallees =
      CS.getInstruction()->getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CS.getCallingConv();
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CS.isMustTailCall();
  Info.IsTailCall = CS.isTailCall() &&
                    isInTailCallPosition(CS, MIRBuilder.getMF().getTarget());
  Info.IsVarArg = CS.getFunctionType()->isVarArg();
  return lowerCall(MIRBuilder, Info);
}

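// Translate the IR attributes on the parameter or return value at OpIdx into
// the ISD::ArgFlagsTy bits that the calling-convention code expects.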
template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Flags.setInAlloca();

  if (Flags.isByVal() || Flags.isInAlloca()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, alignment should be passed from FE. BE will guess if
    // this info is not there but there are cases it cannot get right.
    unsigned FrameAlign;
    if (FuncInfo.getParamAlignment(OpIdx - 2))
      FrameAlign = FuncInfo.getParamAlignment(OpIdx - 2);
    else
      FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
    Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Flags.setNest();
  Flags.setOrigAlign(DL.getABITypeAlignment(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallInst &FuncInfo) const;

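// Pack the values in SrcRegs into a single vreg of the LLT corresponding to
// PackedTy by inserting each source at its offset within the aggregate.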
Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}

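// Split the single vreg SrcReg, which has the aggregate type PackedTy, back
// into its constituent values by extracting each component into DstRegs.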
void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

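// Convenience overload: build a CCState for the current function and forward
// to the main handleAssignments implementation below.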
bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

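// Assign a location to every argument in Args (splitting values that the
// calling convention passes in multiple registers), then let the Handler
// materialize the copies, loads, or stores for each assignment.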
bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                          Args[i].Flags[0], CCInfo)) {
      if (!CurVT.isValid())
        return false;
      MVT NewVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), EVT(CurVT));

      // If we need to split the type over multiple regs, check it's a scenario
      // we currently support.
      unsigned NumParts = TLI->getNumRegistersForCallingConv(
          F.getContext(), F.getCallingConv(), CurVT);
      if (NumParts > 1) {
        if (CurVT.isVector())
          return false;
        // For now only handle exact splits.
        if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits())
          return false;
      }

      // For incoming arguments (return values), we could have values in
      // physregs (or memlocs) which we want to extract and copy to vregs.
      // During this, we might have to deal with the LLT being split across
      // multiple regs, so we have to record this information for later.
      //
      // If we have outgoing args, then we have the opposite case. We have a
      // vreg with an LLT which we want to assign to a physical location, and
      // we might have to record that the value has to be split later.
      if (Handler.isIncomingArgumentHandler()) {
        if (NumParts == 1) {
          // Try to use the register type if we couldn't assign the VT.
          if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                                Args[i].Flags[0], CCInfo))
            return false;
        } else {
          // We're handling an incoming arg which is split over multiple regs.
          // E.g. returning an s128 on AArch64.
          ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
          Args[i].OrigRegs.push_back(Args[i].Regs[0]);
          Args[i].Regs.clear();
          Args[i].Flags.clear();
          LLT NewLLT = getLLTForMVT(NewVT);
          // For each split register, create and assign a vreg that will store
          // the incoming component of the larger value. These will later be
          // merged to form the final vreg.
          for (unsigned Part = 0; Part < NumParts; ++Part) {
            Register Reg =
                MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT);
            ISD::ArgFlagsTy Flags = OrigFlags;
            if (Part == 0) {
              Flags.setSplit();
            } else {
              Flags.setOrigAlign(1);
              if (Part == NumParts - 1)
                Flags.setSplitEnd();
            }
            Args[i].Regs.push_back(Reg);
            Args[i].Flags.push_back(Flags);
            if (Handler.assignArg(i + Part, NewVT, NewVT, CCValAssign::Full,
                                  Args[i], Args[i].Flags[Part], CCInfo)) {
              // Still couldn't assign this smaller part type for some reason.
              return false;
            }
          }
        }
      } else {
        // Handling an outgoing arg that might need to be split.
        if (NumParts < 2)
          return false; // Don't know how to deal with this type combination.

        // This type is passed via multiple registers in the calling convention.
        // We need to extract the individual parts.
        Register LargeReg = Args[i].Regs[0];
        LLT SmallTy = LLT::scalar(NewVT.getSizeInBits());
        auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg);
        assert(Unmerge->getNumOperands() == NumParts + 1);
        ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
        // We're going to replace the regs and flags with the split ones.
        Args[i].Regs.clear();
        Args[i].Flags.clear();
        for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
          ISD::ArgFlagsTy Flags = OrigFlags;
          if (PartIdx == 0) {
            Flags.setSplit();
          } else {
            Flags.setOrigAlign(1);
            if (PartIdx == NumParts - 1)
              Flags.setSplitEnd();
          }
          Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
          Args[i].Flags.push_back(Flags);
          if (Handler.assignArg(i + PartIdx, NewVT, NewVT, CCValAssign::Full,
                                Args[i], Args[i].Flags[PartIdx], CCInfo))
            return false;
        }
      }
    }
  }

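  // All arguments now have assigned locations. Walk the locations and let the
  // handler emit the actual copies, merges, loads, or stores for each one.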
  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      continue;
    }

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    MVT OrigVT = MVT::getVT(Args[i].Ty);
    MVT VAVT = VA.getValVT();
    if (VA.isRegLoc()) {
      if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) {
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
          // Expected to be multiple regs for a single incoming arg.
          unsigned NumArgRegs = Args[i].Regs.size();
          if (NumArgRegs < 2)
            return false;

          assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
                 "Too many regs for number of args");
          for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
            // There should be Regs.size() ArgLocs per argument.
            VA = ArgLocs[j + Part];
            Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
          }
          j += NumArgRegs - 1;
          // Merge the split registers into the expected larger result vreg
          // of the original call.
          MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
          continue;
        }
        const LLT VATy(VAVT);
        Register NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          const LLT OrigTy(OrigVT);
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts");
            return false;
          }
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          MIRBuilder.buildTrunc(ArgReg, {NewReg}).getReg(0);
        }
      } else if (!Handler.isIncomingArgumentHandler()) {
        assert((j + (Args[i].Regs.size() - 1)) < ArgLocs.size() &&
               "Too many regs for number of args");
        // This is an outgoing argument that might have been split.
        for (unsigned Part = 0; Part < Args[i].Regs.size(); ++Part) {
          // There should be Regs.size() ArgLocs per argument.
          VA = ArgLocs[j + Part];
          Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
        }
        j += Args[i].Regs.size() - 1;
      } else {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      // Don't currently support loading/storing a type that needs to be split
      // to the stack. Should be easy, just not implemented yet.
      if (Args[i].Regs.size() > 1) {
        LLVM_DEBUG(
            dbgs()
            << "Load/store a split arg to/from the stack not implemented yet");
        return false;
      }
      MVT VT = MVT::getVT(Args[i].Ty);
      unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
                                      : alignTo(VT.getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(ArgReg, StackAddr, Size, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}

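// Run the given CCAssignFn over each value in Args, recording the resulting
// locations in CCState. Returns false as soon as any value cannot be handled.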
bool CallLowering::analyzeArgInfo(CCState &CCState,
                                  SmallVectorImpl<ArgInfo> &Args,
                                  CCAssignFn &Fn) const {
  for (unsigned i = 0, e = Args.size(); i < e; ++i) {
    MVT VT = MVT::getVT(Args[i].Ty);
    if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) {
      // Bail out on anything we can't handle.
      LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString()
                        << " (arg number = " << i << ")\n");
      return false;
    }
  }
  return true;
}

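// Determine whether the values in InArgs would be assigned to the same
// locations under the callee's and the caller's calling conventions.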
bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     CCAssignFn &CalleeAssignFn,
                                     CCAssignFn &CallerAssignFn) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext());
  if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFn))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext());
  if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFn))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then we are done.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

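// Extend ValReg to the location type recorded in VA, emitting whichever
// extension the assignment's LocInfo requires.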
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits())
    return ValReg;
  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB->getOperand(0).getReg();
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}