LLVM  14.0.0git
CallLowering.cpp
Go to the documentation of this file.
1 //===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements some simple delegations needed for call lowering.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/Analysis.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Instructions.h"
24 #include "llvm/IR/LLVMContext.h"
25 #include "llvm/IR/Module.h"
27 
28 #define DEBUG_TYPE "call-lowering"
29 
30 using namespace llvm;
31 
// Out-of-line empty virtual method: the customary LLVM "anchor" idiom so the
// class's vtable is emitted in exactly one translation unit.
32 void CallLowering::anchor() {}
33 
34 /// Helper function which updates \p Flags when \p AttrFn returns true.
// NOTE(review): the scrape dropped the line that names this helper; from the
// two call sites below it is addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags, ...)
// — confirm against the upstream file.
35 static void
37  const std::function<bool(Attribute::AttrKind)> &AttrFn) {
// One straight-line mapping per IR parameter attribute: query the attribute
// through AttrFn and mirror it onto the target-independent argument flags.
38  if (AttrFn(Attribute::SExt))
39  Flags.setSExt();
40  if (AttrFn(Attribute::ZExt))
41  Flags.setZExt();
42  if (AttrFn(Attribute::InReg))
43  Flags.setInReg();
44  if (AttrFn(Attribute::StructRet))
45  Flags.setSRet();
46  if (AttrFn(Attribute::Nest))
47  Flags.setNest();
48  if (AttrFn(Attribute::ByVal))
49  Flags.setByVal();
50  if (AttrFn(Attribute::Preallocated))
51  Flags.setPreallocated();
52  if (AttrFn(Attribute::InAlloca))
53  Flags.setInAlloca();
54  if (AttrFn(Attribute::Returned))
55  Flags.setReturned();
56  if (AttrFn(Attribute::SwiftSelf))
57  Flags.setSwiftSelf();
58  if (AttrFn(Attribute::SwiftAsync))
59  Flags.setSwiftAsync();
60  if (AttrFn(Attribute::SwiftError))
61  Flags.setSwiftError();
62 }
63 
// CallLowering::getAttributesForArgIdx — builds the ISD::ArgFlagsTy for call
// argument \p ArgIdx by querying the call site's per-parameter attributes.
// NOTE(review): the leading signature line is missing from the scrape
// (presumably "ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(
// const CallBase &Call," per the lambda capture below) — confirm upstream.
65  unsigned ArgIdx) const {
66  ISD::ArgFlagsTy Flags;
// Delegate the attribute->flag mapping to the shared helper; the lambda asks
// the call site whether each attribute is present on this parameter.
67  addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
68  return Call.paramHasAttr(ArgIdx, Attr);
69  });
70  return Flags;
71 }
72 
// CallLowering::addArgFlagsFromAttributes — merges the attributes stored at
// index \p OpIdx of \p Attrs into \p Flags (signature start missing from the
// scrape; Flags is presumably an ISD::ArgFlagsTy& — confirm upstream).
74  const AttributeList &Attrs,
75  unsigned OpIdx) const {
76  addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
77  return Attrs.hasAttributeAtIndex(OpIdx, Attr);
78  });
79 }
80 
// CallLowering::lowerCall (CallBase overload) — translates an IR call site
// into a CallLoweringInfo and forwards to the target's lowerCall(Info).
// NOTE(review): several signature/body lines were lost in the scrape (the
// leading parameters, the SplitArgs declaration used at the getReturnInfo
// call, and the splitToValueTypes/OrigRet handling after the isVoidTy check)
// — confirm against the upstream file before editing.
82  ArrayRef<Register> ResRegs,
84  Register SwiftErrorVReg,
85  std::function<unsigned()> GetCalleeReg) const {
87  const DataLayout &DL = MIRBuilder.getDataLayout();
88  MachineFunction &MF = MIRBuilder.getMF();
// A call may be tail-called only if the IR says so, it sits in tail-call
// position, and the function does not opt out via "disable-tail-calls".
89  bool CanBeTailCalled = CB.isTailCall() &&
90  isInTailCallPosition(CB, MF.getTarget()) &&
91  (MF.getFunction()
92  .getFnAttribute("disable-tail-calls")
93  .getValueAsString() != "true");
94 
95  CallingConv::ID CallConv = CB.getCallingConv();
96  Type *RetTy = CB.getType();
97  bool IsVarArg = CB.getFunctionType()->isVarArg();
98 
100  getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
101  Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);
102 
103  if (!Info.CanLowerReturn) {
104  // Callee requires sret demotion.
105  insertSRetOutgoingArgument(MIRBuilder, CB, Info);
106 
107  // The sret demotion isn't compatible with tail-calls, since the sret
108  // argument points into the caller's stack frame.
109  CanBeTailCalled = false;
110  }
111 
112  // First step is to marshall all the function's parameters into the correct
113  // physregs and memory locations. Gather the sequence of argument types that
114  // we'll pass to the assigner function.
115  unsigned i = 0;
116  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
117  for (auto &Arg : CB.args()) {
118  ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i),
119  i < NumFixedArgs};
120  setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
121 
122  // If we have an explicit sret argument that is an Instruction, (i.e., it
123  // might point to function-local memory), we can't meaningfully tail-call.
124  if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
125  CanBeTailCalled = false;
126 
127  Info.OrigArgs.push_back(OrigArg);
128  ++i;
129  }
130 
131  // Try looking through a bitcast from one function type to another.
132  // Commonly happens with calls to objc_msgSend().
133  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
// Direct calls get a global-address operand; indirect calls materialize the
// callee into a register supplied lazily by GetCalleeReg().
134  if (const Function *F = dyn_cast<Function>(CalleeV))
135  Info.Callee = MachineOperand::CreateGA(F, 0);
136  else
137  Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
138 
139  Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, ISD::ArgFlagsTy{}};
140  if (!Info.OrigRet.Ty->isVoidTy())
142 
// Fill the remaining book-keeping fields and hand off to the target hook.
143  Info.CB = &CB;
144  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
145  Info.CallConv = CallConv;
146  Info.SwiftErrorVReg = SwiftErrorVReg;
147  Info.IsMustTailCall = CB.isMustTailCall();
148  Info.IsTailCall = CanBeTailCalled;
149  Info.IsVarArg = IsVarArg;
150  return lowerCall(MIRBuilder, Info);
151 }
152 
// CallLowering::setArgFlags<FuncInfoTy> — populates Arg.Flags[0] from the
// attributes and layout information of attribute slot \p OpIdx. FuncInfoTy is
// either Function (formal args) or CallBase (call-site args); see the explicit
// instantiations below.
// NOTE(review): the scrape dropped the signature line and one or two lines
// inside the pointer / byval handling (doxygen 154, 164, 169) — confirm the
// exact upstream text before modifying.
153 template <typename FuncInfoTy>
155  const DataLayout &DL,
156  const FuncInfoTy &FuncInfo) const {
157  auto &Flags = Arg.Flags[0];
158  const AttributeList &Attrs = FuncInfo.getAttributes();
159  addArgFlagsFromAttributes(Flags, Attrs, OpIdx);
160 
// Record pointer-ness on the flags (scalar type so vectors of pointers are
// also recognized).
161  PointerType *PtrTy = dyn_cast<PointerType>(Arg.Ty->getScalarType());
162  if (PtrTy) {
163  Flags.setPointer();
165  }
166 
167  Align MemAlign = DL.getABITypeAlign(Arg.Ty);
168  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
170  unsigned ParamIdx = OpIdx - AttributeList::FirstArgIndex;
171 
// The pointee type comes from whichever of byval/inalloca/preallocated is set.
172  Type *ElementTy = FuncInfo.getParamByValType(ParamIdx);
173  if (!ElementTy)
174  ElementTy = FuncInfo.getParamInAllocaType(ParamIdx);
175  if (!ElementTy)
176  ElementTy = FuncInfo.getParamPreallocatedType(ParamIdx);
177  assert(ElementTy && "Must have byval, inalloca or preallocated type");
178  Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
179 
180  // For ByVal, alignment should be passed from FE. BE will guess if
181  // this info is not there but there are cases it cannot get right.
182  if (auto ParamAlign = FuncInfo.getParamStackAlign(ParamIdx))
183  MemAlign = *ParamAlign;
184  else if ((ParamAlign = FuncInfo.getParamAlign(ParamIdx)))
185  MemAlign = *ParamAlign;
186  else
187  MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
188  } else if (OpIdx >= AttributeList::FirstArgIndex) {
// Non-byval arguments may still carry an explicit stack alignment.
189  if (auto ParamAlign =
190  FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
191  MemAlign = *ParamAlign;
192  }
193  Flags.setMemAlign(MemAlign);
194  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
195 
196  // Don't try to use the returned attribute if the argument is marked as
197  // swiftself, since it won't be passed in x0.
198  if (Flags.isSwiftSelf())
199  Flags.setReturned(false);
200 }
201 
// Explicit instantiations of setArgFlags for its two users: IR Functions
// (formal arguments) and CallBases (call-site arguments). Keeping them here
// lets the template definition stay in this .cpp file.
202 template void
203 CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
204  const DataLayout &DL,
205  const Function &FuncInfo) const;
206 
207 template void
208 CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
209  const DataLayout &DL,
210  const CallBase &FuncInfo) const;
211 
// CallLowering::splitToValueTypes — expands one ArgInfo into one ArgInfo per
// value type produced by ComputeValueVTs (e.g. an aggregate becomes several
// entries, "[1 x double]" collapses to "double").
// NOTE(review): the scrape dropped the signature line naming the OrigArg
// parameter and the trailing Offsets parameter line — confirm upstream.
213  SmallVectorImpl<ArgInfo> &SplitArgs,
214  const DataLayout &DL,
215  CallingConv::ID CallConv,
217  LLVMContext &Ctx = OrigArg.Ty->getContext();
218 
219  SmallVector<EVT, 4> SplitVTs;
220  ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, Offsets, 0);
221 
// Zero VTs: nothing to pass for this argument (e.g. empty aggregate).
222  if (SplitVTs.size() == 0)
223  return;
224 
225  if (SplitVTs.size() == 1) {
226  // No splitting to do, but we want to replace the original type (e.g. [1 x
227  // double] -> double).
228  SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
229  OrigArg.OrigArgIndex, OrigArg.Flags[0],
230  OrigArg.IsFixed, OrigArg.OrigValue);
231  return;
232  }
233 
234  // Create one ArgInfo for each virtual register in the original ArgInfo.
235  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
236 
237  bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
238  OrigArg.Ty, CallConv, false, DL);
239  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
240  Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
241  SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.OrigArgIndex,
242  OrigArg.Flags[0], OrigArg.IsFixed);
243  if (NeedsRegBlock)
244  SplitArgs.back().Flags[0].setInConsecutiveRegs();
245  }
246 
// Mark the final piece so the register-block (if any) is terminated.
247  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
248 }
249 
250 /// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
// NOTE(review): the scrape dropped the signature line (presumably
// "mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register>
// DstRegs," given the uses of B and DstRegs below) — confirm upstream.
251 static MachineInstrBuilder
253  ArrayRef<Register> SrcRegs) {
254  MachineRegisterInfo &MRI = *B.getMRI();
255  LLT LLTy = MRI.getType(DstRegs[0]);
256  LLT PartLLT = MRI.getType(SrcRegs[0]);
257 
258  // Deal with v3s16 split into v2s16
259  LLT LCMTy = getCoverTy(LLTy, PartLLT);
260  if (LCMTy == LLTy) {
261  // Common case where no padding is needed.
262  assert(DstRegs.size() == 1);
263  return B.buildConcatVectors(DstRegs[0], SrcRegs);
264  }
265 
266  // We need to create an unmerge to the result registers, which may require
267  // widening the original value.
268  Register UnmergeSrcReg;
269  if (LCMTy != PartLLT) {
270  assert(DstRegs.size() == 1);
// Merge up to the covering type, then trim the excess trailing elements.
271  return B.buildDeleteTrailingVectorElements(DstRegs[0],
272  B.buildMerge(LCMTy, SrcRegs));
273  } else {
274  // We don't need to widen anything if we're extracting a scalar which was
275  // promoted to a vector e.g. s8 -> v4s8 -> s8
276  assert(SrcRegs.size() == 1);
277  UnmergeSrcReg = SrcRegs[0];
278  }
279 
280  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();
281 
282  SmallVector<Register, 8> PadDstRegs(NumDst);
283  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());
284 
285  // Create the excess dead defs for the unmerge.
286  for (int I = DstRegs.size(); I != NumDst; ++I)
287  PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);
288 
289  if (PadDstRegs.size() == 1)
290  return B.buildDeleteTrailingVectorElements(DstRegs[0], UnmergeSrcReg);
291  return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
292 }
293 
294 /// Create a sequence of instructions to combine pieces split into register
295 /// typed values to the original IR value. \p OrigRegs contains the destination
296 /// value registers of type \p LLTy, and \p Regs contains the legalized pieces
297 /// with type \p PartLLT. This is used for incoming values (physregs to vregs).
// NOTE(review): the scrape dropped the signature line (presumably
// "static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register>
// OrigRegs," — it is called with those arguments from handleAssignments);
// confirm upstream. The body is a cascade of special cases keyed on the
// relationship between LLTy (IR value type) and PartLLT (ABI piece type).
299  ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
300  const ISD::ArgFlagsTy Flags) {
301  MachineRegisterInfo &MRI = *B.getMRI();
302 
// Case 1: types already match — the assignment wrote directly.
303  if (PartLLT == LLTy) {
304  // We should have avoided introducing a new virtual register, and just
305  // directly assigned here.
306  assert(OrigRegs[0] == Regs[0]);
307  return;
308  }
309 
// Case 2: same bit width, single register on both sides — plain bitcast.
310  if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
311  Regs.size() == 1) {
312  B.buildBitcast(OrigRegs[0], Regs[0]);
313  return;
314  }
315 
316  // A vector PartLLT needs extending to LLTy's element size.
317  // E.g. <2 x s64> = G_SEXT <2 x s32>.
// Case 3: value was widened (scalar or per-element) — assert the known
// extension kind, then truncate back down (through an inttoptr for pointers).
318  if (PartLLT.isVector() == LLTy.isVector() &&
319  PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
320  (!PartLLT.isVector() ||
321  PartLLT.getNumElements() == LLTy.getNumElements()) &&
322  OrigRegs.size() == 1 && Regs.size() == 1) {
323  Register SrcReg = Regs[0];
324 
325  LLT LocTy = MRI.getType(SrcReg);
326 
327  if (Flags.isSExt()) {
328  SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
329  .getReg(0);
330  } else if (Flags.isZExt()) {
331  SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
332  .getReg(0);
333  }
334 
335  // Sometimes pointers are passed zero extended.
336  LLT OrigTy = MRI.getType(OrigRegs[0]);
337  if (OrigTy.isPointer()) {
338  LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits());
339  B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg));
340  return;
341  }
342 
343  B.buildTrunc(OrigRegs[0], SrcReg);
344  return;
345  }
346 
// Case 4: scalar rebuilt from several scalar pieces — merge (and trunc if the
// merged width overshoots the original).
347  if (!LLTy.isVector() && !PartLLT.isVector()) {
348  assert(OrigRegs.size() == 1);
349  LLT OrigTy = MRI.getType(OrigRegs[0]);
350 
351  unsigned SrcSize = PartLLT.getSizeInBits().getFixedSize() * Regs.size();
352  if (SrcSize == OrigTy.getSizeInBits())
353  B.buildMerge(OrigRegs[0], Regs);
354  else {
355  auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
356  B.buildTrunc(OrigRegs[0], Widened);
357  }
358 
359  return;
360  }
361 
// Case 5: pieces are vectors — coerce element types as needed, then merge via
// mergeVectorRegsToResultRegs.
362  if (PartLLT.isVector()) {
363  assert(OrigRegs.size() == 1);
364  SmallVector<Register> CastRegs(Regs.begin(), Regs.end());
365 
366  // If PartLLT is a mismatched vector in both number of elements and element
367  // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
368  // have the same elt type, i.e. v4s32.
369  if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() &&
370  PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
371  Regs.size() == 1) {
372  LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
373  .changeElementCount(PartLLT.getElementCount() * 2);
374  CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
375  PartLLT = NewTy;
376  }
377 
378  if (LLTy.getScalarType() == PartLLT.getElementType()) {
379  mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
380  } else {
381  unsigned I = 0;
382  LLT GCDTy = getGCDType(LLTy, PartLLT);
383 
384  // We are both splitting a vector, and bitcasting its element types. Cast
385  // the source pieces into the appropriate number of pieces with the result
386  // element type.
387  for (Register SrcReg : CastRegs)
388  CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
389  mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
390  }
391 
392  return;
393  }
394 
// Case 6: vector result built from scalar pieces.
395  assert(LLTy.isVector() && !PartLLT.isVector());
396 
397  LLT DstEltTy = LLTy.getElementType();
398 
399  // Pointer information was discarded. We'll need to coerce some register types
400  // to avoid violating type constraints.
401  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();
402 
403  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());
404 
405  if (DstEltTy == PartLLT) {
406  // Vector was trivially scalarized.
407 
408  if (RealDstEltTy.isPointer()) {
409  for (Register Reg : Regs)
410  MRI.setType(Reg, RealDstEltTy);
411  }
412 
413  B.buildBuildVector(OrigRegs[0], Regs);
414  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
415  // Deal with vector with 64-bit elements decomposed to 32-bit
416  // registers. Need to create intermediate 64-bit elements.
417  SmallVector<Register, 8> EltMerges;
418  int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();
419 
420  assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);
421 
422  for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
423  auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
424  // Fix the type in case this is really a vector of pointers.
425  MRI.setType(Merge.getReg(0), RealDstEltTy);
426  EltMerges.push_back(Merge.getReg(0));
427  Regs = Regs.drop_front(PartsPerElt);
428  }
429 
430  B.buildBuildVector(OrigRegs[0], EltMerges);
431  } else {
432  // Vector was split, and elements promoted to a wider type.
433  // FIXME: Should handle floating point promotions.
434  LLT BVType = LLT::fixed_vector(LLTy.getNumElements(), PartLLT);
435  auto BV = B.buildBuildVector(BVType, Regs);
436  B.buildTrunc(OrigRegs[0], BV);
437  }
438 }
439 
440 /// Create a sequence of instructions to expand the value in \p SrcReg (of type
441 /// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
442 /// contain the type of scalar value extension if necessary.
443 ///
444 /// This is used for outgoing values (vregs to physregs)
// NOTE(review): the scrape dropped the signature line (presumably
// "static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register>
// DstRegs," per the call in handleAssignments) — confirm upstream.
446  Register SrcReg, LLT SrcTy, LLT PartTy,
447  unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
448  // We could just insert a regular copy, but this is unreachable at the moment.
449  assert(SrcTy != PartTy && "identical part types shouldn't reach here");
450 
451  const unsigned PartSize = PartTy.getSizeInBits();
452 
// Simple widening (scalar->scalar or vector->vector): one extend instruction.
453  if (PartTy.isVector() == SrcTy.isVector() &&
454  PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
455  assert(DstRegs.size() == 1);
456  B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
457  return;
458  }
459 
460  if (SrcTy.isVector() && !PartTy.isVector() &&
461  PartSize > SrcTy.getElementType().getSizeInBits()) {
462  // Vector was scalarized, and the elements extended.
463  auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
464  for (int i = 0, e = DstRegs.size(); i != e; ++i)
465  B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
466  return;
467  }
468 
469  LLT GCDTy = getGCDType(SrcTy, PartTy);
470  if (GCDTy == PartTy) {
471  // If this already evenly divisible, we can create a simple unmerge.
472  B.buildUnmerge(DstRegs, SrcReg);
473  return;
474  }
475 
476  MachineRegisterInfo &MRI = *B.getMRI();
477  LLT DstTy = MRI.getType(DstRegs[0]);
478  LLT LCMTy = getCoverTy(SrcTy, PartTy);
479 
480  const unsigned DstSize = DstTy.getSizeInBits();
481  const unsigned SrcSize = SrcTy.getSizeInBits();
482  unsigned CoveringSize = LCMTy.getSizeInBits();
483 
484  Register UnmergeSrc = SrcReg;
485 
// Widen the source up to the covering type before unmerging: extend scalars
// directly, otherwise pad with undef pieces.
486  if (!LCMTy.isVector() && CoveringSize != SrcSize) {
487  // For scalars, it's common to be able to use a simple extension.
488  if (SrcTy.isScalar() && DstTy.isScalar()) {
489  CoveringSize = alignTo(SrcSize, DstSize);
490  LLT CoverTy = LLT::scalar(CoveringSize);
491  UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0);
492  } else {
493  // Widen to the common type.
494  // FIXME: This should respect the extend type
495  Register Undef = B.buildUndef(SrcTy).getReg(0);
496  SmallVector<Register, 8> MergeParts(1, SrcReg);
497  for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
498  MergeParts.push_back(Undef);
499  UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0);
500  }
501  }
502 
503  if (LCMTy.isVector() && CoveringSize != SrcSize)
504  UnmergeSrc = B.buildPadVectorWithUndefElements(LCMTy, SrcReg).getReg(0);
505 
506  B.buildUnmerge(DstRegs, UnmergeSrc);
507 }
508 
// CallLowering::determineAndHandleAssignments — convenience wrapper that runs
// determineAssignments and, on success, handleAssignments with a fresh CCState
// (scrape is missing the leading signature lines and the ArgLocs declaration
// passed to CCInfo — confirm upstream).
510  ValueHandler &Handler, ValueAssigner &Assigner,
512  CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg) const {
513  MachineFunction &MF = MIRBuilder.getMF();
514  const Function &F = MF.getFunction();
516 
517  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
518  if (!determineAssignments(Assigner, Args, CCInfo))
519  return false;
520 
521  return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
522  ThisReturnReg);
523 }
524 
525 static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
526  if (Flags.isSExt())
527  return TargetOpcode::G_SEXT;
528  if (Flags.isZExt())
529  return TargetOpcode::G_ZEXT;
530  return TargetOpcode::G_ANYEXT;
531 }
532 
// CallLowering::determineAssignments — asks the target's ValueAssigner to pick
// a location (register/stack) for every ArgInfo, splitting values that need
// multiple registers per the calling convention. Returns false if any part
// cannot be assigned.
// NOTE(review): the scrape dropped the signature lines (presumably
// "bool CallLowering::determineAssignments(ValueAssigner &Assigner,
// SmallVectorImpl<ArgInfo> &Args," given the uses below) — confirm upstream.
535  CCState &CCInfo) const {
536  LLVMContext &Ctx = CCInfo.getContext();
537  const CallingConv::ID CallConv = CCInfo.getCallingConv();
538 
539  unsigned NumArgs = Args.size();
540  for (unsigned i = 0; i != NumArgs; ++i) {
541  EVT CurVT = EVT::getEVT(Args[i].Ty);
542 
543  MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);
544 
545  // If we need to split the type over multiple regs, check it's a scenario
546  // we currently support.
547  unsigned NumParts =
548  TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);
549 
550  if (NumParts == 1) {
551  // Try to use the register type if we couldn't assign the VT.
552  if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
553  Args[i].Flags[0], CCInfo))
554  return false;
555  continue;
556  }
557 
558  // For incoming arguments (physregs to vregs), we could have values in
559  // physregs (or memlocs) which we want to extract and copy to vregs.
560  // During this, we might have to deal with the LLT being split across
561  // multiple regs, so we have to record this information for later.
562  //
563  // If we have outgoing args, then we have the opposite case. We have a
564  // vreg with an LLT which we want to assign to a physical location, and
565  // we might have to record that the value has to be split later.
566 
567  // We're handling an incoming arg which is split over multiple regs.
568  // E.g. passing an s128 on AArch64.
569  ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
// Replace the single flag entry with one per part: the first part carries
// Split, the last carries SplitEnd, middle parts get alignment 1.
570  Args[i].Flags.clear();
571 
572  for (unsigned Part = 0; Part < NumParts; ++Part) {
573  ISD::ArgFlagsTy Flags = OrigFlags;
574  if (Part == 0) {
575  Flags.setSplit();
576  } else {
577  Flags.setOrigAlign(Align(1));
578  if (Part == NumParts - 1)
579  Flags.setSplitEnd();
580  }
581 
582  Args[i].Flags.push_back(Flags);
583  if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
584  Args[i].Flags[Part], CCInfo)) {
585  // Still couldn't assign this smaller part type for some reason.
586  return false;
587  }
588  }
589  }
590 
591  return true;
592 }
593 
// CallLowering::handleAssignments — materializes the CCValAssign locations
// computed by determineAssignments: copies values to/from registers, loads or
// stores stack pieces, and handles byval and custom locations. Outgoing
// register copies are deferred (see DelayedOutgoingRegAssignments) to keep the
// stack-protector split point correct.
// NOTE(review): the scrape dropped the leading signature lines and the MRI
// declaration (doxygen 594-597, 601) — confirm upstream.
596  CCState &CCInfo,
598  MachineIRBuilder &MIRBuilder,
599  Register ThisReturnReg) const {
600  MachineFunction &MF = MIRBuilder.getMF();
602  const Function &F = MF.getFunction();
603  const DataLayout &DL = F.getParent()->getDataLayout();
604 
605  const unsigned NumArgs = Args.size();
606 
607  // Stores thunks for outgoing register assignments. This is used so we delay
608  // generating register copies until mem loc assignments are done. We do this
609  // so that if the target is using the delayed stack protector feature, we can
610  // find the split point of the block accurately. E.g. if we have:
611  // G_STORE %val, %memloc
612  // $x0 = COPY %foo
613  // $x1 = COPY %bar
614  // CALL func
615  // ... then the split point for the block will correctly be at, and including,
616  // the copy to $x0. If instead the G_STORE instruction immediately precedes
617  // the CALL, then we'd prematurely choose the CALL as the split point, thus
618  // generating a split block with a CALL that uses undefined physregs.
619  SmallVector<std::function<void()>> DelayedOutgoingRegAssignments;
620 
// i walks the arguments; j walks ArgLocs (which has one entry per part, so j
// can advance faster than i).
621  for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
622  assert(j < ArgLocs.size() && "Skipped too many arg locs");
623  CCValAssign &VA = ArgLocs[j];
624  assert(VA.getValNo() == i && "Location doesn't correspond to current arg");
625 
626  if (VA.needsCustom()) {
627  std::function<void()> Thunk;
628  unsigned NumArgRegs = Handler.assignCustomValue(
629  Args[i], makeArrayRef(ArgLocs).slice(j), &Thunk);
630  if (Thunk)
631  DelayedOutgoingRegAssignments.emplace_back(Thunk);
632  if (!NumArgRegs)
633  return false;
634  j += NumArgRegs;
635  continue;
636  }
637 
638  const MVT ValVT = VA.getValVT();
639  const MVT LocVT = VA.getLocVT();
640 
641  const LLT LocTy(LocVT);
642  const LLT ValTy(ValVT);
// Incoming handlers receive values in the location type; outgoing handlers
// produce them in the value type.
643  const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
644  const EVT OrigVT = EVT::getEVT(Args[i].Ty);
645  const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
646 
647  // Expected to be multiple regs for a single incoming arg.
648  // There should be Regs.size() ArgLocs per argument.
649  // This should be the same as getNumRegistersForCallingConv
650  const unsigned NumParts = Args[i].Flags.size();
651 
652  // Now split the registers into the assigned types.
653  Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());
654 
655  if (NumParts != 1 || NewLLT != OrigTy) {
656  // If we can't directly assign the register, we need one or more
657  // intermediate values.
658  Args[i].Regs.resize(NumParts);
659 
660  // For each split register, create and assign a vreg that will store
661  // the incoming component of the larger value. These will later be
662  // merged to form the final vreg.
663  for (unsigned Part = 0; Part < NumParts; ++Part)
664  Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
665  }
666 
667  assert((j + (NumParts - 1)) < ArgLocs.size() &&
668  "Too many regs for number of args");
669 
670  // Coerce into outgoing value types before register assignment.
671  if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy) {
672  assert(Args[i].OrigRegs.size() == 1);
673  buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
674  ValTy, extendOpFromFlags(Args[i].Flags[0]));
675  }
676 
677  for (unsigned Part = 0; Part < NumParts; ++Part) {
678  Register ArgReg = Args[i].Regs[Part];
679  // There should be Regs.size() ArgLocs per argument.
680  VA = ArgLocs[j + Part];
681  const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];
682 
683  if (VA.isMemLoc() && !Flags.isByVal()) {
684  // Individual pieces may have been spilled to the stack and others
685  // passed in registers.
686 
687  // TODO: The memory size may be larger than the value we need to
688  // store. We may need to adjust the offset for big endian targets.
689  LLT MemTy = Handler.getStackValueStoreType(DL, VA, Flags);
690 
691  MachinePointerInfo MPO;
692  Register StackAddr = Handler.getStackAddress(
693  MemTy.getSizeInBytes(), VA.getLocMemOffset(), MPO, Flags);
694 
695  Handler.assignValueToAddress(Args[i], Part, StackAddr, MemTy, MPO, VA);
696  continue;
697  }
698 
699  if (VA.isMemLoc() && Flags.isByVal()) {
700  assert(Args[i].Regs.size() == 1 &&
701  "didn't expect split byval pointer");
702 
703  if (Handler.isIncomingArgumentHandler()) {
704  // We just need to copy the frame index value to the pointer.
705  MachinePointerInfo MPO;
706  Register StackAddr = Handler.getStackAddress(
707  Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
708  MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
709  } else {
710  // For outgoing byval arguments, insert the implicit copy byval
711  // implies, such that writes in the callee do not modify the caller's
712  // value.
713  uint64_t MemSize = Flags.getByValSize();
714  int64_t Offset = VA.getLocMemOffset();
715 
716  MachinePointerInfo DstMPO;
717  Register StackAddr =
718  Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);
719 
720  MachinePointerInfo SrcMPO(Args[i].OrigValue);
721  if (!Args[i].OrigValue) {
722  // We still need to accurately track the stack address space if we
723  // don't know the underlying value.
724  const LLT PtrTy = MRI.getType(StackAddr);
725  SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
726  }
727 
728  Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
729  inferAlignFromPtrInfo(MF, DstMPO));
730 
731  Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
732  inferAlignFromPtrInfo(MF, SrcMPO));
733 
734  Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
735  DstMPO, DstAlign, SrcMPO, SrcAlign,
736  MemSize, VA);
737  }
738  continue;
739  }
740 
741  assert(!VA.needsCustom() && "custom loc should have been handled already");
742 
// "this return" forwarding: the first incoming argument may be assigned the
// register that also carries the return value.
// NOTE(review): the closing line of this condition (doxygen 745) is missing
// from the scrape, and "Args[i].Regs[i]" (rather than Regs[Part]/ArgReg)
// looks suspicious — verify both against the upstream file.
743  if (i == 0 && ThisReturnReg.isValid() &&
744  Handler.isIncomingArgumentHandler() &&
746  Handler.assignValueToReg(Args[i].Regs[i], ThisReturnReg, VA);
747  continue;
748  }
749 
750  if (Handler.isIncomingArgumentHandler())
751  Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
752  else {
753  DelayedOutgoingRegAssignments.emplace_back([=, &Handler]() {
754  Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
755  });
756  }
757  }
758 
759  // Now that all pieces have been assigned, re-pack the register typed values
760  // into the original value typed registers.
761  if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT) {
762  // Merge the split registers into the expected larger result vregs of
763  // the original call.
764  buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
765  LocTy, Args[i].Flags[0]);
766  }
767 
768  j += NumParts - 1;
769  }
// Emit the deferred physreg copies now that all memory assignments are done.
770  for (auto &Fn : DelayedOutgoingRegAssignments)
771  Fn();
772 
773  return true;
774 }
775 
// CallLowering::insertSRetLoads — after an sret-demoted call returns, loads
// each piece of the demoted return value from the stack slot addressed by
// \p DemoteReg into the corresponding result vreg.
// NOTE(review): the scrape dropped the leading signature lines, the MRI
// declaration, the Offsets vector declaration, and the PtrInfo declaration
// (doxygen 776, 780, 784, 794) — confirm upstream.
777  ArrayRef<Register> VRegs, Register DemoteReg,
778  int FI) const {
779  MachineFunction &MF = MIRBuilder.getMF();
781  const DataLayout &DL = MF.getDataLayout();
782 
783  SmallVector<EVT, 4> SplitVTs;
785  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
786 
787  assert(VRegs.size() == SplitVTs.size());
788 
789  unsigned NumValues = SplitVTs.size();
790  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
791  Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
792  LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);
793 
795 
// One load per split value: address = DemoteReg + Offsets[I].
796  for (unsigned I = 0; I < NumValues; ++I) {
797  Register Addr;
798  MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
799  auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
800  MRI.getType(VRegs[I]),
801  commonAlignment(BaseAlign, Offsets[I]));
802  MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
803  }
804 }
805 
// CallLowering::insertSRetStores — mirror of insertSRetLoads for the callee
// side: stores each piece of the (sret-demoted) return value through the
// incoming sret pointer in \p DemoteReg.
// NOTE(review): leading signature lines, the MRI declaration, and the Offsets
// declaration are missing from the scrape (doxygen 806, 810, 814) — confirm
// upstream.
807  ArrayRef<Register> VRegs,
808  Register DemoteReg) const {
809  MachineFunction &MF = MIRBuilder.getMF();
811  const DataLayout &DL = MF.getDataLayout();
812 
813  SmallVector<EVT, 4> SplitVTs;
815  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
816 
817  assert(VRegs.size() == SplitVTs.size());
818 
819  unsigned NumValues = SplitVTs.size();
820  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
821  unsigned AS = DL.getAllocaAddrSpace();
822  LLT OffsetLLTy =
823  getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);
824 
825  MachinePointerInfo PtrInfo(AS);
826 
// One store per split value: address = DemoteReg + Offsets[I].
827  for (unsigned I = 0; I < NumValues; ++I) {
828  Register Addr;
829  MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
830  auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
831  MRI.getType(VRegs[I]),
832  commonAlignment(BaseAlign, Offsets[I]));
833  MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
834  }
835 }
836 
// CallLowering::insertSRetIncomingArgument — when the callee's return value
// is demoted, prepends a hidden sret pointer argument (written to DemoteReg)
// at the front of SplitArgs.
// NOTE(review): the function-name line and part of the DemoteArg constructor
// call (doxygen 837, 853-854) are missing from the scrape — confirm upstream.
838  const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
839  MachineRegisterInfo &MRI, const DataLayout &DL) const {
840  unsigned AS = DL.getAllocaAddrSpace();
// The hidden argument is a pointer in the alloca address space.
841  DemoteReg = MRI.createGenericVirtualRegister(
842  LLT::pointer(AS, DL.getPointerSizeInBits(AS)));
843 
844  Type *PtrTy = PointerType::get(F.getReturnType(), AS);
845 
846  SmallVector<EVT, 1> ValueVTs;
847  ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);
848 
849  // NOTE: Assume that a pointer won't get split into more than one VT.
850  assert(ValueVTs.size() == 1);
851 
852  ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()),
855  DemoteArg.Flags[0].setSRet();
856  SplitArgs.insert(SplitArgs.begin(), DemoteArg);
857 }
858 
// CallLowering::insertSRetOutgoingArgument — caller-side counterpart of
// insertSRetIncomingArgument: allocates a stack slot for the demoted return
// value and prepends a pointer to it as a hidden sret argument on the call,
// recording the slot and register in Info for insertSRetLoads.
// NOTE(review): the function-name line and part of the DemoteArg constructor
// (doxygen 859, 872) are missing from the scrape — confirm upstream.
860  const CallBase &CB,
861  CallLoweringInfo &Info) const {
862  const DataLayout &DL = MIRBuilder.getDataLayout();
863  Type *RetTy = CB.getType();
864  unsigned AS = DL.getAllocaAddrSpace();
865  LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
866 
// Reserve a stack object big/aligned enough to hold the whole return value.
867  int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
868  DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);
869 
870  Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
871  ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS),
873  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
874  DemoteArg.Flags[0].setSRet();
875 
876  Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
877  Info.DemoteStackIndex = FI;
878  Info.DemoteRegister = DemoteReg;
879 }
880 
// CallLowering::checkReturn — dry-runs the return-value calling-convention
// function over every split return piece; true iff all pieces are assignable.
// NOTE(review): the leading signature lines (CCInfo and Outs parameters,
// doxygen 881-882) are missing from the scrape — confirm upstream.
883  CCAssignFn *Fn) const {
884  for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
885  MVT VT = MVT::getVT(Outs[I].Ty);
// CCAssignFn returns true on failure to assign.
886  if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
887  return false;
888  }
889  return true;
890 }
891 
// CallLowering::getReturnInfo — expands the return type into one BaseArgInfo
// per calling-convention register piece (via ComputeValueVTs and the TLI
// register-count/type queries).
// NOTE(review): the leading signature lines, the Flags derivation from the
// attribute list, and possibly an addArgFlagsFromAttributes call (doxygen
// 892-894, 897, 901) are missing from the scrape — confirm upstream.
895  const DataLayout &DL) const {
896  LLVMContext &Context = RetTy->getContext();
898 
899  SmallVector<EVT, 4> SplitVTs;
900  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
902 
903  for (EVT VT : SplitVTs) {
// Each value type may itself be split into NumParts registers of RegVT.
904  unsigned NumParts =
905  TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
906  MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
907  Type *PartTy = EVT(RegVT).getTypeForEVT(Context);
908 
909  for (unsigned I = 0; I < NumParts; ++I) {
910  Outs.emplace_back(PartTy, Flags);
911  }
912  }
913 }
914 
// CallLowering::checkReturnTypeForCallLowering — true iff the function's own
// return type can be lowered without sret demotion (signature line missing
// from the scrape; presumably takes MachineFunction &MF — confirm upstream).
916  const auto &F = MF.getFunction();
917  Type *ReturnType = F.getReturnType();
918  CallingConv::ID CallConv = F.getCallingConv();
919 
920  SmallVector<BaseArgInfo, 4> SplitArgs;
921  getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
922  MF.getDataLayout());
923  return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
924 }
925 
927  const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
928  const SmallVectorImpl<CCValAssign> &OutLocs,
929  const SmallVectorImpl<ArgInfo> &OutArgs) const {
930  for (unsigned i = 0; i < OutLocs.size(); ++i) {
931  auto &ArgLoc = OutLocs[i];
932  // If it's not a register, it's fine.
933  if (!ArgLoc.isRegLoc())
934  continue;
935 
936  MCRegister PhysReg = ArgLoc.getLocReg();
937 
938  // Only look at callee-saved registers.
939  if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
940  continue;
941 
942  LLVM_DEBUG(
943  dbgs()
944  << "... Call has an argument passed in a callee-saved register.\n");
945 
946  // Check if it was copied from.
947  const ArgInfo &OutInfo = OutArgs[i];
948 
949  if (OutInfo.Regs.size() > 1) {
950  LLVM_DEBUG(
951  dbgs() << "... Cannot handle arguments in multiple registers.\n");
952  return false;
953  }
954 
955  // Check if we copy the register, walking through copies from virtual
956  // registers. Note that getDefIgnoringCopies does not ignore copies from
957  // physical registers.
958  MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
959  if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
960  LLVM_DEBUG(
961  dbgs()
962  << "... Parameter was not copied into a VReg, cannot tail call.\n");
963  return false;
964  }
965 
966  // Got a copy. Verify that it's the same as the register we want.
967  Register CopyRHS = RegDef->getOperand(1).getReg();
968  if (CopyRHS != PhysReg) {
969  LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
970  "VReg, cannot tail call.\n");
971  return false;
972  }
973  }
974 
975  return true;
976 }
977 
// CallLowering::resultsCompatible (CallLowering.cpp:978; signature line
// stripped in this rendering). When caller and callee use different calling
// conventions, verifies that both conventions assign the values in InArgs to
// identical locations (same registers, or same stack offsets) — a
// prerequisite for tail-calling across conventions.
// NOTE(review): ArgLocs1 / ArgLocs2 are declared on stripped lines
// (CallLowering.cpp:990 and 995); presumably SmallVectors of CCValAssign.
979  MachineFunction &MF,
980  SmallVectorImpl<ArgInfo> &InArgs,
981  ValueAssigner &CalleeAssigner,
982  ValueAssigner &CallerAssigner) const {
983  const Function &F = MF.getFunction();
984  CallingConv::ID CalleeCC = Info.CallConv;
985  CallingConv::ID CallerCC = F.getCallingConv();
986 
// Identical conventions are trivially compatible.
987  if (CallerCC == CalleeCC)
988  return true;
989 
991  CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
992  if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
993  return false;
994 
996  CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
997  if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
998  return false;
999 
1000  // We need the argument locations to match up exactly. If there's more in
1001  // one than the other, then we are done.
1002  if (ArgLocs1.size() != ArgLocs2.size())
1003  return false;
1004 
1005  // Make sure that each location is passed in exactly the same way.
1006  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
1007  const CCValAssign &Loc1 = ArgLocs1[i];
1008  const CCValAssign &Loc2 = ArgLocs2[i];
1009 
1010  // We need both of them to be the same. So if one is a register and one
1011  // isn't, we're done.
1012  if (Loc1.isRegLoc() != Loc2.isRegLoc())
1013  return false;
1014 
1015  if (Loc1.isRegLoc()) {
1016  // If they don't have the same register location, we're done.
1017  if (Loc1.getLocReg() != Loc2.getLocReg())
1018  return false;
1019 
1020  // They matched, so we can move to the next ArgLoc.
1021  continue;
1022  }
1023 
1024  // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
1025  if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
1026  return false;
1027  }
1028 
1029  return true;
1030 }
1031 
// CallLowering::ValueHandler::getStackValueStoreType (CallLowering.cpp:1032;
// signature start stripped in this rendering). Computes the LLT to use when
// storing the value described by VA to the stack, re-deriving pointer types
// that CCValAssign flattened to integers by consulting the argument flags.
1033  const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const {
1034  const MVT ValVT = VA.getValVT();
1035  if (ValVT != MVT::iPTR) {
1036  LLT ValTy(ValVT);
1037 
1038  // We lost the pointeriness going through CCValAssign, so try to restore it
1039  // based on the flags.
1040  if (Flags.isPointer()) {
1041  LLT PtrTy = LLT::pointer(Flags.getPointerAddrSpace(),
1042  ValTy.getScalarSizeInBits());
// For vectors of pointers, keep the original element count.
1043  if (ValVT.isVector())
1044  return LLT::vector(ValTy.getElementCount(), PtrTy);
1045  return PtrTy;
1046  }
1047 
1048  return ValTy;
1049  }
1050 
// ValVT is the symbolic iPTR: size the pointer from the DataLayout.
1051  unsigned AddrSpace = Flags.getPointerAddrSpace();
1052  return LLT::pointer(AddrSpace, DL.getPointerSize(AddrSpace));
1053 }
1054 
// CallLowering::ValueHandler::copyArgumentMemory (CallLowering.cpp:1055;
// signature start stripped in this rendering). Emits a G_MEMCPY of MemSize
// bytes from SrcPtr to DstPtr.
// NOTE(review): the creation of SrcMMO/DstMMO (MF.getMachineMemOperand calls,
// CallLowering.cpp:1061-1068) is partially stripped here; only their
// PtrInfo/size/align operands are visible below.
1056  const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
1057  const MachinePointerInfo &DstPtrInfo, Align DstAlign,
1058  const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
1059  CCValAssign &VA) const {
1060  MachineFunction &MF = MIRBuilder.getMF();
1062  SrcPtrInfo,
1064  SrcAlign);
1065 
1067  DstPtrInfo,
1069  MemSize, DstAlign);
1070 
// The copy size constant is built at pointer width.
1071  const LLT PtrTy = MRI.getType(DstPtr);
1072  const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());
1073 
1074  auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
1075  MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
1076 }
1077 
// CallLowering::ValueHandler::extendRegister (CallLowering.cpp:1078; the
// signature line is stripped in this rendering). Extends ValReg from its
// value type to the location type recorded in VA (any-/sign-/zero-extension
// per LocInfo). MaxSizeBits, when nonzero, caps how wide the scalar
// extension may be.
1079  CCValAssign &VA,
1080  unsigned MaxSizeBits) {
1081  LLT LocTy{VA.getLocVT()};
1082  LLT ValTy{VA.getValVT()};
1083 
// Same width: nothing to extend.
1084  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
1085  return ValReg;
1086 
// Honor the MaxSizeBits cap; if the value already meets or exceeds the cap,
// no extension is performed.
1087  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
1088  if (MaxSizeBits <= ValTy.getSizeInBits())
1089  return ValReg;
1090  LocTy = LLT::scalar(MaxSizeBits);
1091  }
1092 
1093  const LLT ValRegTy = MRI.getType(ValReg);
1094  if (ValRegTy.isPointer()) {
1095  // The x32 ABI wants to zero extend 32-bit pointers to 64-bit registers, so
1096  // we have to cast to do the extension.
1097  LLT IntPtrTy = LLT::scalar(ValRegTy.getSizeInBits());
1098  ValReg = MIRBuilder.buildPtrToInt(IntPtrTy, ValReg).getReg(0);
1099  }
1100 
1101  switch (VA.getLocInfo()) {
1102  default: break;
1103  case CCValAssign::Full:
1104  case CCValAssign::BCvt:
1105  // FIXME: bitconverting between vector types may or may not be a
1106  // nop in big-endian situations.
1107  return ValReg;
1108  case CCValAssign::AExt: {
1109  auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
1110  return MIB.getReg(0);
1111  }
1112  case CCValAssign::SExt: {
1113  Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1114  MIRBuilder.buildSExt(NewReg, ValReg);
1115  return NewReg;
1116  }
1117  case CCValAssign::ZExt: {
1118  Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1119  MIRBuilder.buildZExt(NewReg, ValReg);
1120  return NewReg;
1121  }
1122  }
// Reached only via the default LocInfo case above.
1123  llvm_unreachable("unable to extend register");
1124 }
1125 
// Out-of-line key function for ValueAssigner: anchors the class's vtable to
// this translation unit (same idiom as CallLowering::anchor above).
1126 void CallLowering::ValueAssigner::anchor() {}
1127 
// CallLowering::ValueHandler::buildExtensionHint (CallLowering.cpp:1128; the
// signature line is stripped in this rendering). Wraps SrcReg in a
// G_ASSERT_ZEXT / G_ASSERT_SEXT hint matching the ABI extension recorded in
// VA, asserting validity at NarrowTy's scalar width; for any other LocInfo
// kind SrcReg is returned unchanged.
1129  Register SrcReg,
1130  LLT NarrowTy) {
1131  switch (VA.getLocInfo()) {
1132  case CCValAssign::LocInfo::ZExt: {
1133  return MIRBuilder
1134  .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1135  NarrowTy.getScalarSizeInBits())
1136  .getReg(0);
1137  }
1138  case CCValAssign::LocInfo::SExt: {
1139  return MIRBuilder
1140  .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1141  NarrowTy.getScalarSizeInBits())
1142  .getReg(0);
// NOTE(review): this break is unreachable (it follows a return) and could be
// removed.
1143  break;
1144  }
1145  default:
1146  return SrcReg;
1147  }
1148 }
1149 
1150 /// Check if we can use a basic COPY instruction between the two types.
1151 ///
1152 /// We're currently building on top of the infrastructure using MVT, which loses
1153 /// pointer information in the CCValAssign. We accept copies from physical
1154 /// registers that have been reported as integers if it's to an equivalent sized
1155 /// pointer LLT.
1156 static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
1157  if (SrcTy == DstTy)
1158  return true;
1159 
1160  if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1161  return false;
1162 
1163  SrcTy = SrcTy.getScalarType();
1164  DstTy = DstTy.getScalarType();
1165 
1166  return (SrcTy.isPointer() && DstTy.isScalar()) ||
1167  (DstTy.isScalar() && SrcTy.isPointer());
1168 }
1169 
1171  Register PhysReg,
1172  CCValAssign VA) {
1173  const MVT LocVT = VA.getLocVT();
1174  const LLT LocTy(LocVT);
1175  const LLT RegTy = MRI.getType(ValVReg);
1176 
1177  if (isCopyCompatibleType(RegTy, LocTy)) {
1178  MIRBuilder.buildCopy(ValVReg, PhysReg);
1179  return;
1180  }
1181 
1182  auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
1183  auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
1184  MIRBuilder.buildTrunc(ValVReg, Hint);
1185 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
llvm::CCValAssign::getLocVT
MVT getLocVT() const
Definition: CallingConvLower.h:153
i
i
Definition: README.txt:29
llvm::ISD::ArgFlagsTy::isInAlloca
bool isInAlloca() const
Definition: TargetCallingConv.h:91
llvm::CallLowering::ValueAssigner
Argument handling is mostly uniform between the four places that make these decisions: function forma...
Definition: CallLowering.h:157
llvm::alignTo
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:148
llvm::CCValAssign::ZExt
@ ZExt
Definition: CallingConvLower.h:38
Attrs
Function Attrs
Definition: README_ALTIVEC.txt:215
llvm::getDefIgnoringCopies
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition: Utils.cpp:453
Merge
R600 Clause Merge
Definition: R600ClauseMergePass.cpp:69
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AllocatorList.h:23
Reg
unsigned Reg
Definition: MachineSink.cpp:1563
llvm::CCValAssign::Full
@ Full
Definition: CallingConvLower.h:36
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::MachineOperand::CreateReg
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
Definition: MachineOperand.h:791
llvm::ISD::ArgFlagsTy::setSwiftSelf
void setSwiftSelf()
Definition: TargetCallingConv.h:98
llvm::LLT::getScalarSizeInBits
unsigned getScalarSizeInBits() const
Definition: LowLevelTypeImpl.h:212
llvm::ISD::ArgFlagsTy::setNest
void setNest()
Definition: TargetCallingConv.h:119
llvm::CallLowering::handleAssignments
bool handleAssignments(ValueHandler &Handler, SmallVectorImpl< ArgInfo > &Args, CCState &CCState, SmallVectorImpl< CCValAssign > &ArgLocs, MachineIRBuilder &MIRBuilder, Register ThisReturnReg=Register()) const
Use Handler to insert code to handle the argument/return values represented by Args.
Definition: CallLowering.cpp:594
CallLowering.h
llvm::TargetLowering::functionArgumentNeedsConsecutiveRegisters
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Definition: TargetLowering.h:4136
llvm::CCState
CCState - This class holds information needed while lowering arguments and return values.
Definition: CallingConvLower.h:191
buildCopyToRegs
static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef< Register > DstRegs, Register SrcReg, LLT SrcTy, LLT PartTy, unsigned ExtendOp=TargetOpcode::G_ANYEXT)
Create a sequence of instructions to expand the value in SrcReg (of type SrcTy) to the types in DstRe...
Definition: CallLowering.cpp:445
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:52
llvm::TypeSize::getFixedSize
ScalarTy getFixedSize() const
Definition: TypeSize.h:425
llvm::Function
Definition: Function.h:62
llvm::CallLowering::IncomingValueHandler::assignValueToReg
void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign VA) override
Provides a default implementation for argument handling.
Definition: CallLowering.cpp:1170
llvm::PointerType::get
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Definition: Type.cpp:729
llvm::ISD::ArgFlagsTy::setMemAlign
void setMemAlign(Align A)
Definition: TargetCallingConv.h:148
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1177
llvm::MVT::isVector
bool isVector() const
Return true if this is a vector value type.
Definition: MachineValueType.h:366
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:435
llvm::CallLowering::ValueHandler::extendRegister
Register extendRegister(Register ValReg, CCValAssign &VA, unsigned MaxSizeBits=0)
Extend a register to the location type given in VA, capped at extending to at most MaxSize bits.
Definition: CallLowering.cpp:1078
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:734
llvm::CallLowering::ValueHandler
Definition: CallLowering.h:225
llvm::CallLowering::insertSRetOutgoingArgument
void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder, const CallBase &CB, CallLoweringInfo &Info) const
For the call-base described by CB, insert the hidden sret ArgInfo to the OrigArgs field of Info.
Definition: CallLowering.cpp:859
llvm::ISD::ArgFlagsTy::setPointer
void setPointer()
Definition: TargetCallingConv.h:142
llvm::LLT::getScalarType
LLT getScalarType() const
Definition: LowLevelTypeImpl.h:167
llvm::ISD::ArgFlagsTy::isZExt
bool isZExt() const
Definition: TargetCallingConv.h:73
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Module.h
llvm::AttributeList
Definition: Attributes.h:403
llvm::CallBase::getAttributes
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1473
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:128
llvm::CallBase::getFunctionType
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1256
llvm::MachineMemOperand::MODereferenceable
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
Definition: MachineMemOperand.h:143
extendOpFromFlags
static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags)
Definition: CallLowering.cpp:525
llvm::CallBase::isMustTailCall
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Definition: Instructions.cpp:298
llvm::CallLowering::splitToValueTypes
void splitToValueTypes(const ArgInfo &OrigArgInfo, SmallVectorImpl< ArgInfo > &SplitArgs, const DataLayout &DL, CallingConv::ID CallConv, SmallVectorImpl< uint64_t > *Offsets=nullptr) const
Break OrigArgInfo into one or more pieces the calling convention can process, returned in SplitArgs.
Definition: CallLowering.cpp:212
llvm::CCValAssign::BCvt
@ BCvt
Definition: CallingConvLower.h:46
llvm::LLT::changeElementCount
LLT changeElementCount(ElementCount EC) const
Return a vector or scalar with the same element type and the new element count.
Definition: LowLevelTypeImpl.h:189
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:80
llvm::LLT::vector
static LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelTypeImpl.h:56
llvm::RegState::Undef
@ Undef
Value of the register doesn't matter.
Definition: MachineInstrBuilder.h:52
llvm::FunctionType::getNumParams
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:139
MachineIRBuilder.h
buildCopyFromRegs
static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef< Register > OrigRegs, ArrayRef< Register > Regs, LLT LLTy, LLT PartLLT, const ISD::ArgFlagsTy Flags)
Create a sequence of instructions to combine pieces split into register typed values to the original ...
Definition: CallLowering.cpp:298
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::MachineIRBuilder::buildConstant
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Definition: MachineIRBuilder.cpp:297
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::FunctionType::isVarArg
bool isVarArg() const
Definition: DerivedTypes.h:123
llvm::ISD::ArgFlagsTy::isSwiftSelf
bool isSwiftSelf() const
Definition: TargetCallingConv.h:97
MachineRegisterInfo.h
Context
ManagedStatic< detail::RecordContext > Context
Definition: Record.cpp:96
llvm::ComputeValueVTs
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:124
llvm::getLLTForType
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
Definition: LowLevelType.cpp:21
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::LLT::fixed_vector
static LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelTypeImpl.h:74
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:185
TargetLowering.h
llvm::getGCDType
LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition: Utils.cpp:940
llvm::CallLowering::getTLI
const TargetLowering * getTLI() const
Getter for generic TargetLowering class.
Definition: CallLowering.h:336
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:651
llvm::CCValAssign::AExt
@ AExt
Definition: CallingConvLower.h:39
llvm::CallLowering::resultsCompatible
bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF, SmallVectorImpl< ArgInfo > &InArgs, ValueAssigner &CalleeAssigner, ValueAssigner &CallerAssigner) const
Definition: CallLowering.cpp:978
llvm::CCValAssign
CCValAssign - Represent assignment of one arg/retval to a location.
Definition: CallingConvLower.h:33
TargetMachine.h
llvm::MachineIRBuilder::buildZExt
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
Definition: MachineIRBuilder.cpp:466
llvm::MachineIRBuilder::buildLoad
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
Definition: MachineIRBuilder.h:860
llvm::ISD::ArgFlagsTy::setReturned
void setReturned(bool V=true)
Definition: TargetCallingConv.h:122
llvm::MachineRegisterInfo::setType
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Definition: MachineRegisterInfo.cpp:182
llvm::CallLowering::checkReturnTypeForCallConv
bool checkReturnTypeForCallConv(MachineFunction &MF) const
Toplevel function to check the return type based on the target calling convention.
Definition: CallLowering.cpp:915
llvm::ISD::ArgFlagsTy::isByVal
bool isByVal() const
Definition: TargetCallingConv.h:85
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::CCValAssign::getLocReg
Register getLocReg() const
Definition: CallingConvLower.h:150
llvm::CallLowering::ValueHandler::assignValueToReg
virtual void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign VA)=0
The specified value has been assigned to a physical register, handle the appropriate COPY (either to ...
llvm::CallLowering::ArgInfo
Definition: CallLowering.h:61
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:35
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:499
llvm::MachineIRBuilder::buildPtrToInt
MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src)
Build and insert a G_PTRTOINT instruction.
Definition: MachineIRBuilder.h:651
mergeVectorRegsToResultRegs
static MachineInstrBuilder mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef< Register > DstRegs, ArrayRef< Register > SrcRegs)
Pack values SrcRegs to cover the vector type result DstRegs.
Definition: CallLowering.cpp:252
llvm::LLT::getSizeInBits
TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelTypeImpl.h:152
llvm::CallLowering::ValueHandler::getStackValueStoreType
virtual LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const
Return the in-memory size to write for the argument at VA.
Definition: CallLowering.cpp:1032
Utils.h
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::MachineIRBuilder::getDataLayout
const DataLayout & getDataLayout() const
Definition: MachineIRBuilder.h:272
llvm::Function::getFnAttribute
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:652
llvm::CallLowering::determineAssignments
bool determineAssignments(ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, CCState &CCInfo) const
Analyze the argument list in Args, using Assigner to populate CCInfo.
Definition: CallLowering.cpp:533
llvm::CallLowering::ValueHandler::assignCustomValue
virtual unsigned assignCustomValue(ArgInfo &Arg, ArrayRef< CCValAssign > VAs, std::function< void()> *Thunk=nullptr)
Handle custom values, which may be passed into one or more of VAs.
Definition: CallLowering.h:291
llvm::MachineIRBuilder::getMF
MachineFunction & getMF()
Getter for the function we currently build.
Definition: MachineIRBuilder.h:262
llvm::CCState::getContext
LLVMContext & getContext() const
Definition: CallingConvLower.h:257
Info
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
llvm::ISD::ArgFlagsTy::setInReg
void setInReg()
Definition: TargetCallingConv.h:80
llvm::CCValAssign::getLocInfo
LocInfo getLocInfo() const
Definition: CallingConvLower.h:155
Align
uint64_t Align
Definition: ELFObjHandler.cpp:82
llvm::CCValAssign::getLocMemOffset
unsigned getLocMemOffset() const
Definition: CallingConvLower.h:151
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::CallBase::getCallingConv
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1454
llvm::Attribute::getValueAsString
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:304
llvm::EVT::getTypeForEVT
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:181
llvm::CCValAssign::isRegLoc
bool isRegLoc() const
Definition: CallingConvLower.h:145
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
llvm::MachineInstrBuilder::getReg
Register getReg(unsigned Idx) const
Get the register for the operand index.
Definition: MachineInstrBuilder.h:94
llvm::Instruction::getMetadata
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:282
llvm::ISD::ArgFlagsTy::setSExt
void setSExt()
Definition: TargetCallingConv.h:77
llvm::Function::getAttributes
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:316
llvm::CCAssignFn
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
Definition: CallingConvLower.h:177
llvm::LLT::pointer
static LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelTypeImpl.h:49
llvm::LLT::getAddressSpace
unsigned getAddressSpace() const
Definition: LowLevelTypeImpl.h:226
llvm::CallLowering::isTypeIsValidForThisReturn
virtual bool isTypeIsValidForThisReturn(EVT Ty) const
For targets which support the "returned" parameter attribute, returns true if the given type is a val...
Definition: CallLowering.h:586
llvm::CallLowering::ArgInfo::NoArgIndex
static const unsigned NoArgIndex
Sentinel value for implicit machine-level input arguments.
Definition: CallLowering.h:78
llvm::AttributeList::ReturnIndex
@ ReturnIndex
Definition: Attributes.h:406
llvm::ISD::ArgFlagsTy::setSplit
void setSplit()
Definition: TargetCallingConv.h:133
llvm::MachineOperand::clobbersPhysReg
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
Definition: MachineOperand.h:617
llvm::TargetLoweringBase::getNumRegistersForCallingConv
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
Definition: TargetLowering.h:1560
llvm::CallBase::isTailCall
bool isTailCall() const
Tests if this call site is marked as a tail call.
Definition: Instructions.cpp:305
llvm::ArrayRef::drop_front
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:202
llvm::MachineIRBuilder
Helper class to build MachineInstr.
Definition: MachineIRBuilder.h:212
llvm::CCValAssign::SExt
@ SExt
Definition: CallingConvLower.h:37
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:64
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
llvm::MachineIRBuilder::buildAssertSExt
MachineInstrBuilder buildAssertSExt(const DstOp &Res, const SrcOp &Op, unsigned Size)
Build and insert Res = G_ASSERT_SEXT Op, Size.
Definition: MachineIRBuilder.cpp:285
uint64_t
llvm::CallLowering::checkReturn
bool checkReturn(CCState &CCInfo, SmallVectorImpl< BaseArgInfo > &Outs, CCAssignFn *Fn) const
Definition: CallLowering.cpp:881
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:79
llvm::MachineIRBuilder::buildAssertZExt
MachineInstrBuilder buildAssertZExt(const DstOp &Res, const SrcOp &Op, unsigned Size)
Build and insert Res = G_ASSERT_ZEXT Op, Size.
Definition: MachineIRBuilder.cpp:291
llvm::MachinePointerInfo
This class contains a discriminated union of information about pointers in memory operands,...
Definition: MachineMemOperand.h:38
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::inferAlignFromPtrInfo
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition: Utils.cpp:686
llvm::EVT::getEVT
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:560
I
#define I(x, y, z)
Definition: MD5.cpp:58
Analysis.h
llvm::Attribute::AttrKind
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:71
llvm::getCoverTy
LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)
Return smallest type that covers both OrigTy and TargetTy and is multiple of TargetTy.
Definition: Utils.cpp:925
llvm::LLT::isVector
bool isVector() const
Definition: LowLevelTypeImpl.h:122
llvm::LLT::getNumElements
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelTypeImpl.h:126
llvm::PointerType
Class to represent pointers.
Definition: DerivedTypes.h:632
llvm::ISD::ArgFlagsTy::setOrigAlign
void setOrigAlign(Align A)
Definition: TargetCallingConv.h:164
llvm::CCState::getCallingConv
CallingConv::ID getCallingConv() const
Definition: CallingConvLower.h:259
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::LLT::isPointer
bool isPointer() const
Definition: LowLevelTypeImpl.h:120
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:657
llvm::ISD::ArgFlagsTy::setSwiftError
void setSwiftError()
Definition: TargetCallingConv.h:104
llvm::ISD::ArgFlagsTy::getPointerAddrSpace
unsigned getPointerAddrSpace() const
Definition: TargetCallingConv.h:187
isCopyCompatibleType
static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy)
Check if we can use a basic COPY instruction between the two types.
Definition: CallLowering.cpp:1156
function
print Print MemDeps of function
Definition: MemDepPrinter.cpp:83
llvm::MachineRegisterInfo::createGenericVirtualRegister
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Definition: MachineRegisterInfo.cpp:188
llvm::MVT
Machine Value Type.
Definition: MachineValueType.h:31
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:360
llvm::ISD::ArgFlagsTy::setByVal
void setByVal()
Definition: TargetCallingConv.h:86
llvm::LLT::isScalar
bool isScalar() const
Definition: LowLevelTypeImpl.h:118
llvm::ARM::WinEH::ReturnType
ReturnType
Definition: ARMWinEH.h:25
llvm::CallLowering::addArgFlagsFromAttributes
void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags, const AttributeList &Attrs, unsigned OpIdx) const
Adds flags to Flags based off of the attributes in Attrs.
Definition: CallLowering.cpp:73
llvm::MachineFunction
Definition: MachineFunction.h:241
addFlagsUsingAttrFn
static void addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags, const std::function< bool(Attribute::AttrKind)> &AttrFn)
Helper function which updates Flags when AttrFn returns true.
Definition: CallLowering.cpp:36
llvm::ISD::ArgFlagsTy::setByValSize
void setByValSize(unsigned S)
Definition: TargetCallingConv.h:173
llvm::CCValAssign::getValNo
unsigned getValNo() const
Definition: CallingConvLower.h:142
llvm::CallLowering::ValueHandler::copyArgumentMemory
void copyArgumentMemory(const ArgInfo &Arg, Register DstPtr, Register SrcPtr, const MachinePointerInfo &DstPtrInfo, Align DstAlign, const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize, CCValAssign &VA) const
Do a memory copy of MemSize bytes from SrcPtr to DstPtr.
Definition: CallLowering.cpp:1055
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::MachineOperand::CreateGA
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
Definition: MachineOperand.h:850
DataLayout.h
llvm::MachineFrameInfo::CreateStackObject
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Definition: MachineFrameInfo.cpp:51
llvm::CallLowering::ValueHandler::getStackAddress
virtual Register getStackAddress(uint64_t MemSize, int64_t Offset, MachinePointerInfo &MPO, ISD::ArgFlagsTy Flags)=0
Materialize a VReg containing the address of the specified stack-based object.
llvm::ISD::ArgFlagsTy::setInAlloca
void setInAlloca()
Definition: TargetCallingConv.h:92
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:489
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
llvm::CallLowering::insertSRetLoads
void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg, int FI) const
Load the returned value from the stack into virtual registers in VRegs.
Definition: CallLowering.cpp:776
llvm::MachineIRBuilder::buildCopy
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
Definition: MachineIRBuilder.cpp:280
llvm::ISD::ArgFlagsTy::setSplitEnd
void setSplitEnd()
Definition: TargetCallingConv.h:136
uint32_t
llvm::ISD::ArgFlagsTy::isPreallocated
bool isPreallocated() const
Definition: TargetCallingConv.h:94
llvm::ISD::ArgFlagsTy
Definition: TargetCallingConv.h:27
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::ISD::ArgFlagsTy::setPointerAddrSpace
void setPointerAddrSpace(unsigned AS)
Definition: TargetCallingConv.h:188
llvm::MVT::iPTR
@ iPTR
Definition: MachineValueType.h:312
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:127
llvm::LLT::changeElementType
LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
Definition: LowLevelTypeImpl.h:173
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:135
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::MachineIRBuilder::buildAnyExt
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
Definition: MachineIRBuilder.cpp:456
llvm::MachineIRBuilder::materializePtrAdd
Optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
Definition: MachineIRBuilder.cpp:193
llvm::Value::stripPointerCasts
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:685
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
NumFixedArgs
static unsigned NumFixedArgs
Definition: LanaiISelLowering.cpp:368
llvm::MachineIRBuilder::buildFrameIndex
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
Definition: MachineIRBuilder.cpp:137
llvm::CallLowering::insertSRetIncomingArgument
void insertSRetIncomingArgument(const Function &F, SmallVectorImpl< ArgInfo > &SplitArgs, Register &DemoteReg, MachineRegisterInfo &MRI, const DataLayout &DL) const
Insert the hidden sret ArgInfo to the beginning of SplitArgs.
Definition: CallLowering.cpp:837
llvm::CallLowering::getAttributesForArgIdx
ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call, unsigned ArgIdx) const
Definition: CallLowering.cpp:64
j
return j(j<< 16)
llvm::CallLowering::canLowerReturn
virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv, SmallVectorImpl< BaseArgInfo > &Outs, bool IsVarArg) const
This hook must be implemented to check whether the return values described by Outs can fit into the r...
Definition: CallLowering.h:486
llvm::CCValAssign::isMemLoc
bool isMemLoc() const
Definition: CallingConvLower.h:146
llvm::CallLowering::insertSRetStores
void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg) const
Store the return value given by VRegs into stack starting at the offset specified in DemoteReg.
Definition: CallLowering.cpp:806
llvm::commonAlignment
Align commonAlignment(Align A, Align B)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:211
llvm::ISD::ArgFlagsTy::getByValSize
unsigned getByValSize() const
Definition: TargetCallingConv.h:169
llvm::MachineIRBuilder::buildTrunc
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
Definition: MachineIRBuilder.cpp:777
llvm::MachineIRBuilder::buildMemCpy
MachineInstrBuilder buildMemCpy(const SrcOp &DstPtr, const SrcOp &SrcPtr, const SrcOp &Size, MachineMemOperand &DstMMO, MachineMemOperand &SrcMMO)
Definition: MachineIRBuilder.h:1868
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:607
CallingConvLower.h
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:637
llvm::ArrayRef::begin
iterator begin() const
Definition: ArrayRef.h:151
llvm::ArrayRef::take_front
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition: ArrayRef.h:226
llvm::LLT::getSizeInBytes
TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelTypeImpl.h:162
llvm::CallLowering::determineAndHandleAssignments
bool determineAndHandleAssignments(ValueHandler &Handler, ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg=Register()) const
Invoke ValueAssigner::assignArg on each of the given Args and then use Handler to move them to the as...
Definition: CallLowering.cpp:509
llvm::ISD::ArgFlagsTy::isSExt
bool isSExt() const
Definition: TargetCallingConv.h:76
llvm::CallLowering::ArgInfo::Regs
SmallVector< Register, 4 > Regs
Definition: CallLowering.h:62
llvm::ISD::ArgFlagsTy::isPointer
bool isPointer() const
Definition: TargetCallingConv.h:141
llvm::CallLowering::BaseArgInfo::Ty
Type * Ty
Definition: CallLowering.h:49
llvm::CallLowering::CallLoweringInfo
Definition: CallLowering.h:101
llvm::Type::getPointerTo
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Definition: Type.cpp:776
llvm::CallLowering::IncomingValueHandler::buildExtensionHint
Register buildExtensionHint(CCValAssign &VA, Register SrcReg, LLT NarrowTy)
Insert G_ASSERT_ZEXT/G_ASSERT_SEXT or other hint instruction based on VA, returning the new register ...
Definition: CallLowering.cpp:1128
llvm::CallBase::getCalledOperand
Value * getCalledOperand() const
Definition: InstrTypes.h:1391
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:137
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:474
Instructions.h
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1008
llvm::MachineRegisterInfo::getType
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
Definition: MachineRegisterInfo.h:732
llvm::MachineRegisterInfo::cloneVirtualRegister
Register cloneVirtualRegister(Register VReg, StringRef Name="")
Create and return a new virtual register in the function with the same attributes as the given regist...
Definition: MachineRegisterInfo.cpp:172
llvm::ISD::ArgFlagsTy::getNonZeroByValAlign
Align getNonZeroByValAlign() const
Definition: TargetCallingConv.h:153
llvm::MachineIRBuilder::buildSExt
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
Definition: MachineIRBuilder.cpp:461
llvm::CCValAssign::getValVT
MVT getValVT() const
Definition: CallingConvLower.h:143
llvm::LLT::getElementCount
ElementCount getElementCount() const
Definition: LowLevelTypeImpl.h:143
llvm::ISD::ArgFlagsTy::setSRet
void setSRet()
Definition: TargetCallingConv.h:83
llvm::Register::isValid
bool isValid() const
Definition: Register.h:126
llvm::TargetLoweringBase::getRegisterTypeForCallingConv
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
Definition: TargetLowering.h:1552
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:163
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::MVT::getVT
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:528
llvm::isInTailCallPosition
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:525
llvm::MachineFunction::getDataLayout
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Definition: MachineFunction.cpp:266
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::CallLowering::getReturnInfo
void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs, SmallVectorImpl< BaseArgInfo > &Outs, const DataLayout &DL) const
Get the type and the ArgFlags for the split components of RetTy as returned by ComputeValueVTs.
Definition: CallLowering.cpp:892
MachineOperand.h
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1176
llvm::CallLowering::BaseArgInfo::IsFixed
bool IsFixed
Definition: CallLowering.h:51
llvm::ISD::ArgFlagsTy::setPreallocated
void setPreallocated()
Definition: TargetCallingConv.h:95
llvm::CallLowering::ArgInfo::OrigValue
const Value * OrigValue
Optionally track the original IR value for the argument.
Definition: CallLowering.h:72
LLVMContext.h
llvm::CallLowering::ValueAssigner::assignArg
virtual bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, const ArgInfo &Info, ISD::ArgFlagsTy Flags, CCState &State)
Wrap call to (typically tablegenerated CCAssignFn).
Definition: CallLowering.h:181
llvm::AMDGPU::HSAMD::Kernel::Key::Args
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Definition: AMDGPUMetadata.h:389
llvm::MachineIRBuilder::buildStore
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
Definition: MachineIRBuilder.cpp:429
llvm::LLT::getElementType
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelTypeImpl.h:236
llvm::SI::KernelInputOffsets::Offsets
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1260
llvm::CallLowering::ArgInfo::OrigArgIndex
unsigned OrigArgIndex
Index original Function's argument.
Definition: CallLowering.h:75
llvm::CallLowering::parametersInCSRMatch
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< ArgInfo > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
Definition: CallLowering.cpp:926
copy
we should consider alternate ways to model stack dependencies Lots of things could be done in WebAssemblyTargetTransformInfo cpp there are numerous optimization related hooks that can be overridden in WebAssemblyTargetLowering Instead of the OptimizeReturned which should consider preserving the returned attribute through to MachineInstrs and extending the MemIntrinsicResults pass to do this optimization on calls too That would also let the WebAssemblyPeephole pass clean up dead defs for such as it does for stores Consider implementing and or getMachineCombinerPatterns Find a clean way to fix the problem which leads to the Shrink Wrapping pass being run after the WebAssembly PEI pass When setting multiple variables to the same we currently get code like const It could be done with a smaller encoding like local tee $pop5 local copy
Definition: README.txt:101
llvm::LLT::scalar
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelTypeImpl.h:42
llvm::ISD::ArgFlagsTy::setSwiftAsync
void setSwiftAsync()
Definition: TargetCallingConv.h:101
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::AttributeList::FirstArgIndex
@ FirstArgIndex
Definition: Attributes.h:408
llvm::CallLowering::ValueHandler::isIncomingArgumentHandler
bool isIncomingArgumentHandler() const
Returns true if the handler is dealing with incoming arguments, i.e.
Definition: CallLowering.h:239
llvm::ArrayRef::end
iterator end() const
Definition: ArrayRef.h:152
llvm::CCValAssign::needsCustom
bool needsCustom() const
Definition: CallingConvLower.h:148
llvm::CallBase::args
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1334
llvm::ISD::ArgFlagsTy::setZExt
void setZExt()
Definition: TargetCallingConv.h:74
getReg
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
Definition: MipsDisassembler.cpp:572
llvm::CallLowering::ValueHandler::assignValueToAddress
virtual void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, MachinePointerInfo &MPO, CCValAssign &VA)=0
The specified value has been assigned to a stack location.
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:917
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
llvm::SmallVectorImpl::insert
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:782
llvm::CallLowering::BaseArgInfo::Flags
SmallVector< ISD::ArgFlagsTy, 4 > Flags
Definition: CallLowering.h:50
llvm::LLT
Definition: LowLevelTypeImpl.h:39
llvm::CallLowering::lowerCall
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const
This hook must be implemented to lower the given call instruction, including argument and return valu...
Definition: CallLowering.h:546
llvm::CallLowering::setArgFlags
void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL, const FuncInfoTy &FuncInfo) const
Definition: CallLowering.cpp:154