LLVM  16.0.0git
CallLowering.cpp
Go to the documentation of this file.
1 //===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements some simple delegations needed for call lowering.
11 ///
12 //===----------------------------------------------------------------------===//
13 
15 #include "llvm/CodeGen/Analysis.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/LLVMContext.h"
25 #include "llvm/IR/Module.h"
27 
28 #define DEBUG_TYPE "call-lowering"
29 
30 using namespace llvm;
31 
// Out-of-line virtual method "anchor": ensures the CallLowering vtable is
// emitted in exactly one translation unit (this one), per LLVM convention.
void CallLowering::anchor() {}
33 
34 /// Helper function which updates \p Flags when \p AttrFn returns true.
35 static void
37  const std::function<bool(Attribute::AttrKind)> &AttrFn) {
38  if (AttrFn(Attribute::SExt))
39  Flags.setSExt();
40  if (AttrFn(Attribute::ZExt))
41  Flags.setZExt();
42  if (AttrFn(Attribute::InReg))
43  Flags.setInReg();
44  if (AttrFn(Attribute::StructRet))
45  Flags.setSRet();
46  if (AttrFn(Attribute::Nest))
47  Flags.setNest();
48  if (AttrFn(Attribute::ByVal))
49  Flags.setByVal();
50  if (AttrFn(Attribute::Preallocated))
51  Flags.setPreallocated();
52  if (AttrFn(Attribute::InAlloca))
53  Flags.setInAlloca();
54  if (AttrFn(Attribute::Returned))
55  Flags.setReturned();
56  if (AttrFn(Attribute::SwiftSelf))
57  Flags.setSwiftSelf();
58  if (AttrFn(Attribute::SwiftAsync))
59  Flags.setSwiftAsync();
60  if (AttrFn(Attribute::SwiftError))
61  Flags.setSwiftError();
62 }
63 
65  unsigned ArgIdx) const {
66  ISD::ArgFlagsTy Flags;
67  addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
68  return Call.paramHasAttr(ArgIdx, Attr);
69  });
70  return Flags;
71 }
72 
75  ISD::ArgFlagsTy Flags;
76  addFlagsUsingAttrFn(Flags, [&Call](Attribute::AttrKind Attr) {
77  return Call.hasRetAttr(Attr);
78  });
79  return Flags;
80 }
81 
83  const AttributeList &Attrs,
84  unsigned OpIdx) const {
85  addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
86  return Attrs.hasAttributeAtIndex(OpIdx, Attr);
87  });
88 }
89 
91  ArrayRef<Register> ResRegs,
93  Register SwiftErrorVReg,
94  std::function<unsigned()> GetCalleeReg) const {
96  const DataLayout &DL = MIRBuilder.getDataLayout();
97  MachineFunction &MF = MIRBuilder.getMF();
99  bool CanBeTailCalled = CB.isTailCall() &&
100  isInTailCallPosition(CB, MF.getTarget()) &&
101  (MF.getFunction()
102  .getFnAttribute("disable-tail-calls")
103  .getValueAsString() != "true");
104 
105  CallingConv::ID CallConv = CB.getCallingConv();
106  Type *RetTy = CB.getType();
107  bool IsVarArg = CB.getFunctionType()->isVarArg();
108 
109  SmallVector<BaseArgInfo, 4> SplitArgs;
110  getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
111  Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);
112 
113  if (!Info.CanLowerReturn) {
114  // Callee requires sret demotion.
115  insertSRetOutgoingArgument(MIRBuilder, CB, Info);
116 
117  // The sret demotion isn't compatible with tail-calls, since the sret
118  // argument points into the caller's stack frame.
119  CanBeTailCalled = false;
120  }
121 
122 
123  // First step is to marshall all the function's parameters into the correct
124  // physregs and memory locations. Gather the sequence of argument types that
125  // we'll pass to the assigner function.
126  unsigned i = 0;
127  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
128  for (const auto &Arg : CB.args()) {
129  ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i),
130  i < NumFixedArgs};
131  setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
132 
133  // If we have an explicit sret argument that is an Instruction, (i.e., it
134  // might point to function-local memory), we can't meaningfully tail-call.
135  if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
136  CanBeTailCalled = false;
137 
138  Info.OrigArgs.push_back(OrigArg);
139  ++i;
140  }
141 
142  // Try looking through a bitcast from one function type to another.
143  // Commonly happens with calls to objc_msgSend().
144  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
145  if (const Function *F = dyn_cast<Function>(CalleeV))
146  Info.Callee = MachineOperand::CreateGA(F, 0);
147  else
148  Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
149 
150  Register ReturnHintAlignReg;
151  Align ReturnHintAlign;
152 
153  Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, getAttributesForReturn(CB)};
154 
155  if (!Info.OrigRet.Ty->isVoidTy()) {
157 
158  if (MaybeAlign Alignment = CB.getRetAlign()) {
159  if (*Alignment > Align(1)) {
160  ReturnHintAlignReg = MRI.cloneVirtualRegister(ResRegs[0]);
161  Info.OrigRet.Regs[0] = ReturnHintAlignReg;
162  ReturnHintAlign = *Alignment;
163  }
164  }
165  }
166 
167  auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi);
168  if (Bundle && CB.isIndirectCall()) {
169  Info.CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
170  assert(Info.CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
171  }
172 
173  Info.CB = &CB;
174  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
175  Info.CallConv = CallConv;
176  Info.SwiftErrorVReg = SwiftErrorVReg;
177  Info.IsMustTailCall = CB.isMustTailCall();
178  Info.IsTailCall = CanBeTailCalled;
179  Info.IsVarArg = IsVarArg;
180  if (!lowerCall(MIRBuilder, Info))
181  return false;
182 
183  if (ReturnHintAlignReg && !Info.IsTailCall) {
184  MIRBuilder.buildAssertAlign(ResRegs[0], ReturnHintAlignReg,
185  ReturnHintAlign);
186  }
187 
188  return true;
189 }
190 
191 template <typename FuncInfoTy>
193  const DataLayout &DL,
194  const FuncInfoTy &FuncInfo) const {
195  auto &Flags = Arg.Flags[0];
196  const AttributeList &Attrs = FuncInfo.getAttributes();
197  addArgFlagsFromAttributes(Flags, Attrs, OpIdx);
198 
199  PointerType *PtrTy = dyn_cast<PointerType>(Arg.Ty->getScalarType());
200  if (PtrTy) {
201  Flags.setPointer();
202  Flags.setPointerAddrSpace(PtrTy->getPointerAddressSpace());
203  }
204 
205  Align MemAlign = DL.getABITypeAlign(Arg.Ty);
206  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
208  unsigned ParamIdx = OpIdx - AttributeList::FirstArgIndex;
209 
210  Type *ElementTy = FuncInfo.getParamByValType(ParamIdx);
211  if (!ElementTy)
212  ElementTy = FuncInfo.getParamInAllocaType(ParamIdx);
213  if (!ElementTy)
214  ElementTy = FuncInfo.getParamPreallocatedType(ParamIdx);
215  assert(ElementTy && "Must have byval, inalloca or preallocated type");
216  Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
217 
218  // For ByVal, alignment should be passed from FE. BE will guess if
219  // this info is not there but there are cases it cannot get right.
220  if (auto ParamAlign = FuncInfo.getParamStackAlign(ParamIdx))
221  MemAlign = *ParamAlign;
222  else if ((ParamAlign = FuncInfo.getParamAlign(ParamIdx)))
223  MemAlign = *ParamAlign;
224  else
225  MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
226  } else if (OpIdx >= AttributeList::FirstArgIndex) {
227  if (auto ParamAlign =
228  FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
229  MemAlign = *ParamAlign;
230  }
231  Flags.setMemAlign(MemAlign);
232  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
233 
234  // Don't try to use the returned attribute if the argument is marked as
235  // swiftself, since it won't be passed in x0.
236  if (Flags.isSwiftSelf())
237  Flags.setReturned(false);
238 }
239 
// Explicit instantiations for the two attribute sources setArgFlags is used
// with: a Function (formal parameters) and a CallBase (call-site arguments).
template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;
249 
251  SmallVectorImpl<ArgInfo> &SplitArgs,
252  const DataLayout &DL,
253  CallingConv::ID CallConv,
255  LLVMContext &Ctx = OrigArg.Ty->getContext();
256 
257  SmallVector<EVT, 4> SplitVTs;
258  ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, Offsets, 0);
259 
260  if (SplitVTs.size() == 0)
261  return;
262 
263  if (SplitVTs.size() == 1) {
264  // No splitting to do, but we want to replace the original type (e.g. [1 x
265  // double] -> double).
266  SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
267  OrigArg.OrigArgIndex, OrigArg.Flags[0],
268  OrigArg.IsFixed, OrigArg.OrigValue);
269  return;
270  }
271 
272  // Create one ArgInfo for each virtual register in the original ArgInfo.
273  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
274 
275  bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
276  OrigArg.Ty, CallConv, false, DL);
277  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
278  Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
279  SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.OrigArgIndex,
280  OrigArg.Flags[0], OrigArg.IsFixed);
281  if (NeedsRegBlock)
282  SplitArgs.back().Flags[0].setInConsecutiveRegs();
283  }
284 
285  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
286 }
287 
288 /// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
289 static MachineInstrBuilder
291  ArrayRef<Register> SrcRegs) {
292  MachineRegisterInfo &MRI = *B.getMRI();
293  LLT LLTy = MRI.getType(DstRegs[0]);
294  LLT PartLLT = MRI.getType(SrcRegs[0]);
295 
296  // Deal with v3s16 split into v2s16
297  LLT LCMTy = getCoverTy(LLTy, PartLLT);
298  if (LCMTy == LLTy) {
299  // Common case where no padding is needed.
300  assert(DstRegs.size() == 1);
301  return B.buildConcatVectors(DstRegs[0], SrcRegs);
302  }
303 
304  // We need to create an unmerge to the result registers, which may require
305  // widening the original value.
306  Register UnmergeSrcReg;
307  if (LCMTy != PartLLT) {
308  assert(DstRegs.size() == 1);
309  return B.buildDeleteTrailingVectorElements(DstRegs[0],
310  B.buildMerge(LCMTy, SrcRegs));
311  } else {
312  // We don't need to widen anything if we're extracting a scalar which was
313  // promoted to a vector e.g. s8 -> v4s8 -> s8
314  assert(SrcRegs.size() == 1);
315  UnmergeSrcReg = SrcRegs[0];
316  }
317 
318  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();
319 
320  SmallVector<Register, 8> PadDstRegs(NumDst);
321  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());
322 
323  // Create the excess dead defs for the unmerge.
324  for (int I = DstRegs.size(); I != NumDst; ++I)
325  PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);
326 
327  if (PadDstRegs.size() == 1)
328  return B.buildDeleteTrailingVectorElements(DstRegs[0], UnmergeSrcReg);
329  return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
330 }
331 
332 /// Create a sequence of instructions to combine pieces split into register
333 /// typed values to the original IR value. \p OrigRegs contains the destination
334 /// value registers of type \p LLTy, and \p Regs contains the legalized pieces
335 /// with type \p PartLLT. This is used for incoming values (physregs to vregs).
337  ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
338  const ISD::ArgFlagsTy Flags) {
339  MachineRegisterInfo &MRI = *B.getMRI();
340 
341  if (PartLLT == LLTy) {
342  // We should have avoided introducing a new virtual register, and just
343  // directly assigned here.
344  assert(OrigRegs[0] == Regs[0]);
345  return;
346  }
347 
348  if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
349  Regs.size() == 1) {
350  B.buildBitcast(OrigRegs[0], Regs[0]);
351  return;
352  }
353 
354  // A vector PartLLT needs extending to LLTy's element size.
355  // E.g. <2 x s64> = G_SEXT <2 x s32>.
356  if (PartLLT.isVector() == LLTy.isVector() &&
357  PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
358  (!PartLLT.isVector() ||
359  PartLLT.getNumElements() == LLTy.getNumElements()) &&
360  OrigRegs.size() == 1 && Regs.size() == 1) {
361  Register SrcReg = Regs[0];
362 
363  LLT LocTy = MRI.getType(SrcReg);
364 
365  if (Flags.isSExt()) {
366  SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
367  .getReg(0);
368  } else if (Flags.isZExt()) {
369  SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
370  .getReg(0);
371  }
372 
373  // Sometimes pointers are passed zero extended.
374  LLT OrigTy = MRI.getType(OrigRegs[0]);
375  if (OrigTy.isPointer()) {
376  LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits());
377  B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg));
378  return;
379  }
380 
381  B.buildTrunc(OrigRegs[0], SrcReg);
382  return;
383  }
384 
385  if (!LLTy.isVector() && !PartLLT.isVector()) {
386  assert(OrigRegs.size() == 1);
387  LLT OrigTy = MRI.getType(OrigRegs[0]);
388 
389  unsigned SrcSize = PartLLT.getSizeInBits().getFixedSize() * Regs.size();
390  if (SrcSize == OrigTy.getSizeInBits())
391  B.buildMerge(OrigRegs[0], Regs);
392  else {
393  auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
394  B.buildTrunc(OrigRegs[0], Widened);
395  }
396 
397  return;
398  }
399 
400  if (PartLLT.isVector()) {
401  assert(OrigRegs.size() == 1);
402  SmallVector<Register> CastRegs(Regs.begin(), Regs.end());
403 
404  // If PartLLT is a mismatched vector in both number of elements and element
405  // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
406  // have the same elt type, i.e. v4s32.
407  if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() &&
408  PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
409  Regs.size() == 1) {
410  LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
411  .changeElementCount(PartLLT.getElementCount() * 2);
412  CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
413  PartLLT = NewTy;
414  }
415 
416  if (LLTy.getScalarType() == PartLLT.getElementType()) {
417  mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
418  } else {
419  unsigned I = 0;
420  LLT GCDTy = getGCDType(LLTy, PartLLT);
421 
422  // We are both splitting a vector, and bitcasting its element types. Cast
423  // the source pieces into the appropriate number of pieces with the result
424  // element type.
425  for (Register SrcReg : CastRegs)
426  CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
427  mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
428  }
429 
430  return;
431  }
432 
433  assert(LLTy.isVector() && !PartLLT.isVector());
434 
435  LLT DstEltTy = LLTy.getElementType();
436 
437  // Pointer information was discarded. We'll need to coerce some register types
438  // to avoid violating type constraints.
439  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();
440 
441  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());
442 
443  if (DstEltTy == PartLLT) {
444  // Vector was trivially scalarized.
445 
446  if (RealDstEltTy.isPointer()) {
447  for (Register Reg : Regs)
448  MRI.setType(Reg, RealDstEltTy);
449  }
450 
451  B.buildBuildVector(OrigRegs[0], Regs);
452  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
453  // Deal with vector with 64-bit elements decomposed to 32-bit
454  // registers. Need to create intermediate 64-bit elements.
455  SmallVector<Register, 8> EltMerges;
456  int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();
457 
458  assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);
459 
460  for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
461  auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
462  // Fix the type in case this is really a vector of pointers.
463  MRI.setType(Merge.getReg(0), RealDstEltTy);
464  EltMerges.push_back(Merge.getReg(0));
465  Regs = Regs.drop_front(PartsPerElt);
466  }
467 
468  B.buildBuildVector(OrigRegs[0], EltMerges);
469  } else {
470  // Vector was split, and elements promoted to a wider type.
471  // FIXME: Should handle floating point promotions.
472  LLT BVType = LLT::fixed_vector(LLTy.getNumElements(), PartLLT);
473  auto BV = B.buildBuildVector(BVType, Regs);
474  B.buildTrunc(OrigRegs[0], BV);
475  }
476 }
477 
478 /// Create a sequence of instructions to expand the value in \p SrcReg (of type
479 /// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
480 /// contain the type of scalar value extension if necessary.
481 ///
482 /// This is used for outgoing values (vregs to physregs)
484  Register SrcReg, LLT SrcTy, LLT PartTy,
485  unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
486  // We could just insert a regular copy, but this is unreachable at the moment.
487  assert(SrcTy != PartTy && "identical part types shouldn't reach here");
488 
489  const unsigned PartSize = PartTy.getSizeInBits();
490 
491  if (PartTy.isVector() == SrcTy.isVector() &&
492  PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
493  assert(DstRegs.size() == 1);
494  B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
495  return;
496  }
497 
498  if (SrcTy.isVector() && !PartTy.isVector() &&
499  PartSize > SrcTy.getElementType().getSizeInBits()) {
500  // Vector was scalarized, and the elements extended.
501  auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
502  for (int i = 0, e = DstRegs.size(); i != e; ++i)
503  B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
504  return;
505  }
506 
507  if (SrcTy.isVector() && PartTy.isVector() &&
508  PartTy.getScalarSizeInBits() == SrcTy.getScalarSizeInBits() &&
509  SrcTy.getNumElements() < PartTy.getNumElements()) {
510  // A coercion like: v2f32 -> v4f32.
511  Register DstReg = DstRegs.front();
512  B.buildPadVectorWithUndefElements(DstReg, SrcReg);
513  return;
514  }
515 
516  LLT GCDTy = getGCDType(SrcTy, PartTy);
517  if (GCDTy == PartTy) {
518  // If this already evenly divisible, we can create a simple unmerge.
519  B.buildUnmerge(DstRegs, SrcReg);
520  return;
521  }
522 
523  MachineRegisterInfo &MRI = *B.getMRI();
524  LLT DstTy = MRI.getType(DstRegs[0]);
525  LLT LCMTy = getCoverTy(SrcTy, PartTy);
526 
527  if (PartTy.isVector() && LCMTy == PartTy) {
528  assert(DstRegs.size() == 1);
529  B.buildPadVectorWithUndefElements(DstRegs[0], SrcReg);
530  return;
531  }
532 
533  const unsigned DstSize = DstTy.getSizeInBits();
534  const unsigned SrcSize = SrcTy.getSizeInBits();
535  unsigned CoveringSize = LCMTy.getSizeInBits();
536 
537  Register UnmergeSrc = SrcReg;
538 
539  if (!LCMTy.isVector() && CoveringSize != SrcSize) {
540  // For scalars, it's common to be able to use a simple extension.
541  if (SrcTy.isScalar() && DstTy.isScalar()) {
542  CoveringSize = alignTo(SrcSize, DstSize);
543  LLT CoverTy = LLT::scalar(CoveringSize);
544  UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0);
545  } else {
546  // Widen to the common type.
547  // FIXME: This should respect the extend type
548  Register Undef = B.buildUndef(SrcTy).getReg(0);
549  SmallVector<Register, 8> MergeParts(1, SrcReg);
550  for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
551  MergeParts.push_back(Undef);
552  UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0);
553  }
554  }
555 
556  if (LCMTy.isVector() && CoveringSize != SrcSize)
557  UnmergeSrc = B.buildPadVectorWithUndefElements(LCMTy, SrcReg).getReg(0);
558 
559  B.buildUnmerge(DstRegs, UnmergeSrc);
560 }
561 
563  ValueHandler &Handler, ValueAssigner &Assigner,
565  CallingConv::ID CallConv, bool IsVarArg,
566  ArrayRef<Register> ThisReturnRegs) const {
567  MachineFunction &MF = MIRBuilder.getMF();
568  const Function &F = MF.getFunction();
570 
571  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
572  if (!determineAssignments(Assigner, Args, CCInfo))
573  return false;
574 
575  return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
576  ThisReturnRegs);
577 }
578 
579 static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
580  if (Flags.isSExt())
581  return TargetOpcode::G_SEXT;
582  if (Flags.isZExt())
583  return TargetOpcode::G_ZEXT;
584  return TargetOpcode::G_ANYEXT;
585 }
586 
589  CCState &CCInfo) const {
590  LLVMContext &Ctx = CCInfo.getContext();
591  const CallingConv::ID CallConv = CCInfo.getCallingConv();
592 
593  unsigned NumArgs = Args.size();
594  for (unsigned i = 0; i != NumArgs; ++i) {
595  EVT CurVT = EVT::getEVT(Args[i].Ty);
596 
597  MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);
598 
599  // If we need to split the type over multiple regs, check it's a scenario
600  // we currently support.
601  unsigned NumParts =
602  TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);
603 
604  if (NumParts == 1) {
605  // Try to use the register type if we couldn't assign the VT.
606  if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
607  Args[i].Flags[0], CCInfo))
608  return false;
609  continue;
610  }
611 
612  // For incoming arguments (physregs to vregs), we could have values in
613  // physregs (or memlocs) which we want to extract and copy to vregs.
614  // During this, we might have to deal with the LLT being split across
615  // multiple regs, so we have to record this information for later.
616  //
617  // If we have outgoing args, then we have the opposite case. We have a
618  // vreg with an LLT which we want to assign to a physical location, and
619  // we might have to record that the value has to be split later.
620 
621  // We're handling an incoming arg which is split over multiple regs.
622  // E.g. passing an s128 on AArch64.
623  ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
624  Args[i].Flags.clear();
625 
626  for (unsigned Part = 0; Part < NumParts; ++Part) {
627  ISD::ArgFlagsTy Flags = OrigFlags;
628  if (Part == 0) {
629  Flags.setSplit();
630  } else {
631  Flags.setOrigAlign(Align(1));
632  if (Part == NumParts - 1)
633  Flags.setSplitEnd();
634  }
635 
636  Args[i].Flags.push_back(Flags);
637  if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
638  Args[i].Flags[Part], CCInfo)) {
639  // Still couldn't assign this smaller part type for some reason.
640  return false;
641  }
642  }
643  }
644 
645  return true;
646 }
647 
// Walks the already-computed CCValAssign locations (ArgLocs) and materializes
// each argument/return piece: register copies (possibly delayed), stack
// stores/loads, byval memcpy-style copies, and custom target assignments.
//
// NOTE(review): the doc extraction dropped several lines of this definition —
// the signature's first two lines ("bool CallLowering::handleAssignments(
// ValueHandler &Handler, SmallVectorImpl<ArgInfo> &Args,") and the
// "SmallVectorImpl<CCValAssign> &ArgLocs," parameter line, the declaration of
// MRI ("MachineRegisterInfo &MRI = MF.getRegInfo();", used below), and one
// condition line of the ThisReturnRegs check near the end. Restore them from
// the upstream file before compiling; the code below is otherwise unchanged.
                                     CCState &CCInfo,
                                     MachineIRBuilder &MIRBuilder,
                                     ArrayRef<Register> ThisReturnRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  // NOTE(review): "MachineRegisterInfo &MRI = MF.getRegInfo();" was dropped
  // here by the extraction (MRI is referenced below).
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  const unsigned NumArgs = Args.size();

  // Stores thunks for outgoing register assignments. This is used so we delay
  // generating register copies until mem loc assignments are done. We do this
  // so that if the target is using the delayed stack protector feature, we can
  // find the split point of the block accurately. E.g. if we have:
  // G_STORE %val, %memloc
  // $x0 = COPY %foo
  // $x1 = COPY %bar
  // CALL func
  // ... then the split point for the block will correctly be at, and including,
  // the copy to $x0. If instead the G_STORE instruction immediately precedes
  // the CALL, then we'd prematurely choose the CALL as the split point, thus
  // generating a split block with a CALL that uses undefined physregs.
  SmallVector<std::function<void()>> DelayedOutgoingRegAssignments;

  for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");
    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    // Targets may claim an argument entirely; the handler tells us how many
    // locations it consumed and optionally hands back a delayed reg-copy thunk.
    if (VA.needsCustom()) {
      std::function<void()> Thunk;
      unsigned NumArgRegs = Handler.assignCustomValue(
          Args[i], makeArrayRef(ArgLocs).slice(j), &Thunk);
      if (Thunk)
        DelayedOutgoingRegAssignments.emplace_back(Thunk);
      if (!NumArgRegs)
        return false;
      j += NumArgRegs;
      continue;
    }

    const MVT ValVT = VA.getValVT();
    const MVT LocVT = VA.getLocVT();

    const LLT LocTy(LocVT);
    const LLT ValTy(ValVT);
    // Incoming values are produced in the location type; outgoing values are
    // consumed from the value type.
    const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
    const EVT OrigVT = EVT::getEVT(Args[i].Ty);
    const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);

    // Expected to be multiple regs for a single incoming arg.
    // There should be Regs.size() ArgLocs per argument.
    // This should be the same as getNumRegistersForCallingConv
    const unsigned NumParts = Args[i].Flags.size();

    // Now split the registers into the assigned types.
    Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());

    if (NumParts != 1 || NewLLT != OrigTy) {
      // If we can't directly assign the register, we need one or more
      // intermediate values.
      Args[i].Regs.resize(NumParts);

      // For each split register, create and assign a vreg that will store
      // the incoming component of the larger value. These will later be
      // merged to form the final vreg.
      for (unsigned Part = 0; Part < NumParts; ++Part)
        Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
    }

    assert((j + (NumParts - 1)) < ArgLocs.size() &&
           "Too many regs for number of args");

    // Coerce into outgoing value types before register assignment.
    if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy) {
      assert(Args[i].OrigRegs.size() == 1);
      buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
                      ValTy, extendOpFromFlags(Args[i].Flags[0]));
    }

    bool BigEndianPartOrdering = TLI->hasBigEndianPartOrdering(OrigVT, DL);
    for (unsigned Part = 0; Part < NumParts; ++Part) {
      Register ArgReg = Args[i].Regs[Part];
      // There should be Regs.size() ArgLocs per argument.
      unsigned Idx = BigEndianPartOrdering ? NumParts - 1 - Part : Part;
      CCValAssign &VA = ArgLocs[j + Idx];
      const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];

      if (VA.isMemLoc() && !Flags.isByVal()) {
        // Individual pieces may have been spilled to the stack and others
        // passed in registers.

        // TODO: The memory size may be larger than the value we need to
        // store. We may need to adjust the offset for big endian targets.
        LLT MemTy = Handler.getStackValueStoreType(DL, VA, Flags);

        MachinePointerInfo MPO;
        Register StackAddr = Handler.getStackAddress(
            MemTy.getSizeInBytes(), VA.getLocMemOffset(), MPO, Flags);

        Handler.assignValueToAddress(Args[i], Part, StackAddr, MemTy, MPO, VA);
        continue;
      }

      if (VA.isMemLoc() && Flags.isByVal()) {
        assert(Args[i].Regs.size() == 1 &&
               "didn't expect split byval pointer");

        if (Handler.isIncomingArgumentHandler()) {
          // We just need to copy the frame index value to the pointer.
          MachinePointerInfo MPO;
          Register StackAddr = Handler.getStackAddress(
              Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
          MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
        } else {
          // For outgoing byval arguments, insert the implicit copy byval
          // implies, such that writes in the callee do not modify the caller's
          // value.
          uint64_t MemSize = Flags.getByValSize();
          int64_t Offset = VA.getLocMemOffset();

          MachinePointerInfo DstMPO;
          Register StackAddr =
              Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);

          MachinePointerInfo SrcMPO(Args[i].OrigValue);
          if (!Args[i].OrigValue) {
            // We still need to accurately track the stack address space if we
            // don't know the underlying value.
            const LLT PtrTy = MRI.getType(StackAddr);
            SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
          }

          Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, DstMPO));

          Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, SrcMPO));

          Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
                                     DstMPO, DstAlign, SrcMPO, SrcAlign,
                                     MemSize, VA);
        }
        continue;
      }

      assert(!VA.needsCustom() && "custom loc should have been handled already");

      // "this return" forwarding: route the first incoming value through the
      // registers the caller wants the return in.
      if (i == 0 && !ThisReturnRegs.empty() &&
          Handler.isIncomingArgumentHandler() &&
          // NOTE(review): one more condition line (and the opening brace of
          // this if) was dropped by the extraction here — restore from
          // upstream.
          Handler.assignValueToReg(ArgReg, ThisReturnRegs[Part], VA);
        continue;
      }

      if (Handler.isIncomingArgumentHandler())
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      else {
        // Outgoing register copies are deferred — see the comment on
        // DelayedOutgoingRegAssignments above.
        DelayedOutgoingRegAssignments.emplace_back([=, &Handler]() {
          Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
        });
      }
    }

    // Now that all pieces have been assigned, re-pack the register typed values
    // into the original value typed registers.
    if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT) {
      // Merge the split registers into the expected larger result vregs of
      // the original call.
      buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
                        LocTy, Args[i].Flags[0]);
    }

    j += NumParts - 1;
  }
  for (auto &Fn : DelayedOutgoingRegAssignments)
    Fn();

  return true;
}
831 
833  ArrayRef<Register> VRegs, Register DemoteReg,
834  int FI) const {
835  MachineFunction &MF = MIRBuilder.getMF();
837  const DataLayout &DL = MF.getDataLayout();
838 
839  SmallVector<EVT, 4> SplitVTs;
841  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
842 
843  assert(VRegs.size() == SplitVTs.size());
844 
845  unsigned NumValues = SplitVTs.size();
846  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
847  Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
848  LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);
849 
851 
852  for (unsigned I = 0; I < NumValues; ++I) {
853  Register Addr;
854  MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
855  auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
856  MRI.getType(VRegs[I]),
857  commonAlignment(BaseAlign, Offsets[I]));
858  MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
859  }
860 }
861 
863  ArrayRef<Register> VRegs,
864  Register DemoteReg) const {
865  MachineFunction &MF = MIRBuilder.getMF();
867  const DataLayout &DL = MF.getDataLayout();
868 
869  SmallVector<EVT, 4> SplitVTs;
871  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
872 
873  assert(VRegs.size() == SplitVTs.size());
874 
875  unsigned NumValues = SplitVTs.size();
876  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
877  unsigned AS = DL.getAllocaAddrSpace();
878  LLT OffsetLLTy =
879  getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);
880 
881  MachinePointerInfo PtrInfo(AS);
882 
883  for (unsigned I = 0; I < NumValues; ++I) {
884  Register Addr;
885  MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
886  auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
887  MRI.getType(VRegs[I]),
888  commonAlignment(BaseAlign, Offsets[I]));
889  MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
890  }
891 }
892 
894  const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
895  MachineRegisterInfo &MRI, const DataLayout &DL) const {
 // Insert the hidden incoming sret pointer argument for a function whose
 // return value has been demoted to memory.  DemoteReg receives a fresh
 // pointer-typed virtual register, and a matching ArgInfo (with the SRet
 // flag set) is placed at the front of SplitArgs.
896  unsigned AS = DL.getAllocaAddrSpace();
897  DemoteReg = MRI.createGenericVirtualRegister(
898  LLT::pointer(AS, DL.getPointerSizeInBits(AS)));
899 
900  Type *PtrTy = PointerType::get(F.getReturnType(), AS);
901 
902  SmallVector<EVT, 1> ValueVTs;
903  ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);
904 
905  // NOTE: Assume that a pointer won't get split into more than one VT.
906  assert(ValueVTs.size() == 1);
907 
908  ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()),
911  DemoteArg.Flags[0].setSRet();
 // The sret argument always comes first in the argument list.
912  SplitArgs.insert(SplitArgs.begin(), DemoteArg);
913 }
914 
916  const CallBase &CB,
917  CallLoweringInfo &Info) const {
 // For the call described by CB, create a stack slot to receive the demoted
 // return value and insert the hidden sret pointer argument at the front of
 // Info.OrigArgs.  The frame index and pointer register are recorded in Info
 // so the caller can load the result back out after the call.
918  const DataLayout &DL = MIRBuilder.getDataLayout();
919  Type *RetTy = CB.getType();
920  unsigned AS = DL.getAllocaAddrSpace();
921  LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
922 
 // Reserve a stack object large and aligned enough for the whole return type.
923  int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
924  DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);
925 
926  Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
927  ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS),
 // Seed the new argument's flags from the call's return attributes, then
 // mark it as the sret argument.
929  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
930  DemoteArg.Flags[0].setSRet();
931 
932  Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
933  Info.DemoteStackIndex = FI;
934  Info.DemoteRegister = DemoteReg;
935 }
936 
939  CCAssignFn *Fn) const {
 // Returns true if every value in Outs can be given a location by the
 // calling-convention assignment function Fn, i.e. the return value fits the
 // convention's available return locations.
940  for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
941  MVT VT = MVT::getVT(Outs[I].Ty);
 // CCAssignFn returns true on failure to assign, so one failure rejects the
 // whole return value.
942  if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
943  return false;
944  }
945  return true;
946 }
947 
951  const DataLayout &DL) const {
 // Expand RetTy into the per-register pieces the calling convention will
 // actually use, appending one entry per register part to Outs.
952  LLVMContext &Context = RetTy->getContext();
954 
955  SmallVector<EVT, 4> SplitVTs;
956  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
958 
959  for (EVT VT : SplitVTs) {
 // Each legal EVT may itself be split into NumParts registers of type RegVT
 // under this calling convention.
960  unsigned NumParts =
961  TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
962  MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
963  Type *PartTy = EVT(RegVT).getTypeForEVT(Context);
964 
965  for (unsigned I = 0; I < NumParts; ++I) {
966  Outs.emplace_back(PartTy, Flags);
967  }
968  }
969 }
970 
 // Toplevel check of MF's return type against its calling convention:
 // returns true if the target can lower the return directly (i.e. without
 // demoting it to an sret stack slot).
972  const auto &F = MF.getFunction();
973  Type *ReturnType = F.getReturnType();
974  CallingConv::ID CallConv = F.getCallingConv();
975 
 // Split the return type the way the convention would, then ask the target.
976  SmallVector<BaseArgInfo, 4> SplitArgs;
977  getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
978  MF.getDataLayout());
979  return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
980 }
981 
983  const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
984  const SmallVectorImpl<CCValAssign> &OutLocs,
985  const SmallVectorImpl<ArgInfo> &OutArgs) const {
 // Tail-call legality check: every outgoing argument assigned to a
 // callee-saved register must be an unmodified copy of that same physical
 // register, otherwise the tail call would clobber a value the caller is
 // required to preserve.  Returns false as soon as one argument fails.
986  for (unsigned i = 0; i < OutLocs.size(); ++i) {
987  const auto &ArgLoc = OutLocs[i];
988  // If it's not a register, it's fine.
989  if (!ArgLoc.isRegLoc())
990  continue;
991 
992  MCRegister PhysReg = ArgLoc.getLocReg();
993 
994  // Only look at callee-saved registers.
995  if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
996  continue;
997 
998  LLVM_DEBUG(
999  dbgs()
1000  << "... Call has an argument passed in a callee-saved register.\n");
1001 
1002  // Check if it was copied from.
1003  const ArgInfo &OutInfo = OutArgs[i];
1004 
1005  if (OutInfo.Regs.size() > 1) {
1006  LLVM_DEBUG(
1007  dbgs() << "... Cannot handle arguments in multiple registers.\n");
1008  return false;
1009  }
1010 
1011  // Check if we copy the register, walking through copies from virtual
1012  // registers. Note that getDefIgnoringCopies does not ignore copies from
1013  // physical registers.
1014  MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
1015  if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
1016  LLVM_DEBUG(
1017  dbgs()
1018  << "... Parameter was not copied into a VReg, cannot tail call.\n");
1019  return false;
1020  }
1021 
1022  // Got a copy. Verify that it's the same as the register we want.
1023  Register CopyRHS = RegDef->getOperand(1).getReg();
1024  if (CopyRHS != PhysReg) {
1025  LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
1026  "VReg, cannot tail call.\n");
1027  return false;
1028  }
1029  }
1030 
1031  return true;
1032 }
1033 
1035  MachineFunction &MF,
1036  SmallVectorImpl<ArgInfo> &InArgs,
1037  ValueAssigner &CalleeAssigner,
1038  ValueAssigner &CallerAssigner) const {
 // Check (for tail-call purposes) that the callee's calling convention would
 // assign the values in InArgs to exactly the same locations as the caller's
 // convention would.  Trivially true when both use the same convention.
1039  const Function &F = MF.getFunction();
1040  CallingConv::ID CalleeCC = Info.CallConv;
1041  CallingConv::ID CallerCC = F.getCallingConv();
1042 
1043  if (CallerCC == CalleeCC)
1044  return true;
1045 
 // Assign locations once under the callee's convention...
1047  CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
1048  if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
1049  return false;
1050 
 // ...and once under the caller's convention.
1052  CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
1053  if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
1054  return false;
1055 
1056  // We need the argument locations to match up exactly. If there's more in
1057  // one than the other, then we are done.
1058  if (ArgLocs1.size() != ArgLocs2.size())
1059  return false;
1060 
1061  // Make sure that each location is passed in exactly the same way.
1062  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
1063  const CCValAssign &Loc1 = ArgLocs1[i];
1064  const CCValAssign &Loc2 = ArgLocs2[i];
1065 
1066  // We need both of them to be the same. So if one is a register and one
1067  // isn't, we're done.
1068  if (Loc1.isRegLoc() != Loc2.isRegLoc())
1069  return false;
1070 
1071  if (Loc1.isRegLoc()) {
1072  // If they don't have the same register location, we're done.
1073  if (Loc1.getLocReg() != Loc2.getLocReg())
1074  return false;
1075 
1076  // They matched, so we can move to the next ArgLoc.
1077  continue;
1078  }
1079 
1080  // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
1081  if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
1082  return false;
1083  }
1084 
1085  return true;
1086 }
1087 
1089  const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const {
 // Return the LLT to use for the in-memory form of the argument at VA,
 // reconstructing pointer-ness that the MVT-based CCValAssign dropped.
1090  const MVT ValVT = VA.getValVT();
1091  if (ValVT != MVT::iPTR) {
1092  LLT ValTy(ValVT);
1093 
1094  // We lost the pointeriness going through CCValAssign, so try to restore it
1095  // based on the flags.
1096  if (Flags.isPointer()) {
1097  LLT PtrTy = LLT::pointer(Flags.getPointerAddrSpace(),
1098  ValTy.getScalarSizeInBits());
 // Vectors of pointers keep the original element count with the restored
 // pointer element type.
1099  if (ValVT.isVector())
1100  return LLT::vector(ValTy.getElementCount(), PtrTy);
1101  return PtrTy;
1102  }
1103 
1104  return ValTy;
1105  }
1106 
 // iPTR: size comes from the data layout for the flagged address space.
1107  unsigned AddrSpace = Flags.getPointerAddrSpace();
1108  return LLT::pointer(AddrSpace, DL.getPointerSize(AddrSpace));
1109 }
1110 
1112  const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
1113  const MachinePointerInfo &DstPtrInfo, Align DstAlign,
1114  const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
1115  CCValAssign &VA) const {
 // Emit a G_MEMCPY of MemSize bytes from SrcPtr to DstPtr, with memory
 // operands describing both sides of the copy (used for arguments that live
 // in memory on both sides of a call, e.g. byval).
1116  MachineFunction &MF = MIRBuilder.getMF();
1118  SrcPtrInfo,
1120  SrcAlign);
1121 
1123  DstPtrInfo,
1125  MemSize, DstAlign);
1126 
 // The memcpy length operand is a scalar of pointer width.
1127  const LLT PtrTy = MRI.getType(DstPtr);
1128  const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());
1129 
1130  auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
1131  MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
1132 }
1133 
1135  CCValAssign &VA,
1136  unsigned MaxSizeBits) {
 // Extend ValReg to VA's location type using the extension kind recorded in
 // VA (any/sign/zero extend), capping the destination width at MaxSizeBits
 // when that is nonzero.  Returns ValReg unchanged when no extension is
 // needed.
1137  LLT LocTy{VA.getLocVT()};
1138  LLT ValTy{VA.getValVT()};
1139 
 // Already the right width: nothing to do.
1140  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
1141  return ValReg;
1142 
 // Honor the MaxSizeBits cap: either the value already fits, or we extend
 // only up to the capped scalar width instead of the full location width.
1143  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
1144  if (MaxSizeBits <= ValTy.getSizeInBits())
1145  return ValReg;
1146  LocTy = LLT::scalar(MaxSizeBits);
1147  }
1148 
1149  const LLT ValRegTy = MRI.getType(ValReg);
1150  if (ValRegTy.isPointer()) {
1151  // The x32 ABI wants to zero extend 32-bit pointers to 64-bit registers, so
1152  // we have to cast to do the extension.
1153  LLT IntPtrTy = LLT::scalar(ValRegTy.getSizeInBits());
1154  ValReg = MIRBuilder.buildPtrToInt(IntPtrTy, ValReg).getReg(0);
1155  }
1156 
1157  switch (VA.getLocInfo()) {
1158  default: break;
1159  case CCValAssign::Full:
1160  case CCValAssign::BCvt:
1161  // FIXME: bitconverting between vector types may or may not be a
1162  // nop in big-endian situations.
1163  return ValReg;
1164  case CCValAssign::AExt: {
1165  auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
1166  return MIB.getReg(0);
1167  }
1168  case CCValAssign::SExt: {
1169  Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1170  MIRBuilder.buildSExt(NewReg, ValReg);
1171  return NewReg;
1172  }
1173  case CCValAssign::ZExt: {
1174  Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1175  MIRBuilder.buildZExt(NewReg, ValReg);
1176  return NewReg;
1177  }
1178  }
 // Any LocInfo kind not handled above cannot be widened here.
1179  llvm_unreachable("unable to extend register");
1180 }
1181 
// Out-of-line virtual method anchor: pins ValueAssigner's vtable to this
// translation unit.
1182 void CallLowering::ValueAssigner::anchor() {}
1183 
1185  Register SrcReg,
1186  LLT NarrowTy) {
 // When VA records that the incoming value was zero- or sign-extended from
 // NarrowTy, wrap SrcReg in a G_ASSERT_ZEXT/G_ASSERT_SEXT so later combines
 // can rely on the state of the high bits.  Any other LocInfo kind returns
 // SrcReg untouched.
1187  switch (VA.getLocInfo()) {
1188  case CCValAssign::LocInfo::ZExt: {
1189  return MIRBuilder
1190  .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1191  NarrowTy.getScalarSizeInBits())
1192  .getReg(0);
1193  }
1194  case CCValAssign::LocInfo::SExt: {
1195  return MIRBuilder
1196  .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1197  NarrowTy.getScalarSizeInBits())
1198  .getReg(0);
 // NOTE(review): this break is unreachable after the return above.
1199  break;
1200  }
1201  default:
1202  return SrcReg;
1203  }
1204 }
1205 
1206 /// Check if we can use a basic COPY instruction between the two types.
1207 ///
1208 /// We're currently building on top of the infrastructure using MVT, which loses
1209 /// pointer information in the CCValAssign. We accept copies from physical
1210 /// registers that have been reported as integers if it's to an equivalent sized
1211 /// pointer LLT.
1212 static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
1213  if (SrcTy == DstTy)
1214  return true;
1215 
1216  if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1217  return false;
1218 
1219  SrcTy = SrcTy.getScalarType();
1220  DstTy = DstTy.getScalarType();
1221 
1222  return (SrcTy.isPointer() && DstTy.isScalar()) ||
1223  (DstTy.isPointer() && SrcTy.isScalar());
1224 }
1225 
1227  Register PhysReg,
1228  CCValAssign VA) {
 // Default handling of an incoming value assigned to physical register
 // PhysReg: copy it into ValVReg, adding extension hints and a truncate
 // when the location type is wider than the destination register type.
1229  const MVT LocVT = VA.getLocVT();
1230  const LLT LocTy(LocVT);
1231  const LLT RegTy = MRI.getType(ValVReg);
1232 
 // If a plain COPY is legal between the two types, just copy.
1233  if (isCopyCompatibleType(RegTy, LocTy)) {
1234  MIRBuilder.buildCopy(ValVReg, PhysReg);
1235  return;
1236  }
1237 
 // Otherwise copy at the location type, attach any zext/sext hint recorded
 // in VA, then truncate down to the expected register type.
1238  auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
1239  auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
1240  MIRBuilder.buildTrunc(ValVReg, Hint);
1241 }
llvm::CCValAssign::getLocVT
MVT getLocVT() const
Definition: CallingConvLower.h:151
i
i
Definition: README.txt:29
llvm::CallLowering::ValueAssigner
Argument handling is mostly uniform between the four places that make these decisions: function forma...
Definition: CallLowering.h:161
llvm::alignTo
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:156
llvm::CCValAssign::ZExt
@ ZExt
Definition: CallingConvLower.h:36
Attrs
Function Attrs
Definition: README_ALTIVEC.txt:215
llvm::getDefIgnoringCopies
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition: Utils.cpp:462
Merge
R600 Clause Merge
Definition: R600ClauseMergePass.cpp:70
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
llvm::CCValAssign::Full
@ Full
Definition: CallingConvLower.h:34
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::MachineOperand::CreateReg
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
Definition: MachineOperand.h:800
llvm::LLT::getScalarSizeInBits
unsigned getScalarSizeInBits() const
Definition: LowLevelTypeImpl.h:224
CallLowering.h
llvm::TargetLowering::functionArgumentNeedsConsecutiveRegisters
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Definition: TargetLowering.h:4453
llvm::CCState
CCState - This class holds information needed while lowering arguments and return values.
Definition: CallingConvLower.h:189
buildCopyToRegs
static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef< Register > DstRegs, Register SrcReg, LLT SrcTy, LLT PartTy, unsigned ExtendOp=TargetOpcode::G_ANYEXT)
Create a sequence of instructions to expand the value in SrcReg (of type SrcTy) to the types in DstRe...
Definition: CallLowering.cpp:483
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:50
llvm::TypeSize::getFixedSize
ScalarTy getFixedSize() const
Definition: TypeSize.h:444
llvm::Function
Definition: Function.h:60
llvm::CallLowering::IncomingValueHandler::assignValueToReg
void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign VA) override
Provides a default implementation for argument handling.
Definition: CallLowering.cpp:1226
llvm::PointerType::get
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Definition: Type.cpp:727
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1199
llvm::MVT::isVector
bool isVector() const
Return true if this is a vector value type.
Definition: MachineValueType.h:386
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:454
llvm::CallLowering::ValueHandler::extendRegister
Register extendRegister(Register ValReg, CCValAssign &VA, unsigned MaxSizeBits=0)
Extend a register to the location type given in VA, capped at extending to at most MaxSize bits.
Definition: CallLowering.cpp:1134
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:729
llvm::X86Disassembler::Reg
Reg
All possible values of the reg field in the ModR/M byte.
Definition: X86DisassemblerDecoder.h:462
llvm::CallLowering::ValueHandler
Definition: CallLowering.h:229
llvm::CallLowering::insertSRetOutgoingArgument
void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder, const CallBase &CB, CallLoweringInfo &Info) const
For the call-base described by CB, insert the hidden sret ArgInfo to the OrigArgs field of Info.
Definition: CallLowering.cpp:915
llvm::LLT::getScalarType
LLT getScalarType() const
Definition: LowLevelTypeImpl.h:167
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Module.h
llvm::AttributeList
Definition: Attributes.h:430
llvm::CallBase::getAttributes
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1475
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:127
llvm::CallBase::getFunctionType
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1255
llvm::MachineMemOperand::MODereferenceable
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
Definition: MachineMemOperand.h:142
extendOpFromFlags
static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags)
Definition: CallLowering.cpp:579
llvm::CallBase::isMustTailCall
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Definition: Instructions.cpp:300
llvm::CallLowering::splitToValueTypes
void splitToValueTypes(const ArgInfo &OrigArgInfo, SmallVectorImpl< ArgInfo > &SplitArgs, const DataLayout &DL, CallingConv::ID CallConv, SmallVectorImpl< uint64_t > *Offsets=nullptr) const
Break OrigArgInfo into one or more pieces the calling convention can process, returned in SplitArgs.
Definition: CallLowering.cpp:250
llvm::CCValAssign::BCvt
@ BCvt
Definition: CallingConvLower.h:44
llvm::LLT::changeElementCount
LLT changeElementCount(ElementCount EC) const
Return a vector or scalar with the same element type and the new element count.
Definition: LowLevelTypeImpl.h:189
llvm::LLT::vector
static LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelTypeImpl.h:56
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
llvm::FunctionType::getNumParams
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:139
MachineIRBuilder.h
buildCopyFromRegs
static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef< Register > OrigRegs, ArrayRef< Register > Regs, LLT LLTy, LLT PartLLT, const ISD::ArgFlagsTy Flags)
Create a sequence of instructions to combine pieces split into register typed values to the original ...
Definition: CallLowering.cpp:336
llvm::ArrayRef::empty
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:159
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::MachineIRBuilder::buildConstant
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Definition: MachineIRBuilder.cpp:293
llvm::commonAlignment
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:213
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::FunctionType::isVarArg
bool isVarArg() const
Definition: DerivedTypes.h:123
MachineRegisterInfo.h
llvm::ComputeValueVTs
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:121
llvm::getLLTForType
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
Definition: LowLevelType.cpp:20
Context
LLVMContext & Context
Definition: NVVMIntrRange.cpp:66
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::LLT::fixed_vector
static LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelTypeImpl.h:74
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:187
TargetLowering.h
llvm::getGCDType
LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition: Utils.cpp:960
llvm::CallLowering::getTLI
const TargetLowering * getTLI() const
Getter for generic TargetLowering class.
Definition: CallLowering.h:340
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:667
llvm::CCValAssign::AExt
@ AExt
Definition: CallingConvLower.h:37
llvm::CallLowering::resultsCompatible
bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF, SmallVectorImpl< ArgInfo > &InArgs, ValueAssigner &CalleeAssigner, ValueAssigner &CallerAssigner) const
Definition: CallLowering.cpp:1034
llvm::CCValAssign
CCValAssign - Represent assignment of one arg/retval to a location.
Definition: CallingConvLower.h:31
TargetMachine.h
llvm::MachineIRBuilder::buildZExt
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
Definition: MachineIRBuilder.cpp:462
llvm::MachineIRBuilder::buildLoad
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
Definition: MachineIRBuilder.h:904
llvm::MachineRegisterInfo::setType
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Definition: MachineRegisterInfo.cpp:180
llvm::CallLowering::checkReturnTypeForCallConv
bool checkReturnTypeForCallConv(MachineFunction &MF) const
Toplevel function to check the return type based on the target calling convention.
Definition: CallLowering.cpp:971
getReg
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
Definition: MipsDisassembler.cpp:517
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::CCValAssign::getLocReg
Register getLocReg() const
Definition: CallingConvLower.h:148
llvm::CallLowering::ValueHandler::assignValueToReg
virtual void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign VA)=0
The specified value has been assigned to a physical register, handle the appropriate COPY (either to ...
llvm::CallLowering::ArgInfo
Definition: CallLowering.h:62
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:34
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:526
llvm::MachineIRBuilder::buildPtrToInt
MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src)
Build and insert a G_PTRTOINT instruction.
Definition: MachineIRBuilder.h:667
mergeVectorRegsToResultRegs
static MachineInstrBuilder mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef< Register > DstRegs, ArrayRef< Register > SrcRegs)
Pack values SrcRegs to cover the vector type result DstRegs.
Definition: CallLowering.cpp:290
llvm::LLT::getSizeInBits
TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelTypeImpl.h:152
llvm::CallLowering::ValueHandler::getStackValueStoreType
virtual LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const
Return the in-memory size to write for the argument at VA.
Definition: CallLowering.cpp:1088
Utils.h
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::MachineIRBuilder::getDataLayout
const DataLayout & getDataLayout() const
Definition: MachineIRBuilder.h:281
llvm::Function::getFnAttribute
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:655
llvm::CallLowering::determineAssignments
bool determineAssignments(ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, CCState &CCInfo) const
Analyze the argument list in Args, using Assigner to populate CCInfo.
Definition: CallLowering.cpp:587
llvm::CallLowering::ValueHandler::assignCustomValue
virtual unsigned assignCustomValue(ArgInfo &Arg, ArrayRef< CCValAssign > VAs, std::function< void()> *Thunk=nullptr)
Handle custom values, which may be passed into one or more of VAs.
Definition: CallLowering.h:295
llvm::MachineIRBuilder::getMF
MachineFunction & getMF()
Getter for the function we currently build.
Definition: MachineIRBuilder.h:271
llvm::CCState::getContext
LLVMContext & getContext() const
Definition: CallingConvLower.h:255
Info
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
llvm::CCValAssign::getLocInfo
LocInfo getLocInfo() const
Definition: CallingConvLower.h:153
Align
uint64_t Align
Definition: ELFObjHandler.cpp:82
llvm::CCValAssign::getLocMemOffset
unsigned getLocMemOffset() const
Definition: CallingConvLower.h:149
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::CallBase::getCallingConv
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1456
llvm::Attribute::getValueAsString
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:312
llvm::EVT::getTypeForEVT
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:182
llvm::CCValAssign::isRegLoc
bool isRegLoc() const
Definition: CallingConvLower.h:143
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
llvm::MachineInstrBuilder::getReg
Register getReg(unsigned Idx) const
Get the register for the operand index.
Definition: MachineInstrBuilder.h:94
llvm::Instruction::getMetadata
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:276
llvm::Function::getAttributes
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:314
llvm::CCAssignFn
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
Definition: CallingConvLower.h:175
llvm::LLT::pointer
static LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelTypeImpl.h:49
llvm::LLT::getAddressSpace
unsigned getAddressSpace() const
Definition: LowLevelTypeImpl.h:238
llvm::CallLowering::isTypeIsValidForThisReturn
virtual bool isTypeIsValidForThisReturn(EVT Ty) const
For targets which support the "returned" parameter attribute, returns true if the given type is a val...
Definition: CallLowering.h:593
llvm::CallLowering::ArgInfo::NoArgIndex
static const unsigned NoArgIndex
Sentinel value for implicit machine-level input arguments.
Definition: CallLowering.h:79
llvm::AttributeList::ReturnIndex
@ ReturnIndex
Definition: Attributes.h:433
llvm::MachineOperand::clobbersPhysReg
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
Definition: MachineOperand.h:626
llvm::TargetLoweringBase::getNumRegistersForCallingConv
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
Definition: TargetLowering.h:1618
llvm::CallBase::isTailCall
bool isTailCall() const
Tests if this call site is marked as a tail call.
Definition: Instructions.cpp:307
llvm::ArrayRef::drop_front
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:203
llvm::MachineIRBuilder
Helper class to build MachineInstr.
Definition: MachineIRBuilder.h:221
llvm::RegState::Undef
@ Undef
Value of the register doesn't matter.
Definition: MachineInstrBuilder.h:52
llvm::CCValAssign::SExt
@ SExt
Definition: CallingConvLower.h:35
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:66
llvm::CallBase::getRetAlign
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
Definition: InstrTypes.h:1734
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
uint64_t
llvm::CallLowering::checkReturn
bool checkReturn(CCState &CCInfo, SmallVectorImpl< BaseArgInfo > &Outs, CCAssignFn *Fn) const
Definition: CallLowering.cpp:937
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:79
llvm::MachinePointerInfo
This class contains a discriminated union of information about pointers in memory operands,...
Definition: MachineMemOperand.h:39
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:53
llvm::inferAlignFromPtrInfo
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition: Utils.cpp:712
llvm::EVT::getEVT
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:595
I
#define I(x, y, z)
Definition: MD5.cpp:58
Analysis.h
llvm::Attribute::AttrKind
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:85
llvm::getCoverTy
LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)
Return smallest type that covers both OrigTy and TargetTy and is multiple of TargetTy.
Definition: Utils.cpp:945
llvm::LLT::isVector
bool isVector() const
Definition: LowLevelTypeImpl.h:122
llvm::LLT::getNumElements
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelTypeImpl.h:126
llvm::PointerType
Class to represent pointers.
Definition: DerivedTypes.h:632
llvm::TargetLoweringBase::hasBigEndianPartOrdering
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
Definition: TargetLowering.h:1651
llvm::CCState::getCallingConv
CallingConv::ID getCallingConv() const
Definition: CallingConvLower.h:257
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::LLT::isPointer
bool isPointer() const
Definition: LowLevelTypeImpl.h:120
llvm::CallLowering::getAttributesForReturn
ISD::ArgFlagsTy getAttributesForReturn(const CallBase &Call) const
Definition: CallLowering.cpp:74
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:673
isCopyCompatibleType
static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy)
Check if we can use a basic COPY instruction between the two types.
Definition: CallLowering.cpp:1212
function
print Print MemDeps of function
Definition: MemDepPrinter.cpp:82
llvm::MachineRegisterInfo::createGenericVirtualRegister
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Definition: MachineRegisterInfo.cpp:186
llvm::MVT
Machine Value Type.
Definition: MachineValueType.h:31
llvm::MachineIRBuilder::buildAssertZExt
MachineInstrBuilder buildAssertZExt(const DstOp &Res, const SrcOp &Op, unsigned Size)
Build and insert Res = G_ASSERT_ZEXT Op, Size.
Definition: MachineIRBuilder.h:874
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:359
llvm::LLT::isScalar
bool isScalar() const
Definition: LowLevelTypeImpl.h:118
llvm::ARM::WinEH::ReturnType
ReturnType
Definition: ARMWinEH.h:25
llvm::CallLowering::addArgFlagsFromAttributes
void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags, const AttributeList &Attrs, unsigned OpIdx) const
Adds flags to Flags based off of the attributes in Attrs.
Definition: CallLowering.cpp:82
llvm::MachineFunction
Definition: MachineFunction.h:257
addFlagsUsingAttrFn
static void addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags, const std::function< bool(Attribute::AttrKind)> &AttrFn)
Helper function which updates Flags when AttrFn returns true.
Definition: CallLowering.cpp:36
llvm::CCValAssign::getValNo
unsigned getValNo() const
Definition: CallingConvLower.h:140
llvm::CallLowering::ValueHandler::copyArgumentMemory
void copyArgumentMemory(const ArgInfo &Arg, Register DstPtr, Register SrcPtr, const MachinePointerInfo &DstPtrInfo, Align DstAlign, const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize, CCValAssign &VA) const
Do a memory copy of MemSize bytes from SrcPtr to DstPtr.
Definition: CallLowering.cpp:1111
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::MachineOperand::CreateGA
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
Definition: MachineOperand.h:859
DataLayout.h
llvm::MachineFrameInfo::CreateStackObject
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Definition: MachineFrameInfo.cpp:51
llvm::CallLowering::ValueHandler::getStackAddress
virtual Register getStackAddress(uint64_t MemSize, int64_t Offset, MachinePointerInfo &MPO, ISD::ArgFlagsTy Flags)=0
Materialize a VReg containing the address of the specified stack-based object.
llvm::CallBase::getOperandBundle
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:2018
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:516
llvm::MachineIRBuilder::buildAssertAlign
MachineInstrBuilder buildAssertAlign(const DstOp &Res, const SrcOp &Op, Align AlignVal)
Build and insert Res = G_ASSERT_ALIGN Op, AlignVal.
Definition: MachineIRBuilder.h:890
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
llvm::CallLowering::insertSRetLoads
void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg, int FI) const
Load the returned value from the stack into virtual registers in VRegs.
Definition: CallLowering.cpp:832
llvm::MachineIRBuilder::buildCopy
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
Definition: MachineIRBuilder.cpp:288
uint32_t
llvm::LLVMContext::OB_kcfi
@ OB_kcfi
Definition: LLVMContext.h:97
llvm::ISD::ArgFlagsTy
Definition: TargetCallingConv.h:27
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::MVT::iPTR
@ iPTR
Definition: MachineValueType.h:332
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
llvm::ArrayRef::front
const T & front() const
front - Get the first element.
Definition: ArrayRef.h:167
llvm::LLT::changeElementType
LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
Definition: LowLevelTypeImpl.h:173
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:134
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::MachineIRBuilder::buildAnyExt
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
Definition: MachineIRBuilder.cpp:452
llvm::MachineIRBuilder::materializePtrAdd
Optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
Definition: MachineIRBuilder.cpp:201
llvm::Value::stripPointerCasts
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:685
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
NumFixedArgs
static unsigned NumFixedArgs
Definition: LanaiISelLowering.cpp:364
llvm::MachineIRBuilder::buildFrameIndex
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
Definition: MachineIRBuilder.cpp:145
llvm::CallLowering::insertSRetIncomingArgument
void insertSRetIncomingArgument(const Function &F, SmallVectorImpl< ArgInfo > &SplitArgs, Register &DemoteReg, MachineRegisterInfo &MRI, const DataLayout &DL) const
Insert the hidden sret ArgInfo to the beginning of SplitArgs.
Definition: CallLowering.cpp:893
llvm::CallLowering::getAttributesForArgIdx
ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call, unsigned ArgIdx) const
Definition: CallLowering.cpp:64
j
return j(j<< 16)
llvm::CallLowering::canLowerReturn
virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv, SmallVectorImpl< BaseArgInfo > &Outs, bool IsVarArg) const
This hook must be implemented to check whether the return values described by Outs can fit into the r...
Definition: CallLowering.h:493
llvm::CCValAssign::isMemLoc
bool isMemLoc() const
Definition: CallingConvLower.h:144
llvm::CallLowering::insertSRetStores
void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg) const
Store the return value given by VRegs into stack starting at the offset specified in DemoteReg.
Definition: CallLowering.cpp:862
llvm::MachineIRBuilder::buildTrunc
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
Definition: MachineIRBuilder.cpp:760
llvm::MachineIRBuilder::buildMemCpy
MachineInstrBuilder buildMemCpy(const SrcOp &DstPtr, const SrcOp &SrcPtr, const SrcOp &Size, MachineMemOperand &DstMMO, MachineMemOperand &SrcMMO)
Definition: MachineIRBuilder.h:1942
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:623
CallingConvLower.h
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:653
llvm::ArrayRef::begin
iterator begin() const
Definition: ArrayRef.h:152
llvm::ArrayRef::take_front
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition: ArrayRef.h:227
MachineFrameInfo.h
llvm::LLT::getSizeInBytes
TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelTypeImpl.h:162
llvm::CallLowering::handleAssignments
bool handleAssignments(ValueHandler &Handler, SmallVectorImpl< ArgInfo > &Args, CCState &CCState, SmallVectorImpl< CCValAssign > &ArgLocs, MachineIRBuilder &MIRBuilder, ArrayRef< Register > ThisReturnRegs=None) const
Use Handler to insert code to handle the argument/return values represented by Args.
Definition: CallLowering.cpp:648
llvm::CallLowering::ArgInfo::Regs
SmallVector< Register, 4 > Regs
Definition: CallLowering.h:63
llvm::CallLowering::BaseArgInfo::Ty
Type * Ty
Definition: CallLowering.h:50
llvm::CallBase::isIndirectCall
bool isIndirectCall() const
Return true if the callsite is an indirect call.
Definition: Instructions.cpp:291
llvm::CallLowering::CallLoweringInfo
Definition: CallLowering.h:102
llvm::Type::getPointerTo
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Definition: Type.cpp:774
llvm::CallLowering::IncomingValueHandler::buildExtensionHint
Register buildExtensionHint(CCValAssign &VA, Register SrcReg, LLT NarrowTy)
Insert G_ASSERT_ZEXT/G_ASSERT_SEXT or other hint instruction based on VA, returning the new register ...
Definition: CallLowering.cpp:1184
llvm::CallBase::getCalledOperand
Value * getCalledOperand() const
Definition: InstrTypes.h:1390
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:136
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:475
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1019
llvm::MachineRegisterInfo::getType
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
Definition: MachineRegisterInfo.h:745
llvm::MachineRegisterInfo::cloneVirtualRegister
Register cloneVirtualRegister(Register VReg, StringRef Name="")
Create and return a new virtual register in the function with the same attributes as the given regist...
Definition: MachineRegisterInfo.cpp:170
llvm::MachineIRBuilder::buildSExt
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
Definition: MachineIRBuilder.cpp:457
llvm::MachineIRBuilder::buildAssertSExt
MachineInstrBuilder buildAssertSExt(const DstOp &Res, const SrcOp &Op, unsigned Size)
Build and insert Res = G_ASSERT_SEXT Op, Size.
Definition: MachineIRBuilder.h:882
llvm::CCValAssign::getValVT
MVT getValVT() const
Definition: CallingConvLower.h:141
llvm::LLT::getElementCount
ElementCount getElementCount() const
Definition: LowLevelTypeImpl.h:143
llvm::TargetLoweringBase::getRegisterTypeForCallingConv
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
Definition: TargetLowering.h:1610
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:164
llvm::MVT::getVT
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:563
llvm::isInTailCallPosition
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:522
llvm::MachineFunction::getDataLayout
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Definition: MachineFunction.cpp:285
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
llvm::CallLowering::getReturnInfo
void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs, SmallVectorImpl< BaseArgInfo > &Outs, const DataLayout &DL) const
Get the type and the ArgFlags for the split components of RetTy as returned by ComputeValueVTs.
Definition: CallLowering.cpp:948
MachineOperand.h
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1175
llvm::CallLowering::BaseArgInfo::IsFixed
bool IsFixed
Definition: CallLowering.h:52
llvm::CallLowering::ArgInfo::OrigValue
const Value * OrigValue
Optionally track the original IR value for the argument.
Definition: CallLowering.h:73
LLVMContext.h
llvm::CallLowering::ValueAssigner::assignArg
virtual bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, const ArgInfo &Info, ISD::ArgFlagsTy Flags, CCState &State)
Wrap call to (typically tablegenerated CCAssignFn).
Definition: CallLowering.h:185
llvm::AMDGPU::HSAMD::Kernel::Key::Args
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Definition: AMDGPUMetadata.h:394
llvm::MachineIRBuilder::buildStore
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
Definition: MachineIRBuilder.cpp:425
llvm::LLT::getElementType
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelTypeImpl.h:248
llvm::SI::KernelInputOffsets::Offsets
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1314
llvm::CallLowering::ArgInfo::OrigArgIndex
unsigned OrigArgIndex
Index original Function's argument.
Definition: CallLowering.h:76
llvm::CallLowering::parametersInCSRMatch
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< ArgInfo > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
Definition: CallLowering.cpp:982
copy
we should consider alternate ways to model stack dependencies Lots of things could be done in WebAssemblyTargetTransformInfo cpp there are numerous optimization related hooks that can be overridden in WebAssemblyTargetLowering Instead of the OptimizeReturned which should consider preserving the returned attribute through to MachineInstrs and extending the MemIntrinsicResults pass to do this optimization on calls too That would also let the WebAssemblyPeephole pass clean up dead defs for such as it does for stores Consider implementing and or getMachineCombinerPatterns Find a clean way to fix the problem which leads to the Shrink Wrapping pass being run after the WebAssembly PEI pass When setting multiple variables to the same we currently get code like const It could be done with a smaller encoding like local tee $pop5 local copy
Definition: README.txt:101
llvm::LLT::scalar
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelTypeImpl.h:42
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::AttributeList::FirstArgIndex
@ FirstArgIndex
Definition: Attributes.h:435
llvm::CallLowering::ValueHandler::isIncomingArgumentHandler
bool isIncomingArgumentHandler() const
Returns true if the handler is dealing with incoming arguments, i.e.
Definition: CallLowering.h:243
llvm::ArrayRef::end
iterator end() const
Definition: ArrayRef.h:153
llvm::CCValAssign::needsCustom
bool needsCustom() const
Definition: CallingConvLower.h:146
llvm::CallBase::args
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1333
llvm::CallLowering::determineAndHandleAssignments
bool determineAndHandleAssignments(ValueHandler &Handler, ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, bool IsVarArg, ArrayRef< Register > ThisReturnRegs=None) const
Invoke ValueAssigner::assignArg on each of the given Args and then use Handler to move them to the as...
Definition: CallLowering.cpp:562
llvm::CallLowering::ValueHandler::assignValueToAddress
virtual void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, MachinePointerInfo &MPO, CCValAssign &VA)=0
The specified value has been assigned to a stack location.
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
llvm::SmallVectorImpl::insert
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:809
llvm::CallLowering::BaseArgInfo::Flags
SmallVector< ISD::ArgFlagsTy, 4 > Flags
Definition: CallLowering.h:51
llvm::LLT
Definition: LowLevelTypeImpl.h:39
llvm::CallLowering::lowerCall
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const
This hook must be implemented to lower the given call instruction, including argument and return valu...
Definition: CallLowering.h:553
llvm::CallLowering::setArgFlags
void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL, const FuncInfoTy &FuncInfo) const
Definition: CallLowering.cpp:192