//===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AArch64CallLowering.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>

#define DEBUG_TYPE "aarch64-call-lowering"

using namespace llvm;

AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
  : CallLowering(&TLI) {}

namespace {
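/// Handler for values flowing into the current function: formal arguments and
/// the results of calls we emit. Each value is either copied out of the
/// physical register chosen by the calling convention (truncating extended
/// locations back to the value type) or loaded from its fixed stack slot.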
struct IncomingArgHandler : public CallLowering::ValueHandler {
  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}

  unsigned getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64));
    MIRBuilder.buildFrameIndex(AddrReg, FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg;
  }

  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
    switch (VA.getLocInfo()) {
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    }
  }

  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    // FIXME: Get alignment
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
        1);
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

  bool isArgumentHandler() const override { return true; }

  uint64_t StackUsed;
};

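/// Incoming handler for formal arguments: the physical register chosen by the
/// calling convention is recorded as a live-in of the current basic block.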
struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
      : IncomingArgHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

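/// Incoming handler for the return value(s) of a call: each physical register
/// carrying a returned value is added as an implicit def of the call
/// instruction being built.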
struct CallReturnHandler : public IncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : IncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

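/// Handler for values leaving the current function: outgoing call arguments
/// and return values. Register-assigned values are extended as required and
/// copied into their physical registers, which become implicit uses of the
/// call/return instruction; stack-assigned values are stored at the assigned
/// offset from SP.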
struct OutgoingArgHandler : public CallLowering::ValueHandler {
  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     MachineInstrBuilder MIB, CCAssignFn *AssignFn,
                     CCAssignFn *AssignFnVarArg)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
        AssignFnVarArg(AssignFnVarArg), StackSize(0) {}

  unsigned getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    LLT p0 = LLT::pointer(0, 64);
    LLT s64 = LLT::scalar(64);
    unsigned SPReg = MRI.createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(SPReg, AArch64::SP);

    unsigned OffsetReg = MRI.createGenericVirtualRegister(s64);
    MIRBuilder.buildConstant(OffsetReg, Offset);

    unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
    MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
    return AddrReg;
  }

  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
                        CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    unsigned ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    if (VA.getLocInfo() == CCValAssign::LocInfo::AExt) {
      Size = VA.getLocVT().getSizeInBits() / 8;
      ValVReg = MIRBuilder.buildAnyExt(LLT::scalar(Size * 8), ValVReg)
                    ->getOperand(0)
                    .getReg();
    }
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, Size, 1);
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 CCState &State) override {
    bool Res;
    if (Info.IsFixed)
      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
    else
      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);

    StackSize = State.getNextStackOffset();
    return Res;
  }

  MachineInstrBuilder MIB;
  CCAssignFn *AssignFnVarArg;
  uint64_t StackSize;
};
} // namespace

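// Break OrigArg into one ArgInfo per value type produced by ComputeValueVTs.
// When the type actually splits, each piece gets a fresh virtual register and
// PerformArgSplit is invoked with that register and the piece's offset in
// bits within the original value.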
void AArch64CallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv,
    const SplitArgTy &PerformArgSplit) const {
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  if (OrigArg.Ty->isVoidTy())
    return;

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  if (SplitVTs.size() == 1) {
    // No splitting to do, but we want to replace the original type (e.g. [1 x
    // double] -> double).
    SplitArgs.emplace_back(OrigArg.Reg, SplitVTs[0].getTypeForEVT(Ctx),
                           OrigArg.Flags, OrigArg.IsFixed);
    return;
  }

  unsigned FirstRegIdx = SplitArgs.size();
  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
      OrigArg.Ty, CallConv, false);
  for (auto SplitVT : SplitVTs) {
    Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
    SplitArgs.push_back(
        ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL)),
                SplitTy, OrigArg.Flags, OrigArg.IsFixed});
    if (NeedsRegBlock)
      SplitArgs.back().Flags.setInConsecutiveRegs();
  }

  SplitArgs.back().Flags.setInConsecutiveRegsLast();

  for (unsigned i = 0; i < Offsets.size(); ++i)
    PerformArgSplit(SplitArgs[FirstRegIdx + i].Reg, Offsets[i] * 8);
}

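// Lower an IR 'ret' into a RET_ReallyLR pseudo, copying each piece of the
// return value into the physical register(s) chosen by the return calling
// convention, after applying any extension or vector padding the CC requires.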
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                      const Value *Val,
                                      ArrayRef<unsigned> VRegs) const {
  auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
         "Return value without a vreg");

  bool Success = true;
  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();

    MachineRegisterInfo &MRI = MF.getRegInfo();
    const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
    CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
    auto &DL = F.getParent()->getDataLayout();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> SplitArgs;
    CallingConv::ID CC = F.getCallingConv();

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) > 1) {
        LLVM_DEBUG(dbgs() << "Can't handle extended arg types which need split");
        return false;
      }

      unsigned CurVReg = VRegs[i];
      ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);

      // i1 is a special case because SDAG i1 true is naturally zero extended
      // when widened using ANYEXT. We need to do it explicitly here.
      if (MRI.getType(CurVReg).getSizeInBits() == 1) {
        CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
      } else {
        // Some types will need extending as specified by the CC.
        MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
        if (EVT(NewVT) != SplitEVTs[i]) {
          unsigned ExtendOp = TargetOpcode::G_ANYEXT;
          if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                             Attribute::SExt))
            ExtendOp = TargetOpcode::G_SEXT;
          else if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                                  Attribute::ZExt))
            ExtendOp = TargetOpcode::G_ZEXT;

          LLT NewLLT(NewVT);
          LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
          CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
          // Instead of an extend, we might have a vector type which needs
          // padding with more elements, e.g. <2 x half> -> <4 x half>
          if (NewVT.isVector() &&
              NewLLT.getNumElements() > OldLLT.getNumElements()) {
            // We don't handle VA types which are not exactly twice the size,
            // but can easily be done in future.
            if (NewLLT.getNumElements() != OldLLT.getNumElements() * 2) {
              LLVM_DEBUG(dbgs() << "Outgoing vector ret has too many elts");
              return false;
            }
            auto Undef = MIRBuilder.buildUndef({OldLLT});
            CurVReg =
                MIRBuilder.buildMerge({NewLLT}, {CurVReg, Undef.getReg(0)})
                    .getReg(0);
          } else {
            CurVReg =
                MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg}).getReg(0);
          }
        }
      }
      if (CurVReg != CurArgInfo.Reg) {
        CurArgInfo.Reg = CurVReg;
        // Reset the arg flags after modifying CurVReg.
        setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      }
      splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI, CC,
                        [&](unsigned Reg, uint64_t Offset) {
                          MIRBuilder.buildExtract(Reg, CurVReg, Offset);
                        });
    }

    OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
    Success = handleAssignments(MIRBuilder, SplitArgs, Handler);
  }

  MIRBuilder.insertInstr(MIB);
  return Success;
}

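// Lower the incoming (formal) arguments: split aggregate arguments into legal
// pieces, let the calling convention assign each piece to a register or stack
// slot, and record the stack area used so varargs can be placed after it.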
bool AArch64CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                               const Function &F,
                                               ArrayRef<unsigned> VRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();

  SmallVector<ArgInfo, 8> SplitArgs;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;
    ArgInfo OrigArg{VRegs[i], Arg.getType()};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
    bool Split = false;
    LLT Ty = MRI.getType(VRegs[i]);
    unsigned Dst = VRegs[i];

    splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv(),
                      [&](unsigned Reg, uint64_t Offset) {
                        if (!Split) {
                          Split = true;
                          Dst = MRI.createGenericVirtualRegister(Ty);
                          MIRBuilder.buildUndef(Dst);
                        }
                        unsigned Tmp = MRI.createGenericVirtualRegister(Ty);
                        MIRBuilder.buildInsert(Tmp, Dst, Reg, Offset);
                        Dst = Tmp;
                      });

    if (Dst != VRegs[i])
      MIRBuilder.buildCopy(VRegs[i], Dst);
    ++i;
  }

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);

  FormalArgHandler Handler(MIRBuilder, MRI, AssignFn);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;

  if (F.isVarArg()) {
    if (!MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
      // FIXME: we need to reimplement saveVarArgsRegisters from
      // AArch64ISelLowering.
      return false;
    }

    // We currently pass all varargs at 8-byte alignment.
    uint64_t StackOffset = alignTo(Handler.StackUsed, 8);

    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
    FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
  }

  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  if (Subtarget.hasCustomCallingConv())
    Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);

  return true;
}

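// Lower an IR call: marshal the outgoing arguments, emit the call sequence
// (ADJCALLSTACKDOWN, BL/BLR, ADJCALLSTACKUP), and copy any returned value
// back out of its physical register(s).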
bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                    CallingConv::ID CallConv,
                                    const MachineOperand &Callee,
                                    const ArgInfo &OrigRet,
                                    ArrayRef<ArgInfo> OrigArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();

  SmallVector<ArgInfo, 8> SplitArgs;
  for (auto &OrigArg : OrigArgs) {
    splitToValueTypes(OrigArg, SplitArgs, DL, MRI, CallConv,
                      [&](unsigned Reg, uint64_t Offset) {
                        MIRBuilder.buildExtract(Reg, OrigArg.Reg, Offset);
                      });
    // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
    if (OrigArg.Ty->isIntegerTy(1))
      SplitArgs.back().Flags.setZExt();
  }

  // Find out which ABI gets to decide where things go.
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *AssignFnFixed =
      TLI.CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
  CCAssignFn *AssignFnVarArg =
      TLI.CCAssignFnForCall(CallConv, /*IsVarArg=*/true);

  auto CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  auto MIB = MIRBuilder.buildInstrNoInsert(Callee.isReg() ? AArch64::BLR
                                                          : AArch64::BL);
  MIB.add(Callee);

  // Tell the call which registers are clobbered.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, F.getCallingConv());
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // Do the actual argument marshalling.
  SmallVector<unsigned, 8> PhysRegs;
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
                             AssignFnVarArg);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Callee, 0));

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
  if (OrigRet.Reg) {
    SplitArgs.clear();

    SmallVector<uint64_t, 8> RegOffsets;
    SmallVector<unsigned, 8> SplitRegs;
    splitToValueTypes(OrigRet, SplitArgs, DL, MRI, F.getCallingConv(),
                      [&](unsigned Reg, uint64_t Offset) {
                        RegOffsets.push_back(Offset);
                        SplitRegs.push_back(Reg);
                      });

    CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
    if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
      return false;

    if (!RegOffsets.empty())
      MIRBuilder.buildSequence(OrigRet.Reg, SplitRegs, RegOffsets);
  }

  CallSeqStart.addImm(Handler.StackSize).addImm(0);
  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
      .addImm(Handler.StackSize)
      .addImm(0);

  return true;
}