LLVM  10.0.0svn
AArch64CallLowering.cpp
Go to the documentation of this file.
1 //===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the lowering of LLVM calls to machine code calls for
11 /// GlobalISel.
12 ///
13 //===----------------------------------------------------------------------===//
14 
#include "AArch64CallLowering.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
46 
47 #define DEBUG_TYPE "aarch64-call-lowering"
48 
49 using namespace llvm;
50 
52  : CallLowering(&TLI) {}
53 
54 namespace {
55 struct IncomingArgHandler : public CallLowering::ValueHandler {
56  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
57  CCAssignFn *AssignFn)
58  : ValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}
59 
60  Register getStackAddress(uint64_t Size, int64_t Offset,
61  MachinePointerInfo &MPO) override {
62  auto &MFI = MIRBuilder.getMF().getFrameInfo();
63  int FI = MFI.CreateFixedObject(Size, Offset, true);
64  MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
66  MIRBuilder.buildFrameIndex(AddrReg, FI);
67  StackUsed = std::max(StackUsed, Size + Offset);
68  return AddrReg;
69  }
70 
71  void assignValueToReg(Register ValVReg, Register PhysReg,
72  CCValAssign &VA) override {
73  markPhysRegUsed(PhysReg);
74  switch (VA.getLocInfo()) {
75  default:
76  MIRBuilder.buildCopy(ValVReg, PhysReg);
77  break;
78  case CCValAssign::LocInfo::SExt:
79  case CCValAssign::LocInfo::ZExt:
80  case CCValAssign::LocInfo::AExt: {
81  auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
82  MIRBuilder.buildTrunc(ValVReg, Copy);
83  break;
84  }
85  }
86  }
87 
88  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
89  MachinePointerInfo &MPO, CCValAssign &VA) override {
90  // FIXME: Get alignment
91  auto MMO = MIRBuilder.getMF().getMachineMemOperand(
93  1);
94  MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
95  }
96 
97  /// How the physical register gets marked varies between formal
98  /// parameters (it's a basic-block live-in), and a call instruction
99  /// (it's an implicit-def of the BL).
100  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
101 
102  bool isArgumentHandler() const override { return true; }
103 
104  uint64_t StackUsed;
105 };
106 
107 struct FormalArgHandler : public IncomingArgHandler {
108  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
109  CCAssignFn *AssignFn)
110  : IncomingArgHandler(MIRBuilder, MRI, AssignFn) {}
111 
112  void markPhysRegUsed(unsigned PhysReg) override {
113  MIRBuilder.getMBB().addLiveIn(PhysReg);
114  }
115 };
116 
117 struct CallReturnHandler : public IncomingArgHandler {
118  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
119  MachineInstrBuilder MIB, CCAssignFn *AssignFn)
120  : IncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
121 
122  void markPhysRegUsed(unsigned PhysReg) override {
123  MIB.addDef(PhysReg, RegState::Implicit);
124  }
125 
127 };
128 
129 struct OutgoingArgHandler : public CallLowering::ValueHandler {
130  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
131  MachineInstrBuilder MIB, CCAssignFn *AssignFn,
132  CCAssignFn *AssignFnVarArg)
133  : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
134  AssignFnVarArg(AssignFnVarArg), StackSize(0) {}
135 
136  Register getStackAddress(uint64_t Size, int64_t Offset,
137  MachinePointerInfo &MPO) override {
138  LLT p0 = LLT::pointer(0, 64);
139  LLT s64 = LLT::scalar(64);
140  Register SPReg = MRI.createGenericVirtualRegister(p0);
141  MIRBuilder.buildCopy(SPReg, Register(AArch64::SP));
142 
143  Register OffsetReg = MRI.createGenericVirtualRegister(s64);
144  MIRBuilder.buildConstant(OffsetReg, Offset);
145 
146  Register AddrReg = MRI.createGenericVirtualRegister(p0);
147  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
148 
149  MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
150  return AddrReg;
151  }
152 
153  void assignValueToReg(Register ValVReg, Register PhysReg,
154  CCValAssign &VA) override {
155  MIB.addUse(PhysReg, RegState::Implicit);
156  Register ExtReg = extendRegister(ValVReg, VA);
157  MIRBuilder.buildCopy(PhysReg, ExtReg);
158  }
159 
160  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
161  MachinePointerInfo &MPO, CCValAssign &VA) override {
162  if (VA.getLocInfo() == CCValAssign::LocInfo::AExt) {
163  Size = VA.getLocVT().getSizeInBits() / 8;
164  ValVReg = MIRBuilder.buildAnyExt(LLT::scalar(Size * 8), ValVReg)
165  ->getOperand(0)
166  .getReg();
167  }
168  auto MMO = MIRBuilder.getMF().getMachineMemOperand(
169  MPO, MachineMemOperand::MOStore, Size, 1);
170  MIRBuilder.buildStore(ValVReg, Addr, *MMO);
171  }
172 
173  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
174  CCValAssign::LocInfo LocInfo,
176  CCState &State) override {
177  bool Res;
178  if (Info.IsFixed)
179  Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
180  else
181  Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
182 
183  StackSize = State.getNextStackOffset();
184  return Res;
185  }
186 
188  CCAssignFn *AssignFnVarArg;
189  uint64_t StackSize;
190 };
191 } // namespace
192 
193 void AArch64CallLowering::splitToValueTypes(
194  const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
195  const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv) const {
196  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
197  LLVMContext &Ctx = OrigArg.Ty->getContext();
198 
199  if (OrigArg.Ty->isVoidTy())
200  return;
201 
202  SmallVector<EVT, 4> SplitVTs;
204  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
205 
206  if (SplitVTs.size() == 1) {
207  // No splitting to do, but we want to replace the original type (e.g. [1 x
208  // double] -> double).
209  SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
210  OrigArg.Flags, OrigArg.IsFixed);
211  return;
212  }
213 
214  // Create one ArgInfo for each virtual register in the original ArgInfo.
215  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
216 
217  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
218  OrigArg.Ty, CallConv, false);
219  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
220  Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
221  SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags,
222  OrigArg.IsFixed);
223  if (NeedsRegBlock)
224  SplitArgs.back().Flags.setInConsecutiveRegs();
225  }
226 
227  SplitArgs.back().Flags.setInConsecutiveRegsLast();
228 }
229 
231  const Value *Val,
232  ArrayRef<Register> VRegs,
233  Register SwiftErrorVReg) const {
234  auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
235  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
236  "Return value without a vreg");
237 
238  bool Success = true;
239  if (!VRegs.empty()) {
240  MachineFunction &MF = MIRBuilder.getMF();
241  const Function &F = MF.getFunction();
242 
243  MachineRegisterInfo &MRI = MF.getRegInfo();
244  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
245  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
246  auto &DL = F.getParent()->getDataLayout();
247  LLVMContext &Ctx = Val->getType()->getContext();
248 
249  SmallVector<EVT, 4> SplitEVTs;
250  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
251  assert(VRegs.size() == SplitEVTs.size() &&
252  "For each split Type there should be exactly one VReg.");
253 
254  SmallVector<ArgInfo, 8> SplitArgs;
256 
257  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
258  if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) > 1) {
259  LLVM_DEBUG(dbgs() << "Can't handle extended arg types which need split");
260  return false;
261  }
262 
263  Register CurVReg = VRegs[i];
264  ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)};
265  setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
266 
267  // i1 is a special case because SDAG i1 true is naturally zero extended
268  // when widened using ANYEXT. We need to do it explicitly here.
269  if (MRI.getType(CurVReg).getSizeInBits() == 1) {
270  CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
271  } else {
272  // Some types will need extending as specified by the CC.
273  MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
274  if (EVT(NewVT) != SplitEVTs[i]) {
275  unsigned ExtendOp = TargetOpcode::G_ANYEXT;
277  Attribute::SExt))
278  ExtendOp = TargetOpcode::G_SEXT;
280  Attribute::ZExt))
281  ExtendOp = TargetOpcode::G_ZEXT;
282 
283  LLT NewLLT(NewVT);
284  LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
285  CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
286  // Instead of an extend, we might have a vector type which needs
287  // padding with more elements, e.g. <2 x half> -> <4 x half>.
288  if (NewVT.isVector()) {
289  if (OldLLT.isVector()) {
290  if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
291  // We don't handle VA types which are not exactly twice the
292  // size, but can easily be done in future.
293  if (NewLLT.getNumElements() != OldLLT.getNumElements() * 2) {
294  LLVM_DEBUG(dbgs() << "Outgoing vector ret has too many elts");
295  return false;
296  }
297  auto Undef = MIRBuilder.buildUndef({OldLLT});
298  CurVReg =
299  MIRBuilder.buildMerge({NewLLT}, {CurVReg, Undef.getReg(0)})
300  .getReg(0);
301  } else {
302  // Just do a vector extend.
303  CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
304  .getReg(0);
305  }
306  } else if (NewLLT.getNumElements() == 2) {
307  // We need to pad a <1 x S> type to <2 x S>. Since we don't have
308  // <1 x S> vector types in GISel we use a build_vector instead
309  // of a vector merge/concat.
310  auto Undef = MIRBuilder.buildUndef({OldLLT});
311  CurVReg =
312  MIRBuilder
313  .buildBuildVector({NewLLT}, {CurVReg, Undef.getReg(0)})
314  .getReg(0);
315  } else {
316  LLVM_DEBUG(dbgs() << "Could not handle ret ty");
317  return false;
318  }
319  } else {
320  // A scalar extend.
321  CurVReg =
322  MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg}).getReg(0);
323  }
324  }
325  }
326  if (CurVReg != CurArgInfo.Regs[0]) {
327  CurArgInfo.Regs[0] = CurVReg;
328  // Reset the arg flags after modifying CurVReg.
329  setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
330  }
331  splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI, CC);
332  }
333 
334  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
335  Success = handleAssignments(MIRBuilder, SplitArgs, Handler);
336  }
337 
338  if (SwiftErrorVReg) {
339  MIB.addUse(AArch64::X21, RegState::Implicit);
340  MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
341  }
342 
343  MIRBuilder.insertInstr(MIB);
344  return Success;
345 }
346 
348  MachineIRBuilder &MIRBuilder, const Function &F,
349  ArrayRef<ArrayRef<Register>> VRegs) const {
350  MachineFunction &MF = MIRBuilder.getMF();
351  MachineBasicBlock &MBB = MIRBuilder.getMBB();
352  MachineRegisterInfo &MRI = MF.getRegInfo();
353  auto &DL = F.getParent()->getDataLayout();
354 
355  SmallVector<ArgInfo, 8> SplitArgs;
356  unsigned i = 0;
357  for (auto &Arg : F.args()) {
358  if (DL.getTypeStoreSize(Arg.getType()) == 0)
359  continue;
360 
361  ArgInfo OrigArg{VRegs[i], Arg.getType()};
362  setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
363 
364  splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv());
365  ++i;
366  }
367 
368  if (!MBB.empty())
369  MIRBuilder.setInstr(*MBB.begin());
370 
371  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
372  CCAssignFn *AssignFn =
373  TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
374 
375  FormalArgHandler Handler(MIRBuilder, MRI, AssignFn);
376  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
377  return false;
378 
379  if (F.isVarArg()) {
381  // FIXME: we need to reimplement saveVarArgsRegisters from
382  // AArch64ISelLowering.
383  return false;
384  }
385 
386  // We currently pass all varargs at 8-byte alignment.
387  uint64_t StackOffset = alignTo(Handler.StackUsed, 8);
388 
389  auto &MFI = MIRBuilder.getMF().getFrameInfo();
391  FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
392  }
393 
394  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
395  if (Subtarget.hasCustomCallingConv())
397 
398  // Move back to the end of the basic block.
399  MIRBuilder.setMBB(MBB);
400 
401  return true;
402 }
403 
405  CallingConv::ID CallConv,
406  const MachineOperand &Callee,
407  const ArgInfo &OrigRet,
408  ArrayRef<ArgInfo> OrigArgs,
409  Register SwiftErrorVReg) const {
410  MachineFunction &MF = MIRBuilder.getMF();
411  const Function &F = MF.getFunction();
412  MachineRegisterInfo &MRI = MF.getRegInfo();
413  auto &DL = F.getParent()->getDataLayout();
414 
415  SmallVector<ArgInfo, 8> SplitArgs;
416  for (auto &OrigArg : OrigArgs) {
417  splitToValueTypes(OrigArg, SplitArgs, DL, MRI, CallConv);
418  // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
419  if (OrigArg.Ty->isIntegerTy(1))
420  SplitArgs.back().Flags.setZExt();
421  }
422 
423  // Find out which ABI gets to decide where things go.
424  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
425  CCAssignFn *AssignFnFixed =
426  TLI.CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
427  CCAssignFn *AssignFnVarArg =
428  TLI.CCAssignFnForCall(CallConv, /*IsVarArg=*/true);
429 
430  auto CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
431 
432  // Create a temporarily-floating call instruction so we can add the implicit
433  // uses of arg registers.
434  auto MIB = MIRBuilder.buildInstrNoInsert(Callee.isReg() ? AArch64::BLR
435  : AArch64::BL);
436  MIB.add(Callee);
437 
438  // Tell the call which registers are clobbered.
439  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
440  const uint32_t *Mask = TRI->getCallPreservedMask(MF, F.getCallingConv());
442  TRI->UpdateCustomCallPreservedMask(MF, &Mask);
443  MIB.addRegMask(Mask);
444 
445  if (TRI->isAnyArgRegReserved(MF))
446  TRI->emitReservedArgRegCallError(MF);
447 
448  // Do the actual argument marshalling.
449  SmallVector<unsigned, 8> PhysRegs;
450  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
451  AssignFnVarArg);
452  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
453  return false;
454 
455  // Now we can add the actual call instruction to the correct basic block.
456  MIRBuilder.insertInstr(MIB);
457 
458  // If Callee is a reg, since it is used by a target specific
459  // instruction, it must have a register class matching the
460  // constraint of that instruction.
461  if (Callee.isReg())
463  MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
464  *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Callee, 0));
465 
466  // Finally we can copy the returned value back into its virtual-register. In
467  // symmetry with the arugments, the physical register must be an
468  // implicit-define of the call instruction.
469  CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
470  if (!OrigRet.Ty->isVoidTy()) {
471  SplitArgs.clear();
472 
473  splitToValueTypes(OrigRet, SplitArgs, DL, MRI, F.getCallingConv());
474 
475  CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
476  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
477  return false;
478  }
479 
480  if (SwiftErrorVReg) {
481  MIB.addDef(AArch64::X21, RegState::Implicit);
482  MIRBuilder.buildCopy(SwiftErrorVReg, Register(AArch64::X21));
483  }
484 
485  CallSeqStart.addImm(Handler.StackSize).addImm(0);
486  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
487  .addImm(Handler.StackSize)
488  .addImm(0);
489 
490  return true;
491 }
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition: Function.h:176
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
const MachineInstrBuilder & add(const MachineOperand &MO) const
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:641
static LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, const MachineOperand &RegMO, unsigned OpIdx)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition: Utils.cpp:40
This class represents lattice values for constants.
Definition: AllocatorList.h:23
bool hasCustomCallingConv() const
Register getReg(unsigned Idx) const
Get the register for the operand index.
bool isVector() const
Return true if this is a vector value type.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1100
LLT getType(unsigned Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register...
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
unsigned const TargetRegisterInfo * TRI
F(f)
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
Definition: MathExtras.h:689
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register >> VRegs) const override
This hook must be implemented to lower the incoming (formal) arguments, described by VRegs...
bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const
Return true if the attribute exists at the given index.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
bool isVector() const
bool handleAssignments(MachineIRBuilder &MIRBuilder, ArrayRef< ArgInfo > Args, ValueHandler &Handler) const
Invoke Handler::assignArg on each of the given Args and then use Callback to move them to the assigne...
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:369
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:217
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
const MachineInstrBuilder & addUse(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:196
This file contains the simple types necessary to represent the attributes associated with functions a...
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
LocInfo getLocInfo() const
unsigned getSizeInBits() const
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:244
const AArch64RegisterInfo * getRegisterInfo() const override
bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, const MachineOperand &Callee, const ArgInfo &OrigRet, ArrayRef< ArgInfo > OrigArgs, Register SwiftErrorVReg) const override
This hook must be implemented to lower the given call instruction, including argument and return valu...
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:223
virtual const TargetInstrInfo * getInstrInfo() const
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:119
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
unsigned const MachineRegisterInfo * MRI
Machine Value Type.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:148
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
Helper class to build MachineInstr.
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
void setInstr(MachineInstr &MI)
Set the insertion point to before MI.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, Register SwiftErrorVReg) const override
This hook must be implemented to lower outgoing return values, described by Val, into the specified v...
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:312
void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL, const FuncInfoTy &FuncInfo) const
MachineInstrBuilder buildGEP(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_GEP Op0, Op1.
Extended Value Type.
Definition: ValueTypes.h:33
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
size_t size() const
Definition: SmallVector.h:52
Argument handling is mostly uniform between the four places that make these decisions: function forma...
Definition: CallLowering.h:68
This class contains a discriminated union of information about pointers in memory operands...
The memory access writes data.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
CCState - This class holds information needed while lowering arguments and return values...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:212
MachineOperand class - Representation of each machine instruction operand.
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
CCValAssign - Represent assignment of one arg/retval to a location.
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Promote Memory to Register
Definition: Mem2Reg.cpp:109
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
const Function & getFunction() const
Return the LLVM function that this machine code represents.
This file declares the MachineIRBuilder class.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
#define Success
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
void UpdateCustomCalleeSavedRegs(MachineFunction &MF) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
void setReg(unsigned Reg)
Change the register this operand corresponds to.
The memory access always returns the same value (or traps).
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
uint32_t Size
Definition: Profile.cpp:46
This file describes how to lower LLVM calls to machine code calls.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:575
LLVM Value Representation.
Definition: Value.h:72
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:444
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E&#39;s largest value.
Definition: BitmaskEnum.h:80
MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
AArch64CallLowering(const AArch64TargetLowering &TLI)
SmallVector< Register, 4 > Regs
Definition: CallLowering.h:47
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
const MachineInstrBuilder & addDef(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Register getReg() const
getReg - Returns the register number.
#define LLVM_DEBUG(X)
Definition: Debug.h:122
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:416
iterator_range< arg_iterator > args()
Definition: Function.h:713
Wrapper class representing virtual and physical registers.
Definition: Register.h:18
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:143