ARMCallLowering.cpp
//===- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "ARMCallLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMISelLowering.h"
#include "ARMSubtarget.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
    : CallLowering(&TLI) {}

static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
                            Type *T) {
  if (T->isArrayTy())
    return true;

  if (T->isStructTy()) {
    // For now we only allow homogeneous structs that we can manipulate with
    // G_MERGE_VALUES and G_UNMERGE_VALUES
    auto StructT = cast<StructType>(T);
    for (unsigned i = 1, e = StructT->getNumElements(); i != e; ++i)
      if (StructT->getElementType(i) != StructT->getElementType(0))
        return false;
    return true;
  }

  EVT VT = TLI.getValueType(DL, T, true);
  if (!VT.isSimple() || VT.isVector() ||
      !(VT.isInteger() || VT.isFloatingPoint()))
    return false;

  unsigned VTSize = VT.getSimpleVT().getSizeInBits();

  if (VTSize == 64)
    // FIXME: Support i64 too
    return VT.isFloatingPoint();

  return VTSize == 1 || VTSize == 8 || VTSize == 16 || VTSize == 32;
}

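// For illustration (not exhaustive): under the check above, i1/i8/i16/i32,
// float and double are supported, as are array types and homogeneous structs
// such as {i32, i32}. 64-bit integers, vector types and mixed structs such as
// {i32, float} are rejected, which makes the lowering hooks below return false
// and the GlobalISel path bail out on such signatures.
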
namespace {

/// Helper class for values going out through an ABI boundary (used for handling
/// function return values and call parameters).
struct OutgoingValueHandler : public CallLowering::ValueHandler {
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  unsigned getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    LLT p0 = LLT::pointer(0, 32);
    LLT s32 = LLT::scalar(32);
    unsigned SPReg = MRI.createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(SPReg, ARM::SP);

    unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
    MIRBuilder.buildConstant(OffsetReg, Offset);

    unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
    MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
    return AddrReg;
  }

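  // Roughly speaking, for a stack-passed piece at offset 8 the code above
  // produces generic MIR along the lines of (register names illustrative):
  //   %sp:_(p0)   = COPY $sp
  //   %off:_(s32) = G_CONSTANT i32 8
  //   %addr:_(p0) = G_GEP %sp, %off
  // and assignValueToAddress below then stores the value through %addr.
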
  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
                        CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
    assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");

    unsigned ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    unsigned ExtReg = extendRegister(ValVReg, VA);
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
        /* Alignment */ 0);
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }

  unsigned assignCustomValue(const CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    CCValAssign VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValVT() == MVT::f64 && "Unsupported type");

    CCValAssign NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

    unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
    MIRBuilder.buildUnmerge(NewRegs, Arg.Reg);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    return 1;
  }

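  // The custom case above handles f64 values that the calling convention
  // splits across a pair of 32-bit GPRs (e.g. for soft-float ABIs): the f64
  // is G_UNMERGE'd into two s32 halves, the halves are swapped on big-endian
  // subtargets, and each half is then copied into its assigned register.
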
  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, CCState &State) override {
    if (AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State))
      return true;

    StackSize =
        std::max(StackSize, static_cast<uint64_t>(State.getNextStackOffset()));
    return false;
  }

  MachineInstrBuilder &MIB;
  uint64_t StackSize = 0;
};

} // end anonymous namespace

void ARMCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
    MachineFunction &MF, const SplitArgTy &PerformArgSplit) const {
  const ARMTargetLowering &TLI = *getTLI<ARMTargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  if (SplitVTs.size() == 1) {
    // Even if there is no splitting to do, we still want to replace the
    // original type (e.g. pointer type -> integer).
    auto Flags = OrigArg.Flags;
    unsigned OriginalAlignment = DL.getABITypeAlignment(OrigArg.Ty);
    Flags.setOrigAlign(OriginalAlignment);
    SplitArgs.emplace_back(OrigArg.Reg, SplitVTs[0].getTypeForEVT(Ctx), Flags,
                           OrigArg.IsFixed);
    return;
  }

  unsigned FirstRegIdx = SplitArgs.size();
  for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) {
    EVT SplitVT = SplitVTs[i];
    Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
    auto Flags = OrigArg.Flags;

    unsigned OriginalAlignment = DL.getABITypeAlignment(SplitTy);
    Flags.setOrigAlign(OriginalAlignment);

    bool NeedsConsecutiveRegisters =
        TLI.functionArgumentNeedsConsecutiveRegisters(
            SplitTy, F.getCallingConv(), F.isVarArg());
    if (NeedsConsecutiveRegisters) {
      Flags.setInConsecutiveRegs();
      if (i == e - 1)
        Flags.setInConsecutiveRegsLast();
    }

    SplitArgs.push_back(
        ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL)),
                SplitTy, Flags, OrigArg.IsFixed});
  }

  for (unsigned i = 0; i < Offsets.size(); ++i)
    PerformArgSplit(SplitArgs[FirstRegIdx + i].Reg, Offsets[i] * 8);
}

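// As an illustration, splitting an argument of type {i32, i32} produces two
// s32 pieces; ComputeValueVTs reports their byte offsets (0 and 4), and
// PerformArgSplit is invoked with the corresponding bit offsets (0 and 32),
// since Offsets[i] is multiplied by 8 above.
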
/// Lower the return value for the already existing \p Ret. This assumes that
/// \p MIRBuilder's insertion point is correct.
bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                     const Value *Val, ArrayRef<unsigned> VRegs,
                                     MachineInstrBuilder &Ret) const {
  if (!Val)
    // Nothing to do here.
    return true;

  auto &MF = MIRBuilder.getMF();
  const auto &F = MF.getFunction();

  auto DL = MF.getDataLayout();
  auto &TLI = *getTLI<ARMTargetLowering>();
  if (!isSupportedType(DL, TLI, Val->getType()))
    return false;

  SmallVector<EVT, 4> SplitEVTs;
  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
  assert(VRegs.size() == SplitEVTs.size() &&
         "For each split Type there should be exactly one VReg.");

  SmallVector<ArgInfo, 4> SplitVTs;
  LLVMContext &Ctx = Val->getType()->getContext();
  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    ArgInfo CurArgInfo(VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx));
    setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);

    SmallVector<unsigned, 4> Regs;
    splitToValueTypes(
        CurArgInfo, SplitVTs, MF,
        [&](unsigned Reg, uint64_t Offset) { Regs.push_back(Reg); });
    if (Regs.size() > 1)
      MIRBuilder.buildUnmerge(Regs, VRegs[i]);
  }

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg());

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret, AssignFn);
  return handleAssignments(MIRBuilder, SplitVTs, RetHandler);
}

bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                  const Value *Val,
                                  ArrayRef<unsigned> VRegs) const {
  assert(!Val == VRegs.empty() && "Return value without a vreg");

  auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
  unsigned Opcode = ST.getReturnOpcode();
  auto Ret = MIRBuilder.buildInstrNoInsert(Opcode).add(predOps(ARMCC::AL));

  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
    return false;

  MIRBuilder.insertInstr(Ret);
  return true;
}

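// For a simple `ret i32 %x`, lowerReturnVal routes %x through the location
// chosen by CCAssignFnForReturn (typically $r0 under AAPCS) and records it as
// an implicit use of the return instruction built above, so the register stays
// live until the return.
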
namespace {

/// Helper class for values coming in through an ABI boundary (used for handling
/// formal arguments and call return values).
struct IncomingValueHandler : public CallLowering::ValueHandler {
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       CCAssignFn AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn) {}

  unsigned getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

    unsigned AddrReg =
        MRI.createGenericVirtualRegister(LLT::pointer(MPO.getAddrSpace(), 32));
    MIRBuilder.buildFrameIndex(AddrReg, FI);

    return AddrReg;
  }

  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    if (VA.getLocInfo() == CCValAssign::SExt ||
        VA.getLocInfo() == CCValAssign::ZExt) {
      // If the value is zero- or sign-extended, its size becomes 4 bytes, so
      // that's what we should load.
      Size = 4;
      assert(MRI.getType(ValVReg).isScalar() && "Only scalars supported atm");

      auto LoadVReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
      buildLoad(LoadVReg, Addr, Size, /* Alignment */ 0, MPO);
      MIRBuilder.buildTrunc(ValVReg, LoadVReg);
    } else {
      // If the value is not extended, a simple load will suffice.
      buildLoad(ValVReg, Addr, Size, /* Alignment */ 0, MPO);
    }
  }

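  // For example, an i8 argument that the ABI passes on the stack as a sign- or
  // zero-extended word is loaded here as a full s32 and then narrowed with
  // G_TRUNC to the expected s8 value.
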
  void buildLoad(unsigned Val, unsigned Addr, uint64_t Size, unsigned Alignment,
                 MachinePointerInfo &MPO) {
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad, Size, Alignment);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }

  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
                        CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    auto ValSize = VA.getValVT().getSizeInBits();
    auto LocSize = VA.getLocVT().getSizeInBits();

    assert(ValSize <= 64 && "Unsupported value size");
    assert(LocSize <= 64 && "Unsupported location size");

    markPhysRegUsed(PhysReg);
    if (ValSize == LocSize) {
      MIRBuilder.buildCopy(ValVReg, PhysReg);
    } else {
      assert(ValSize < LocSize && "Extensions not supported");

      // We cannot create a truncating copy, nor a trunc of a physical register.
      // Therefore, we need to copy the content of the physical register into a
      // virtual one and then truncate that.
      auto PhysRegToVReg =
          MRI.createGenericVirtualRegister(LLT::scalar(LocSize));
      MIRBuilder.buildCopy(PhysRegToVReg, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, PhysRegToVReg);
    }
  }

  unsigned assignCustomValue(const ARMCallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    CCValAssign VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");
    assert(VA.getValVT() == MVT::f64 && "Unsupported type");

    CCValAssign NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

    unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    MIRBuilder.buildMerge(Arg.Reg, NewRegs);

    return 1;
  }

  /// Marking a physical register as used is different between formal
  /// parameters, where it's a basic block live-in, and call returns, where it's
  /// an implicit-def of the call instruction.
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

struct FormalArgHandler : public IncomingValueHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn AssignFn)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace

bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                           const Function &F,
                                           ArrayRef<unsigned> VRegs) const {
  auto &TLI = *getTLI<ARMTargetLowering>();
  auto Subtarget = TLI.getSubtarget();

  if (Subtarget->isThumb())
    return false;

  // Quick exit if there aren't any args
  if (F.arg_empty())
    return true;

  if (F.isVarArg())
    return false;

  auto &MF = MIRBuilder.getMF();
  auto &MBB = MIRBuilder.getMBB();
  auto DL = MF.getDataLayout();

  for (auto &Arg : F.args()) {
    if (!isSupportedType(DL, TLI, Arg.getType()))
      return false;
    if (Arg.hasByValOrInAllocaAttr())
      return false;
  }

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), F.isVarArg());

  FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo(),
                              AssignFn);

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 4> SplitRegs;
  unsigned Idx = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[Idx], Arg.getType());
    setArgFlags(AInfo, Idx + AttributeList::FirstArgIndex, DL, F);

    SplitRegs.clear();

    splitToValueTypes(AInfo, ArgInfos, MF, [&](unsigned Reg, uint64_t Offset) {
      SplitRegs.push_back(Reg);
    });

    if (!SplitRegs.empty())
      MIRBuilder.buildMerge(VRegs[Idx], SplitRegs);

    Idx++;
  }

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler))
    return false;

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);
  return true;
}

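// To illustrate the merge step above: a formal argument of type {i32, i32} is
// split by splitToValueTypes into two s32 vregs, each assigned its own
// location by the calling convention, and then reassembled into the single
// incoming vreg with G_MERGE_VALUES.
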
namespace {

struct CallReturnHandler : public IncomingValueHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

} // end anonymous namespace

bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallingConv::ID CallConv,
                                const MachineOperand &Callee,
                                const ArgInfo &OrigRet,
                                ArrayRef<ArgInfo> OrigArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const auto &TLI = *getTLI<ARMTargetLowering>();
  const auto &DL = MF.getDataLayout();
  const auto &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (STI.genLongCalls())
    return false;

  auto CallSeqStart = MIRBuilder.buildInstr(ARM::ADJCALLSTACKDOWN);

  // Create the call instruction so we can add the implicit uses of arg
  // registers, but don't insert it yet.
  bool isDirect = !Callee.isReg();
  auto CallOpcode =
      isDirect ? ARM::BL
               : STI.hasV5TOps()
                     ? ARM::BLX
                     : STI.hasV4TOps() ? ARM::BX_CALL : ARM::BMOVPCRX_CALL;
  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpcode)
                 .add(Callee)
                 .addRegMask(TRI->getCallPreservedMask(MF, CallConv));
  if (Callee.isReg()) {
    auto CalleeReg = Callee.getReg();
    if (CalleeReg && !TRI->isPhysicalRegister(CalleeReg))
      MIB->getOperand(0).setReg(constrainOperandRegClass(
          MF, *TRI, MRI, *STI.getInstrInfo(), *STI.getRegBankInfo(),
          *MIB.getInstr(), MIB->getDesc(), Callee, 0));
  }

  SmallVector<ArgInfo, 8> ArgInfos;
  for (auto Arg : OrigArgs) {
    if (!isSupportedType(DL, TLI, Arg.Ty))
      return false;

    if (!Arg.IsFixed)
      return false;

    if (Arg.Flags.isByVal())
      return false;

    SmallVector<unsigned, 8> Regs;
    splitToValueTypes(Arg, ArgInfos, MF, [&](unsigned Reg, uint64_t Offset) {
      Regs.push_back(Reg);
    });

    if (Regs.size() > 1)
      MIRBuilder.buildUnmerge(Regs, Arg.Reg);
  }

  auto ArgAssignFn = TLI.CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
  OutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB, ArgAssignFn);
  if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler))
    return false;

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  if (!OrigRet.Ty->isVoidTy()) {
    if (!isSupportedType(DL, TLI, OrigRet.Ty))
      return false;

    ArgInfos.clear();
    SmallVector<unsigned, 8> SplitRegs;
    splitToValueTypes(OrigRet, ArgInfos, MF,
                      [&](unsigned Reg, uint64_t Offset) {
                        SplitRegs.push_back(Reg);
                      });

    auto RetAssignFn = TLI.CCAssignFnForReturn(CallConv, /*IsVarArg=*/false);
    CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn);
    if (!handleAssignments(MIRBuilder, ArgInfos, RetHandler))
      return false;

    if (!SplitRegs.empty()) {
      // We have split the value and allocated each individual piece, now build
      // it up again.
      MIRBuilder.buildMerge(OrigRet.Reg, SplitRegs);
    }
  }

  // We now know the size of the stack - update the ADJCALLSTACKDOWN
  // accordingly.
  CallSeqStart.addImm(ArgHandler.StackSize).addImm(0).add(predOps(ARMCC::AL));

  MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
      .addImm(ArgHandler.StackSize)
      .addImm(0)
      .add(predOps(ARMCC::AL));

  return true;
}
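
// Taken together, a lowered call is bracketed by ADJCALLSTACKDOWN and
// ADJCALLSTACKUP, whose immediates are patched with the final StackSize once
// all arguments have been assigned. The outgoing arguments are copied or
// stored into their assigned locations before the BL/BLX/BX_CALL is inserted,
// and the return value, if any, is copied out of its assigned registers
// afterwards (typically $r0/$r1 under AAPCS, as chosen by CCAssignFnForReturn).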