53#define DEBUG_TYPE "aarch64-call-lowering" 
   71  if (OrigVT == MVT::i1 || OrigVT == MVT::i8)
 
   72    ValVT = LocVT = MVT::i8;
 
   73  else if (OrigVT == MVT::i16)
 
   74    ValVT = LocVT = MVT::i16;
 
 
   80  return (ValVT == MVT::i8 || ValVT == MVT::i16) ? 
LLT(ValVT)
 
 
   86struct AArch64IncomingValueAssigner
 
   88  AArch64IncomingValueAssigner(
CCAssignFn *AssignFn_,
 
   90      : IncomingValueAssigner(AssignFn_, AssignFnVarArg_) {}
 
   92  bool assignArg(
unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
 
   94                 const CallLowering::ArgInfo &
Info, ISD::ArgFlagsTy Flags,
 
   95                 CCState &State)
 override {
 
   97    return IncomingValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT,
 
   98                                            LocInfo, 
Info, Flags, State);
 
  102struct AArch64OutgoingValueAssigner
 
  104  const AArch64Subtarget &Subtarget;
 
  111  AArch64OutgoingValueAssigner(
CCAssignFn *AssignFn_,
 
  113                               const AArch64Subtarget &Subtarget_,
 
  115      : OutgoingValueAssigner(AssignFn_, AssignFnVarArg_),
 
  116        Subtarget(Subtarget_), IsReturn(IsReturn) {}
 
  118  bool assignArg(
unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
 
  120                 const CallLowering::ArgInfo &
Info, ISD::ArgFlagsTy Flags,
 
  121                 CCState &State)
 override {
 
  125    bool UseVarArgsCCForFixed = IsCalleeWin && State.
isVarArg();
 
  128    if (!
Flags.isVarArg() && !UseVarArgsCCForFixed) {
 
  131      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, 
Info.Ty, State);
 
  133      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, 
Info.Ty, State);
 
  141  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &
MRI)
 
  142      : IncomingValueHandler(MIRBuilder, 
MRI) {}
 
  145                           MachinePointerInfo &MPO,
 
  146                           ISD::ArgFlagsTy Flags)
 override {
 
  147    auto &MFI = MIRBuilder.getMF().getFrameInfo();
 
  151    const bool IsImmutable = !
Flags.isByVal();
 
  153    int FI = MFI.CreateFixedObject(
Size, 
Offset, IsImmutable);
 
  155    auto AddrReg = MIRBuilder.buildFrameIndex(
LLT::pointer(0, 64), FI);
 
  156    return AddrReg.getReg(0);
 
  159  LLT getStackValueStoreType(
const DataLayout &
DL, 
const CCValAssign &VA,
 
  160                             ISD::ArgFlagsTy Flags)
 const override {
 
  163    if (
Flags.isPointer())
 
  169                        const CCValAssign &VA)
 override {
 
  170    markRegUsed(PhysReg);
 
  171    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
 
  175                            const MachinePointerInfo &MPO,
 
  176                            const CCValAssign &VA)
 override {
 
  177    MachineFunction &MF = MIRBuilder.getMF();
 
  197    case CCValAssign::LocInfo::ZExt:
 
  198      MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, ValVReg, Addr, *MMO);
 
  200    case CCValAssign::LocInfo::SExt:
 
  201      MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, ValVReg, Addr, *MMO);
 
  204      MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
 
  225struct CallReturnHandler : 
public IncomingArgHandler {
 
  226  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &
MRI,
 
  227                    MachineInstrBuilder MIB)
 
  228      : IncomingArgHandler(MIRBuilder, 
MRI), MIB(MIB) {}
 
  234  MachineInstrBuilder MIB;
 
  238struct ReturnedArgCallReturnHandler : 
public CallReturnHandler {
 
  239  ReturnedArgCallReturnHandler(MachineIRBuilder &MIRBuilder,
 
  240                               MachineRegisterInfo &
MRI,
 
  241                               MachineInstrBuilder MIB)
 
  242      : CallReturnHandler(MIRBuilder, 
MRI, MIB) {}
 
  248  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &
MRI,
 
  249                     MachineInstrBuilder MIB, 
bool IsTailCall = 
false,
 
  251      : OutgoingValueHandler(MIRBuilder, 
MRI), MIB(MIB), IsTailCall(IsTailCall),
 
  253        Subtarget(MIRBuilder.getMF().getSubtarget<AArch64Subtarget>()) {}
 
  256                           MachinePointerInfo &MPO,
 
  257                           ISD::ArgFlagsTy Flags)
 override {
 
  258    MachineFunction &MF = MIRBuilder.getMF();
 
  263      assert(!
Flags.isByVal() && 
"byval unhandled with tail calls");
 
  267      auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
 
  269      return FIReg.getReg(0);
 
  273      SPReg = MIRBuilder.buildCopy(p0, 
Register(AArch64::SP)).getReg(0);
 
  275    auto OffsetReg = MIRBuilder.buildConstant(s64, 
Offset);
 
  277    auto AddrReg = MIRBuilder.buildPtrAdd(p0, 
SPReg, OffsetReg);
 
  280    return AddrReg.getReg(0);
 
  287  LLT getStackValueStoreType(
const DataLayout &
DL, 
const CCValAssign &VA,
 
  288                             ISD::ArgFlagsTy Flags)
 const override {
 
  289    if (
Flags.isPointer())
 
  295                        const CCValAssign &VA)
 override {
 
  297    Register ExtReg = extendRegister(ValVReg, VA);
 
  298    MIRBuilder.buildCopy(PhysReg, ExtReg);
 
  303                                          const CCValAssign &VA,
 
  308    auto *
DefMI = 
MRI.getVRegDef(ValVReg);
 
  313      if (
Op == TargetOpcode::G_ZEXT || 
Op == TargetOpcode::G_ANYEXT ||
 
  325    auto *LoadAddrDef = 
MRI.getVRegDef(LoadReg);
 
  326    if (LoadAddrDef->getOpcode() != TargetOpcode::G_FRAME_INDEX)
 
  329    int LoadFI = LoadAddrDef->getOperand(1).getIndex();
 
  331    auto *StoreAddrDef = 
MRI.getVRegDef(StoreAddr);
 
  332    if (StoreAddrDef->getOpcode() != TargetOpcode::G_FRAME_INDEX)
 
  334    int StoreFI = StoreAddrDef->getOperand(1).getIndex();
 
  347                            const MachinePointerInfo &MPO,
 
  348                            const CCValAssign &VA)
 override {
 
  349    MachineFunction &MF = MIRBuilder.getMF();
 
  354    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
 
  357  void assignValueToAddress(
const CallLowering::ArgInfo &Arg, 
unsigned RegIndex,
 
  359                            const MachinePointerInfo &MPO,
 
  360                            const CCValAssign &VA)
 override {
 
  364    if (Arg.
Flags[0].isVarArg())
 
  368    if (VA.
getLocInfo() != CCValAssign::LocInfo::FPExt) {
 
  377      ValVReg = extendRegister(ValVReg, VA, MaxSize);
 
  383    assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
 
  386  MachineInstrBuilder MIB;
 
  397  const AArch64Subtarget &Subtarget;
 
  413         "Return value without a vreg");
 
  418  } 
else if (!VRegs.
empty()) {
 
  425    CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(
F.getCallingConv());
 
  426    auto &
DL = 
F.getDataLayout();
 
  432           "For each split Type there should be exactly one VReg.");
 
  437    for (
unsigned i = 0; i < SplitEVTs.
size(); ++i) {
 
  439      ArgInfo CurArgInfo = 
ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx), 0};
 
  444      auto &Flags = CurArgInfo.
Flags[0];
 
  446          !Flags.isSExt() && !Flags.isZExt()) {
 
  448      } 
else if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) ==
 
  451        MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
 
  452        if (
EVT(NewVT) != SplitEVTs[i]) {
 
  453          unsigned ExtendOp = TargetOpcode::G_ANYEXT;
 
  454          if (
F.getAttributes().hasRetAttr(Attribute::SExt))
 
  455            ExtendOp = TargetOpcode::G_SEXT;
 
  456          else if (
F.getAttributes().hasRetAttr(Attribute::ZExt))
 
  457            ExtendOp = TargetOpcode::G_ZEXT;
 
  472                CurVReg = MIRBuilder.
buildInstr(ExtendOp, {NewLLT}, {CurVReg})
 
  491            if (NewLLT != 
MRI.getType(CurVReg)) {
 
  493              CurVReg = MIRBuilder.
buildInstr(ExtendOp, {NewLLT}, {CurVReg})
 
  499      if (CurVReg != CurArgInfo.
Regs[0]) {
 
  500        CurArgInfo.
Regs[0] = CurVReg;
 
  507    AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,
 
  509    OutgoingArgHandler Handler(MIRBuilder, 
MRI, MIB);
 
  511                                            MIRBuilder, CC, 
F.isVarArg());
 
  514  if (SwiftErrorVReg) {
 
  516    MIRBuilder.
buildCopy(AArch64::X21, SwiftErrorVReg);
 
 
  526                                         bool IsVarArg)
 const {
 
  529  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
 
  532  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv));
 
 
  549  assert(
F.isVarArg() && 
"Expected F to be vararg?");
 
  553  CCState CCInfo(
F.getCallingConv(), 
true, MF, ArgLocs,
 
  572  for (
const auto &
F : Forwards) {
 
  573    MBB.addLiveIn(
F.PReg);
 
 
  582                            return A.getType()->isScalableTy();
 
  586  if (!ST.hasNEON() || !ST.hasFPARMv8()) {
 
  587    LLVM_DEBUG(
dbgs() << 
"Falling back to SDAG because we don't support no-NEON\n");
 
  592  if (Attrs.hasZAState() || Attrs.hasZT0State() ||
 
  593      Attrs.hasStreamingInterfaceOrBody() ||
 
  594      Attrs.hasStreamingCompatibleInterface())
 
 
  600void AArch64CallLowering::saveVarArgRegisters(
 
  611  bool IsWin64CC = Subtarget.isCallingConvWin64(CCInfo.
getCallingConv(),
 
  617  unsigned NumVariadicGPRArgRegs = 
GPRArgRegs.size() - FirstVariadicGPR + 1;
 
  619  unsigned GPRSaveSize = 8 * (
GPRArgRegs.size() - FirstVariadicGPR);
 
  621  if (GPRSaveSize != 0) {
 
  624                                     -
static_cast<int>(GPRSaveSize), 
false);
 
  625      if (GPRSaveSize & 15)
 
  628                              -
static_cast<int>(
alignTo(GPRSaveSize, 16)),
 
  637    for (
unsigned i = FirstVariadicGPR; i < 
GPRArgRegs.size(); ++i) {
 
  638      Register Val = 
MRI.createGenericVirtualRegister(s64);
 
  644                               MF, GPRIdx, (i - FirstVariadicGPR) * 8)
 
  648      FIN = MIRBuilder.
buildPtrAdd(
MRI.createGenericVirtualRegister(p0),
 
  655  if (Subtarget.hasFPARMv8() && !IsWin64CC) {
 
  658    unsigned FPRSaveSize = 16 * (
FPRArgRegs.size() - FirstVariadicFPR);
 
  660    if (FPRSaveSize != 0) {
 
  667      for (
unsigned i = FirstVariadicFPR; i < 
FPRArgRegs.size(); ++i) {
 
  678        FIN = MIRBuilder.
buildPtrAdd(
MRI.createGenericVirtualRegister(p0),
 
  693  auto &
DL = 
F.getDataLayout();
 
  698  if (
F.isVarArg() && Subtarget.isWindowsArm64EC())
 
  708      Subtarget.isCallingConvWin64(
F.getCallingConv(), 
F.isVarArg()) &&
 
  709      !Subtarget.isWindowsArm64EC();
 
  720  for (
auto &Arg : 
F.args()) {
 
  721    if (
DL.getTypeStoreSize(Arg.getType()).isZero())
 
  724    ArgInfo OrigArg{VRegs[i], Arg, i};
 
  731             MRI.getType(OrigArg.
Regs[0]).getSizeInBits() == 1 &&
 
  732             "Unexpected registers used for i1 arg");
 
  734      auto &Flags = OrigArg.
Flags[0];
 
  735      if (!Flags.isZExt() && !Flags.isSExt()) {
 
  739        OrigArg.
Regs[0] = WideReg;
 
  744    if (Arg.hasAttribute(Attribute::SwiftAsync))
 
  755  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(
F.getCallingConv(), IsWin64 && 
F.isVarArg());
 
  757  AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);
 
  760  CCState CCInfo(
F.getCallingConv(), 
F.isVarArg(), MF, ArgLocs, 
F.getContext());
 
  765  if (!BoolArgs.
empty()) {
 
  766    for (
auto &KV : BoolArgs) {
 
  769      LLT WideTy = 
MRI.getType(WideReg);
 
  770      assert(
MRI.getType(OrigReg).getScalarSizeInBits() == 1 &&
 
  771             "Unexpected bit size of a bool arg");
 
  778  uint64_t StackSize = Assigner.StackSize;
 
  780    if ((!Subtarget.isTargetDarwin() && !Subtarget.isWindowsArm64EC()) || IsWin64) {
 
  786      saveVarArgRegisters(MIRBuilder, Handler, CCInfo);
 
  787    } 
else if (Subtarget.isWindowsArm64EC()) {
 
  792    StackSize = 
alignTo(Assigner.StackSize, Subtarget.isTargetILP32() ? 4 : 8);
 
  802    StackSize = 
alignTo(StackSize, 16);
 
  818  if (Subtarget.hasCustomCallingConv())
 
  819    Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
 
 
  854static std::pair<CCAssignFn *, CCAssignFn *>
 
  859bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
 
  867  if (CalleeCC == CallerCC)
 
  874  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
 
  879  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
 
  882  AArch64IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
 
  883                                              CalleeAssignFnVarArg);
 
  884  AArch64IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
 
  885                                              CallerAssignFnVarArg);
 
  892  const uint32_t *CallerPreserved = 
TRI->getCallPreservedMask(MF, CallerCC);
 
  893  const uint32_t *CalleePreserved = 
TRI->getCallPreservedMask(MF, CalleeCC);
 
  894  if (MF.
getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
 
  895    TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
 
  896    TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
 
  899  return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
 
  902bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
 
  906  if (OrigOutArgs.
empty())
 
  914  const AArch64Subtarget &Subtarget = MF.
getSubtarget<AArch64Subtarget>();
 
  922  CCState OutInfo(CalleeCC, 
false, MF, OutLocs, Ctx);
 
  924  AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
 
  935  const AArch64FunctionInfo *FuncInfo = MF.
getInfo<AArch64FunctionInfo>();
 
  937    LLVM_DEBUG(
dbgs() << 
"... Cannot fit call operands on caller's stack.\n");
 
  945  const uint32_t *CallerPreservedMask = 
TRI->getCallPreservedMask(MF, CallerCC);
 
  954    for (
unsigned i = 0; i < OutLocs.
size(); ++i) {
 
  955      auto &ArgLoc = OutLocs[i];
 
  956      if (ArgLoc.isRegLoc())
 
  961          << 
"... Cannot tail call vararg function with stack arguments\n");
 
  975  if (!Info.IsTailCall)
 
  984  if (Info.SwiftErrorVReg) {
 
  989    LLVM_DEBUG(
dbgs() << 
"... Cannot handle tail calls with swifterror yet.\n");
 
  994    LLVM_DEBUG(
dbgs() << 
"... Calling convention cannot be tail called.\n");
 
 1016        return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
 
 1018    LLVM_DEBUG(
dbgs() << 
"... Cannot tail call from callers with byval, " 
 1019                         "inreg, or swifterror arguments\n");
 
 1030  if (Info.Callee.isGlobal()) {
 
 1034        (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
 
 1035         TT.isOSBinFormatMachO())) {
 
 1036      LLVM_DEBUG(
dbgs() << 
"... Cannot tail call externally-defined function " 
 1037                           "with weak linkage for this OS.\n");
 
 1052         "Unexpected variadic calling convention");
 
 1056  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
 
 1059        << 
"... Caller and callee have incompatible calling conventions.\n");
 
 1063  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
 
 1067      dbgs() << 
"... Call is eligible for tail call optimization.\n");
 
 
 1073                              std::optional<CallLowering::PtrAuthInfo> &PAI,
 
 1081    assert(IsIndirect && 
"Direct call should not be authenticated");
 
 1083           "Invalid auth call key");
 
 1084    return AArch64::BLRA;
 
 1088    return AArch64::TCRETURNdi;
 
 1094      assert(!PAI && 
"ptrauth tail-calls not yet supported with PAuthLR");
 
 1095      return AArch64::TCRETURNrix17;
 
 1098      return AArch64::AUTH_TCRETURN_BTI;
 
 1099    return AArch64::TCRETURNrix16x17;
 
 1103    assert(!PAI && 
"ptrauth tail-calls not yet supported with PAuthLR");
 
 1104    return AArch64::TCRETURNrinotx16;
 
 1108    return AArch64::AUTH_TCRETURN;
 
 1109  return AArch64::TCRETURNri;
 
 
 1112static const uint32_t *
 
 1117  if (!OutArgs.
empty() && OutArgs[0].Flags[0].isReturned()) {
 
 1119    Mask = 
TRI.getThisReturnPreservedMask(MF, 
Info.CallConv);
 
 1121      OutArgs[0].Flags[0].setReturned(
false);
 
 1122      Mask = 
TRI.getCallPreservedMask(MF, 
Info.CallConv);
 
 1125    Mask = 
TRI.getCallPreservedMask(MF, 
Info.CallConv);
 
 
 1130bool AArch64CallLowering::lowerTailCall(
 
 1133  MachineFunction &MF = MIRBuilder.
getMF();
 
 1137  AArch64FunctionInfo *FuncInfo = MF.
getInfo<AArch64FunctionInfo>();
 
 1150  MachineInstrBuilder CallSeqStart;
 
 1152    CallSeqStart = MIRBuilder.
buildInstr(AArch64::ADJCALLSTACKDOWN);
 
 1159  const AArch64Subtarget &Subtarget = MF.
getSubtarget<AArch64Subtarget>();
 
 1167  if (
Opc == AArch64::AUTH_TCRETURN || 
Opc == AArch64::AUTH_TCRETURN_BTI) {
 
 1170           "Invalid auth call key");
 
 1171    MIB.addImm(
Info.PAI->Key);
 
 1174    uint16_t IntDisc = 0;
 
 1175    std::tie(IntDisc, AddrDisc) =
 
 1178    MIB.addImm(IntDisc);
 
 1179    MIB.addUse(AddrDisc);
 
 1180    if (AddrDisc != AArch64::NoRegister) {
 
 1184          MIB->getOperand(4), 4));
 
 1189  const uint32_t *
Mask = 
TRI->getCallPreservedMask(MF, CalleeCC);
 
 1191    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
 
 1192  MIB.addRegMask(Mask);
 
 1195    MIB->setCFIType(MF, 
Info.CFIType->getZExtValue());
 
 1197  if (
TRI->isAnyArgRegReserved(MF))
 
 1198    TRI->emitReservedArgRegCallError(MF);
 
 1210  unsigned NumBytes = 0;
 
 1217    CCState OutInfo(CalleeCC, 
false, MF, OutLocs, 
F.getContext());
 
 1219    AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
 
 1226    NumBytes = 
alignTo(OutInfo.getStackSize(), 16);
 
 1231    FPDiff = NumReusableBytes - NumBytes;
 
 1235    if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (
unsigned)-FPDiff)
 
 1243    assert(FPDiff % 16 == 0 && 
"unaligned stack on tail call");
 
 1248  AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
 
 1252  OutgoingArgHandler Handler(MIRBuilder, 
MRI, MIB,
 
 1255                                     CalleeCC, 
Info.IsVarArg))
 
 1260  if (
Info.IsVarArg && 
Info.IsMustTailCall) {
 
 1264    for (
const auto &
F : Forwards) {
 
 1268      if (
any_of(MIB->uses(), [&ForwardedReg, &
TRI](
const MachineOperand &Use) {
 
 1271            return TRI->regsOverlap(Use.getReg(), ForwardedReg);
 
 1284    MIB->getOperand(1).setImm(FPDiff);
 
 1298  if (MIB->getOperand(0).isReg())
 
 1301                             MIB->getDesc(), MIB->getOperand(0), 0);
 
 1304  Info.LoweredTailCall = 
true;
 
 1313  auto &
DL = 
F.getDataLayout();
 
 1331  for (
auto &OrigArg : Info.OrigArgs) {
 
 1334    auto &Flags = OrigArg.Flags[0];
 
 1335    if (OrigArg.Ty->isIntegerTy(1) && !Flags.isSExt() && !Flags.isZExt()) {
 
 1338             MRI.getType(OutArg.
Regs[0]).getSizeInBits() == 1 &&
 
 1339             "Unexpected registers used for i1 arg");
 
 1351  if (!Info.OrigRet.Ty->isVoidTy())
 
 1355  bool CanTailCallOpt =
 
 1359  if (Info.IsMustTailCall && !CanTailCallOpt) {
 
 1363    LLVM_DEBUG(
dbgs() << 
"Failed to lower musttail call as tail call\n");
 
 1367  Info.IsTailCall = CanTailCallOpt;
 
 1369    return lowerTailCall(MIRBuilder, Info, OutArgs);
 
 1374  std::tie(AssignFnFixed, AssignFnVarArg) =
 
 1378  CallSeqStart = MIRBuilder.
buildInstr(AArch64::ADJCALLSTACKDOWN);
 
 1388    Opc = Info.PAI ? AArch64::BLRA_RVMARKER : AArch64::BLR_RVMARKER;
 
 1391  else if (Info.CB && Info.CB->hasFnAttr(Attribute::ReturnsTwice) &&
 
 1392           !Subtarget.noBTIAtReturnTwice() &&
 
 1394    Opc = AArch64::BLR_BTI;
 
 1398    if (Info.Callee.isSymbol() && 
F.getParent()->getRtLibUseGOT()) {
 
 1399      auto MIB = MIRBuilder.
buildInstr(TargetOpcode::G_GLOBAL_VALUE);
 
 1408  unsigned CalleeOpNo = 0;
 
 1410  if (
Opc == AArch64::BLR_RVMARKER || 
Opc == AArch64::BLRA_RVMARKER) {
 
 1414    MIB.addGlobalAddress(ARCFn);
 
 1421  } 
else if (Info.CFIType) {
 
 1422    MIB->setCFIType(MF, Info.CFIType->getZExtValue());
 
 1425  MIB.
add(Info.Callee);
 
 1431  AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
 
 1434  OutgoingArgHandler Handler(MIRBuilder, 
MRI, MIB,  
false);
 
 1436                                     Info.CallConv, Info.IsVarArg))
 
 1441  if (
Opc == AArch64::BLRA || 
Opc == AArch64::BLRA_RVMARKER) {
 
 1444           "Invalid auth call key");
 
 1445    MIB.addImm(Info.PAI->Key);
 
 1449    std::tie(IntDisc, AddrDisc) =
 
 1452    MIB.addImm(IntDisc);
 
 1453    MIB.addUse(AddrDisc);
 
 1454    if (AddrDisc != AArch64::NoRegister) {
 
 1457                               MIB->getDesc(), MIB->getOperand(CalleeOpNo + 3),
 
 1464    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
 
 1465  MIB.addRegMask(Mask);
 
 1467  if (
TRI->isAnyArgRegReserved(MF))
 
 1468    TRI->emitReservedArgRegCallError(MF);
 
 1476          ? 
alignTo(Assigner.StackSize, 16)
 
 1480  MIRBuilder.
buildInstr(AArch64::ADJCALLSTACKUP)
 
 1481      .
addImm(Assigner.StackSize)
 
 1487  if (MIB->getOperand(CalleeOpNo).isReg())
 
 1490                             MIB->getOperand(CalleeOpNo), CalleeOpNo);
 
 1495  if (Info.CanLowerReturn  && !Info.OrigRet.Ty->isVoidTy()) {
 
 1496    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
 
 1497    CallReturnHandler Handler(MIRBuilder, 
MRI, MIB);
 
 1498    bool UsingReturnedArg =
 
 1499        !OutArgs.
empty() && OutArgs[0].Flags[0].isReturned();
 
 1501    AArch64OutgoingValueAssigner Assigner(RetAssignFn, RetAssignFn, Subtarget,
 
 1503    ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, 
MRI, MIB);
 
 1505            UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,
 
 1506            MIRBuilder, Info.CallConv, Info.IsVarArg,
 
 1507            UsingReturnedArg ? 
ArrayRef(OutArgs[0].Regs)
 
 1512  if (Info.SwiftErrorVReg) {
 
 1517  if (!Info.CanLowerReturn) {
 
 1519                    Info.DemoteRegister, Info.DemoteStackIndex);
 
 
 1525  return Ty.getSizeInBits() == 64;
 
 
unsigned const MachineRegisterInfo * MRI
 
static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder, CCAssignFn *AssignFn)
Helper function to compute forwarded registers for musttail calls.
 
static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect, bool IsTailCall, std::optional< CallLowering::PtrAuthInfo > &PAI, MachineRegisterInfo &MRI)
 
static LLT getStackValueStoreTypeHack(const CCValAssign &VA)
 
static const uint32_t * getMaskForArgs(SmallVectorImpl< AArch64CallLowering::ArgInfo > &OutArgs, AArch64CallLowering::CallLoweringInfo &Info, const AArch64RegisterInfo &TRI, MachineFunction &MF)
 
static void applyStackPassedSmallTypeDAGHack(EVT OrigVT, MVT &ValVT, MVT &LocVT)
 
static std::pair< CCAssignFn *, CCAssignFn * > getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI)
Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for CC.
 
static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt)
 
This file describes how to lower LLVM calls to machine code calls.
 
MachineInstrBuilder MachineInstrBuilder & DefMI
 
static std::tuple< SDValue, SDValue > extractPtrauthBlendDiscriminators(SDValue Disc, SelectionDAG *DAG)
 
static bool shouldLowerTailCallStackArg(const MachineFunction &MF, const CCValAssign &VA, SDValue Arg, ISD::ArgFlagsTy Flags, int CallOffset)
Check whether a stack argument requires lowering in a tail call.
 
static const MCPhysReg GPRArgRegs[]
 
static const MCPhysReg FPRArgRegs[]
 
cl::opt< bool > EnableSVEGISel("aarch64-enable-gisel-sve", cl::Hidden, cl::desc("Enable / disable SVE scalable vectors in Global ISel"), cl::init(false))
 
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls)
Return true if the calling convention is one that we can guarantee TCO for.
 
static bool mayTailCallThisCC(CallingConv::ID CC)
Return true if we might ever do TCO for calls with this calling convention.
 
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
 
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
 
This file contains the simple types necessary to represent the attributes associated with functions a...
 
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
 
Analysis containing CSE Info
 
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
 
Implement a low-level type suitable for MachineInstr level instruction selection.
 
This file declares the MachineIRBuilder class.
 
Register const TargetRegisterInfo * TRI
 
Promote Memory to Register
 
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
 
This file defines ARC utility functions which are used by various parts of the compiler.
 
static constexpr MCPhysReg SPReg
 
This file defines the SmallVector class.
 
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const override
This hook must be implemented to lower outgoing return values, described by Val, into the specified v...
 
bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv, SmallVectorImpl< BaseArgInfo > &Outs, bool IsVarArg) const override
This hook must be implemented to check whether the return values described by Outs can fit into the r...
 
bool fallBackToDAGISel(const MachineFunction &MF) const override
 
bool isTypeIsValidForThisReturn(EVT Ty) const override
For targets which support the "returned" parameter attribute, returns true if the given type is a val...
 
bool isEligibleForTailCallOptimization(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info, SmallVectorImpl< ArgInfo > &InArgs, SmallVectorImpl< ArgInfo > &OutArgs) const
Returns true if the call can be lowered as a tail call.
 
AArch64CallLowering(const AArch64TargetLowering &TLI)
 
bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const override
This hook must be implemented to lower the given call instruction, including argument and return valu...
 
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register > > VRegs, FunctionLoweringInfo &FLI) const override
This hook must be implemented to lower the incoming (formal) arguments, described by VRegs,...
 
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
 
bool branchTargetEnforcement() const
 
void setVarArgsStackIndex(int Index)
 
void setTailCallReservedStack(unsigned bytes)
 
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
 
void setBytesInStackArgArea(unsigned bytes)
 
void setVarArgsGPRIndex(int Index)
 
bool branchProtectionPAuthLR() const
 
void setVarArgsFPRSize(unsigned Size)
 
unsigned getBytesInStackArgArea() const
 
void setVarArgsFPRIndex(int Index)
 
void setVarArgsGPRSize(unsigned Size)
 
void setArgumentStackToRestore(unsigned bytes)
 
const AArch64RegisterInfo * getRegisterInfo() const override
 
const AArch64InstrInfo * getInstrInfo() const override
 
bool isWindowsArm64EC() const
 
bool isCallingConvWin64(CallingConv::ID CC, bool IsVarArg) const
 
const RegisterBankInfo * getRegBankInfo() const override
 
bool hasCustomCallingConv() const
 
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
 
This class represents an incoming formal argument to a Function.
 
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
 
size_t size() const
size - Get the array size.
 
bool empty() const
empty - Check if the array is empty.
 
CCState - This class holds information needed while lowering arguments and return values.
 
MachineFunction & getMachineFunction() const
 
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
 
LLVM_ABI void analyzeMustTailForwardedRegisters(SmallVectorImpl< ForwardedRegister > &Forwards, ArrayRef< MVT > RegParmTypes, CCAssignFn Fn)
Compute the set of registers that need to be preserved and forwarded to any musttail calls.
 
CallingConv::ID getCallingConv() const
 
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
 
bool isAllocated(MCRegister Reg) const
isAllocated - Return true if the specified register (or an alias) is allocated.
 
CCValAssign - Represent assignment of one arg/retval to a location.
 
LocInfo getLocInfo() const
 
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
 
void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg, int FI) const
Load the returned value from the stack into virtual registers in VRegs.
 
bool handleAssignments(ValueHandler &Handler, SmallVectorImpl< ArgInfo > &Args, CCState &CCState, SmallVectorImpl< CCValAssign > &ArgLocs, MachineIRBuilder &MIRBuilder, ArrayRef< Register > ThisReturnRegs={}) const
Use Handler to insert code to handle the argument/return values represented by Args.
 
bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF, SmallVectorImpl< ArgInfo > &InArgs, ValueAssigner &CalleeAssigner, ValueAssigner &CallerAssigner) const
 
void splitToValueTypes(const ArgInfo &OrigArgInfo, SmallVectorImpl< ArgInfo > &SplitArgs, const DataLayout &DL, CallingConv::ID CallConv, SmallVectorImpl< uint64_t > *Offsets=nullptr) const
Break OrigArgInfo into one or more pieces the calling convention can process, returned in SplitArgs.
 
void insertSRetIncomingArgument(const Function &F, SmallVectorImpl< ArgInfo > &SplitArgs, Register &DemoteReg, MachineRegisterInfo &MRI, const DataLayout &DL) const
Insert the hidden sret ArgInfo to the beginning of SplitArgs.
 
bool determineAndHandleAssignments(ValueHandler &Handler, ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, bool IsVarArg, ArrayRef< Register > ThisReturnRegs={}) const
Invoke ValueAssigner::assignArg on each of the given Args and then use Handler to move them to the as...
 
void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg) const
Store the return value given by VRegs into stack starting at the offset specified in DemoteReg.
 
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< ArgInfo > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
 
bool determineAssignments(ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, CCState &CCInfo) const
Analyze the argument list in Args, using Assigner to populate CCInfo.
 
bool checkReturn(CCState &CCInfo, SmallVectorImpl< BaseArgInfo > &Outs, CCAssignFn *Fn) const
 
CallLowering(const TargetLowering *TLI)
 
const TargetLowering * getTLI() const
Getter for generic TargetLowering class.
 
void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL, const FuncInfoTy &FuncInfo) const
 
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
 
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
 
Register DemoteRegister
DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg allocated to hold a pointer to ...
 
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
 
iterator_range< arg_iterator > args()
 
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
 
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
 
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
 
bool hasExternalWeakLinkage() const
 
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
 
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
 
constexpr bool isVector() const
 
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
 
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
 
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
 
This is an important class for using LLVM in a threaded context.
 
bool isVector() const
Return true if this is a vector value type.
 
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
 
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
 
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
 
bool isImmutableObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to an immutable object.
 
void setHasTailCall(bool V=true)
 
bool hasMustTailInVarArgFunc() const
Returns true if the function is variadic and contains a musttail call.
 
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
 
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
 
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
 
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
 
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
 
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
 
Function & getFunction()
Return the LLVM function that this machine code represents.
 
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do so.
 
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual register for it.
 
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
 
Helper class to build MachineInstr.
 
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
 
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
 
void setInstr(MachineInstr &MI)
Set the insertion point to before MI.
 
MachineInstrBuilder buildAssertZExt(const DstOp &Res, const SrcOp &Op, unsigned Size)
Build and insert Res = G_ASSERT_ZEXT Op, Size.
 
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
 
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
 
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
 
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, .....
 
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
 
MachineFunction & getMF()
Getter for the function we currently build.
 
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
 
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
 
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
 
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
 
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
 
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
 
Register getReg(unsigned Idx) const
Get the register for the operand index.
 
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
 
const MachineInstrBuilder & add(const MachineOperand &MO) const
 
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
 
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
 
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
 
const MachineOperand & getOperand(unsigned i) const
 
@ MOLoad
The memory access reads data.
 
@ MOInvariant
The memory access always returns the same value (or traps).
 
@ MOStore
The memory access writes data.
 
Register getReg() const
getReg - Returns the register number.
 
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
 
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
 
Wrapper class representing virtual and physical registers.
 
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
 
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
 
This class consists of common code factored out of the SmallVector class to reduce code duplication based on element size.
 
void push_back(const T &Elt)
 
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
 
const Triple & getTargetTriple() const
 
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
 
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
 
virtual const TargetInstrInfo * getInstrInfo() const
 
Triple - Helper class for working with autoconf configuration names.
 
static constexpr TypeSize getFixed(ScalarTy ExactSize)
 
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
 
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
 
bool isIntegerTy() const
True if this is an instance of IntegerType.
 
unsigned getNumOperands() const
 
LLVM Value Representation.
 
Type * getType() const
All values are typed, get the type of this value.
 
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the symbol, rather than its address.
 
ArrayRef< MCPhysReg > getFPRArgRegs()
 
ArrayRef< MCPhysReg > getGPRArgRegs()
 
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
 
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
 
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
 
@ ARM64EC_Thunk_Native
Calling convention used in the ARM64EC ABI to implement calls between ARM64 code and thunks.
 
@ Swift
Calling convention for Swift.
 
@ PreserveMost
Used for runtime calls that preserves most registers.
 
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
 
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
 
@ PreserveNone
Used for runtime calls that preserves none general registers.
 
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
 
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
 
@ ARM64EC_Thunk_X64
Calling convention used in the ARM64EC ABI to implement calls between x64 code and thunks.
 
@ C
The default llvm calling convention, compatible with C.
 
@ Implicit
Not emitted register (e.g. carry, or temporary result).
 
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...
 
bool attachedCallOpBundleNeedsMarker(const CallBase *CB)
This function determines whether the clang_arc_attachedcall should be emitted with or without the marker instruction.
 
bool hasAttachedCallOpBundle(const CallBase *CB)
 
This is an optimization pass for GlobalISel generic memory operations.
 
LLVM_ABI Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
 
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
 
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
 
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
 
unsigned getBLRCallOpcode(const MachineFunction &MF)
Return opcode to be used for indirect calls.
 
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
 
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
 
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector&lt;T, 0&gt;).
 
@ Success
The lock was released successfully.
 
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
 
DWARFExpression::Operation Op
 
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual underlying non-aggregate types that comprise it.
 
LLVM_ABI bool isAssertMI(const MachineInstr &MI)
Returns true if the instruction MI is one of the assert instructions.
 
LLVM_ABI LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
 
LLVM_ABI Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
 
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
 
This struct is a compact representation of a valid (non-zero power of two) alignment.
 
SmallVector< Register, 4 > Regs
 
SmallVector< ISD::ArgFlagsTy, 4 > Flags
 
Base class for ValueHandlers used for arguments coming into the current function, or for return values received from a call.
 
void assignValueToReg(Register ValVReg, Register PhysReg, const CCValAssign &VA) override
Provides a default implementation for argument handling.
 
Base class for ValueHandlers used for arguments passed to a function call, or for return values.
 
MachineIRBuilder & MIRBuilder
 
MachineRegisterInfo & MRI
 
virtual LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const
Return the in-memory size to write for the argument at VA.
 
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
 
Describes a register that needs to be forwarded from the prologue to a musttail call.
 
This class contains a discriminated union of information about pointers in memory operands,...
 
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
 
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.