51#define DEBUG_TYPE "aarch64-call-lowering"
54using namespace AArch64GISelUtils;
69 if (OrigVT == MVT::i1 || OrigVT == MVT::i8)
70 ValVT = LocVT = MVT::i8;
71 else if (OrigVT == MVT::i16)
72 ValVT = LocVT = MVT::i16;
78 return (ValVT == MVT::i8 || ValVT == MVT::i16) ?
LLT(ValVT)
84struct AArch64IncomingValueAssigner
86 AArch64IncomingValueAssigner(
CCAssignFn *AssignFn_,
88 : IncomingValueAssigner(AssignFn_, AssignFnVarArg_) {}
90 bool assignArg(
unsigned ValNo,
EVT OrigVT,
MVT ValVT,
MVT LocVT,
95 return IncomingValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT,
96 LocInfo,
Info, Flags, State);
100struct AArch64OutgoingValueAssigner
109 AArch64OutgoingValueAssigner(
CCAssignFn *AssignFn_,
113 : OutgoingValueAssigner(AssignFn_, AssignFnVarArg_),
114 Subtarget(Subtarget_), IsReturn(IsReturn) {}
116 bool assignArg(
unsigned ValNo,
EVT OrigVT,
MVT ValVT,
MVT LocVT,
123 bool UseVarArgsCCForFixed = IsCalleeWin && State.
isVarArg();
126 if (
Info.IsFixed && !UseVarArgsCCForFixed) {
129 Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
131 Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
140 : IncomingValueHandler(MIRBuilder,
MRI) {}
145 auto &MFI = MIRBuilder.getMF().getFrameInfo();
149 const bool IsImmutable = !
Flags.isByVal();
151 int FI = MFI.CreateFixedObject(
Size,
Offset, IsImmutable);
153 auto AddrReg = MIRBuilder.buildFrameIndex(
LLT::pointer(0, 64), FI);
154 return AddrReg.getReg(0);
161 if (
Flags.isPointer())
168 markPhysRegUsed(PhysReg);
169 IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
195 case CCValAssign::LocInfo::ZExt:
196 MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, ValVReg,
Addr, *MMO);
198 case CCValAssign::LocInfo::SExt:
199 MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, ValVReg,
Addr, *MMO);
202 MIRBuilder.buildLoad(ValVReg,
Addr, *MMO);
210 virtual void markPhysRegUsed(
MCRegister PhysReg) = 0;
217 void markPhysRegUsed(
MCRegister PhysReg)
override {
223struct CallReturnHandler :
public IncomingArgHandler {
226 : IncomingArgHandler(MIRBuilder,
MRI), MIB(MIB) {}
228 void markPhysRegUsed(
MCRegister PhysReg)
override {
236struct ReturnedArgCallReturnHandler :
public CallReturnHandler {
240 : CallReturnHandler(MIRBuilder,
MRI, MIB) {}
242 void markPhysRegUsed(
MCRegister PhysReg)
override {}
249 : OutgoingValueHandler(MIRBuilder,
MRI), MIB(MIB), IsTailCall(IsTailCall),
261 assert(!
Flags.isByVal() &&
"byval unhandled with tail calls");
265 auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
267 return FIReg.getReg(0);
271 SPReg = MIRBuilder.buildCopy(p0,
Register(AArch64::SP)).getReg(0);
273 auto OffsetReg = MIRBuilder.buildConstant(s64,
Offset);
275 auto AddrReg = MIRBuilder.buildPtrAdd(p0,
SPReg, OffsetReg);
278 return AddrReg.getReg(0);
287 if (
Flags.isPointer())
295 Register ExtReg = extendRegister(ValVReg, VA);
296 MIRBuilder.buildCopy(PhysReg, ExtReg);
305 MIRBuilder.buildStore(ValVReg,
Addr, *MMO);
319 if (VA.
getLocInfo() != CCValAssign::LocInfo::FPExt) {
328 ValVReg = extendRegister(ValVReg, VA, MaxSize);
334 assignValueToAddress(ValVReg,
Addr, MemTy, MPO, VA);
364 "Return value without a vreg");
367 if (!FLI.CanLowerReturn) {
369 }
else if (!VRegs.
empty()) {
377 auto &
DL =
F.getDataLayout();
383 "For each split Type there should be exactly one VReg.");
388 for (
unsigned i = 0; i < SplitEVTs.
size(); ++i) {
390 ArgInfo CurArgInfo =
ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx), 0};
395 auto &Flags = CurArgInfo.
Flags[0];
397 !Flags.isSExt() && !Flags.isZExt()) {
403 if (
EVT(NewVT) != SplitEVTs[i]) {
404 unsigned ExtendOp = TargetOpcode::G_ANYEXT;
405 if (
F.getAttributes().hasRetAttr(Attribute::SExt))
406 ExtendOp = TargetOpcode::G_SEXT;
407 else if (
F.getAttributes().hasRetAttr(Attribute::ZExt))
408 ExtendOp = TargetOpcode::G_ZEXT;
423 CurVReg = MIRBuilder.
buildInstr(ExtendOp, {NewLLT}, {CurVReg})
442 if (NewLLT !=
MRI.getType(CurVReg)) {
444 CurVReg = MIRBuilder.
buildInstr(ExtendOp, {NewLLT}, {CurVReg})
450 if (CurVReg != CurArgInfo.
Regs[0]) {
451 CurArgInfo.
Regs[0] = CurVReg;
458 AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,
460 OutgoingArgHandler Handler(MIRBuilder,
MRI, MIB);
462 MIRBuilder,
CC,
F.isVarArg());
465 if (SwiftErrorVReg) {
467 MIRBuilder.
buildCopy(AArch64::X21, SwiftErrorVReg);
477 bool IsVarArg)
const {
479 const auto &TLI = *getTLI<AArch64TargetLowering>();
480 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
483 return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv));
500 assert(
F.isVarArg() &&
"Expected F to be vararg?");
504 CCState CCInfo(
F.getCallingConv(),
true, MF, ArgLocs,
523 for (
const auto &
F : Forwards) {
533 return A.getType()->isScalableTy();
537 if (!ST.hasNEON() || !ST.hasFPARMv8()) {
538 LLVM_DEBUG(
dbgs() <<
"Falling back to SDAG because we don't support no-NEON\n");
543 if (Attrs.hasZAState() || Attrs.hasZT0State() ||
544 Attrs.hasStreamingInterfaceOrBody() ||
545 Attrs.hasStreamingCompatibleInterface())
551void AArch64CallLowering::saveVarArgRegisters(
562 bool IsWin64CC = Subtarget.isCallingConvWin64(CCInfo.
getCallingConv(),
568 unsigned NumVariadicGPRArgRegs =
GPRArgRegs.size() - FirstVariadicGPR + 1;
570 unsigned GPRSaveSize = 8 * (
GPRArgRegs.size() - FirstVariadicGPR);
572 if (GPRSaveSize != 0) {
575 -
static_cast<int>(GPRSaveSize),
false);
576 if (GPRSaveSize & 15)
579 -
static_cast<int>(
alignTo(GPRSaveSize, 16)),
588 for (
unsigned i = FirstVariadicGPR; i <
GPRArgRegs.size(); ++i) {
589 Register Val =
MRI.createGenericVirtualRegister(s64);
595 MF, GPRIdx, (i - FirstVariadicGPR) * 8)
599 FIN = MIRBuilder.
buildPtrAdd(
MRI.createGenericVirtualRegister(p0),
606 if (Subtarget.hasFPARMv8() && !IsWin64CC) {
609 unsigned FPRSaveSize = 16 * (
FPRArgRegs.size() - FirstVariadicFPR);
611 if (FPRSaveSize != 0) {
618 for (
unsigned i = FirstVariadicFPR; i <
FPRArgRegs.size(); ++i) {
629 FIN = MIRBuilder.
buildPtrAdd(
MRI.createGenericVirtualRegister(p0),
644 auto &
DL =
F.getDataLayout();
649 if (
F.isVarArg() && Subtarget.isWindowsArm64EC())
659 Subtarget.isCallingConvWin64(
F.getCallingConv(),
F.isVarArg()) &&
660 !Subtarget.isWindowsArm64EC();
667 if (!FLI.CanLowerReturn)
671 for (
auto &Arg :
F.args()) {
672 if (
DL.getTypeStoreSize(Arg.getType()).isZero())
675 ArgInfo OrigArg{VRegs[i], Arg, i};
680 if (OrigArg.Ty->isIntegerTy(1)) {
681 assert(OrigArg.Regs.size() == 1 &&
682 MRI.getType(OrigArg.Regs[0]).getSizeInBits() == 1 &&
683 "Unexpected registers used for i1 arg");
685 auto &Flags = OrigArg.Flags[0];
686 if (!Flags.isZExt() && !Flags.isSExt()) {
690 OrigArg.Regs[0] = WideReg;
695 if (Arg.hasAttribute(Attribute::SwiftAsync))
708 AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);
711 CCState CCInfo(
F.getCallingConv(),
F.isVarArg(), MF, ArgLocs,
F.getContext());
716 if (!BoolArgs.
empty()) {
717 for (
auto &KV : BoolArgs) {
720 LLT WideTy =
MRI.getType(WideReg);
721 assert(
MRI.getType(OrigReg).getScalarSizeInBits() == 1 &&
722 "Unexpected bit size of a bool arg");
729 uint64_t StackSize = Assigner.StackSize;
731 if ((!Subtarget.isTargetDarwin() && !Subtarget.isWindowsArm64EC()) || IsWin64) {
737 saveVarArgRegisters(MIRBuilder, Handler, CCInfo);
738 }
else if (Subtarget.isWindowsArm64EC()) {
743 StackSize =
alignTo(Assigner.StackSize, Subtarget.isTargetILP32() ? 4 : 8);
753 StackSize =
alignTo(StackSize, 16);
769 if (Subtarget.hasCustomCallingConv())
770 Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
805static std::pair<CCAssignFn *, CCAssignFn *>
810bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
818 if (CalleeCC == CallerCC)
825 std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
830 std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
833 AArch64IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
834 CalleeAssignFnVarArg);
835 AArch64IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
836 CallerAssignFnVarArg);
843 const uint32_t *CallerPreserved =
TRI->getCallPreservedMask(MF, CallerCC);
844 const uint32_t *CalleePreserved =
TRI->getCallPreservedMask(MF, CalleeCC);
846 TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
847 TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
850 return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
853bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
857 if (OrigOutArgs.
empty())
873 CCState OutInfo(CalleeCC,
false, MF, OutLocs, Ctx);
875 AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
888 LLVM_DEBUG(
dbgs() <<
"... Cannot fit call operands on caller's stack.\n");
896 const uint32_t *CallerPreservedMask =
TRI->getCallPreservedMask(MF, CallerCC);
905 for (
unsigned i = 0; i < OutLocs.
size(); ++i) {
906 auto &ArgLoc = OutLocs[i];
907 if (ArgLoc.isRegLoc())
912 <<
"... Cannot tail call vararg function with stack arguments\n");
926 if (!
Info.IsTailCall)
935 if (
Info.SwiftErrorVReg) {
940 LLVM_DEBUG(
dbgs() <<
"... Cannot handle tail calls with swifterror yet.\n");
945 LLVM_DEBUG(
dbgs() <<
"... Calling convention cannot be tail called.\n");
967 return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
969 LLVM_DEBUG(
dbgs() <<
"... Cannot tail call from callers with byval, "
970 "inreg, or swifterror arguments\n");
981 if (
Info.Callee.isGlobal()) {
985 (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
986 TT.isOSBinFormatMachO())) {
987 LLVM_DEBUG(
dbgs() <<
"... Cannot tail call externally-defined function "
988 "with weak linkage for this OS.\n");
1003 "Unexpected variadic calling convention");
1007 if (!doCallerAndCalleePassArgsTheSameWay(
Info, MF, InArgs)) {
1010 <<
"... Caller and callee have incompatible calling conventions.\n");
1014 if (!areCalleeOutgoingArgsTailCallable(
Info, MF, OutArgs))
1018 dbgs() <<
"... Call is eligible for tail call optimization.\n");
1024 std::optional<CallLowering::PtrAuthInfo> &PAI,
1032 assert(IsIndirect &&
"Direct call should not be authenticated");
1034 "Invalid auth call key");
1035 return AArch64::BLRA;
1039 return AArch64::TCRETURNdi;
1045 assert(!PAI &&
"ptrauth tail-calls not yet supported with PAuthLR");
1046 return AArch64::TCRETURNrix17;
1049 return AArch64::AUTH_TCRETURN_BTI;
1050 return AArch64::TCRETURNrix16x17;
1054 assert(!PAI &&
"ptrauth tail-calls not yet supported with PAuthLR");
1055 return AArch64::TCRETURNrinotx16;
1059 return AArch64::AUTH_TCRETURN;
1060 return AArch64::TCRETURNri;
1068 if (!OutArgs.
empty() && OutArgs[0].Flags[0].isReturned()) {
1070 Mask =
TRI.getThisReturnPreservedMask(MF,
Info.CallConv);
1072 OutArgs[0].Flags[0].setReturned(
false);
1073 Mask =
TRI.getCallPreservedMask(MF,
Info.CallConv);
1076 Mask =
TRI.getCallPreservedMask(MF,
Info.CallConv);
1081bool AArch64CallLowering::lowerTailCall(
1103 CallSeqStart = MIRBuilder.
buildInstr(AArch64::ADJCALLSTACKDOWN);
1118 if (Opc == AArch64::AUTH_TCRETURN || Opc == AArch64::AUTH_TCRETURN_BTI) {
1121 "Invalid auth call key");
1122 MIB.addImm(
Info.PAI->Key);
1126 std::tie(IntDisc, AddrDisc) =
1129 MIB.addImm(IntDisc);
1130 MIB.addUse(AddrDisc);
1131 if (AddrDisc != AArch64::NoRegister) {
1135 MIB->getOperand(4), 4));
1142 TRI->UpdateCustomCallPreservedMask(MF, &Mask);
1143 MIB.addRegMask(Mask);
1146 MIB->setCFIType(MF,
Info.CFIType->getZExtValue());
1148 if (
TRI->isAnyArgRegReserved(MF))
1149 TRI->emitReservedArgRegCallError(MF);
1161 unsigned NumBytes = 0;
1168 CCState OutInfo(CalleeCC,
false, MF, OutLocs,
F.getContext());
1170 AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
1177 NumBytes =
alignTo(OutInfo.getStackSize(), 16);
1182 FPDiff = NumReusableBytes - NumBytes;
1186 if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (
unsigned)-FPDiff)
1194 assert(FPDiff % 16 == 0 &&
"unaligned stack on tail call");
1199 AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
1203 OutgoingArgHandler Handler(MIRBuilder,
MRI, MIB,
1206 CalleeCC,
Info.IsVarArg))
1211 if (
Info.IsVarArg &&
Info.IsMustTailCall) {
1215 for (
const auto &
F : Forwards) {
1222 return TRI->regsOverlap(Use.getReg(), ForwardedReg);
1249 if (MIB->getOperand(0).isReg())
1252 MIB->getDesc(), MIB->getOperand(0), 0);
1255 Info.LoweredTailCall =
true;
1264 auto &
DL =
F.getDataLayout();
1282 for (
auto &OrigArg :
Info.OrigArgs) {
1285 auto &Flags = OrigArg.Flags[0];
1286 if (OrigArg.Ty->isIntegerTy(1) && !Flags.isSExt() && !Flags.isZExt()) {
1289 MRI.getType(OutArg.
Regs[0]).getSizeInBits() == 1 &&
1290 "Unexpected registers used for i1 arg");
1302 if (!
Info.OrigRet.Ty->isVoidTy())
1306 bool CanTailCallOpt =
1310 if (
Info.IsMustTailCall && !CanTailCallOpt) {
1314 LLVM_DEBUG(
dbgs() <<
"Failed to lower musttail call as tail call\n");
1318 Info.IsTailCall = CanTailCallOpt;
1320 return lowerTailCall(MIRBuilder,
Info, OutArgs);
1325 std::tie(AssignFnFixed, AssignFnVarArg) =
1329 CallSeqStart = MIRBuilder.
buildInstr(AArch64::ADJCALLSTACKDOWN);
1339 Opc =
Info.PAI ? AArch64::BLRA_RVMARKER : AArch64::BLR_RVMARKER;
1342 else if (
Info.CB &&
Info.CB->hasFnAttr(Attribute::ReturnsTwice) &&
1343 !Subtarget.noBTIAtReturnTwice() &&
1345 Opc = AArch64::BLR_BTI;
1349 if (
Info.Callee.isSymbol() &&
F.getParent()->getRtLibUseGOT()) {
1350 auto MIB = MIRBuilder.
buildInstr(TargetOpcode::G_GLOBAL_VALUE);
1359 unsigned CalleeOpNo = 0;
1361 if (Opc == AArch64::BLR_RVMARKER || Opc == AArch64::BLRA_RVMARKER) {
1365 MIB.addGlobalAddress(ARCFn);
1367 }
else if (
Info.CFIType) {
1368 MIB->setCFIType(MF,
Info.CFIType->getZExtValue());
1371 MIB.add(
Info.Callee);
1377 AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
1380 OutgoingArgHandler Handler(MIRBuilder,
MRI, MIB,
false);
1387 if (Opc == AArch64::BLRA || Opc == AArch64::BLRA_RVMARKER) {
1390 "Invalid auth call key");
1391 MIB.addImm(
Info.PAI->Key);
1395 std::tie(IntDisc, AddrDisc) =
1398 MIB.addImm(IntDisc);
1399 MIB.addUse(AddrDisc);
1400 if (AddrDisc != AArch64::NoRegister) {
1403 MIB->getDesc(), MIB->getOperand(CalleeOpNo + 3),
1410 TRI->UpdateCustomCallPreservedMask(MF, &Mask);
1411 MIB.addRegMask(Mask);
1413 if (
TRI->isAnyArgRegReserved(MF))
1414 TRI->emitReservedArgRegCallError(MF);
1422 ?
alignTo(Assigner.StackSize, 16)
1426 MIRBuilder.
buildInstr(AArch64::ADJCALLSTACKUP)
1427 .
addImm(Assigner.StackSize)
1433 if (MIB->getOperand(CalleeOpNo).isReg())
1436 MIB->getOperand(CalleeOpNo), CalleeOpNo);
1441 if (
Info.CanLowerReturn && !
Info.OrigRet.Ty->isVoidTy()) {
1443 CallReturnHandler Handler(MIRBuilder,
MRI, MIB);
1444 bool UsingReturnedArg =
1445 !OutArgs.
empty() && OutArgs[0].Flags[0].isReturned();
1447 AArch64OutgoingValueAssigner Assigner(RetAssignFn, RetAssignFn, Subtarget,
1449 ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder,
MRI, MIB);
1451 UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,
1452 MIRBuilder,
Info.CallConv,
Info.IsVarArg,
1453 UsingReturnedArg ?
ArrayRef(OutArgs[0].Regs)
1458 if (
Info.SwiftErrorVReg) {
1463 if (!
Info.CanLowerReturn) {
1465 Info.DemoteRegister,
Info.DemoteStackIndex);
unsigned const MachineRegisterInfo * MRI
static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder, CCAssignFn *AssignFn)
Helper function to compute forwarded registers for musttail calls.
cl::opt< bool > EnableSVEGISel
static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect, bool IsTailCall, std::optional< CallLowering::PtrAuthInfo > &PAI, MachineRegisterInfo &MRI)
static LLT getStackValueStoreTypeHack(const CCValAssign &VA)
static const uint32_t * getMaskForArgs(SmallVectorImpl< AArch64CallLowering::ArgInfo > &OutArgs, AArch64CallLowering::CallLoweringInfo &Info, const AArch64RegisterInfo &TRI, MachineFunction &MF)
static void applyStackPassedSmallTypeDAGHack(EVT OrigVT, MVT &ValVT, MVT &LocVT)
static std::pair< CCAssignFn *, CCAssignFn * > getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI)
Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for CC.
static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt)
This file describes how to lower LLVM calls to machine code calls.
static std::tuple< SDValue, SDValue > extractPtrauthBlendDiscriminators(SDValue Disc, SelectionDAG *DAG)
static const MCPhysReg GPRArgRegs[]
static const MCPhysReg FPRArgRegs[]
cl::opt< bool > EnableSVEGISel("aarch64-enable-gisel-sve", cl::Hidden, cl::desc("Enable / disable SVE scalable vectors in Global ISel"), cl::init(false))
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls)
Return true if the calling convention is one that we can guarantee TCO for.
static bool mayTailCallThisCC(CallingConv::ID CC)
Return true if we might ever do TCO for calls with this calling convention.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions and their calls.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
Implement a low-level type suitable for MachineInstr level instruction selection.
This file declares the MachineIRBuilder class.
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
This file defines ARC utility functions which are used by various parts of the compiler.
static constexpr Register SPReg
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const override
This hook must be implemented to lower outgoing return values, described by Val, into the specified virtual registers VRegs.
bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv, SmallVectorImpl< BaseArgInfo > &Outs, bool IsVarArg) const override
This hook must be implemented to check whether the return values described by Outs can fit into the return registers.
bool fallBackToDAGISel(const MachineFunction &MF) const override
bool isTypeIsValidForThisReturn(EVT Ty) const override
For targets which support the "returned" parameter attribute, returns true if the given type is a valid one to use with 'returned'.
bool isEligibleForTailCallOptimization(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info, SmallVectorImpl< ArgInfo > &InArgs, SmallVectorImpl< ArgInfo > &OutArgs) const
Returns true if the call can be lowered as a tail call.
AArch64CallLowering(const AArch64TargetLowering &TLI)
bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const override
This hook must be implemented to lower the given call instruction, including argument and return valu...
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register > > VRegs, FunctionLoweringInfo &FLI) const override
This hook must be implemented to lower the incoming (formal) arguments, described by VRegs, for GlobalISel.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-specific information for each MachineFunction.
bool branchTargetEnforcement() const
void setVarArgsStackIndex(int Index)
void setTailCallReservedStack(unsigned bytes)
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
void setBytesInStackArgArea(unsigned bytes)
void setVarArgsGPRIndex(int Index)
bool branchProtectionPAuthLR() const
void setVarArgsFPRSize(unsigned Size)
unsigned getBytesInStackArgArea() const
void setVarArgsFPRIndex(int Index)
void setVarArgsGPRSize(unsigned Size)
void setArgumentStackToRestore(unsigned bytes)
const AArch64RegisterInfo * getRegisterInfo() const override
const AArch64InstrInfo * getInstrInfo() const override
bool isWindowsArm64EC() const
bool isCallingConvWin64(CallingConv::ID CC, bool IsVarArg) const
const RegisterBankInfo * getRegBankInfo() const override
bool hasCustomCallingConv() const
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations and not for other operations.
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
CCState - This class holds information needed while lowering arguments and return values.
MachineFunction & getMachineFunction() const
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
void analyzeMustTailForwardedRegisters(SmallVectorImpl< ForwardedRegister > &Forwards, ArrayRef< MVT > RegParmTypes, CCAssignFn Fn)
Compute the set of registers that need to be preserved and forwarded to any musttail calls.
CallingConv::ID getCallingConv() const
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
bool isAllocated(MCRegister Reg) const
isAllocated - Return true if the specified register (or an alias) is allocated.
CCValAssign - Represent assignment of one arg/retval to a location.
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg, int FI) const
Load the returned value from the stack into virtual registers in VRegs.
bool handleAssignments(ValueHandler &Handler, SmallVectorImpl< ArgInfo > &Args, CCState &CCState, SmallVectorImpl< CCValAssign > &ArgLocs, MachineIRBuilder &MIRBuilder, ArrayRef< Register > ThisReturnRegs={}) const
Use Handler to insert code to handle the argument/return values represented by Args.
bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF, SmallVectorImpl< ArgInfo > &InArgs, ValueAssigner &CalleeAssigner, ValueAssigner &CallerAssigner) const
void splitToValueTypes(const ArgInfo &OrigArgInfo, SmallVectorImpl< ArgInfo > &SplitArgs, const DataLayout &DL, CallingConv::ID CallConv, SmallVectorImpl< uint64_t > *Offsets=nullptr) const
Break OrigArgInfo into one or more pieces the calling convention can process, returned in SplitArgs.
void insertSRetIncomingArgument(const Function &F, SmallVectorImpl< ArgInfo > &SplitArgs, Register &DemoteReg, MachineRegisterInfo &MRI, const DataLayout &DL) const
Insert the hidden sret ArgInfo to the beginning of SplitArgs.
bool determineAndHandleAssignments(ValueHandler &Handler, ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, bool IsVarArg, ArrayRef< Register > ThisReturnRegs={}) const
Invoke ValueAssigner::assignArg on each of the given Args and then use Handler to move them to the assigned locations.
void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg) const
Store the return value given by VRegs into stack starting at the offset specified in DemoteReg.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< ArgInfo > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
bool determineAssignments(ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, CCState &CCInfo) const
Analyze the argument list in Args, using Assigner to populate CCInfo.
bool checkReturn(CCState &CCInfo, SmallVectorImpl< BaseArgInfo > &Outs, CCAssignFn *Fn) const
void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL, const FuncInfoTy &FuncInfo) const
A parsed version of the target data layout string in and methods for querying it.
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
FunctionLoweringInfo - This contains information that is global to a function that is used when lowering a region of the function.
iterator_range< arg_iterator > args()
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasExternalWeakLinkage() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e. number of whole bytes needed to represent the size in bits. Must only be called on sized types.
This is an important class for using LLVM in a threaded context.
Wrapper class representing physical registers. Should be passed by value.
bool isVector() const
Return true if this is a vector value type.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setHasTailCall(bool V=true)
bool hasMustTailInVarArgFunc() const
Returns true if the function is variadic and contains a musttail call.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do so.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual register for it.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Helper class to build MachineInstr.
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
void setInstr(MachineInstr &MI)
Set the insertion point to before MI.
MachineInstrBuilder buildAssertZExt(const DstOp &Res, const SrcOp &Op, unsigned Size)
Build and insert Res = G_ASSERT_ZEXT Op, Size.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0; Res = G_BUILD_VECTOR a, b, ..., x, undef, ..., undef.
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineOperand & getOperand(unsigned i) const
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
void setImm(int64_t immVal)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
Wrapper class representing virtual and physical registers.
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
const Triple & getTargetTriple() const
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Triple - Helper class for working with autoconf configuration names.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the symbol.
ArrayRef< MCPhysReg > getFPRArgRegs()
ArrayRef< MCPhysReg > getGPRArgRegs()
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ ARM64EC_Thunk_Native
Calling convention used in the ARM64EC ABI to implement calls between ARM64 code and thunks.
@ Swift
Calling convention for Swift.
@ PreserveMost
Used for runtime calls that preserves most registers.
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ PreserveNone
Used for runtime calls that preserves none general registers.
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
@ ARM64EC_Thunk_X64
Calling convention used in the ARM64EC ABI to implement calls between x64 code and thunks.
@ C
The default llvm calling convention, compatible with C.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...
bool hasAttachedCallOpBundle(const CallBase *CB)
This is an optimization pass for GlobalISel generic memory operations.
Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
unsigned getBLRCallOpcode(const MachineFunction &MF)
Return opcode to be used for indirect calls.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
SmallVector< Register, 4 > Regs
SmallVector< ISD::ArgFlagsTy, 4 > Flags
Base class for ValueHandlers used for arguments coming into the current function, or for return value...
void assignValueToReg(Register ValVReg, Register PhysReg, const CCValAssign &VA) override
Provides a default implementation for argument handling.
Base class for ValueHandlers used for arguments passed to a function call, or for return values.
MachineIRBuilder & MIRBuilder
MachineRegisterInfo & MRI
virtual LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const
Return the in-memory size to write for the argument at VA.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Describes a register that needs to be forwarded from the prologue to a musttail call.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.