#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-call-lowering"
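
// AMDGPUOutgoingValueHandler::assignValueToReg (fragment): values returned in
// SGPRs are routed through llvm.amdgcn.readfirstlane so a value that was
// computed in a VGPR is made uniform before the final copy to the physical
// register.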
      : OutgoingValueHandler(B, MRI), MIB(MIB) {}

    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);

    if (TRI->isSGPRReg(MRI, PhysReg)) {
      LLT Ty = MRI.getType(ExtReg);

        assert(Ty.getSizeInBits() == 32);

          ExtReg = MIRBuilder.buildPtrToInt(S32, ExtReg).getReg(0);

          ExtReg = MIRBuilder.buildBitcast(S32, ExtReg).getReg(0);

      auto ToSGPR = MIRBuilder
                        .buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
                                        {MRI.getType(ExtReg)})
                        .addReg(ExtReg);
      ExtReg = ToSGPR.getReg(0);

    MIRBuilder.buildCopy(PhysReg, ExtReg);
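
// AMDGPUIncomingArgHandler (fragment): incoming stack arguments get a fixed
// frame object and a G_FRAME_INDEX address; register arguments narrower than
// 32 bits are copied as s32, extension-hinted, then truncated back down.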
 
      : IncomingValueHandler(B, MRI) {}

    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    const bool IsImmutable = !Flags.isByVal();
    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);

    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);

    markPhysRegUsed(PhysReg);

      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);

          buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT()));
      MIRBuilder.buildTrunc(ValVReg, Extended);

    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);

    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);

  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
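
// FormalArgHandler vs. CallReturnHandler (fragments): both receive values in
// physical registers, but formal arguments are added as block live-ins while
// call return values are recorded on the call instruction (MIB) instead.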
 
struct FormalArgHandler : public AMDGPUIncomingArgHandler {

      : AMDGPUIncomingArgHandler(B, MRI) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);

struct CallReturnHandler : public AMDGPUIncomingArgHandler {

      : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
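
// AMDGPUOutgoingArgHandler (fragment): handles outgoing call arguments. For
// tail calls it addresses the stack relative to the incoming argument area
// using FPDiff; otherwise stores are made relative to the stack pointer.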
 
struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {

                           bool IsTailCall = false, int FPDiff = 0)
      : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff),
        IsTailCall(IsTailCall) {}

      return FIReg.getReg(0);

    if (ST.enableFlatScratch()) {

    return AddrReg.getReg(0);

                           ? extendRegister(Arg.Regs[ValRegIndex], VA)
                           : Arg.Regs[ValRegIndex];
    assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
 
  case TargetOpcode::G_SEXT:

  case TargetOpcode::G_ZEXT:

  case TargetOpcode::G_ANYEXT:
 
 
                                        bool IsVarArg) const {

  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,

  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg));
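
// lowerReturnVal (fragment): splits the IR return value into legal pieces and
// widens small scalar integers with G_SEXT / G_ZEXT / G_ANYEXT according to
// the signext/zeroext return attributes before assigning return registers.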
 
  auto &MF = B.getMF();

  MachineRegisterInfo *MRI = B.getMRI();
  LLVMContext &Ctx = F.getContext();

         "For each split Type there should be exactly one VReg.");

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    EVT VT = SplitEVTs[i];

      unsigned ExtendOp = TargetOpcode::G_ANYEXT;
      if (RetInfo.Flags[0].isSExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_SEXT;
      } else if (RetInfo.Flags[0].isZExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_ZEXT;

      EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,

    if (Reg != RetInfo.Regs[0]) {
      RetInfo.Regs[0] = Reg;

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());

  AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret);
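
// lowerReturn (fragment): functions that end the wave emit S_ENDPGM; shaders
// return through SI_RETURN_TO_EPILOG; whole wave functions use
// G_AMDGPU_WHOLE_WAVE_FUNC_RETURN so the original EXEC mask can be restored.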
 
  assert(!Val == VRegs.empty() && "Return value without a vreg");

  const bool IsWaveEnd =

    B.buildInstr(AMDGPU::S_ENDPGM)

  unsigned ReturnOpc = IsWholeWave ? AMDGPU::G_AMDGPU_WHOLE_WAVE_FUNC_RETURN
                       : IsShader  ? AMDGPU::SI_RETURN_TO_EPILOG

  auto Ret = B.buildInstrNoInsert(ReturnOpc);

  else if (!lowerReturnVal(B, Val, VRegs, Ret))

    addOriginalExecToReturn(B.getMF(), Ret);
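
// lowerParameterPtr / lowerParameter (fragments): kernel arguments are not
// passed in registers; each one is addressed by adding its byte offset to the
// kernarg segment pointer with G_PTR_ADD and loaded from constant address
// space.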
 
 
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);

                                        Align Alignment) const {
 
  for (ArgInfo &SplitArg : SplitArgs) {
    Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy);
    lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]);

    if (SplitArg.Flags[0].isPointer()) {

    assert(SplitArg.Regs.size() == 1);

    B.buildLoad(SplitArg.Regs[0], PtrReg, *MMO);
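
// allocateHSAUserSGPRs (fragment): reserves the user SGPRs the kernel needs
// (private segment buffer, dispatch ptr, queue ptr, kernarg segment ptr,
// dispatch id, flat scratch init, private segment size) and registers each
// one as a function live-in.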
 
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);

    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);

    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);

    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);

    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);

    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);

    MF.addLiveIn(PrivateSegmentSizeReg, &AMDGPU::SGPR_32RegClass);
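
// lowerFormalArgumentsKernel (fragment): walks the explicit kernel arguments,
// computing each offset with alignTo(ExplicitArgOffset, ABIAlign). byref
// arguments receive a pointer into the kernarg segment (addrspacecast if the
// pointee address space differs); everything else is loaded via lowerParameter.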
 
 
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  const Align KernArgBaseAlign(16);

  for (auto &Arg : F.args()) {

    if (Arg.hasAttribute("amdgpu-hidden-argument")) {
      LLVM_DEBUG(dbgs() << "Preloading hidden arguments is not supported\n");

    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);

    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
    Align ABIAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    if (Arg.use_empty()) {

             "expected only one register for byval pointers");

        lowerParameterPtr(VRegs[i][0], B, ArgOffset);

        Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy);
        lowerParameterPtr(PtrReg, B, ArgOffset);

        B.buildAddrSpaceCast(VRegs[i][0], PtrReg);

      ArgInfo OrigArg(VRegs[i], Arg, i);
      const unsigned OrigArgIdx = i + AttributeList::FirstArgIndex;

      lowerParameter(B, OrigArg, ArgOffset, Alignment);

  if (Info->getNumKernargPreloadedSGPRs())
    Info->setNumWaveDispatchSGPRs(Info->getNumUserSGPRs());

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
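
// lowerFormalArguments (fragment): the non-kernel path. Pixel shaders track
// which PS inputs are allocated/enabled (with a fallback when none are used),
// whole wave functions receive the original EXEC via
// G_AMDGPU_WHOLE_WAVE_FUNC_SETUP, and non-graphics callees get the special
// input SGPRs/VGPRs allocated in fixed order.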
 
 
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);

    Register FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);

  unsigned PSInputNum = 0;

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)

    if (Info->isWholeWaveFunction() && Idx == 0) {
      assert(VRegs[Idx].size() == 1 && "Expected only one register");

      B.buildInstr(AMDGPU::G_AMDGPU_WHOLE_WAVE_FUNC_SETUP)
          .addDef(VRegs[Idx][0]);

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))

      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

        Info->markPSInputAllocated(PSInputNum);

          Info->markPSInputEnabled(PSInputNum);

    ArgInfo OrigArg(VRegs[Idx], Arg, Idx);
    const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;

    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {

      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);

    if (Subtarget.isAmdPalOS()) {

      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))

  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

    B.setInstr(*MBB.begin());

  if (!IsEntryFunc && !IsGraphics) {

    TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);

    if (!Subtarget.enableFlatScratch())

    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);

    Info->setNumWaveDispatchSGPRs(

    Info->setNumWaveDispatchVGPRs(

    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics);

  Info->setBytesInStackArgArea(StackSize);
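
// passSpecialInputs (fragment): each implicit input (dispatch ptr, queue ptr,
// implicitarg ptr, dispatch id, workgroup IDs, LDS kernel id) is paired below
// with the "amdgpu-no-*" attributes whose presence on the call site means the
// input can be skipped; otherwise it is forwarded from the caller's preloaded
// register. Work-item IDs X/Y/Z are packed into a single VGPR when needed.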
 
 
      {"amdgpu-no-dispatch-ptr", ""},
      {"amdgpu-no-queue-ptr", ""},
      {"amdgpu-no-implicitarg-ptr", ""},
      {"amdgpu-no-dispatch-id", ""},
      {"amdgpu-no-workgroup-id-x", "amdgpu-no-cluster-id-x"},
      {"amdgpu-no-workgroup-id-y", "amdgpu-no-cluster-id-y"},
      {"amdgpu-no-workgroup-id-z", "amdgpu-no-cluster-id-z"},
      {"amdgpu-no-lds-kernel-id", ""},

  for (auto InputID : InputRegs) {

          return AttrName.empty() || Info.CB->hasFnAttr(AttrName);

    std::tie(OutgoingArg, ArgRC, ArgTy) =

    std::tie(IncomingArg, IncomingArgRC, ArgTy) =
        CallerArgInfo.getPreloadedValue(InputID);
    assert(IncomingArgRC == ArgRC);

    Register InputReg = MRI.createGenericVirtualRegister(ArgTy);

      LI->buildLoadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy);

      LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);

      std::optional<uint32_t> Id =

      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);

      LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");

  std::tie(OutgoingArg, ArgRC, ArgTy) =

    std::tie(OutgoingArg, ArgRC, ArgTy) =

    std::tie(OutgoingArg, ArgRC, ArgTy) =

  const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX);
  const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY);
  const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ);

  const bool NeedWorkItemIDX = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-x");
  const bool NeedWorkItemIDY = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-y");
  const bool NeedWorkItemIDZ = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-z");

    if (ST.getMaxWorkitemID(MF.getFunction(), 0) != 0) {
      InputReg = MRI.createGenericVirtualRegister(S32);
      LI->buildLoadInputValue(InputReg, MIRBuilder, IncomingArgX,
                              std::get<1>(WorkitemIDX),
                              std::get<2>(WorkitemIDX));

      NeedWorkItemIDY && ST.getMaxWorkitemID(MF.getFunction(), 1) != 0) {

    LI->buildLoadInputValue(Y, MIRBuilder, IncomingArgY,
                            std::get<1>(WorkitemIDY), std::get<2>(WorkitemIDY));

      NeedWorkItemIDZ && ST.getMaxWorkitemID(MF.getFunction(), 2) != 0) {

    LI->buildLoadInputValue(Z, MIRBuilder, IncomingArgZ,
                            std::get<1>(WorkitemIDZ), std::get<2>(WorkitemIDZ));

      (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
    InputReg = MRI.createGenericVirtualRegister(S32);
    if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) {

        IncomingArgX ? *IncomingArgX :
        IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u);
      LI->buildLoadInputValue(InputReg, MIRBuilder, &IncomingArg,
                              &AMDGPU::VGPR_32RegClass, S32);

      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);

    LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
 
 
static std::pair<CCAssignFn *, CCAssignFn *>

                              bool IsTailCall, bool IsWave32,

                              bool IsDynamicVGPRChainCall = false) {

         "Indirect calls can't be tail calls, "
         "because the address can be divergent");

    return AMDGPU::G_SI_CALL;

    if (IsDynamicVGPRChainCall)
      return IsWave32 ? AMDGPU::SI_CS_CHAIN_TC_W32_DVGPR
                      : AMDGPU::SI_CS_CHAIN_TC_W64_DVGPR;
    return IsWave32 ? AMDGPU::SI_CS_CHAIN_TC_W32 : AMDGPU::SI_CS_CHAIN_TC_W64;

    return AMDGPU::SI_TCRETURN_GFX_WholeWave;

    return AMDGPU::SI_TCRETURN_GFX;

  return AMDGPU::SI_TCRETURN;
 
 
                                  bool IsDynamicVGPRChainCall = false) {
  if (Info.Callee.isReg()) {

  } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) {

    if (IsDynamicVGPRChainCall) {
 
 
  if (CalleeCC == CallerCC)

  const auto *TRI = ST.getRegisterInfo();

  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))

  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =

  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =

                                       CalleeAssignFnVarArg);

                                       CallerAssignFnVarArg);
 
 
  if (OutArgs.empty())

    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");

  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
 
 
  if (!Info.IsTailCall)

  if (Info.Callee.isReg())

  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);

  if (!CallerPreserved)

    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");

        return A.hasByValAttr() || A.hasSwiftErrorAttr();

    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval "
                         "or swifterror arguments\n");

        << "... Caller and callee have incompatible calling conventions.\n");

  LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n");
 
 
    ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const {
  if (!ST.enableFlatScratch()) {

                             ? AMDGPU::SGPR48_SGPR49_SGPR50_SGPR51
                             : AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;

    MIRBuilder.buildCopy(CalleeRSrcReg, ScratchRSrcReg);

  for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) {
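
// lowerTailCall (fragment): emits the tail-call pseudo chosen by
// getCallOpcode. For llvm.amdgcn.cs.chain calls the operand layout below is
// assumed from the checks in this function: OrigArgs[0] = callee, [1] = exec,
// [2] = SGPR args, [3] = VGPR args, [4] = flags, and with the dynamic-VGPR
// flag set, [5] = num VGPRs, [6] = fallback exec, [7] = fallback callee
// (8 operands total, wave32 only).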
 
 
enum ChainCallArgIdx {

    CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP);

  bool IsDynamicVGPRChainCall = false;

    ArgInfo FlagsArg = Info.OrigArgs[ChainCallArgIdx::Flags];

    if (FlagsValue.isZero()) {
      if (Info.OrigArgs.size() != 5) {
        LLVM_DEBUG(dbgs() << "No additional args allowed if flags == 0\n");

      IsDynamicVGPRChainCall = true;

      if (Info.OrigArgs.size() != 8) {

      if (!ST.isWave32()) {

            F, "dynamic VGPR mode is only supported for wave32"));

      ArgInfo FallbackExecArg = Info.OrigArgs[ChainCallArgIdx::FallbackExec];

             "Expected single register for fallback EXEC");
      if (!FallbackExecArg.Ty->isIntegerTy(ST.getWavefrontSize())) {

                               ST.isWave32(), CalleeCC, IsDynamicVGPRChainCall);

  if (FuncInfo->isWholeWaveFunction())
    addOriginalExecToReturn(MF, MIB);

  unsigned CalleeIdx = MIB->getNumOperands();

    auto AddRegOrImm = [&](const ArgInfo &Arg) {

        MIB.addImm(CI->getSExtValue());

        MIB.addReg(Arg.Regs[0]);
        unsigned Idx = MIB->getNumOperands() - 1;

            MF, *TRI, MRI, *TII, *ST.getRegBankInfo(), *MIB, MIB->getDesc(),
            MIB->getOperand(Idx), Idx));

    ArgInfo ExecArg = Info.OrigArgs[ChainCallArgIdx::Exec];
    assert(ExecArg.Regs.size() == 1 && "Too many regs for EXEC");

    AddRegOrImm(ExecArg);
    if (IsDynamicVGPRChainCall)
      std::for_each(Info.OrigArgs.begin() + ChainCallArgIdx::NumVGPRs,
                    Info.OrigArgs.end(), AddRegOrImm);

  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
  MIB.addRegMask(Mask);

  unsigned NumBytes = 0;

    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();

    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());

    FPDiff = NumReusableBytes - NumBytes;

           "unaligned stack on tail call");

  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());

  AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff);

  if (Info.ConvergenceCtrlToken) {

    MIB->getOperand(CalleeIdx + 1).setImm(FPDiff);

  if (MIB->getOpcode() == AMDGPU::SI_TCRETURN_GFX_WholeWave) {
    MIB->getOperand(0).setReg(

                                 *MIB, MIB->getDesc(), MIB->getOperand(0), 0));

  if (MIB->getOperand(CalleeIdx).isReg()) {

        MF, *TRI, MRI, *TII, *ST.getRegBankInfo(), *MIB, MIB->getDesc(),
        MIB->getOperand(CalleeIdx), CalleeIdx));

  Info.LoweredTailCall = true;
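
// lowerChainCall (fragment): rewrites an llvm.amdgcn.cs.chain call site into a
// must-tail call with the callee's calling convention, taking the callee,
// SGPR-argument, and VGPR-argument bundles straight from the intrinsic
// operands.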
 
 
  ArgInfo Callee = Info.OrigArgs[0];
  ArgInfo SGPRArgs = Info.OrigArgs[2];
  ArgInfo VGPRArgs = Info.OrigArgs[3];

  const Value *CalleeV = Callee.OrigValue->stripPointerCasts();

    Info.CallConv = F->getCallingConv();

    assert(Callee.Regs.size() == 1 && "Too many regs for the callee");

  Info.IsVarArg = false;

      "SGPR arguments should be marked inreg");

      "VGPR arguments should not be marked inreg");

  Info.IsMustTailCall = true;
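
// lowerCall (fragment): dispatches llvm.amdgcn.cs.chain to lowerChainCall and
// strips the callee operand of llvm.amdgcn.call_whole_wave, tries the tail
// call path first, and otherwise brackets the call in ADJCALLSTACKUP /
// ADJCALLSTACKDOWN, constraining the callee operand's register class and
// handling the returned values (or the sret demotion slot) afterwards.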
 
 
  if (Function *F = Info.CB->getCalledFunction())
    if (F->isIntrinsic()) {
      switch (F->getIntrinsicID()) {
      case Intrinsic::amdgcn_cs_chain:

      case Intrinsic::amdgcn_call_whole_wave:

        Info.OrigArgs.erase(Info.OrigArgs.begin());
        Info.IsVarArg = false;

  if (Info.IsVarArg) {

  for (auto &OrigArg : Info.OrigArgs)

  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy())

  bool CanTailCallOpt =

  if (Info.IsMustTailCall && !CanTailCallOpt) {
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");

  Info.IsTailCall = CanTailCallOpt;

  std::tie(AssignFnFixed, AssignFnVarArg) =

  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP)

  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false, ST.isWave32(),

  MIB.addDef(TRI->getReturnAddressReg(MF));

  if (!Info.IsConvergent)

  const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
  MIB.addRegMask(Mask);

  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());

  AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false);

  if (Info.ConvergenceCtrlToken) {

  if (MIB->getOperand(1).isReg()) {

        MF, *TRI, MRI, *ST.getInstrInfo(),
        *ST.getRegBankInfo(), *MIB, MIB->getDesc(), MIB->getOperand(1),

  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv,

    CallReturnHandler Handler(MIRBuilder, MRI, MIB);

                                       Info.CallConv, Info.IsVarArg))

  uint64_t CalleePopBytes = NumBytes;

  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN)

  if (!Info.CanLowerReturn) {

                    Info.DemoteRegister, Info.DemoteStackIndex);
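
// addOriginalExecToReturn (fragment): appends the register defined by
// G_AMDGPU_WHOLE_WAVE_FUNC_SETUP (the EXEC mask saved on entry) as an extra
// operand of the whole wave function's return, so it is available when the
// return pseudo is expanded.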
 
 
void AMDGPUCallLowering::addOriginalExecToReturn(

  Ret.addReg(Setup->getOperand(0).getReg());
 