| File: | build/source/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp |
| Warning: | line 385, column 25: Called C++ object pointer is null |
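The flagged dereference is the call to Val->getType() at line 385. In lowerReturn, Val may legitimately be null for a void return: line 366 records setIfReturnsVoid(!Val), and the assert at line 368 only requires that a null Val come with an empty VRegs. If the return value cannot be lowered in registers (!FLI.CanLowerReturn) while Val is null, line 385 dereferences a null pointer.

A minimal sketch of one possible guard, assuming a void return has nothing to store through the demoted sret pointer (names and control flow are taken from lines 384-387 of the listing; this is an illustration, not the upstream fix):

    // Only demote the return value to the sret buffer when there is one;
    // lowerReturnVal (lines 299-300) already returns true immediately for a null Val.
    if (!FLI.CanLowerReturn && Val)
      insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
    else if (!lowerReturnVal(B, Val, VRegs, Ret))
      return false;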
| 1 | //===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===// | |||
| 2 | // | |||
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
| 4 | // See https://llvm.org/LICENSE.txt for license information. | |||
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
| 6 | // | |||
| 7 | //===----------------------------------------------------------------------===// | |||
| 8 | /// | |||
| 9 | /// \file | |||
| 10 | /// This file implements the lowering of LLVM calls to machine code calls for | |||
| 11 | /// GlobalISel. | |||
| 12 | /// | |||
| 13 | //===----------------------------------------------------------------------===// | |||
| 14 | ||||
| 15 | #include "AMDGPUCallLowering.h" | |||
| 16 | #include "AMDGPU.h" | |||
| 17 | #include "AMDGPULegalizerInfo.h" | |||
| 18 | #include "AMDGPUTargetMachine.h" | |||
| 19 | #include "SIMachineFunctionInfo.h" | |||
| 20 | #include "SIRegisterInfo.h" | |||
| 21 | #include "llvm/CodeGen/Analysis.h" | |||
| 22 | #include "llvm/CodeGen/FunctionLoweringInfo.h" | |||
| 23 | #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" | |||
| 24 | #include "llvm/CodeGen/MachineFrameInfo.h" | |||
| 25 | #include "llvm/IR/IntrinsicsAMDGPU.h" | |||
| 26 | ||||
| 27 | #define DEBUG_TYPE"amdgpu-call-lowering" "amdgpu-call-lowering" | |||
| 28 | ||||
| 29 | using namespace llvm; | |||
| 30 | ||||
| 31 | namespace { | |||
| 32 | ||||
| 33 | /// Wrapper around extendRegister to ensure we extend to a full 32-bit register. | |||
| 34 | static Register extendRegisterMin32(CallLowering::ValueHandler &Handler, | |||
| 35 | Register ValVReg, CCValAssign &VA) { | |||
| 36 | if (VA.getLocVT().getSizeInBits() < 32) { | |||
| 37 | // 16-bit types are reported as legal for 32-bit registers. We need to | |||
| 38 | // extend and do a 32-bit copy to avoid the verifier complaining about it. | |||
| 39 | return Handler.MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0); | |||
| 40 | } | |||
| 41 | ||||
| 42 | return Handler.extendRegister(ValVReg, VA); | |||
| 43 | } | |||
| 44 | ||||
| 45 | struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler { | |||
| 46 | AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI, | |||
| 47 | MachineInstrBuilder MIB) | |||
| 48 | : OutgoingValueHandler(B, MRI), MIB(MIB) {} | |||
| 49 | ||||
| 50 | MachineInstrBuilder MIB; | |||
| 51 | ||||
| 52 | Register getStackAddress(uint64_t Size, int64_t Offset, | |||
| 53 | MachinePointerInfo &MPO, | |||
| 54 | ISD::ArgFlagsTy Flags) override { | |||
| 55 | llvm_unreachable("not implemented"); | |||
| 56 | } | |||
| 57 | ||||
| 58 | void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, | |||
| 59 | MachinePointerInfo &MPO, CCValAssign &VA) override { | |||
| 60 | llvm_unreachable("not implemented"); | |||
| 61 | } | |||
| 62 | ||||
| 63 | void assignValueToReg(Register ValVReg, Register PhysReg, | |||
| 64 | CCValAssign VA) override { | |||
| 65 | Register ExtReg = extendRegisterMin32(*this, ValVReg, VA); | |||
| 66 | ||||
| 67 | // If this is a scalar return, insert a readfirstlane just in case the value | |||
| 68 | // ends up in a VGPR. | |||
| 69 | // FIXME: Assert this is a shader return. | |||
| 70 | const SIRegisterInfo *TRI | |||
| 71 | = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo()); | |||
| 72 | if (TRI->isSGPRReg(MRI, PhysReg)) { | |||
| 73 | LLT Ty = MRI.getType(ExtReg); | |||
| 74 | LLT S32 = LLT::scalar(32); | |||
| 75 | if (Ty != S32) { | |||
| 76 | // FIXME: We should probably support readfirstlane intrinsics with all | |||
| 77 | // legal 32-bit types. | |||
| 78 | assert(Ty.getSizeInBits() == 32); | |||
| 79 | if (Ty.isPointer()) | |||
| 80 | ExtReg = MIRBuilder.buildPtrToInt(S32, ExtReg).getReg(0); | |||
| 81 | else | |||
| 82 | ExtReg = MIRBuilder.buildBitcast(S32, ExtReg).getReg(0); | |||
| 83 | } | |||
| 84 | ||||
| 85 | auto ToSGPR = MIRBuilder.buildIntrinsic(Intrinsic::amdgcn_readfirstlane, | |||
| 86 | {MRI.getType(ExtReg)}, false) | |||
| 87 | .addReg(ExtReg); | |||
| 88 | ExtReg = ToSGPR.getReg(0); | |||
| 89 | } | |||
| 90 | ||||
| 91 | MIRBuilder.buildCopy(PhysReg, ExtReg); | |||
| 92 | MIB.addUse(PhysReg, RegState::Implicit); | |||
| 93 | } | |||
| 94 | }; | |||
| 95 | ||||
| 96 | struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler { | |||
| 97 | uint64_t StackUsed = 0; | |||
| 98 | ||||
| 99 | AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI) | |||
| 100 | : IncomingValueHandler(B, MRI) {} | |||
| 101 | ||||
| 102 | Register getStackAddress(uint64_t Size, int64_t Offset, | |||
| 103 | MachinePointerInfo &MPO, | |||
| 104 | ISD::ArgFlagsTy Flags) override { | |||
| 105 | auto &MFI = MIRBuilder.getMF().getFrameInfo(); | |||
| 106 | ||||
| 107 | // Byval is assumed to be writable memory, but other stack passed arguments | |||
| 108 | // are not. | |||
| 109 | const bool IsImmutable = !Flags.isByVal(); | |||
| 110 | int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable); | |||
| 111 | MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI); | |||
| 112 | auto AddrReg = MIRBuilder.buildFrameIndex( | |||
| 113 | LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI); | |||
| 114 | StackUsed = std::max(StackUsed, Size + Offset); | |||
| 115 | return AddrReg.getReg(0); | |||
| 116 | } | |||
| 117 | ||||
| 118 | void assignValueToReg(Register ValVReg, Register PhysReg, | |||
| 119 | CCValAssign VA) override { | |||
| 120 | markPhysRegUsed(PhysReg); | |||
| 121 | ||||
| 122 | if (VA.getLocVT().getSizeInBits() < 32) { | |||
| 123 | // 16-bit types are reported as legal for 32-bit registers. We need to do | |||
| 124 | // a 32-bit copy, and truncate to avoid the verifier complaining about it. | |||
| 125 | auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg); | |||
| 126 | ||||
| 127 | // If we have signext/zeroext, it applies to the whole 32-bit register | |||
| 128 | // before truncation. | |||
| 129 | auto Extended = | |||
| 130 | buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT())); | |||
| 131 | MIRBuilder.buildTrunc(ValVReg, Extended); | |||
| 132 | return; | |||
| 133 | } | |||
| 134 | ||||
| 135 | IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA); | |||
| 136 | } | |||
| 137 | ||||
| 138 | void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, | |||
| 139 | MachinePointerInfo &MPO, CCValAssign &VA) override { | |||
| 140 | MachineFunction &MF = MIRBuilder.getMF(); | |||
| 141 | ||||
| 142 | auto MMO = MF.getMachineMemOperand( | |||
| 143 | MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy, | |||
| 144 | inferAlignFromPtrInfo(MF, MPO)); | |||
| 145 | MIRBuilder.buildLoad(ValVReg, Addr, *MMO); | |||
| 146 | } | |||
| 147 | ||||
| 148 | /// How the physical register gets marked varies between formal | |||
| 149 | /// parameters (it's a basic-block live-in), and a call instruction | |||
| 150 | /// (it's an implicit-def of the call instruction). | |||
| 151 | virtual void markPhysRegUsed(unsigned PhysReg) = 0; | |||
| 152 | }; | |||
| 153 | ||||
| 154 | struct FormalArgHandler : public AMDGPUIncomingArgHandler { | |||
| 155 | FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI) | |||
| 156 | : AMDGPUIncomingArgHandler(B, MRI) {} | |||
| 157 | ||||
| 158 | void markPhysRegUsed(unsigned PhysReg) override { | |||
| 159 | MIRBuilder.getMBB().addLiveIn(PhysReg); | |||
| 160 | } | |||
| 161 | }; | |||
| 162 | ||||
| 163 | struct CallReturnHandler : public AMDGPUIncomingArgHandler { | |||
| 164 | CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI, | |||
| 165 | MachineInstrBuilder MIB) | |||
| 166 | : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {} | |||
| 167 | ||||
| 168 | void markPhysRegUsed(unsigned PhysReg) override { | |||
| 169 | MIB.addDef(PhysReg, RegState::Implicit); | |||
| 170 | } | |||
| 171 | ||||
| 172 | MachineInstrBuilder MIB; | |||
| 173 | }; | |||
| 174 | ||||
| 175 | struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler { | |||
| 176 | /// For tail calls, the byte offset of the call's argument area from the | |||
| 177 | /// callee's. Unused elsewhere. | |||
| 178 | int FPDiff; | |||
| 179 | ||||
| 180 | // Cache the SP register vreg if we need it more than once in this call site. | |||
| 181 | Register SPReg; | |||
| 182 | ||||
| 183 | bool IsTailCall; | |||
| 184 | ||||
| 185 | AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder, | |||
| 186 | MachineRegisterInfo &MRI, MachineInstrBuilder MIB, | |||
| 187 | bool IsTailCall = false, int FPDiff = 0) | |||
| 188 | : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff), | |||
| 189 | IsTailCall(IsTailCall) {} | |||
| 190 | ||||
| 191 | Register getStackAddress(uint64_t Size, int64_t Offset, | |||
| 192 | MachinePointerInfo &MPO, | |||
| 193 | ISD::ArgFlagsTy Flags) override { | |||
| 194 | MachineFunction &MF = MIRBuilder.getMF(); | |||
| 195 | const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32); | |||
| 196 | const LLT S32 = LLT::scalar(32); | |||
| 197 | ||||
| 198 | if (IsTailCall) { | |||
| 199 | Offset += FPDiff; | |||
| 200 | int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true); | |||
| 201 | auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI); | |||
| 202 | MPO = MachinePointerInfo::getFixedStack(MF, FI); | |||
| 203 | return FIReg.getReg(0); | |||
| 204 | } | |||
| 205 | ||||
| 206 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
| 207 | ||||
| 208 | if (!SPReg) { | |||
| 209 | const GCNSubtarget &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>(); | |||
| 210 | if (ST.enableFlatScratch()) { | |||
| 211 | // The stack is accessed unswizzled, so we can use a regular copy. | |||
| 212 | SPReg = MIRBuilder.buildCopy(PtrTy, | |||
| 213 | MFI->getStackPtrOffsetReg()).getReg(0); | |||
| 214 | } else { | |||
| 215 | // The address we produce here, without knowing the use context, is going | |||
| 216 | // to be interpreted as a vector address, so we need to convert to a | |||
| 217 | // swizzled address. | |||
| 218 | SPReg = MIRBuilder.buildInstr(AMDGPU::G_AMDGPU_WAVE_ADDRESS, {PtrTy}, | |||
| 219 | {MFI->getStackPtrOffsetReg()}).getReg(0); | |||
| 220 | } | |||
| 221 | } | |||
| 222 | ||||
| 223 | auto OffsetReg = MIRBuilder.buildConstant(S32, Offset); | |||
| 224 | ||||
| 225 | auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg); | |||
| 226 | MPO = MachinePointerInfo::getStack(MF, Offset); | |||
| 227 | return AddrReg.getReg(0); | |||
| 228 | } | |||
| 229 | ||||
| 230 | void assignValueToReg(Register ValVReg, Register PhysReg, | |||
| 231 | CCValAssign VA) override { | |||
| 232 | MIB.addUse(PhysReg, RegState::Implicit); | |||
| 233 | Register ExtReg = extendRegisterMin32(*this, ValVReg, VA); | |||
| 234 | MIRBuilder.buildCopy(PhysReg, ExtReg); | |||
| 235 | } | |||
| 236 | ||||
| 237 | void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, | |||
| 238 | MachinePointerInfo &MPO, CCValAssign &VA) override { | |||
| 239 | MachineFunction &MF = MIRBuilder.getMF(); | |||
| 240 | uint64_t LocMemOffset = VA.getLocMemOffset(); | |||
| 241 | const auto &ST = MF.getSubtarget<GCNSubtarget>(); | |||
| 242 | ||||
| 243 | auto MMO = MF.getMachineMemOperand( | |||
| 244 | MPO, MachineMemOperand::MOStore, MemTy, | |||
| 245 | commonAlignment(ST.getStackAlignment(), LocMemOffset)); | |||
| 246 | MIRBuilder.buildStore(ValVReg, Addr, *MMO); | |||
| 247 | } | |||
| 248 | ||||
| 249 | void assignValueToAddress(const CallLowering::ArgInfo &Arg, | |||
| 250 | unsigned ValRegIndex, Register Addr, LLT MemTy, | |||
| 251 | MachinePointerInfo &MPO, CCValAssign &VA) override { | |||
| 252 | Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt | |||
| 253 | ? extendRegister(Arg.Regs[ValRegIndex], VA) | |||
| 254 | : Arg.Regs[ValRegIndex]; | |||
| 255 | assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA); | |||
| 256 | } | |||
| 257 | }; | |||
| 258 | } | |||
| 259 | ||||
| 260 | AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI) | |||
| 261 | : CallLowering(&TLI) { | |||
| 262 | } | |||
| 263 | ||||
| 264 | // FIXME: Compatibility shim | |||
| 265 | static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) { | |||
| 266 | switch (MIOpc) { | |||
| 267 | case TargetOpcode::G_SEXT: | |||
| 268 | return ISD::SIGN_EXTEND; | |||
| 269 | case TargetOpcode::G_ZEXT: | |||
| 270 | return ISD::ZERO_EXTEND; | |||
| 271 | case TargetOpcode::G_ANYEXT: | |||
| 272 | return ISD::ANY_EXTEND; | |||
| 273 | default: | |||
| 274 | llvm_unreachable("not an extend opcode"); | |||
| 275 | } | |||
| 276 | } | |||
| 277 | ||||
| 278 | bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF, | |||
| 279 | CallingConv::ID CallConv, | |||
| 280 | SmallVectorImpl<BaseArgInfo> &Outs, | |||
| 281 | bool IsVarArg) const { | |||
| 282 | // For shaders. Vector types should be explicitly handled by CC. | |||
| 283 | if (AMDGPU::isEntryFunctionCC(CallConv)) | |||
| 284 | return true; | |||
| 285 | ||||
| 286 | SmallVector<CCValAssign, 16> ArgLocs; | |||
| 287 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
| 288 | CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, | |||
| 289 | MF.getFunction().getContext()); | |||
| 290 | ||||
| 291 | return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg)); | |||
| 292 | } | |||
| 293 | ||||
| 294 | /// Lower the return value for the already existing \p Ret. This assumes that | |||
| 295 | /// \p B's insertion point is correct. | |||
| 296 | bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B, | |||
| 297 | const Value *Val, ArrayRef<Register> VRegs, | |||
| 298 | MachineInstrBuilder &Ret) const { | |||
| 299 | if (!Val) | |||
| 300 | return true; | |||
| 301 | ||||
| 302 | auto &MF = B.getMF(); | |||
| 303 | const auto &F = MF.getFunction(); | |||
| 304 | const DataLayout &DL = MF.getDataLayout(); | |||
| 305 | MachineRegisterInfo *MRI = B.getMRI(); | |||
| 306 | LLVMContext &Ctx = F.getContext(); | |||
| 307 | ||||
| 308 | CallingConv::ID CC = F.getCallingConv(); | |||
| 309 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
| 310 | ||||
| 311 | SmallVector<EVT, 8> SplitEVTs; | |||
| 312 | ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs); | |||
| 313 | assert(VRegs.size() == SplitEVTs.size() && | |||
| 314 |        "For each split Type there should be exactly one VReg."); | |||
| 315 | ||||
| 316 | SmallVector<ArgInfo, 8> SplitRetInfos; | |||
| 317 | ||||
| 318 | for (unsigned i = 0; i < SplitEVTs.size(); ++i) { | |||
| 319 | EVT VT = SplitEVTs[i]; | |||
| 320 | Register Reg = VRegs[i]; | |||
| 321 | ArgInfo RetInfo(Reg, VT.getTypeForEVT(Ctx), 0); | |||
| 322 | setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F); | |||
| 323 | ||||
| 324 | if (VT.isScalarInteger()) { | |||
| 325 | unsigned ExtendOp = TargetOpcode::G_ANYEXT; | |||
| 326 | if (RetInfo.Flags[0].isSExt()) { | |||
| 327 | assert(RetInfo.Regs.size() == 1 && "expect only simple return values"); | |||
| 328 | ExtendOp = TargetOpcode::G_SEXT; | |||
| 329 | } else if (RetInfo.Flags[0].isZExt()) { | |||
| 330 | assert(RetInfo.Regs.size() == 1 && "expect only simple return values"); | |||
| 331 | ExtendOp = TargetOpcode::G_ZEXT; | |||
| 332 | } | |||
| 333 | ||||
| 334 | EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT, | |||
| 335 | extOpcodeToISDExtOpcode(ExtendOp)); | |||
| 336 | if (ExtVT != VT) { | |||
| 337 | RetInfo.Ty = ExtVT.getTypeForEVT(Ctx); | |||
| 338 | LLT ExtTy = getLLTForType(*RetInfo.Ty, DL); | |||
| 339 | Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0); | |||
| 340 | } | |||
| 341 | } | |||
| 342 | ||||
| 343 | if (Reg != RetInfo.Regs[0]) { | |||
| 344 | RetInfo.Regs[0] = Reg; | |||
| 345 | // Reset the arg flags after modifying Reg. | |||
| 346 | setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F); | |||
| 347 | } | |||
| 348 | ||||
| 349 | splitToValueTypes(RetInfo, SplitRetInfos, DL, CC); | |||
| 350 | } | |||
| 351 | ||||
| 352 | CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg()); | |||
| 353 | ||||
| 354 | OutgoingValueAssigner Assigner(AssignFn); | |||
| 355 | AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret); | |||
| 356 | return determineAndHandleAssignments(RetHandler, Assigner, SplitRetInfos, B, | |||
| 357 | CC, F.isVarArg()); | |||
| 358 | } | |||
| 359 | ||||
| 360 | bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val, | |||
| 361 | ArrayRef<Register> VRegs, | |||
| 362 | FunctionLoweringInfo &FLI) const { | |||
| 363 | ||||
| 364 | MachineFunction &MF = B.getMF(); | |||
| 365 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
| 366 | MFI->setIfReturnsVoid(!Val); | |||
| 367 | ||||
| 368 | assert(!Val == VRegs.empty() && "Return value without a vreg"); | |||
| 369 | ||||
| 370 | CallingConv::ID CC = B.getMF().getFunction().getCallingConv(); | |||
| 371 | const bool IsShader = AMDGPU::isShader(CC); | |||
| 372 | const bool IsWaveEnd = | |||
| 373 | (IsShader && MFI->returnsVoid()) || AMDGPU::isKernel(CC); | |||
| 374 | if (IsWaveEnd) { | |||
| 375 | B.buildInstr(AMDGPU::S_ENDPGM) | |||
| 376 | .addImm(0); | |||
| 377 | return true; | |||
| 378 | } | |||
| 379 | ||||
| 380 | unsigned ReturnOpc = | |||
| 381 | IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::SI_RETURN; | |||
| 382 | auto Ret = B.buildInstrNoInsert(ReturnOpc); | |||
| 383 | ||||
| 384 | if (!FLI.CanLowerReturn) | |||
| 385 | insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister); | |||
| 386 | else if (!lowerReturnVal(B, Val, VRegs, Ret)) | |||
| 387 | return false; | |||
| 388 | ||||
| 389 | // TODO: Handle CalleeSavedRegsViaCopy. | |||
| 390 | ||||
| 391 | B.insertInstr(Ret); | |||
| 392 | return true; | |||
| 393 | } | |||
| 394 | ||||
| 395 | void AMDGPUCallLowering::lowerParameterPtr(Register DstReg, MachineIRBuilder &B, | |||
| 396 | uint64_t Offset) const { | |||
| 397 | MachineFunction &MF = B.getMF(); | |||
| 398 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
| 399 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
| 400 | Register KernArgSegmentPtr = | |||
| 401 | MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); | |||
| 402 | Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr); | |||
| 403 | ||||
| 404 | auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset); | |||
| 405 | ||||
| 406 | B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg); | |||
| 407 | } | |||
| 408 | ||||
| 409 | void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg, | |||
| 410 | uint64_t Offset, | |||
| 411 | Align Alignment) const { | |||
| 412 | MachineFunction &MF = B.getMF(); | |||
| 413 | const Function &F = MF.getFunction(); | |||
| 414 | const DataLayout &DL = F.getParent()->getDataLayout(); | |||
| 415 | MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); | |||
| 416 | ||||
| 417 | LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64); | |||
| 418 | ||||
| 419 | SmallVector<ArgInfo, 32> SplitArgs; | |||
| 420 | SmallVector<uint64_t> FieldOffsets; | |||
| 421 | splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv(), &FieldOffsets); | |||
| 422 | ||||
| 423 | unsigned Idx = 0; | |||
| 424 | for (ArgInfo &SplitArg : SplitArgs) { | |||
| 425 | Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy); | |||
| 426 | lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]); | |||
| 427 | ||||
| 428 | LLT ArgTy = getLLTForType(*SplitArg.Ty, DL); | |||
| 429 | if (SplitArg.Flags[0].isPointer()) { | |||
| 430 | // Compensate for losing pointeriness in splitValueTypes. | |||
| 431 | LLT PtrTy = LLT::pointer(SplitArg.Flags[0].getPointerAddrSpace(), | |||
| 432 | ArgTy.getScalarSizeInBits()); | |||
| 433 | ArgTy = ArgTy.isVector() ? LLT::vector(ArgTy.getElementCount(), PtrTy) | |||
| 434 | : PtrTy; | |||
| 435 | } | |||
| 436 | ||||
| 437 | MachineMemOperand *MMO = MF.getMachineMemOperand( | |||
| 438 | PtrInfo, | |||
| 439 | MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | | |||
| 440 | MachineMemOperand::MOInvariant, | |||
| 441 | ArgTy, commonAlignment(Alignment, FieldOffsets[Idx])); | |||
| 442 | ||||
| 443 | assert(SplitArg.Regs.size() == 1)(static_cast <bool> (SplitArg.Regs.size() == 1) ? void ( 0) : __assert_fail ("SplitArg.Regs.size() == 1", "llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp" , 443, __extension__ __PRETTY_FUNCTION__)); | |||
| 444 | ||||
| 445 | B.buildLoad(SplitArg.Regs[0], PtrReg, *MMO); | |||
| 446 | ++Idx; | |||
| 447 | } | |||
| 448 | } | |||
| 449 | ||||
| 450 | // Allocate special inputs passed in user SGPRs. | |||
| 451 | static void allocateHSAUserSGPRs(CCState &CCInfo, | |||
| 452 | MachineIRBuilder &B, | |||
| 453 | MachineFunction &MF, | |||
| 454 | const SIRegisterInfo &TRI, | |||
| 455 | SIMachineFunctionInfo &Info) { | |||
| 456 | // FIXME: How should these inputs interact with inreg / custom SGPR inputs? | |||
| 457 | if (Info.hasPrivateSegmentBuffer()) { | |||
| 458 | Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI); | |||
| 459 | MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass); | |||
| 460 | CCInfo.AllocateReg(PrivateSegmentBufferReg); | |||
| 461 | } | |||
| 462 | ||||
| 463 | if (Info.hasDispatchPtr()) { | |||
| 464 | Register DispatchPtrReg = Info.addDispatchPtr(TRI); | |||
| 465 | MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); | |||
| 466 | CCInfo.AllocateReg(DispatchPtrReg); | |||
| 467 | } | |||
| 468 | ||||
| 469 | const Module *M = MF.getFunction().getParent(); | |||
| 470 | if (Info.hasQueuePtr() && | |||
| 471 | AMDGPU::getCodeObjectVersion(*M) < AMDGPU::AMDHSA_COV5) { | |||
| 472 | Register QueuePtrReg = Info.addQueuePtr(TRI); | |||
| 473 | MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); | |||
| 474 | CCInfo.AllocateReg(QueuePtrReg); | |||
| 475 | } | |||
| 476 | ||||
| 477 | if (Info.hasKernargSegmentPtr()) { | |||
| 478 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
| 479 | Register InputPtrReg = Info.addKernargSegmentPtr(TRI); | |||
| 480 | const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64); | |||
| 481 | Register VReg = MRI.createGenericVirtualRegister(P4); | |||
| 482 | MRI.addLiveIn(InputPtrReg, VReg); | |||
| 483 | B.getMBB().addLiveIn(InputPtrReg); | |||
| 484 | B.buildCopy(VReg, InputPtrReg); | |||
| 485 | CCInfo.AllocateReg(InputPtrReg); | |||
| 486 | } | |||
| 487 | ||||
| 488 | if (Info.hasDispatchID()) { | |||
| 489 | Register DispatchIDReg = Info.addDispatchID(TRI); | |||
| 490 | MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); | |||
| 491 | CCInfo.AllocateReg(DispatchIDReg); | |||
| 492 | } | |||
| 493 | ||||
| 494 | if (Info.hasFlatScratchInit()) { | |||
| 495 | Register FlatScratchInitReg = Info.addFlatScratchInit(TRI); | |||
| 496 | MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); | |||
| 497 | CCInfo.AllocateReg(FlatScratchInitReg); | |||
| 498 | } | |||
| 499 | ||||
| 500 | // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read | |||
| 501 | // these from the dispatch pointer. | |||
| 502 | } | |||
| 503 | ||||
| 504 | bool AMDGPUCallLowering::lowerFormalArgumentsKernel( | |||
| 505 | MachineIRBuilder &B, const Function &F, | |||
| 506 | ArrayRef<ArrayRef<Register>> VRegs) const { | |||
| 507 | MachineFunction &MF = B.getMF(); | |||
| 508 | const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>(); | |||
| 509 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
| 510 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
| 511 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
| 512 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
| 513 | const DataLayout &DL = F.getParent()->getDataLayout(); | |||
| 514 | ||||
| 515 | Info->allocateKnownAddressLDSGlobal(F); | |||
| 516 | ||||
| 517 | SmallVector<CCValAssign, 16> ArgLocs; | |||
| 518 | CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext()); | |||
| 519 | ||||
| 520 | allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info); | |||
| 521 | ||||
| 522 | unsigned i = 0; | |||
| 523 | const Align KernArgBaseAlign(16); | |||
| 524 | const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F); | |||
| 525 | uint64_t ExplicitArgOffset = 0; | |||
| 526 | ||||
| 527 | // TODO: Align down to dword alignment and extract bits for extending loads. | |||
| 528 | for (auto &Arg : F.args()) { | |||
| 529 | const bool IsByRef = Arg.hasByRefAttr(); | |||
| 530 | Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType(); | |||
| 531 | unsigned AllocSize = DL.getTypeAllocSize(ArgTy); | |||
| 532 | if (AllocSize == 0) | |||
| 533 | continue; | |||
| 534 | ||||
| 535 | MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt; | |||
| 536 | Align ABIAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy); | |||
| 537 | ||||
| 538 | uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset; | |||
| 539 | ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize; | |||
| 540 | ||||
| 541 | if (Arg.use_empty()) { | |||
| 542 | ++i; | |||
| 543 | continue; | |||
| 544 | } | |||
| 545 | ||||
| 546 | Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset); | |||
| 547 | ||||
| 548 | if (IsByRef) { | |||
| 549 | unsigned ByRefAS = cast<PointerType>(Arg.getType())->getAddressSpace(); | |||
| 550 | ||||
| 551 | assert(VRegs[i].size() == 1 && | |||
| 552 |        "expected only one register for byval pointers"); | |||
| 553 | if (ByRefAS == AMDGPUAS::CONSTANT_ADDRESS) { | |||
| 554 | lowerParameterPtr(VRegs[i][0], B, ArgOffset); | |||
| 555 | } else { | |||
| 556 | const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64); | |||
| 557 | Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy); | |||
| 558 | lowerParameterPtr(PtrReg, B, ArgOffset); | |||
| 559 | ||||
| 560 | B.buildAddrSpaceCast(VRegs[i][0], PtrReg); | |||
| 561 | } | |||
| 562 | } else { | |||
| 563 | ArgInfo OrigArg(VRegs[i], Arg, i); | |||
| 564 | const unsigned OrigArgIdx = i + AttributeList::FirstArgIndex; | |||
| 565 | setArgFlags(OrigArg, OrigArgIdx, DL, F); | |||
| 566 | lowerParameter(B, OrigArg, ArgOffset, Alignment); | |||
| 567 | } | |||
| 568 | ||||
| 569 | ++i; | |||
| 570 | } | |||
| 571 | ||||
| 572 | TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); | |||
| 573 | TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false); | |||
| 574 | return true; | |||
| 575 | } | |||
| 576 | ||||
| 577 | bool AMDGPUCallLowering::lowerFormalArguments( | |||
| 578 | MachineIRBuilder &B, const Function &F, ArrayRef<ArrayRef<Register>> VRegs, | |||
| 579 | FunctionLoweringInfo &FLI) const { | |||
| 580 | CallingConv::ID CC = F.getCallingConv(); | |||
| 581 | ||||
| 582 | // The infrastructure for normal calling convention lowering is essentially | |||
| 583 | // useless for kernels. We want to avoid any kind of legalization or argument | |||
| 584 | // splitting. | |||
| 585 | if (CC == CallingConv::AMDGPU_KERNEL) | |||
| 586 | return lowerFormalArgumentsKernel(B, F, VRegs); | |||
| 587 | ||||
| 588 | const bool IsGraphics = AMDGPU::isGraphics(CC); | |||
| 589 | const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC); | |||
| 590 | ||||
| 591 | MachineFunction &MF = B.getMF(); | |||
| 592 | MachineBasicBlock &MBB = B.getMBB(); | |||
| 593 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
| 594 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
| 595 | const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>(); | |||
| 596 | const SIRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
| 597 | const DataLayout &DL = F.getParent()->getDataLayout(); | |||
| 598 | ||||
| 599 | Info->allocateKnownAddressLDSGlobal(F); | |||
| 600 | ||||
| 601 | SmallVector<CCValAssign, 16> ArgLocs; | |||
| 602 | CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext()); | |||
| 603 | ||||
| 604 | if (Info->hasImplicitBufferPtr()) { | |||
| 605 | Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI); | |||
| 606 | MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass); | |||
| 607 | CCInfo.AllocateReg(ImplicitBufferPtrReg); | |||
| 608 | } | |||
| 609 | ||||
| 610 | // FIXME: This probably isn't defined for mesa | |||
| 611 | if (Info->hasFlatScratchInit() && !Subtarget.isAmdPalOS()) { | |||
| 612 | Register FlatScratchInitReg = Info->addFlatScratchInit(*TRI); | |||
| 613 | MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); | |||
| 614 | CCInfo.AllocateReg(FlatScratchInitReg); | |||
| 615 | } | |||
| 616 | ||||
| 617 | SmallVector<ArgInfo, 32> SplitArgs; | |||
| 618 | unsigned Idx = 0; | |||
| 619 | unsigned PSInputNum = 0; | |||
| 620 | ||||
| 621 | // Insert the hidden sret parameter if the return value won't fit in the | |||
| 622 | // return registers. | |||
| 623 | if (!FLI.CanLowerReturn) | |||
| 624 | insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL); | |||
| 625 | ||||
| 626 | for (auto &Arg : F.args()) { | |||
| 627 | if (DL.getTypeStoreSize(Arg.getType()) == 0) | |||
| 628 | continue; | |||
| 629 | ||||
| 630 | const bool InReg = Arg.hasAttribute(Attribute::InReg); | |||
| 631 | ||||
| 632 | // SGPR arguments to functions not implemented. | |||
| 633 | if (!IsGraphics && InReg) | |||
| 634 | return false; | |||
| 635 | ||||
| 636 | if (Arg.hasAttribute(Attribute::SwiftSelf) || | |||
| 637 | Arg.hasAttribute(Attribute::SwiftError) || | |||
| 638 | Arg.hasAttribute(Attribute::Nest)) | |||
| 639 | return false; | |||
| 640 | ||||
| 641 | if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) { | |||
| 642 | const bool ArgUsed = !Arg.use_empty(); | |||
| 643 | bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum); | |||
| 644 | ||||
| 645 | if (!SkipArg) { | |||
| 646 | Info->markPSInputAllocated(PSInputNum); | |||
| 647 | if (ArgUsed) | |||
| 648 | Info->markPSInputEnabled(PSInputNum); | |||
| 649 | } | |||
| 650 | ||||
| 651 | ++PSInputNum; | |||
| 652 | ||||
| 653 | if (SkipArg) { | |||
| 654 | for (Register R : VRegs[Idx]) | |||
| 655 | B.buildUndef(R); | |||
| 656 | ||||
| 657 | ++Idx; | |||
| 658 | continue; | |||
| 659 | } | |||
| 660 | } | |||
| 661 | ||||
| 662 | ArgInfo OrigArg(VRegs[Idx], Arg, Idx); | |||
| 663 | const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex; | |||
| 664 | setArgFlags(OrigArg, OrigArgIdx, DL, F); | |||
| 665 | ||||
| 666 | splitToValueTypes(OrigArg, SplitArgs, DL, CC); | |||
| 667 | ++Idx; | |||
| 668 | } | |||
| 669 | ||||
| 670 | // At least one interpolation mode must be enabled or else the GPU will | |||
| 671 | // hang. | |||
| 672 | // | |||
| 673 | // Check PSInputAddr instead of PSInputEnable. The idea is that if the user | |||
| 674 | // set PSInputAddr, the user wants to enable some bits after the compilation | |||
| 675 | // based on run-time states. Since we can't know what the final PSInputEna | |||
| 676 | // will look like, we shouldn't do anything here and the user should take | |||
| 677 | // responsibility for the correct programming. | |||
| 678 | // | |||
| 679 | // Otherwise, the following restrictions apply: | |||
| 680 | // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. | |||
| 681 | // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be | |||
| 682 | // enabled too. | |||
| 683 | if (CC == CallingConv::AMDGPU_PS) { | |||
| 684 | if ((Info->getPSInputAddr() & 0x7F) == 0 || | |||
| 685 | ((Info->getPSInputAddr() & 0xF) == 0 && | |||
| 686 | Info->isPSInputAllocated(11))) { | |||
| 687 | CCInfo.AllocateReg(AMDGPU::VGPR0); | |||
| 688 | CCInfo.AllocateReg(AMDGPU::VGPR1); | |||
| 689 | Info->markPSInputAllocated(0); | |||
| 690 | Info->markPSInputEnabled(0); | |||
| 691 | } | |||
| 692 | ||||
| 693 | if (Subtarget.isAmdPalOS()) { | |||
| 694 | // For isAmdPalOS, the user does not enable some bits after compilation | |||
| 695 | // based on run-time states; the register values being generated here are | |||
| 696 | // the final ones set in hardware. Therefore we need to apply the | |||
| 697 | // workaround to PSInputAddr and PSInputEnable together. (The case where | |||
| 698 | // a bit is set in PSInputAddr but not PSInputEnable is where the frontend | |||
| 699 | // set up an input arg for a particular interpolation mode, but nothing | |||
| 700 | // uses that input arg. Really we should have an earlier pass that removes | |||
| 701 | // such an arg.) | |||
| 702 | unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); | |||
| 703 | if ((PsInputBits & 0x7F) == 0 || | |||
| 704 | ((PsInputBits & 0xF) == 0 && | |||
| 705 | (PsInputBits >> 11 & 1))) | |||
| 706 | Info->markPSInputEnabled(llvm::countr_zero(Info->getPSInputAddr())); | |||
| 707 | } | |||
| 708 | } | |||
| 709 | ||||
| 710 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
| 711 | CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg()); | |||
| 712 | ||||
| 713 | if (!MBB.empty()) | |||
| 714 | B.setInstr(*MBB.begin()); | |||
| 715 | ||||
| 716 | if (!IsEntryFunc && !IsGraphics) { | |||
| 717 | // For the fixed ABI, pass workitem IDs in the last argument register. | |||
| 718 | TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info); | |||
| 719 | } | |||
| 720 | ||||
| 721 | IncomingValueAssigner Assigner(AssignFn); | |||
| 722 | if (!determineAssignments(Assigner, SplitArgs, CCInfo)) | |||
| 723 | return false; | |||
| 724 | ||||
| 725 | FormalArgHandler Handler(B, MRI); | |||
| 726 | if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B)) | |||
| 727 | return false; | |||
| 728 | ||||
| 729 | uint64_t StackOffset = Assigner.StackOffset; | |||
| 730 | ||||
| 731 | // Start adding system SGPRs. | |||
| 732 | if (IsEntryFunc) { | |||
| 733 | TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics); | |||
| 734 | } else { | |||
| 735 | if (!Subtarget.enableFlatScratch()) | |||
| 736 | CCInfo.AllocateReg(Info->getScratchRSrcReg()); | |||
| 737 | TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info); | |||
| 738 | } | |||
| 739 | ||||
| 740 | // When we tail call, we need to check if the callee's arguments will fit on | |||
| 741 | // the caller's stack. So, whenever we lower formal arguments, we should keep | |||
| 742 | // track of this information, since we might lower a tail call in this | |||
| 743 | // function later. | |||
| 744 | Info->setBytesInStackArgArea(StackOffset); | |||
| 745 | ||||
| 746 | // Move back to the end of the basic block. | |||
| 747 | B.setMBB(MBB); | |||
| 748 | ||||
| 749 | return true; | |||
| 750 | } | |||
| 751 | ||||
| 752 | bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder, | |||
| 753 | CCState &CCInfo, | |||
| 754 | SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs, | |||
| 755 | CallLoweringInfo &Info) const { | |||
| 756 | MachineFunction &MF = MIRBuilder.getMF(); | |||
| 757 | ||||
| 758 | // If there's no call site, this doesn't correspond to a call from the IR and | |||
| 759 | // doesn't need implicit inputs. | |||
| 760 | if (!Info.CB) | |||
| 761 | return true; | |||
| 762 | ||||
| 763 | const AMDGPUFunctionArgInfo *CalleeArgInfo | |||
| 764 | = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo; | |||
| 765 | ||||
| 766 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
| 767 | const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo(); | |||
| 768 | ||||
| 769 | ||||
| 770 | // TODO: Unify with private memory register handling. This is complicated by | |||
| 771 | // the fact that at least in kernels, the input argument is not necessarily | |||
| 772 | // in the same location as the input. | |||
| 773 | AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = { | |||
| 774 | AMDGPUFunctionArgInfo::DISPATCH_PTR, | |||
| 775 | AMDGPUFunctionArgInfo::QUEUE_PTR, | |||
| 776 | AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR, | |||
| 777 | AMDGPUFunctionArgInfo::DISPATCH_ID, | |||
| 778 | AMDGPUFunctionArgInfo::WORKGROUP_ID_X, | |||
| 779 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, | |||
| 780 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Z, | |||
| 781 | AMDGPUFunctionArgInfo::LDS_KERNEL_ID, | |||
| 782 | }; | |||
| 783 | ||||
| 784 | static constexpr StringLiteral ImplicitAttrNames[] = { | |||
| 785 | "amdgpu-no-dispatch-ptr", | |||
| 786 | "amdgpu-no-queue-ptr", | |||
| 787 | "amdgpu-no-implicitarg-ptr", | |||
| 788 | "amdgpu-no-dispatch-id", | |||
| 789 | "amdgpu-no-workgroup-id-x", | |||
| 790 | "amdgpu-no-workgroup-id-y", | |||
| 791 | "amdgpu-no-workgroup-id-z", | |||
| 792 | "amdgpu-no-lds-kernel-id", | |||
| 793 | }; | |||
| 794 | ||||
| 795 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
| 796 | ||||
| 797 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
| 798 | const AMDGPULegalizerInfo *LI | |||
| 799 | = static_cast<const AMDGPULegalizerInfo*>(ST.getLegalizerInfo()); | |||
| 800 | ||||
| 801 | unsigned I = 0; | |||
| 802 | for (auto InputID : InputRegs) { | |||
| 803 | const ArgDescriptor *OutgoingArg; | |||
| 804 | const TargetRegisterClass *ArgRC; | |||
| 805 | LLT ArgTy; | |||
| 806 | ||||
| 807 | // If the callee does not use the attribute value, skip copying the value. | |||
| 808 | if (Info.CB->hasFnAttr(ImplicitAttrNames[I++])) | |||
| 809 | continue; | |||
| 810 | ||||
| 811 | std::tie(OutgoingArg, ArgRC, ArgTy) = | |||
| 812 | CalleeArgInfo->getPreloadedValue(InputID); | |||
| 813 | if (!OutgoingArg) | |||
| 814 | continue; | |||
| 815 | ||||
| 816 | const ArgDescriptor *IncomingArg; | |||
| 817 | const TargetRegisterClass *IncomingArgRC; | |||
| 818 | std::tie(IncomingArg, IncomingArgRC, ArgTy) = | |||
| 819 | CallerArgInfo.getPreloadedValue(InputID); | |||
| 820 | assert(IncomingArgRC == ArgRC); | |||
| 821 | ||||
| 822 | Register InputReg = MRI.createGenericVirtualRegister(ArgTy); | |||
| 823 | ||||
| 824 | if (IncomingArg) { | |||
| 825 | LI->loadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy); | |||
| 826 | } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) { | |||
| 827 | LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder); | |||
| 828 | } else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) { | |||
| 829 | std::optional<uint32_t> Id = | |||
| 830 | AMDGPUMachineFunction::getLDSKernelIdMetadata(MF.getFunction()); | |||
| 831 | if (Id) { | |||
| 832 | MIRBuilder.buildConstant(InputReg, *Id); | |||
| 833 | } else { | |||
| 834 | MIRBuilder.buildUndef(InputReg); | |||
| 835 | } | |||
| 836 | } else { | |||
| 837 | // We may have proven the input wasn't needed, although the ABI is | |||
| 838 | // requiring it. We just need to allocate the register appropriately. | |||
| 839 | MIRBuilder.buildUndef(InputReg); | |||
| 840 | } | |||
| 841 | ||||
| 842 | if (OutgoingArg->isRegister()) { | |||
| 843 | ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg); | |||
| 844 | if (!CCInfo.AllocateReg(OutgoingArg->getRegister())) | |||
| 845 | report_fatal_error("failed to allocate implicit input argument"); | |||
| 846 | } else { | |||
| 847 | LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n"); | |||
| 848 | return false; | |||
| 849 | } | |||
| 850 | } | |||
| 851 | ||||
| 852 | // Pack workitem IDs into a single register or pass it as is if already | |||
| 853 | // packed. | |||
| 854 | const ArgDescriptor *OutgoingArg; | |||
| 855 | const TargetRegisterClass *ArgRC; | |||
| 856 | LLT ArgTy; | |||
| 857 | ||||
| 858 | std::tie(OutgoingArg, ArgRC, ArgTy) = | |||
| 859 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X); | |||
| 860 | if (!OutgoingArg) | |||
| 861 | std::tie(OutgoingArg, ArgRC, ArgTy) = | |||
| 862 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y); | |||
| 863 | if (!OutgoingArg) | |||
| 864 | std::tie(OutgoingArg, ArgRC, ArgTy) = | |||
| 865 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z); | |||
| 866 | if (!OutgoingArg) | |||
| 867 | return false; | |||
| 868 | ||||
| 869 | auto WorkitemIDX = | |||
| 870 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X); | |||
| 871 | auto WorkitemIDY = | |||
| 872 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y); | |||
| 873 | auto WorkitemIDZ = | |||
| 874 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z); | |||
| 875 | ||||
| 876 | const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX); | |||
| 877 | const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY); | |||
| 878 | const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ); | |||
| 879 | const LLT S32 = LLT::scalar(32); | |||
| 880 | ||||
| 881 | const bool NeedWorkItemIDX = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-x"); | |||
| 882 | const bool NeedWorkItemIDY = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-y"); | |||
| 883 | const bool NeedWorkItemIDZ = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-z"); | |||
| 884 | ||||
| 885 | // If incoming ids are not packed we need to pack them. | |||
| 886 | // FIXME: Should consider known workgroup size to eliminate known 0 cases. | |||
| 887 | Register InputReg; | |||
| 888 | if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX && | |||
| 889 | NeedWorkItemIDX) { | |||
| 890 | if (ST.getMaxWorkitemID(MF.getFunction(), 0) != 0) { | |||
| 891 | InputReg = MRI.createGenericVirtualRegister(S32); | |||
| 892 | LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX, | |||
| 893 | std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX)); | |||
| 894 | } else { | |||
| 895 | InputReg = MIRBuilder.buildConstant(S32, 0).getReg(0); | |||
| 896 | } | |||
| 897 | } | |||
| 898 | ||||
| 899 | if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY && | |||
| 900 | NeedWorkItemIDY && ST.getMaxWorkitemID(MF.getFunction(), 1) != 0) { | |||
| 901 | Register Y = MRI.createGenericVirtualRegister(S32); | |||
| 902 | LI->loadInputValue(Y, MIRBuilder, IncomingArgY, std::get<1>(WorkitemIDY), | |||
| 903 | std::get<2>(WorkitemIDY)); | |||
| 904 | ||||
| 905 | Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10)).getReg(0); | |||
| 906 | InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y; | |||
| 907 | } | |||
| 908 | ||||
| 909 | if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ && | |||
| 910 | NeedWorkItemIDZ && ST.getMaxWorkitemID(MF.getFunction(), 2) != 0) { | |||
| 911 | Register Z = MRI.createGenericVirtualRegister(S32); | |||
| 912 | LI->loadInputValue(Z, MIRBuilder, IncomingArgZ, std::get<1>(WorkitemIDZ), | |||
| 913 | std::get<2>(WorkitemIDZ)); | |||
| 914 | ||||
| 915 | Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20)).getReg(0); | |||
| 916 | InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z; | |||
| 917 | } | |||
| 918 | ||||
| 919 | if (!InputReg && | |||
| 920 | (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) { | |||
| 921 | InputReg = MRI.createGenericVirtualRegister(S32); | |||
| 922 | if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) { | |||
| 923 | // We're in a situation where the outgoing function requires the workitem | |||
| 924 | // ID, but the calling function does not have it (e.g. a graphics function | |||
| 925 | // calling a C calling convention function). This is illegal, but we need | |||
| 926 | // to produce something. | |||
| 927 | MIRBuilder.buildUndef(InputReg); | |||
| 928 | } else { | |||
| 929 | // Workitem IDs are already packed; any of the present incoming arguments will | |||
| 930 | // carry all required fields. | |||
| 931 | ArgDescriptor IncomingArg = ArgDescriptor::createArg( | |||
| 932 | IncomingArgX ? *IncomingArgX : | |||
| 933 | IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u); | |||
| 934 | LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg, | |||
| 935 | &AMDGPU::VGPR_32RegClass, S32); | |||
| 936 | } | |||
| 937 | } | |||
| 938 | ||||
| 939 | if (OutgoingArg->isRegister()) { | |||
| 940 | if (InputReg) | |||
| 941 | ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg); | |||
| 942 | ||||
| 943 | if (!CCInfo.AllocateReg(OutgoingArg->getRegister())) | |||
| 944 | report_fatal_error("failed to allocate implicit input argument"); | |||
| 945 | } else { | |||
| 946 | LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n"); | |||
| 947 | return false; | |||
| 948 | } | |||
| 949 | ||||
| 950 | return true; | |||
| 951 | } | |||
| 952 | ||||
| 953 | /// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for | |||
| 954 | /// CC. | |||
| 955 | static std::pair<CCAssignFn *, CCAssignFn *> | |||
| 956 | getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) { | |||
| 957 | return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)}; | |||
| 958 | } | |||
| 959 | ||||
| 960 | static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect, | |||
| 961 | bool IsTailCall, CallingConv::ID CC) { | |||
| 962 | assert(!(IsIndirect && IsTailCall) && "Indirect calls can't be tail calls, " | |||
| 963 |        "because the address can be divergent"); | |||
| 964 | if (!IsTailCall) | |||
| 965 | return AMDGPU::G_SI_CALL; | |||
| 966 | ||||
| 967 | return CC == CallingConv::AMDGPU_Gfx ? AMDGPU::SI_TCRETURN_GFX : | |||
| 968 | AMDGPU::SI_TCRETURN; | |||
| 969 | } | |||
| 970 | ||||
| 971 | // Add operands to call instruction to track the callee. | |||
| 972 | static bool addCallTargetOperands(MachineInstrBuilder &CallInst, | |||
| 973 | MachineIRBuilder &MIRBuilder, | |||
| 974 | AMDGPUCallLowering::CallLoweringInfo &Info) { | |||
| 975 | if (Info.Callee.isReg()) { | |||
| 976 | CallInst.addReg(Info.Callee.getReg()); | |||
| 977 | CallInst.addImm(0); | |||
| 978 | } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) { | |||
| 979 | // The call lowering lightly assumed we can directly encode a call target in | |||
| 980 | // the instruction, which is not the case. Materialize the address here. | |||
| 981 | const GlobalValue *GV = Info.Callee.getGlobal(); | |||
| 982 | auto Ptr = MIRBuilder.buildGlobalValue( | |||
| 983 | LLT::pointer(GV->getAddressSpace(), 64), GV); | |||
| 984 | CallInst.addReg(Ptr.getReg(0)); | |||
| 985 | CallInst.add(Info.Callee); | |||
| 986 | } else | |||
| 987 | return false; | |||
| 988 | ||||
| 989 | return true; | |||
| 990 | } | |||
| 991 | ||||
| 992 | bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay( | |||
| 993 | CallLoweringInfo &Info, MachineFunction &MF, | |||
| 994 | SmallVectorImpl<ArgInfo> &InArgs) const { | |||
| 995 | const Function &CallerF = MF.getFunction(); | |||
| 996 | CallingConv::ID CalleeCC = Info.CallConv; | |||
| 997 | CallingConv::ID CallerCC = CallerF.getCallingConv(); | |||
| 998 | ||||
| 999 | // If the calling conventions match, then everything must be the same. | |||
| 1000 | if (CalleeCC == CallerCC) | |||
| 1001 | return true; | |||
| 1002 | ||||
| 1003 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
| 1004 | ||||
| 1005 | // Make sure that the caller and callee preserve all of the same registers. | |||
| 1006 | auto TRI = ST.getRegisterInfo(); | |||
| 1007 | ||||
| 1008 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); | |||
| 1009 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); | |||
| 1010 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) | |||
| 1011 | return false; | |||
| 1012 | ||||
| 1013 | // Check if the caller and callee will handle arguments in the same way. | |||
| 1014 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
| 1015 | CCAssignFn *CalleeAssignFnFixed; | |||
| 1016 | CCAssignFn *CalleeAssignFnVarArg; | |||
| 1017 | std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) = | |||
| 1018 | getAssignFnsForCC(CalleeCC, TLI); | |||
| 1019 | ||||
| 1020 | CCAssignFn *CallerAssignFnFixed; | |||
| 1021 | CCAssignFn *CallerAssignFnVarArg; | |||
| 1022 | std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) = | |||
| 1023 | getAssignFnsForCC(CallerCC, TLI); | |||
| 1024 | ||||
| 1025 | // FIXME: We are not accounting for potential differences in implicitly passed | |||
| 1026 | // inputs, but only the fixed ABI is supported now anyway. | |||
| 1027 | IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed, | |||
| 1028 | CalleeAssignFnVarArg); | |||
| 1029 | IncomingValueAssigner CallerAssigner(CallerAssignFnFixed, | |||
| 1030 | CallerAssignFnVarArg); | |||
| 1031 | return resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner); | |||
| 1032 | } | |||
| 1033 | ||||
| 1034 | bool AMDGPUCallLowering::areCalleeOutgoingArgsTailCallable( | |||
| 1035 | CallLoweringInfo &Info, MachineFunction &MF, | |||
| 1036 | SmallVectorImpl<ArgInfo> &OutArgs) const { | |||
| 1037 | // If there are no outgoing arguments, then we are done. | |||
| 1038 | if (OutArgs.empty()) | |||
| 1039 | return true; | |||
| 1040 | ||||
| 1041 | const Function &CallerF = MF.getFunction(); | |||
| 1042 | CallingConv::ID CalleeCC = Info.CallConv; | |||
| 1043 | CallingConv::ID CallerCC = CallerF.getCallingConv(); | |||
| 1044 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
| 1045 | ||||
| 1046 | CCAssignFn *AssignFnFixed; | |||
| 1047 | CCAssignFn *AssignFnVarArg; | |||
| 1048 | std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI); | |||
| 1049 | ||||
| 1050 | // We have outgoing arguments. Make sure that we can tail call with them. | |||
| 1051 | SmallVector<CCValAssign, 16> OutLocs; | |||
| 1052 | CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext()); | |||
| 1053 | OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg); | |||
| 1054 | ||||
| 1055 | if (!determineAssignments(Assigner, OutArgs, OutInfo)) { | |||
| 1056 | LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n"); | |||
| 1057 | return false; | |||
| 1058 | } | |||
| 1059 | ||||
| 1060 | // Make sure that they can fit on the caller's stack. | |||
| 1061 | const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); | |||
| 1062 | if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) { | |||
| 1063 | LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n"); | |||
| 1064 | return false; | |||
| 1065 | } | |||
| 1066 | ||||
| 1067 | // Verify that the parameters in callee-saved registers match. | |||
| 1068 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
| 1069 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
| 1070 | const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC); | |||
| 1071 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
| 1072 | return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs); | |||
| 1073 | } | |||
| 1074 | ||||
| 1075 | /// Return true if the calling convention is one that we can guarantee TCO for. | |||
| 1076 | static bool canGuaranteeTCO(CallingConv::ID CC) { | |||
| 1077 | return CC == CallingConv::Fast; | |||
| 1078 | } | |||
| 1079 | ||||
| 1080 | /// Return true if we might ever do TCO for calls with this calling convention. | |||
| 1081 | static bool mayTailCallThisCC(CallingConv::ID CC) { | |||
| 1082 | switch (CC) { | |||
| 1083 | case CallingConv::C: | |||
| 1084 | case CallingConv::AMDGPU_Gfx: | |||
| 1085 | return true; | |||
| 1086 | default: | |||
| 1087 | return canGuaranteeTCO(CC); | |||
| 1088 | } | |||
| 1089 | } | |||
| 1090 | ||||
| 1091 | bool AMDGPUCallLowering::isEligibleForTailCallOptimization( | |||
| 1092 | MachineIRBuilder &B, CallLoweringInfo &Info, | |||
| 1093 | SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const { | |||
| 1094 | // Must pass all target-independent checks in order to tail call optimize. | |||
| 1095 | if (!Info.IsTailCall) | |||
| 1096 | return false; | |||
| 1097 | ||||
| 1098 | // Indirect calls can't be tail calls, because the address can be divergent. | |||
| 1099 | // TODO Check divergence info if the call really is divergent. | |||
| 1100 | if (Info.Callee.isReg()) | |||
| 1101 | return false; | |||
| 1102 | ||||
| 1103 | MachineFunction &MF = B.getMF(); | |||
| 1104 | const Function &CallerF = MF.getFunction(); | |||
| 1105 | CallingConv::ID CalleeCC = Info.CallConv; | |||
| 1106 | CallingConv::ID CallerCC = CallerF.getCallingConv(); | |||
| 1107 | ||||
| 1108 | const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo(); | |||
| 1109 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); | |||
| 1110 | // Kernels aren't callable and don't have a live-in return address, so it | |||
| 1111 | // doesn't make sense to do a tail call from entry functions. | |||
| 1112 | if (!CallerPreserved) | |||
| 1113 | return false; | |||
| 1114 | ||||
| 1115 | if (!mayTailCallThisCC(CalleeCC)) { | |||
| 1116 | LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n"); | |||
| 1117 | return false; | |||
| 1118 | } | |||
| 1119 | ||||
| 1120 | if (any_of(CallerF.args(), [](const Argument &A) { | |||
| 1121 | return A.hasByValAttr() || A.hasSwiftErrorAttr(); | |||
| 1122 | })) { | |||
| 1123 | LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval " | |||
| 1124 | "or swifterror arguments\n"); | |||
| 1125 | return false; | |||
| 1126 | } | |||
| 1127 | ||||
| 1128 | // If we have -tailcallopt, then we're done. | |||
| 1129 | if (MF.getTarget().Options.GuaranteedTailCallOpt) | |||
| 1130 | return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv(); | |||
| 1131 | ||||
| 1132 | // Verify that the incoming and outgoing arguments from the callee are | |||
| 1133 | // safe to tail call. | |||
| 1134 | if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) { | |||
| 1135 | LLVM_DEBUG( | |||
| 1136 | dbgs() | |||
| 1137 | << "... Caller and callee have incompatible calling conventions.\n"); | |||
| 1138 | return false; | |||
| 1139 | } | |||
| 1140 | ||||
| 1141 | if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs)) | |||
| 1142 | return false; | |||
| 1143 | ||||
| 1144 | LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n"); | |||
| 1145 | return true; | |||
| 1146 | } | |||
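| | // Added summary (not in the original source): the checks above reject a tail | |||
| | // call when the site wasn't marked as one, when the callee address is in a | |||
| | // register (possibly divergent), when the caller is an entry function, when | |||
| | // the callee's convention can never be tail called, when the caller has byval | |||
| | // or swifterror arguments, or when argument passing / callee-saved register | |||
| | // usage is incompatible between caller and callee. | |||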
| 1147 | ||||
| 1148 | // Insert outgoing implicit arguments for a call, by inserting copies to the | |||
| 1149 | // implicit argument registers and adding the necessary implicit uses to the | |||
| 1150 | // call instruction. | |||
| 1151 | void AMDGPUCallLowering::handleImplicitCallArguments( | |||
| 1152 | MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst, | |||
| 1153 | const GCNSubtarget &ST, const SIMachineFunctionInfo &FuncInfo, | |||
| 1154 | ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const { | |||
| 1155 | if (!ST.enableFlatScratch()) { | |||
| 1156 | // Insert copies for the SRD (scratch resource descriptor). In the HSA case, | |||
| 1157 | // this should be an identity copy. | |||
| 1158 | auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32), | |||
| 1159 | FuncInfo.getScratchRSrcReg()); | |||
| 1160 | MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); | |||
| 1161 | CallInst.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit); | |||
| 1162 | } | |||
| 1163 | ||||
| 1164 | for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) { | |||
| 1165 | MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second); | |||
| 1166 | CallInst.addReg(ArgReg.first, RegState::Implicit); | |||
| 1167 | } | |||
| 1168 | } | |||
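| | // Added commentary (not in the original source): on a subtarget without flat | |||
| | // scratch this emits, roughly, | |||
| | //   %rsrc:_(<4 x s32>) = COPY <scratch rsrc descriptor register> | |||
| | //   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY %rsrc | |||
| | // and then records $sgpr0_sgpr1_sgpr2_sgpr3, plus each (physreg, vreg) pair in | |||
| | // ImplicitArgRegs, as an implicit use on the call so the copies stay live up | |||
| | // to the call site. | |||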
| 1169 | ||||
| 1170 | bool AMDGPUCallLowering::lowerTailCall( | |||
| 1171 | MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info, | |||
| 1172 | SmallVectorImpl<ArgInfo> &OutArgs) const { | |||
| 1173 | MachineFunction &MF = MIRBuilder.getMF(); | |||
| 1174 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
| 1175 | SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); | |||
| 1176 | const Function &F = MF.getFunction(); | |||
| 1177 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
| 1178 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
| 1179 | ||||
| 1180 | // True when we're tail calling, but without -tailcallopt. | |||
| 1181 | bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt; | |||
| 1182 | ||||
| 1183 | // Find out which ABI gets to decide where things go. | |||
| 1184 | CallingConv::ID CalleeCC = Info.CallConv; | |||
| 1185 | CCAssignFn *AssignFnFixed; | |||
| 1186 | CCAssignFn *AssignFnVarArg; | |||
| 1187 | std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI); | |||
| 1188 | ||||
| 1189 | MachineInstrBuilder CallSeqStart; | |||
| 1190 | if (!IsSibCall) | |||
| 1191 | CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP); | |||
| 1192 | ||||
| 1193 | unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true, CalleeCC); | |||
| 1194 | auto MIB = MIRBuilder.buildInstrNoInsert(Opc); | |||
| 1195 | if (!addCallTargetOperands(MIB, MIRBuilder, Info)) | |||
| 1196 | return false; | |||
| 1197 | ||||
| 1198 | // Byte offset for the tail call. When we are sibcalling, this will always | |||
| 1199 | // be 0. | |||
| 1200 | MIB.addImm(0); | |||
| 1201 | ||||
| 1202 | // Tell the call which registers are clobbered. | |||
| 1203 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
| 1204 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC); | |||
| 1205 | MIB.addRegMask(Mask); | |||
| 1206 | ||||
| 1207 | // FPDiff is the byte offset of the call's argument area from the callee's. | |||
| 1208 | // Stores to callee stack arguments will be placed in FixedStackSlots offset | |||
| 1209 | // by this amount for a tail call. In a sibling call it must be 0 because the | |||
| 1210 | // caller will deallocate the entire stack and the callee still expects its | |||
| 1211 | // arguments to begin at SP+0. | |||
| 1212 | int FPDiff = 0; | |||
| 1213 | ||||
| 1214 | // This will be 0 for sibcalls, potentially nonzero for tail calls produced | |||
| 1215 | // by -tailcallopt. For sibcalls, the memory operands for the call are | |||
| 1216 | // already available in the caller's incoming argument space. | |||
| 1217 | unsigned NumBytes = 0; | |||
| 1218 | if (!IsSibCall) { | |||
| 1219 | // We aren't sibcalling, so we need to compute FPDiff. We need to do this | |||
| 1220 | // before handling assignments, because FPDiff must be known for memory | |||
| 1221 | // arguments. | |||
| 1222 | unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea(); | |||
| 1223 | SmallVector<CCValAssign, 16> OutLocs; | |||
| 1224 | CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext()); | |||
| 1225 | ||||
| 1226 | // FIXME: Not accounting for callee implicit inputs | |||
| 1227 | OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg); | |||
| 1228 | if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo)) | |||
| 1229 | return false; | |||
| 1230 | ||||
| 1231 | // The callee will pop the argument stack as a tail call. Thus, we must | |||
| 1232 | // keep it 16-byte aligned. | |||
| 1233 | NumBytes = alignTo(OutInfo.getNextStackOffset(), ST.getStackAlignment()); | |||
| 1234 | ||||
| 1235 | // FPDiff will be negative if this tail call requires more space than we | |||
| 1236 | // would automatically have in our incoming argument space. Positive if we | |||
| 1237 | // actually shrink the stack. | |||
| 1238 | FPDiff = NumReusableBytes - NumBytes; | |||
| 1239 | ||||
| 1240 | // The stack pointer must be 16-byte aligned at all times it's used for a | |||
| 1241 | // memory operation, which in practice means at *all* times and in | |||
| 1242 | // particular across call boundaries. Therefore our own arguments started at | |||
| 1243 | // a 16-byte aligned SP and the delta applied for the tail call should | |||
| 1244 | // satisfy the same constraint. | |||
| 1245 | assert(isAligned(ST.getStackAlignment(), FPDiff) && | |||
| 1246 | "unaligned stack on tail call"); | |||
| 1247 | } | |||
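| | // Added worked example (not in the original source), assuming a 16-byte stack | |||
| | // alignment: if the caller reserved NumReusableBytes = 32 for incoming stack | |||
| | // arguments and the callee's operands need 48 bytes, then | |||
| | //   NumBytes = alignTo(48, 16) = 48 and FPDiff = 32 - 48 = -16, | |||
| | // i.e. the tail call needs 16 bytes more than the caller's argument area. | |||
| | // With NumReusableBytes = 64 instead, FPDiff = +16 and the area shrinks. | |||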
| 1248 | ||||
| 1249 | SmallVector<CCValAssign, 16> ArgLocs; | |||
| 1250 | CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext()); | |||
| 1251 | ||||
| 1252 | // We could pass MIB and directly add the implicit uses to the call | |||
| 1253 | // now. However, as an aesthetic choice, place implicit argument operands | |||
| 1254 | // after the ordinary user argument registers. | |||
| 1255 | SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs; | |||
| 1256 | ||||
| 1257 | if (Info.CallConv != CallingConv::AMDGPU_Gfx) { | |||
| 1258 | // With a fixed ABI, allocate fixed registers before user arguments. | |||
| 1259 | if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info)) | |||
| 1260 | return false; | |||
| 1261 | } | |||
| 1262 | ||||
| 1263 | OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg); | |||
| 1264 | ||||
| 1265 | if (!determineAssignments(Assigner, OutArgs, CCInfo)) | |||
| 1266 | return false; | |||
| 1267 | ||||
| 1268 | // Do the actual argument marshalling. | |||
| 1269 | AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff); | |||
| 1270 | if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder)) | |||
| 1271 | return false; | |||
| 1272 | ||||
| 1273 | handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, ImplicitArgRegs); | |||
| 1274 | ||||
| 1275 | // If we have -tailcallopt, we need to adjust the stack. We'll do the call | |||
| 1276 | // sequence start and end here. | |||
| 1277 | if (!IsSibCall) { | |||
| 1278 | MIB->getOperand(1).setImm(FPDiff); | |||
| 1279 | CallSeqStart.addImm(NumBytes).addImm(0); | |||
| 1280 | // End the call sequence *before* emitting the call. Normally, we would | |||
| 1281 | // tidy the frame up after the call. However, here, we've laid out the | |||
| 1282 | // parameters so that when SP is reset, they will be in the correct | |||
| 1283 | // location. | |||
| 1284 | MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN).addImm(NumBytes).addImm(0); | |||
| 1285 | } | |||
| 1286 | ||||
| 1287 | // Now we can add the actual call instruction to the correct basic block. | |||
| 1288 | MIRBuilder.insertInstr(MIB); | |||
| 1289 | ||||
| 1290 | // If the callee is a register operand, it is used by a target-specific | |||
| 1291 | // instruction and must therefore have a register class matching that | |||
| 1292 | // instruction's operand constraint. | |||
| 1293 | ||||
| 1294 | // FIXME: We should define regbankselectable call instructions to handle | |||
| 1295 | // divergent call targets. | |||
| 1296 | if (MIB->getOperand(0).isReg()) { | |||
| 1297 | MIB->getOperand(0).setReg(constrainOperandRegClass( | |||
| 1298 | MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB, | |||
| 1299 | MIB->getDesc(), MIB->getOperand(0), 0)); | |||
| 1300 | } | |||
| 1301 | ||||
| 1302 | MF.getFrameInfo().setHasTailCall(); | |||
| 1303 | Info.LoweredTailCall = true; | |||
| 1304 | return true; | |||
| 1305 | } | |||
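| | // Added note (not in the original source): the two flavours handled above | |||
| | // differ only in stack bookkeeping. A sibcall (no -tailcallopt) reuses the | |||
| | // caller's incoming argument area, so FPDiff stays 0 and no ADJCALLSTACKUP / | |||
| | // ADJCALLSTACKDOWN pair is emitted; a guaranteed tail call may resize the | |||
| | // area, so the call-frame pseudos and the FPDiff immediate are emitted before | |||
| | // the call instruction itself is inserted. | |||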
| 1306 | ||||
| 1307 | bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, | |||
| 1308 | CallLoweringInfo &Info) const { | |||
| 1309 | if (Info.IsVarArg) { | |||
| 1310 | LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n"); | |||
| 1311 | return false; | |||
| 1312 | } | |||
| 1313 | ||||
| 1314 | MachineFunction &MF = MIRBuilder.getMF(); | |||
| 1315 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
| 1316 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
| 1317 | ||||
| 1318 | const Function &F = MF.getFunction(); | |||
| 1319 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
| 1320 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
| 1321 | const DataLayout &DL = F.getParent()->getDataLayout(); | |||
| 1322 | ||||
| 1323 | SmallVector<ArgInfo, 8> OutArgs; | |||
| 1324 | for (auto &OrigArg : Info.OrigArgs) | |||
| 1325 | splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv); | |||
| 1326 | ||||
| 1327 | SmallVector<ArgInfo, 8> InArgs; | |||
| 1328 | if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) | |||
| 1329 | splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv); | |||
| 1330 | ||||
| 1331 | // If we can lower as a tail call, do that instead. | |||
| 1332 | bool CanTailCallOpt = | |||
| 1333 | isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs); | |||
| 1334 | ||||
| 1335 | // We must emit a tail call if we have musttail. | |||
| 1336 | if (Info.IsMustTailCall && !CanTailCallOpt) { | |||
| 1337 | LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n"); | |||
| 1338 | return false; | |||
| 1339 | } | |||
| 1340 | ||||
| 1341 | Info.IsTailCall = CanTailCallOpt; | |||
| 1342 | if (CanTailCallOpt) | |||
| 1343 | return lowerTailCall(MIRBuilder, Info, OutArgs); | |||
| 1344 | ||||
| 1345 | // Find out which ABI gets to decide where things go. | |||
| 1346 | CCAssignFn *AssignFnFixed; | |||
| 1347 | CCAssignFn *AssignFnVarArg; | |||
| 1348 | std::tie(AssignFnFixed, AssignFnVarArg) = | |||
| 1349 | getAssignFnsForCC(Info.CallConv, TLI); | |||
| 1350 | ||||
| 1351 | MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP) | |||
| 1352 | .addImm(0) | |||
| 1353 | .addImm(0); | |||
| 1354 | ||||
| 1355 | // Create a temporarily-floating call instruction so we can add the implicit | |||
| 1356 | // uses of arg registers. | |||
| 1357 | unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false, Info.CallConv); | |||
| 1358 | ||||
| 1359 | auto MIB = MIRBuilder.buildInstrNoInsert(Opc); | |||
| 1360 | MIB.addDef(TRI->getReturnAddressReg(MF)); | |||
| 1361 | ||||
| 1362 | if (!addCallTargetOperands(MIB, MIRBuilder, Info)) | |||
| 1363 | return false; | |||
| 1364 | ||||
| 1365 | // Tell the call which registers are clobbered. | |||
| 1366 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv); | |||
| 1367 | MIB.addRegMask(Mask); | |||
| 1368 | ||||
| 1369 | SmallVector<CCValAssign, 16> ArgLocs; | |||
| 1370 | CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext()); | |||
| 1371 | ||||
| 1372 | // We could pass MIB and directly add the implicit uses to the call | |||
| 1373 | // now. However, as an aesthetic choice, place implicit argument operands | |||
| 1374 | // after the ordinary user argument registers. | |||
| 1375 | SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs; | |||
| 1376 | ||||
| 1377 | if (Info.CallConv != CallingConv::AMDGPU_Gfx) { | |||
| 1378 | // With a fixed ABI, allocate fixed registers before user arguments. | |||
| 1379 | if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info)) | |||
| 1380 | return false; | |||
| 1381 | } | |||
| 1382 | ||||
| 1383 | // Do the actual argument marshalling. | |||
| 1384 | SmallVector<Register, 8> PhysRegs; | |||
| 1385 | ||||
| 1386 | OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg); | |||
| 1387 | if (!determineAssignments(Assigner, OutArgs, CCInfo)) | |||
| 1388 | return false; | |||
| 1389 | ||||
| 1390 | AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false); | |||
| 1391 | if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder)) | |||
| 1392 | return false; | |||
| 1393 | ||||
| 1394 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
| 1395 | ||||
| 1396 | handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, ImplicitArgRegs); | |||
| 1397 | ||||
| 1398 | // Get a count of how many bytes are to be pushed on the stack. | |||
| 1399 | unsigned NumBytes = CCInfo.getNextStackOffset(); | |||
| 1400 | ||||
| 1401 | // If the callee is a register operand, it is used by a target-specific | |||
| 1402 | // instruction and must therefore have a register class matching that | |||
| 1403 | // instruction's operand constraint. | |||
| 1404 | ||||
| 1405 | // FIXME: We should define regbankselectable call instructions to handle | |||
| 1406 | // divergent call targets. | |||
| 1407 | if (MIB->getOperand(1).isReg()) { | |||
| 1408 | MIB->getOperand(1).setReg(constrainOperandRegClass( | |||
| 1409 | MF, *TRI, MRI, *ST.getInstrInfo(), | |||
| 1410 | *ST.getRegBankInfo(), *MIB, MIB->getDesc(), MIB->getOperand(1), | |||
| 1411 | 1)); | |||
| 1412 | } | |||
| 1413 | ||||
| 1414 | // Now we can add the actual call instruction to the correct position. | |||
| 1415 | MIRBuilder.insertInstr(MIB); | |||
| 1416 | ||||
| 1417 | // Finally we can copy the returned value back into its virtual register. In | |||
| 1418 | // symmetry with the arguments, the physical register must be an implicit | |||
| 1419 | // def of the call instruction. | |||
| 1420 | if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) { | |||
| 1421 | CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv, | |||
| 1422 | Info.IsVarArg); | |||
| 1423 | IncomingValueAssigner Assigner(RetAssignFn); | |||
| 1424 | CallReturnHandler Handler(MIRBuilder, MRI, MIB); | |||
| 1425 | if (!determineAndHandleAssignments(Handler, Assigner, InArgs, MIRBuilder, | |||
| 1426 | Info.CallConv, Info.IsVarArg)) | |||
| 1427 | return false; | |||
| 1428 | } | |||
| 1429 | ||||
| 1430 | uint64_t CalleePopBytes = NumBytes; | |||
| 1431 | ||||
| 1432 | MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN) | |||
| 1433 | .addImm(0) | |||
| 1434 | .addImm(CalleePopBytes); | |||
| 1435 | ||||
| 1436 | if (!Info.CanLowerReturn) { | |||
| 1437 | insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs, | |||
| 1438 | Info.DemoteRegister, Info.DemoteStackIndex); | |||
| 1439 | } | |||
| 1440 | ||||
| 1441 | return true; | |||
| 1442 | } |
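| | // Added overview (not in the original source): for a non-tail call the | |||
| | // sequence built above is ADJCALLSTACKUP, a floating call instruction with the | |||
| | // callee and register mask, special-input SGPR copies (non-Gfx conventions), | |||
| | // explicit argument assignment, implicit-argument copies, insertion of the | |||
| | // call, return-value handling, ADJCALLSTACKDOWN with the callee-popped byte | |||
| | // count, and finally sret reloads when the return type could not be lowered | |||
| | // in registers. | |||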