File: build/llvm-toolchain-snapshot-15~++20220301100735+026fe5ffc352/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
Warning: line 390, column 25: Called C++ object pointer is null
1 | //===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | /// | |||
9 | /// \file | |||
10 | /// This file implements the lowering of LLVM calls to machine code calls for | |||
11 | /// GlobalISel. | |||
12 | /// | |||
13 | //===----------------------------------------------------------------------===// | |||
14 | ||||
15 | #include "AMDGPUCallLowering.h" | |||
16 | #include "AMDGPU.h" | |||
17 | #include "AMDGPULegalizerInfo.h" | |||
18 | #include "AMDGPUTargetMachine.h" | |||
19 | #include "SIMachineFunctionInfo.h" | |||
20 | #include "SIRegisterInfo.h" | |||
21 | #include "llvm/CodeGen/Analysis.h" | |||
22 | #include "llvm/CodeGen/FunctionLoweringInfo.h" | |||
23 | #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" | |||
24 | #include "llvm/IR/IntrinsicsAMDGPU.h" | |||
25 | ||||
26 | #define DEBUG_TYPE"amdgpu-call-lowering" "amdgpu-call-lowering" | |||
27 | ||||
28 | using namespace llvm; | |||
29 | ||||
30 | namespace { | |||
31 | ||||
32 | /// Wrapper around extendRegister to ensure we extend to a full 32-bit register. | |||
33 | static Register extendRegisterMin32(CallLowering::ValueHandler &Handler, | |||
34 | Register ValVReg, CCValAssign &VA) { | |||
35 | if (VA.getLocVT().getSizeInBits() < 32) { | |||
36 | // 16-bit types are reported as legal for 32-bit registers. We need to | |||
37 | // extend and do a 32-bit copy to avoid the verifier complaining about it. | |||
38 | return Handler.MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0); | |||
39 | } | |||
40 | ||||
41 | return Handler.extendRegister(ValVReg, VA); | |||
42 | } | |||
43 | ||||
44 | struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler { | |||
45 | AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI, | |||
46 | MachineInstrBuilder MIB) | |||
47 | : OutgoingValueHandler(B, MRI), MIB(MIB) {} | |||
48 | ||||
49 | MachineInstrBuilder MIB; | |||
50 | ||||
51 | Register getStackAddress(uint64_t Size, int64_t Offset, | |||
52 | MachinePointerInfo &MPO, | |||
53 | ISD::ArgFlagsTy Flags) override { | |||
54 | llvm_unreachable("not implemented"); | |||
55 | } | |||
56 | ||||
57 | void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, | |||
58 | MachinePointerInfo &MPO, CCValAssign &VA) override { | |||
59 | llvm_unreachable("not implemented"); | |||
60 | } | |||
61 | ||||
62 | void assignValueToReg(Register ValVReg, Register PhysReg, | |||
63 | CCValAssign VA) override { | |||
64 | Register ExtReg = extendRegisterMin32(*this, ValVReg, VA); | |||
65 | ||||
66 | // If this is a scalar return, insert a readfirstlane just in case the value | |||
67 | // ends up in a VGPR. | |||
68 | // FIXME: Assert this is a shader return. | |||
69 | const SIRegisterInfo *TRI | |||
70 | = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo()); | |||
71 | if (TRI->isSGPRReg(MRI, PhysReg)) { | |||
72 | auto ToSGPR = MIRBuilder.buildIntrinsic(Intrinsic::amdgcn_readfirstlane, | |||
73 | {MRI.getType(ExtReg)}, false) | |||
74 | .addReg(ExtReg); | |||
75 | ExtReg = ToSGPR.getReg(0); | |||
76 | } | |||
77 | ||||
78 | MIRBuilder.buildCopy(PhysReg, ExtReg); | |||
79 | MIB.addUse(PhysReg, RegState::Implicit); | |||
80 | } | |||
81 | }; | |||
82 | ||||
83 | struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler { | |||
84 | uint64_t StackUsed = 0; | |||
85 | ||||
86 | AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI) | |||
87 | : IncomingValueHandler(B, MRI) {} | |||
88 | ||||
89 | Register getStackAddress(uint64_t Size, int64_t Offset, | |||
90 | MachinePointerInfo &MPO, | |||
91 | ISD::ArgFlagsTy Flags) override { | |||
92 | auto &MFI = MIRBuilder.getMF().getFrameInfo(); | |||
93 | ||||
94 | // Byval is assumed to be writable memory, but other stack passed arguments | |||
95 | // are not. | |||
96 | const bool IsImmutable = !Flags.isByVal(); | |||
97 | int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable); | |||
98 | MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI); | |||
99 | auto AddrReg = MIRBuilder.buildFrameIndex( | |||
100 | LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI); | |||
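| // Record the high-water mark of incoming fixed-stack usage. | |||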
101 | StackUsed = std::max(StackUsed, Size + Offset); | |||
102 | return AddrReg.getReg(0); | |||
103 | } | |||
104 | ||||
105 | void assignValueToReg(Register ValVReg, Register PhysReg, | |||
106 | CCValAssign VA) override { | |||
107 | markPhysRegUsed(PhysReg); | |||
108 | ||||
109 | if (VA.getLocVT().getSizeInBits() < 32) { | |||
110 | // 16-bit types are reported as legal for 32-bit registers. We need to do | |||
111 | // a 32-bit copy, and truncate to avoid the verifier complaining about it. | |||
112 | auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg); | |||
113 | ||||
114 | // If we have signext/zeroext, it applies to the whole 32-bit register | |||
115 | // before truncation. | |||
116 | auto Extended = | |||
117 | buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT())); | |||
118 | MIRBuilder.buildTrunc(ValVReg, Extended); | |||
119 | return; | |||
120 | } | |||
121 | ||||
122 | IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA); | |||
123 | } | |||
124 | ||||
125 | void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, | |||
126 | MachinePointerInfo &MPO, CCValAssign &VA) override { | |||
127 | MachineFunction &MF = MIRBuilder.getMF(); | |||
128 | ||||
129 | auto MMO = MF.getMachineMemOperand( | |||
130 | MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy, | |||
131 | inferAlignFromPtrInfo(MF, MPO)); | |||
132 | MIRBuilder.buildLoad(ValVReg, Addr, *MMO); | |||
133 | } | |||
134 | ||||
135 | /// How the physical register gets marked varies between formal | |||
136 | /// parameters (it's a basic-block live-in), and a call instruction | |||
137 | /// (it's an implicit-def of the call instruction). | |||
138 | virtual void markPhysRegUsed(unsigned PhysReg) = 0; | |||
139 | }; | |||
140 | ||||
141 | struct FormalArgHandler : public AMDGPUIncomingArgHandler { | |||
142 | FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI) | |||
143 | : AMDGPUIncomingArgHandler(B, MRI) {} | |||
144 | ||||
145 | void markPhysRegUsed(unsigned PhysReg) override { | |||
146 | MIRBuilder.getMBB().addLiveIn(PhysReg); | |||
147 | } | |||
148 | }; | |||
149 | ||||
150 | struct CallReturnHandler : public AMDGPUIncomingArgHandler { | |||
151 | CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI, | |||
152 | MachineInstrBuilder MIB) | |||
153 | : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {} | |||
154 | ||||
155 | void markPhysRegUsed(unsigned PhysReg) override { | |||
156 | MIB.addDef(PhysReg, RegState::Implicit); | |||
157 | } | |||
158 | ||||
159 | MachineInstrBuilder MIB; | |||
160 | }; | |||
161 | ||||
162 | struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler { | |||
163 | /// For tail calls, the byte offset of the call's argument area from the | |||
164 | /// callee's. Unused elsewhere. | |||
165 | int FPDiff; | |||
166 | ||||
167 | // Cache the SP register vreg if we need it more than once in this call site. | |||
168 | Register SPReg; | |||
169 | ||||
170 | bool IsTailCall; | |||
171 | ||||
172 | AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder, | |||
173 | MachineRegisterInfo &MRI, MachineInstrBuilder MIB, | |||
174 | bool IsTailCall = false, int FPDiff = 0) | |||
175 | : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff), | |||
176 | IsTailCall(IsTailCall) {} | |||
177 | ||||
178 | Register getStackAddress(uint64_t Size, int64_t Offset, | |||
179 | MachinePointerInfo &MPO, | |||
180 | ISD::ArgFlagsTy Flags) override { | |||
181 | MachineFunction &MF = MIRBuilder.getMF(); | |||
182 | const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32); | |||
183 | const LLT S32 = LLT::scalar(32); | |||
184 | ||||
185 | if (IsTailCall) { | |||
186 | Offset += FPDiff; | |||
187 | int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true); | |||
188 | auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI); | |||
189 | MPO = MachinePointerInfo::getFixedStack(MF, FI); | |||
190 | return FIReg.getReg(0); | |||
191 | } | |||
192 | ||||
193 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
194 | ||||
195 | if (!SPReg) { | |||
196 | const GCNSubtarget &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>(); | |||
197 | if (ST.enableFlatScratch()) { | |||
198 | // The stack is accessed unswizzled, so we can use a regular copy. | |||
199 | SPReg = MIRBuilder.buildCopy(PtrTy, | |||
200 | MFI->getStackPtrOffsetReg()).getReg(0); | |||
201 | } else { | |||
202 | // The address we produce here, without knowing the use context, is going | |||
203 | // to be interpreted as a vector address, so we need to convert to a | |||
204 | // swizzled address. | |||
205 | SPReg = MIRBuilder.buildInstr(AMDGPU::G_AMDGPU_WAVE_ADDRESS, {PtrTy}, | |||
206 | {MFI->getStackPtrOffsetReg()}).getReg(0); | |||
207 | } | |||
208 | } | |||
209 | ||||
210 | auto OffsetReg = MIRBuilder.buildConstant(S32, Offset); | |||
211 | ||||
212 | auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg); | |||
213 | MPO = MachinePointerInfo::getStack(MF, Offset); | |||
214 | return AddrReg.getReg(0); | |||
215 | } | |||
216 | ||||
217 | void assignValueToReg(Register ValVReg, Register PhysReg, | |||
218 | CCValAssign VA) override { | |||
219 | MIB.addUse(PhysReg, RegState::Implicit); | |||
220 | Register ExtReg = extendRegisterMin32(*this, ValVReg, VA); | |||
221 | MIRBuilder.buildCopy(PhysReg, ExtReg); | |||
222 | } | |||
223 | ||||
224 | void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, | |||
225 | MachinePointerInfo &MPO, CCValAssign &VA) override { | |||
226 | MachineFunction &MF = MIRBuilder.getMF(); | |||
227 | uint64_t LocMemOffset = VA.getLocMemOffset(); | |||
228 | const auto &ST = MF.getSubtarget<GCNSubtarget>(); | |||
229 | ||||
230 | auto MMO = MF.getMachineMemOperand( | |||
231 | MPO, MachineMemOperand::MOStore, MemTy, | |||
232 | commonAlignment(ST.getStackAlignment(), LocMemOffset)); | |||
233 | MIRBuilder.buildStore(ValVReg, Addr, *MMO); | |||
234 | } | |||
235 | ||||
236 | void assignValueToAddress(const CallLowering::ArgInfo &Arg, | |||
237 | unsigned ValRegIndex, Register Addr, LLT MemTy, | |||
238 | MachinePointerInfo &MPO, CCValAssign &VA) override { | |||
239 | Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt | |||
240 | ? extendRegister(Arg.Regs[ValRegIndex], VA) | |||
241 | : Arg.Regs[ValRegIndex]; | |||
242 | assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA); | |||
243 | } | |||
244 | }; | |||
245 | } | |||
246 | ||||
247 | AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI) | |||
248 | : CallLowering(&TLI) { | |||
249 | } | |||
250 | ||||
251 | // FIXME: Compatibility shim | |||
252 | static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) { | |||
253 | switch (MIOpc) { | |||
254 | case TargetOpcode::G_SEXT: | |||
255 | return ISD::SIGN_EXTEND; | |||
256 | case TargetOpcode::G_ZEXT: | |||
257 | return ISD::ZERO_EXTEND; | |||
258 | case TargetOpcode::G_ANYEXT: | |||
259 | return ISD::ANY_EXTEND; | |||
260 | default: | |||
261 | llvm_unreachable("not an extend opcode"); | |||
262 | } | |||
263 | } | |||
264 | ||||
265 | bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF, | |||
266 | CallingConv::ID CallConv, | |||
267 | SmallVectorImpl<BaseArgInfo> &Outs, | |||
268 | bool IsVarArg) const { | |||
270 | // Entry functions (shaders and kernels) always succeed here. Vector types should be explicitly handled by the CC. | |||
270 | if (AMDGPU::isEntryFunctionCC(CallConv)) | |||
271 | return true; | |||
272 | ||||
273 | SmallVector<CCValAssign, 16> ArgLocs; | |||
274 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
275 | CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, | |||
276 | MF.getFunction().getContext()); | |||
277 | ||||
278 | return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg)); | |||
279 | } | |||
280 | ||||
281 | /// Lower the return value for the already existing \p Ret. This assumes that | |||
282 | /// \p B's insertion point is correct. | |||
283 | bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B, | |||
284 | const Value *Val, ArrayRef<Register> VRegs, | |||
285 | MachineInstrBuilder &Ret) const { | |||
286 | if (!Val) | |||
287 | return true; | |||
288 | ||||
289 | auto &MF = B.getMF(); | |||
290 | const auto &F = MF.getFunction(); | |||
291 | const DataLayout &DL = MF.getDataLayout(); | |||
292 | MachineRegisterInfo *MRI = B.getMRI(); | |||
293 | LLVMContext &Ctx = F.getContext(); | |||
294 | ||||
295 | CallingConv::ID CC = F.getCallingConv(); | |||
296 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
297 | ||||
298 | SmallVector<EVT, 8> SplitEVTs; | |||
299 | ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs); | |||
300 | assert(VRegs.size() == SplitEVTs.size() && | |||
301 | "For each split Type there should be exactly one VReg."); | |||
302 | ||||
303 | SmallVector<ArgInfo, 8> SplitRetInfos; | |||
304 | ||||
305 | for (unsigned i = 0; i < SplitEVTs.size(); ++i) { | |||
306 | EVT VT = SplitEVTs[i]; | |||
307 | Register Reg = VRegs[i]; | |||
308 | ArgInfo RetInfo(Reg, VT.getTypeForEVT(Ctx), 0); | |||
309 | setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F); | |||
310 | ||||
311 | if (VT.isScalarInteger()) { | |||
312 | unsigned ExtendOp = TargetOpcode::G_ANYEXT; | |||
313 | if (RetInfo.Flags[0].isSExt()) { | |||
314 | assert(RetInfo.Regs.size() == 1 && "expect only simple return values"); | |||
315 | ExtendOp = TargetOpcode::G_SEXT; | |||
316 | } else if (RetInfo.Flags[0].isZExt()) { | |||
317 | assert(RetInfo.Regs.size() == 1 && "expect only simple return values"); | |||
318 | ExtendOp = TargetOpcode::G_ZEXT; | |||
319 | } | |||
320 | ||||
321 | EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT, | |||
322 | extOpcodeToISDExtOpcode(ExtendOp)); | |||
323 | if (ExtVT != VT) { | |||
324 | RetInfo.Ty = ExtVT.getTypeForEVT(Ctx); | |||
325 | LLT ExtTy = getLLTForType(*RetInfo.Ty, DL); | |||
326 | Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0); | |||
327 | } | |||
328 | } | |||
329 | ||||
330 | if (Reg != RetInfo.Regs[0]) { | |||
331 | RetInfo.Regs[0] = Reg; | |||
332 | // Reset the arg flags after modifying Reg. | |||
333 | setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F); | |||
334 | } | |||
335 | ||||
336 | splitToValueTypes(RetInfo, SplitRetInfos, DL, CC); | |||
337 | } | |||
338 | ||||
339 | CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg()); | |||
340 | ||||
341 | OutgoingValueAssigner Assigner(AssignFn); | |||
342 | AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret); | |||
343 | return determineAndHandleAssignments(RetHandler, Assigner, SplitRetInfos, B, | |||
344 | CC, F.isVarArg()); | |||
345 | } | |||
346 | ||||
347 | bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val, | |||
348 | ArrayRef<Register> VRegs, | |||
349 | FunctionLoweringInfo &FLI) const { | |||
350 | ||||
351 | MachineFunction &MF = B.getMF(); | |||
352 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
353 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
354 | MFI->setIfReturnsVoid(!Val); | |||
| ||||
355 | ||||
356 | assert(!Val == VRegs.empty() && "Return value without a vreg"); | |||
357 | ||||
358 | CallingConv::ID CC = B.getMF().getFunction().getCallingConv(); | |||
359 | const bool IsShader = AMDGPU::isShader(CC); | |||
360 | const bool IsWaveEnd = | |||
361 | (IsShader && MFI->returnsVoid()) || AMDGPU::isKernel(CC); | |||
362 | if (IsWaveEnd) { | |||
363 | B.buildInstr(AMDGPU::S_ENDPGM) | |||
364 | .addImm(0); | |||
365 | return true; | |||
366 | } | |||
367 | ||||
368 | auto const &ST = MF.getSubtarget<GCNSubtarget>(); | |||
369 | ||||
370 | unsigned ReturnOpc = 0; | |||
371 | if (IsShader) | |||
372 | ReturnOpc = AMDGPU::SI_RETURN_TO_EPILOG; | |||
373 | else if (CC == CallingConv::AMDGPU_Gfx) | |||
374 | ReturnOpc = AMDGPU::S_SETPC_B64_return_gfx; | |||
375 | else | |||
376 | ReturnOpc = AMDGPU::S_SETPC_B64_return; | |||
377 | ||||
378 | auto Ret = B.buildInstrNoInsert(ReturnOpc); | |||
379 | Register ReturnAddrVReg; | |||
380 | if (ReturnOpc == AMDGPU::S_SETPC_B64_return) { | |||
381 | ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass); | |||
382 | Ret.addUse(ReturnAddrVReg); | |||
383 | } else if (ReturnOpc == AMDGPU::S_SETPC_B64_return_gfx) { | |||
384 | ReturnAddrVReg = | |||
385 | MRI.createVirtualRegister(&AMDGPU::Gfx_CCR_SGPR_64RegClass); | |||
386 | Ret.addUse(ReturnAddrVReg); | |||
387 | } | |||
388 | ||||
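| // Note: the next statement is the one the analyzer report above flags | |||
| // (line 390, "Called C++ object pointer is null"): when CanLowerReturn | |||
| // is false, Val->getType() is dereferenced without a null check, and Val | |||
| // may be null for void returns (see setIfReturnsVoid(!Val) above). In | |||
| // practice a void return should always be lowerable, so the two cases | |||
| // shouldn't overlap, but nothing here enforces that. | |||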
389 | if (!FLI.CanLowerReturn) | |||
390 | insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister); | |||
391 | else if (!lowerReturnVal(B, Val, VRegs, Ret)) | |||
392 | return false; | |||
393 | ||||
394 | if (ReturnOpc == AMDGPU::S_SETPC_B64_return || | |||
395 | ReturnOpc == AMDGPU::S_SETPC_B64_return_gfx) { | |||
396 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
397 | Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF), | |||
398 | &AMDGPU::SGPR_64RegClass); | |||
399 | B.buildCopy(ReturnAddrVReg, LiveInReturn); | |||
400 | } | |||
401 | ||||
402 | // TODO: Handle CalleeSavedRegsViaCopy. | |||
403 | ||||
404 | B.insertInstr(Ret); | |||
405 | return true; | |||
406 | } | |||
407 | ||||
408 | void AMDGPUCallLowering::lowerParameterPtr(Register DstReg, MachineIRBuilder &B, | |||
409 | uint64_t Offset) const { | |||
410 | MachineFunction &MF = B.getMF(); | |||
411 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
412 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
413 | Register KernArgSegmentPtr = | |||
414 | MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); | |||
415 | Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr); | |||
416 | ||||
417 | auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset); | |||
418 | ||||
419 | B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg); | |||
420 | } | |||
421 | ||||
422 | void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg, | |||
423 | uint64_t Offset, | |||
424 | Align Alignment) const { | |||
425 | MachineFunction &MF = B.getMF(); | |||
426 | const Function &F = MF.getFunction(); | |||
427 | const DataLayout &DL = F.getParent()->getDataLayout(); | |||
428 | MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); | |||
429 | ||||
430 | LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64); | |||
431 | ||||
432 | SmallVector<ArgInfo, 32> SplitArgs; | |||
433 | SmallVector<uint64_t> FieldOffsets; | |||
434 | splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv(), &FieldOffsets); | |||
435 | ||||
436 | unsigned Idx = 0; | |||
437 | for (ArgInfo &SplitArg : SplitArgs) { | |||
438 | Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy); | |||
439 | lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]); | |||
440 | ||||
441 | LLT ArgTy = getLLTForType(*SplitArg.Ty, DL); | |||
442 | if (SplitArg.Flags[0].isPointer()) { | |||
443 | // Compensate for losing pointeriness in splitValueTypes. | |||
444 | LLT PtrTy = LLT::pointer(SplitArg.Flags[0].getPointerAddrSpace(), | |||
445 | ArgTy.getScalarSizeInBits()); | |||
446 | ArgTy = ArgTy.isVector() ? LLT::vector(ArgTy.getElementCount(), PtrTy) | |||
447 | : PtrTy; | |||
448 | } | |||
449 | ||||
450 | MachineMemOperand *MMO = MF.getMachineMemOperand( | |||
451 | PtrInfo, | |||
452 | MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | | |||
453 | MachineMemOperand::MOInvariant, | |||
454 | ArgTy, commonAlignment(Alignment, FieldOffsets[Idx])); | |||
455 | ||||
456 | assert(SplitArg.Regs.size() == 1); | |||
457 | ||||
458 | B.buildLoad(SplitArg.Regs[0], PtrReg, *MMO); | |||
459 | ++Idx; | |||
460 | } | |||
461 | } | |||
462 | ||||
463 | // Allocate special inputs passed in user SGPRs. | |||
464 | static void allocateHSAUserSGPRs(CCState &CCInfo, | |||
465 | MachineIRBuilder &B, | |||
466 | MachineFunction &MF, | |||
467 | const SIRegisterInfo &TRI, | |||
468 | SIMachineFunctionInfo &Info) { | |||
469 | // FIXME: How should these inputs interact with inreg / custom SGPR inputs? | |||
470 | if (Info.hasPrivateSegmentBuffer()) { | |||
471 | Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI); | |||
472 | MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass); | |||
473 | CCInfo.AllocateReg(PrivateSegmentBufferReg); | |||
474 | } | |||
475 | ||||
476 | if (Info.hasDispatchPtr()) { | |||
477 | Register DispatchPtrReg = Info.addDispatchPtr(TRI); | |||
478 | MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); | |||
479 | CCInfo.AllocateReg(DispatchPtrReg); | |||
480 | } | |||
481 | ||||
482 | if (Info.hasQueuePtr()) { | |||
483 | Register QueuePtrReg = Info.addQueuePtr(TRI); | |||
484 | MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); | |||
485 | CCInfo.AllocateReg(QueuePtrReg); | |||
486 | } | |||
487 | ||||
488 | if (Info.hasKernargSegmentPtr()) { | |||
489 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
490 | Register InputPtrReg = Info.addKernargSegmentPtr(TRI); | |||
491 | const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64); | |||
492 | Register VReg = MRI.createGenericVirtualRegister(P4); | |||
493 | MRI.addLiveIn(InputPtrReg, VReg); | |||
494 | B.getMBB().addLiveIn(InputPtrReg); | |||
495 | B.buildCopy(VReg, InputPtrReg); | |||
496 | CCInfo.AllocateReg(InputPtrReg); | |||
497 | } | |||
498 | ||||
499 | if (Info.hasDispatchID()) { | |||
500 | Register DispatchIDReg = Info.addDispatchID(TRI); | |||
501 | MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); | |||
502 | CCInfo.AllocateReg(DispatchIDReg); | |||
503 | } | |||
504 | ||||
505 | if (Info.hasFlatScratchInit()) { | |||
506 | Register FlatScratchInitReg = Info.addFlatScratchInit(TRI); | |||
507 | MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); | |||
508 | CCInfo.AllocateReg(FlatScratchInitReg); | |||
509 | } | |||
510 | ||||
511 | // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read | |||
512 | // these from the dispatch pointer. | |||
513 | } | |||
514 | ||||
515 | bool AMDGPUCallLowering::lowerFormalArgumentsKernel( | |||
516 | MachineIRBuilder &B, const Function &F, | |||
517 | ArrayRef<ArrayRef<Register>> VRegs) const { | |||
518 | MachineFunction &MF = B.getMF(); | |||
519 | const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>(); | |||
520 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
521 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
522 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
523 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
524 | const DataLayout &DL = F.getParent()->getDataLayout(); | |||
525 | ||||
526 | Info->allocateModuleLDSGlobal(F.getParent()); | |||
527 | ||||
528 | SmallVector<CCValAssign, 16> ArgLocs; | |||
529 | CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext()); | |||
530 | ||||
531 | allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info); | |||
532 | ||||
533 | unsigned i = 0; | |||
534 | const Align KernArgBaseAlign(16); | |||
535 | const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F); | |||
536 | uint64_t ExplicitArgOffset = 0; | |||
537 | ||||
538 | // TODO: Align down to dword alignment and extract bits for extending loads. | |||
539 | for (auto &Arg : F.args()) { | |||
540 | const bool IsByRef = Arg.hasByRefAttr(); | |||
541 | Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType(); | |||
542 | unsigned AllocSize = DL.getTypeAllocSize(ArgTy); | |||
543 | if (AllocSize == 0) | |||
544 | continue; | |||
545 | ||||
546 | MaybeAlign ABIAlign = IsByRef ? Arg.getParamAlign() : None; | |||
547 | if (!ABIAlign) | |||
548 | ABIAlign = DL.getABITypeAlign(ArgTy); | |||
549 | ||||
550 | uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset; | |||
551 | ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize; | |||
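| // ArgOffset is this argument's absolute offset in the kernarg segment | |||
| // (including BaseOffset); ExplicitArgOffset tracks the running allocation | |||
| // used to place the next argument. | |||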
552 | ||||
553 | if (Arg.use_empty()) { | |||
554 | ++i; | |||
555 | continue; | |||
556 | } | |||
557 | ||||
558 | Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset); | |||
559 | ||||
560 | if (IsByRef) { | |||
561 | unsigned ByRefAS = cast<PointerType>(Arg.getType())->getAddressSpace(); | |||
562 | ||||
563 | assert(VRegs[i].size() == 1 && | |||
564 | "expected only one register for byval pointers"); | |||
565 | if (ByRefAS == AMDGPUAS::CONSTANT_ADDRESS) { | |||
566 | lowerParameterPtr(VRegs[i][0], B, ArgOffset); | |||
567 | } else { | |||
568 | const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64); | |||
569 | Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy); | |||
570 | lowerParameterPtr(PtrReg, B, ArgOffset); | |||
571 | ||||
572 | B.buildAddrSpaceCast(VRegs[i][0], PtrReg); | |||
573 | } | |||
574 | } else { | |||
575 | ArgInfo OrigArg(VRegs[i], Arg, i); | |||
576 | const unsigned OrigArgIdx = i + AttributeList::FirstArgIndex; | |||
577 | setArgFlags(OrigArg, OrigArgIdx, DL, F); | |||
578 | lowerParameter(B, OrigArg, ArgOffset, Alignment); | |||
579 | } | |||
580 | ||||
581 | ++i; | |||
582 | } | |||
583 | ||||
584 | TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); | |||
585 | TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false); | |||
586 | return true; | |||
587 | } | |||
588 | ||||
589 | bool AMDGPUCallLowering::lowerFormalArguments( | |||
590 | MachineIRBuilder &B, const Function &F, ArrayRef<ArrayRef<Register>> VRegs, | |||
591 | FunctionLoweringInfo &FLI) const { | |||
592 | CallingConv::ID CC = F.getCallingConv(); | |||
593 | ||||
594 | // The infrastructure for normal calling convention lowering is essentially | |||
595 | // useless for kernels. We want to avoid any kind of legalization or argument | |||
596 | // splitting. | |||
597 | if (CC == CallingConv::AMDGPU_KERNEL) | |||
598 | return lowerFormalArgumentsKernel(B, F, VRegs); | |||
599 | ||||
600 | const bool IsGraphics = AMDGPU::isGraphics(CC); | |||
601 | const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC); | |||
602 | ||||
603 | MachineFunction &MF = B.getMF(); | |||
604 | MachineBasicBlock &MBB = B.getMBB(); | |||
605 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
606 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
607 | const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>(); | |||
608 | const SIRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
609 | const DataLayout &DL = F.getParent()->getDataLayout(); | |||
610 | ||||
611 | Info->allocateModuleLDSGlobal(F.getParent()); | |||
612 | ||||
613 | SmallVector<CCValAssign, 16> ArgLocs; | |||
614 | CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext()); | |||
615 | ||||
616 | if (!IsEntryFunc) { | |||
617 | Register ReturnAddrReg = TRI->getReturnAddressReg(MF); | |||
618 | Register LiveInReturn = MF.addLiveIn(ReturnAddrReg, | |||
619 | &AMDGPU::SGPR_64RegClass); | |||
620 | MBB.addLiveIn(ReturnAddrReg); | |||
621 | B.buildCopy(LiveInReturn, ReturnAddrReg); | |||
622 | } | |||
623 | ||||
624 | if (Info->hasImplicitBufferPtr()) { | |||
625 | Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI); | |||
626 | MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass); | |||
627 | CCInfo.AllocateReg(ImplicitBufferPtrReg); | |||
628 | } | |||
629 | ||||
630 | // FIXME: This probably isn't defined for mesa | |||
631 | if (Info->hasFlatScratchInit() && !Subtarget.isAmdPalOS()) { | |||
632 | Register FlatScratchInitReg = Info->addFlatScratchInit(*TRI); | |||
633 | MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); | |||
634 | CCInfo.AllocateReg(FlatScratchInitReg); | |||
635 | } | |||
636 | ||||
637 | SmallVector<ArgInfo, 32> SplitArgs; | |||
638 | unsigned Idx = 0; | |||
639 | unsigned PSInputNum = 0; | |||
640 | ||||
641 | // Insert the hidden sret parameter if the return value won't fit in the | |||
642 | // return registers. | |||
643 | if (!FLI.CanLowerReturn) | |||
644 | insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL); | |||
645 | ||||
646 | for (auto &Arg : F.args()) { | |||
647 | if (DL.getTypeStoreSize(Arg.getType()) == 0) | |||
648 | continue; | |||
649 | ||||
650 | const bool InReg = Arg.hasAttribute(Attribute::InReg); | |||
651 | ||||
652 | // SGPR (inreg) arguments to non-graphics functions are not implemented. | |||
653 | if (!IsGraphics && InReg) | |||
654 | return false; | |||
655 | ||||
656 | if (Arg.hasAttribute(Attribute::SwiftSelf) || | |||
657 | Arg.hasAttribute(Attribute::SwiftError) || | |||
658 | Arg.hasAttribute(Attribute::Nest)) | |||
659 | return false; | |||
660 | ||||
661 | if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) { | |||
662 | const bool ArgUsed = !Arg.use_empty(); | |||
663 | bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum); | |||
664 | ||||
665 | if (!SkipArg) { | |||
666 | Info->markPSInputAllocated(PSInputNum); | |||
667 | if (ArgUsed) | |||
668 | Info->markPSInputEnabled(PSInputNum); | |||
669 | } | |||
670 | ||||
671 | ++PSInputNum; | |||
672 | ||||
673 | if (SkipArg) { | |||
674 | for (Register R : VRegs[Idx]) | |||
675 | B.buildUndef(R); | |||
676 | ||||
677 | ++Idx; | |||
678 | continue; | |||
679 | } | |||
680 | } | |||
681 | ||||
682 | ArgInfo OrigArg(VRegs[Idx], Arg, Idx); | |||
683 | const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex; | |||
684 | setArgFlags(OrigArg, OrigArgIdx, DL, F); | |||
685 | ||||
686 | splitToValueTypes(OrigArg, SplitArgs, DL, CC); | |||
687 | ++Idx; | |||
688 | } | |||
689 | ||||
690 | // At least one interpolation mode must be enabled or else the GPU will | |||
691 | // hang. | |||
692 | // | |||
693 | // Check PSInputAddr instead of PSInputEnable. The idea is that if the user | |||
694 | // set PSInputAddr, the user wants to enable some bits after the compilation | |||
695 | // based on run-time states. Since we can't know what the final PSInputEna | |||
696 | // will look like, we shouldn't do anything here, and the user should take | |||
697 | // responsibility for the correct programming. | |||
698 | // | |||
699 | // Otherwise, the following restrictions apply: | |||
700 | // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. | |||
701 | // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be | |||
702 | // enabled too. | |||
703 | if (CC == CallingConv::AMDGPU_PS) { | |||
704 | if ((Info->getPSInputAddr() & 0x7F) == 0 || | |||
705 | ((Info->getPSInputAddr() & 0xF) == 0 && | |||
706 | Info->isPSInputAllocated(11))) { | |||
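| // Neither restriction is satisfied, so enable input 0 (two VGPRs) to | |||
| // guarantee at least one interpolation mode is on. | |||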
707 | CCInfo.AllocateReg(AMDGPU::VGPR0); | |||
708 | CCInfo.AllocateReg(AMDGPU::VGPR1); | |||
709 | Info->markPSInputAllocated(0); | |||
710 | Info->markPSInputEnabled(0); | |||
711 | } | |||
712 | ||||
713 | if (Subtarget.isAmdPalOS()) { | |||
714 | // For isAmdPalOS, the user does not enable some bits after compilation | |||
715 | // based on run-time states; the register values being generated here are | |||
716 | // the final ones set in hardware. Therefore we need to apply the | |||
717 | // workaround to PSInputAddr and PSInputEnable together. (The case where | |||
718 | // a bit is set in PSInputAddr but not PSInputEnable is where the frontend | |||
719 | // set up an input arg for a particular interpolation mode, but nothing | |||
720 | // uses that input arg. Really we should have an earlier pass that removes | |||
721 | // such an arg.) | |||
722 | unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); | |||
723 | if ((PsInputBits & 0x7F) == 0 || | |||
724 | ((PsInputBits & 0xF) == 0 && | |||
725 | (PsInputBits >> 11 & 1))) | |||
726 | Info->markPSInputEnabled( | |||
727 | countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined)); | |||
728 | } | |||
729 | } | |||
730 | ||||
731 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
732 | CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg()); | |||
733 | ||||
734 | if (!MBB.empty()) | |||
735 | B.setInstr(*MBB.begin()); | |||
736 | ||||
737 | if (!IsEntryFunc && !IsGraphics) { | |||
738 | // For the fixed ABI, pass workitem IDs in the last argument register. | |||
739 | TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info); | |||
740 | } | |||
741 | ||||
742 | IncomingValueAssigner Assigner(AssignFn); | |||
743 | if (!determineAssignments(Assigner, SplitArgs, CCInfo)) | |||
744 | return false; | |||
745 | ||||
746 | FormalArgHandler Handler(B, MRI); | |||
747 | if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B)) | |||
748 | return false; | |||
749 | ||||
750 | uint64_t StackOffset = Assigner.StackOffset; | |||
751 | ||||
752 | // Start adding system SGPRs. | |||
753 | if (IsEntryFunc) { | |||
754 | TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics); | |||
755 | } else { | |||
756 | if (!Subtarget.enableFlatScratch()) | |||
757 | CCInfo.AllocateReg(Info->getScratchRSrcReg()); | |||
758 | TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info); | |||
759 | } | |||
760 | ||||
761 | // When we tail call, we need to check if the callee's arguments will fit on | |||
762 | // the caller's stack. So, whenever we lower formal arguments, we should keep | |||
763 | // track of this information, since we might lower a tail call in this | |||
764 | // function later. | |||
765 | Info->setBytesInStackArgArea(StackOffset); | |||
766 | ||||
767 | // Move back to the end of the basic block. | |||
768 | B.setMBB(MBB); | |||
769 | ||||
770 | return true; | |||
771 | } | |||
772 | ||||
773 | bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder, | |||
774 | CCState &CCInfo, | |||
775 | SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs, | |||
776 | CallLoweringInfo &Info) const { | |||
777 | MachineFunction &MF = MIRBuilder.getMF(); | |||
778 | ||||
779 | // If there's no call site, this doesn't correspond to a call from the IR and | |||
780 | // doesn't need implicit inputs. | |||
781 | if (!Info.CB) | |||
782 | return true; | |||
783 | ||||
784 | const AMDGPUFunctionArgInfo *CalleeArgInfo | |||
785 | = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo; | |||
786 | ||||
787 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
788 | const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo(); | |||
789 | ||||
790 | ||||
791 | // TODO: Unify with private memory register handling. This is complicated by | |||
792 | // the fact that at least in kernels, the input argument is not necessarily | |||
793 | // in the same location as the input. | |||
794 | AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = { | |||
795 | AMDGPUFunctionArgInfo::DISPATCH_PTR, | |||
796 | AMDGPUFunctionArgInfo::QUEUE_PTR, | |||
797 | AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR, | |||
798 | AMDGPUFunctionArgInfo::DISPATCH_ID, | |||
799 | AMDGPUFunctionArgInfo::WORKGROUP_ID_X, | |||
800 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, | |||
801 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Z | |||
802 | }; | |||
803 | ||||
804 | static constexpr StringLiteral ImplicitAttrNames[] = { | |||
805 | "amdgpu-no-dispatch-ptr", | |||
806 | "amdgpu-no-queue-ptr", | |||
807 | "amdgpu-no-implicitarg-ptr", | |||
808 | "amdgpu-no-dispatch-id", | |||
809 | "amdgpu-no-workgroup-id-x", | |||
810 | "amdgpu-no-workgroup-id-y", | |||
811 | "amdgpu-no-workgroup-id-z" | |||
812 | }; | |||
813 | ||||
814 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
815 | ||||
816 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
817 | const AMDGPULegalizerInfo *LI | |||
818 | = static_cast<const AMDGPULegalizerInfo*>(ST.getLegalizerInfo()); | |||
819 | ||||
820 | unsigned I = 0; | |||
821 | for (auto InputID : InputRegs) { | |||
822 | const ArgDescriptor *OutgoingArg; | |||
823 | const TargetRegisterClass *ArgRC; | |||
824 | LLT ArgTy; | |||
825 | ||||
826 | // If the callee does not use the attribute value, skip copying the value. | |||
827 | if (Info.CB->hasFnAttr(ImplicitAttrNames[I++])) | |||
828 | continue; | |||
829 | ||||
830 | std::tie(OutgoingArg, ArgRC, ArgTy) = | |||
831 | CalleeArgInfo->getPreloadedValue(InputID); | |||
832 | if (!OutgoingArg) | |||
833 | continue; | |||
834 | ||||
835 | const ArgDescriptor *IncomingArg; | |||
836 | const TargetRegisterClass *IncomingArgRC; | |||
837 | std::tie(IncomingArg, IncomingArgRC, ArgTy) = | |||
838 | CallerArgInfo.getPreloadedValue(InputID); | |||
839 | assert(IncomingArgRC == ArgRC); | |||
840 | ||||
841 | Register InputReg = MRI.createGenericVirtualRegister(ArgTy); | |||
842 | ||||
843 | if (IncomingArg) { | |||
844 | LI->loadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy); | |||
845 | } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) { | |||
846 | LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder); | |||
847 | } else { | |||
848 | // We may have proven the input wasn't needed, although the ABI is | |||
849 | // requiring it. We just need to allocate the register appropriately. | |||
850 | MIRBuilder.buildUndef(InputReg); | |||
851 | } | |||
852 | ||||
853 | if (OutgoingArg->isRegister()) { | |||
854 | ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg); | |||
855 | if (!CCInfo.AllocateReg(OutgoingArg->getRegister())) | |||
856 | report_fatal_error("failed to allocate implicit input argument"); | |||
857 | } else { | |||
858 | LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n"); | |||
859 | return false; | |||
860 | } | |||
861 | } | |||
862 | ||||
863 | // Pack workitem IDs into a single register, or pass them as-is if they | |||
864 | // are already packed. | |||
865 | const ArgDescriptor *OutgoingArg; | |||
866 | const TargetRegisterClass *ArgRC; | |||
867 | LLT ArgTy; | |||
868 | ||||
869 | std::tie(OutgoingArg, ArgRC, ArgTy) = | |||
870 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X); | |||
871 | if (!OutgoingArg) | |||
872 | std::tie(OutgoingArg, ArgRC, ArgTy) = | |||
873 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y); | |||
874 | if (!OutgoingArg) | |||
875 | std::tie(OutgoingArg, ArgRC, ArgTy) = | |||
876 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z); | |||
877 | if (!OutgoingArg) | |||
878 | return false; | |||
879 | ||||
880 | auto WorkitemIDX = | |||
881 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X); | |||
882 | auto WorkitemIDY = | |||
883 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y); | |||
884 | auto WorkitemIDZ = | |||
885 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z); | |||
886 | ||||
887 | const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX); | |||
888 | const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY); | |||
889 | const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ); | |||
890 | const LLT S32 = LLT::scalar(32); | |||
891 | ||||
892 | const bool NeedWorkItemIDX = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-x"); | |||
893 | const bool NeedWorkItemIDY = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-y"); | |||
894 | const bool NeedWorkItemIDZ = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-z"); | |||
895 | ||||
896 | // If incoming ids are not packed we need to pack them. | |||
897 | // FIXME: Should consider known workgroup size to eliminate known 0 cases. | |||
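| // Packed layout used below: X occupies bits [9:0], Y bits [19:10] (the | |||
| // shl by 10), and Z bits [29:20] (the shl by 20). | |||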
898 | Register InputReg; | |||
899 | if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX && | |||
900 | NeedWorkItemIDX) { | |||
901 | if (ST.getMaxWorkitemID(MF.getFunction(), 0) != 0) { | |||
902 | InputReg = MRI.createGenericVirtualRegister(S32); | |||
903 | LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX, | |||
904 | std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX)); | |||
905 | } else { | |||
906 | InputReg = MIRBuilder.buildConstant(S32, 0).getReg(0); | |||
907 | } | |||
908 | } | |||
909 | ||||
910 | if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY && | |||
911 | NeedWorkItemIDY && ST.getMaxWorkitemID(MF.getFunction(), 1) != 0) { | |||
912 | Register Y = MRI.createGenericVirtualRegister(S32); | |||
913 | LI->loadInputValue(Y, MIRBuilder, IncomingArgY, std::get<1>(WorkitemIDY), | |||
914 | std::get<2>(WorkitemIDY)); | |||
915 | ||||
916 | Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10)).getReg(0); | |||
917 | InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y; | |||
918 | } | |||
919 | ||||
920 | if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ && | |||
921 | NeedWorkItemIDZ && ST.getMaxWorkitemID(MF.getFunction(), 2) != 0) { | |||
922 | Register Z = MRI.createGenericVirtualRegister(S32); | |||
923 | LI->loadInputValue(Z, MIRBuilder, IncomingArgZ, std::get<1>(WorkitemIDZ), | |||
924 | std::get<2>(WorkitemIDZ)); | |||
925 | ||||
926 | Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20)).getReg(0); | |||
927 | InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z; | |||
928 | } | |||
929 | ||||
930 | if (!InputReg && | |||
931 | (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) { | |||
932 | InputReg = MRI.createGenericVirtualRegister(S32); | |||
933 | if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) { | |||
934 | // We're in a situation where the outgoing function requires the workitem | |||
935 | // ID, but the calling function does not have it (e.g a graphics function | |||
936 | // calling a C calling convention function). This is illegal, but we need | |||
937 | // to produce something. | |||
938 | MIRBuilder.buildUndef(InputReg); | |||
939 | } else { | |||
940 | // Workitem ids are already packed; any of the present incoming arguments | |||
941 | // will carry all required fields. | |||
942 | ArgDescriptor IncomingArg = ArgDescriptor::createArg( | |||
943 | IncomingArgX ? *IncomingArgX : | |||
944 | IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u); | |||
945 | LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg, | |||
946 | &AMDGPU::VGPR_32RegClass, S32); | |||
947 | } | |||
948 | } | |||
949 | ||||
950 | if (OutgoingArg->isRegister()) { | |||
951 | if (InputReg) | |||
952 | ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg); | |||
953 | ||||
954 | if (!CCInfo.AllocateReg(OutgoingArg->getRegister())) | |||
955 | report_fatal_error("failed to allocate implicit input argument"); | |||
956 | } else { | |||
957 | LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n"); | |||
958 | return false; | |||
959 | } | |||
960 | ||||
961 | return true; | |||
962 | } | |||
963 | ||||
964 | /// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for | |||
965 | /// CC. | |||
966 | static std::pair<CCAssignFn *, CCAssignFn *> | |||
967 | getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) { | |||
968 | return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)}; | |||
969 | } | |||
970 | ||||
971 | static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect, | |||
972 | bool IsTailCall) { | |||
973 | assert(!(IsIndirect && IsTailCall) && "Indirect calls can't be tail calls, " | |||
974 | "because the address can be divergent"); | |||
975 | return IsTailCall ? AMDGPU::SI_TCRETURN : AMDGPU::G_SI_CALL; | |||
976 | } | |||
977 | ||||
978 | // Add operands to call instruction to track the callee. | |||
979 | static bool addCallTargetOperands(MachineInstrBuilder &CallInst, | |||
980 | MachineIRBuilder &MIRBuilder, | |||
981 | AMDGPUCallLowering::CallLoweringInfo &Info) { | |||
982 | if (Info.Callee.isReg()) { | |||
983 | CallInst.addReg(Info.Callee.getReg()); | |||
984 | CallInst.addImm(0); | |||
985 | } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) { | |||
986 | // The call lowering lightly assumed we can directly encode a call target in | |||
987 | // the instruction, which is not the case. Materialize the address here. | |||
988 | const GlobalValue *GV = Info.Callee.getGlobal(); | |||
989 | auto Ptr = MIRBuilder.buildGlobalValue( | |||
990 | LLT::pointer(GV->getAddressSpace(), 64), GV); | |||
991 | CallInst.addReg(Ptr.getReg(0)); | |||
992 | CallInst.add(Info.Callee); | |||
993 | } else | |||
994 | return false; | |||
995 | ||||
996 | return true; | |||
997 | } | |||
998 | ||||
999 | bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay( | |||
1000 | CallLoweringInfo &Info, MachineFunction &MF, | |||
1001 | SmallVectorImpl<ArgInfo> &InArgs) const { | |||
1002 | const Function &CallerF = MF.getFunction(); | |||
1003 | CallingConv::ID CalleeCC = Info.CallConv; | |||
1004 | CallingConv::ID CallerCC = CallerF.getCallingConv(); | |||
1005 | ||||
1006 | // If the calling conventions match, then everything must be the same. | |||
1007 | if (CalleeCC == CallerCC) | |||
1008 | return true; | |||
1009 | ||||
1010 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
1011 | ||||
1012 | // Make sure that the caller and callee preserve all of the same registers. | |||
1013 | auto TRI = ST.getRegisterInfo(); | |||
1014 | ||||
1015 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); | |||
1016 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); | |||
1017 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) | |||
1018 | return false; | |||
1019 | ||||
1020 | // Check if the caller and callee will handle arguments in the same way. | |||
1021 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
1022 | CCAssignFn *CalleeAssignFnFixed; | |||
1023 | CCAssignFn *CalleeAssignFnVarArg; | |||
1024 | std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) = | |||
1025 | getAssignFnsForCC(CalleeCC, TLI); | |||
1026 | ||||
1027 | CCAssignFn *CallerAssignFnFixed; | |||
1028 | CCAssignFn *CallerAssignFnVarArg; | |||
1029 | std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) = | |||
1030 | getAssignFnsForCC(CallerCC, TLI); | |||
1031 | ||||
1032 | // FIXME: We are not accounting for potential differences in implicitly passed | |||
1033 | // inputs, but only the fixed ABI is supported now anyway. | |||
1034 | IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed, | |||
1035 | CalleeAssignFnVarArg); | |||
1036 | IncomingValueAssigner CallerAssigner(CallerAssignFnFixed, | |||
1037 | CallerAssignFnVarArg); | |||
1038 | return resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner); | |||
1039 | } | |||
1040 | ||||
1041 | bool AMDGPUCallLowering::areCalleeOutgoingArgsTailCallable( | |||
1042 | CallLoweringInfo &Info, MachineFunction &MF, | |||
1043 | SmallVectorImpl<ArgInfo> &OutArgs) const { | |||
1044 | // If there are no outgoing arguments, then we are done. | |||
1045 | if (OutArgs.empty()) | |||
1046 | return true; | |||
1047 | ||||
1048 | const Function &CallerF = MF.getFunction(); | |||
1049 | CallingConv::ID CalleeCC = Info.CallConv; | |||
1050 | CallingConv::ID CallerCC = CallerF.getCallingConv(); | |||
1051 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
1052 | ||||
1053 | CCAssignFn *AssignFnFixed; | |||
1054 | CCAssignFn *AssignFnVarArg; | |||
1055 | std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI); | |||
1056 | ||||
1057 | // We have outgoing arguments. Make sure that we can tail call with them. | |||
1058 | SmallVector<CCValAssign, 16> OutLocs; | |||
1059 | CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext()); | |||
1060 | OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg); | |||
1061 | ||||
1062 | if (!determineAssignments(Assigner, OutArgs, OutInfo)) { | |||
1063 | LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n"); | |||
1064 | return false; | |||
1065 | } | |||
1066 | ||||
1067 | // Make sure that they can fit on the caller's stack. | |||
1068 | const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); | |||
1069 | if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) { | |||
1070 | LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n"); | |||
1071 | return false; | |||
1072 | } | |||
1073 | ||||
1074 | // Verify that the parameters in callee-saved registers match. | |||
1075 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
1076 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
1077 | const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC); | |||
1078 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
1079 | return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs); | |||
1080 | } | |||
1081 | ||||
1082 | /// Return true if the calling convention is one that we can guarantee TCO for. | |||
1083 | static bool canGuaranteeTCO(CallingConv::ID CC) { | |||
1084 | return CC == CallingConv::Fast; | |||
1085 | } | |||
1086 | ||||
1087 | /// Return true if we might ever do TCO for calls with this calling convention. | |||
1088 | static bool mayTailCallThisCC(CallingConv::ID CC) { | |||
1089 | switch (CC) { | |||
1090 | case CallingConv::C: | |||
1091 | case CallingConv::AMDGPU_Gfx: | |||
1092 | return true; | |||
1093 | default: | |||
1094 | return canGuaranteeTCO(CC); | |||
1095 | } | |||
1096 | } | |||
1097 | ||||
1098 | bool AMDGPUCallLowering::isEligibleForTailCallOptimization( | |||
1099 | MachineIRBuilder &B, CallLoweringInfo &Info, | |||
1100 | SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const { | |||
1101 | // Must pass all target-independent checks in order to tail call optimize. | |||
1102 | if (!Info.IsTailCall) | |||
1103 | return false; | |||
1104 | ||||
1105 | // Indirect calls can't be tail calls, because the address can be divergent. | |||
1106 | // TODO Check divergence info if the call really is divergent. | |||
1107 | if (Info.Callee.isReg()) | |||
1108 | return false; | |||
1109 | ||||
1110 | MachineFunction &MF = B.getMF(); | |||
1111 | const Function &CallerF = MF.getFunction(); | |||
1112 | CallingConv::ID CalleeCC = Info.CallConv; | |||
1113 | CallingConv::ID CallerCC = CallerF.getCallingConv(); | |||
1114 | ||||
1115 | const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo(); | |||
1116 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); | |||
1117 | // Kernels aren't callable and don't have a live-in return address, so it | |||
1118 | // doesn't make sense to do a tail call from entry functions. | |||
1119 | if (!CallerPreserved) | |||
1120 | return false; | |||
1121 | ||||
1122 | if (!mayTailCallThisCC(CalleeCC)) { | |||
1123 | LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n"); | |||
1124 | return false; | |||
1125 | } | |||
1126 | ||||
1127 | if (any_of(CallerF.args(), [](const Argument &A) { | |||
1128 | return A.hasByValAttr() || A.hasSwiftErrorAttr(); | |||
1129 | })) { | |||
1130 | LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval " | |||
1131 | "or swifterror arguments\n"); | |||
1132 | return false; | |||
1133 | } | |||
1134 | ||||
1135 | // If we have -tailcallopt, then we're done. | |||
1136 | if (MF.getTarget().Options.GuaranteedTailCallOpt) | |||
1137 | return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv(); | |||
1138 | ||||
1139 | // Verify that the incoming and outgoing arguments from the callee are | |||
1140 | // safe to tail call. | |||
1141 | if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) { | |||
1142 | LLVM_DEBUG( | |||
1143 | dbgs() | |||
1144 | << "... Caller and callee have incompatible calling conventions.\n"); | |||
1145 | return false; | |||
1146 | } | |||
1147 | ||||
1148 | if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs)) | |||
1149 | return false; | |||
1150 | ||||
1151 | LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n"); | |||
1152 | return true; | |||
1153 | } | |||
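// Illustrative only (hypothetical IR): a direct call with matching C calling
// conventions and no byval/swifterror caller arguments passes the checks
// above and can be lowered as a sibcall:
//
//   define void @caller(i32 %x) {
//     tail call void @callee(i32 %x)
//     ret void
//   }
//
// A call through a function-pointer value, by contrast, fails the
// Info.Callee.isReg() check above and is never tail called here.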
1154 | ||||
1155 | // Pass outgoing implicit arguments for a call by inserting copies to the | |||
1156 | // implicit argument registers and adding the necessary implicit uses to the | |||
1157 | // call instruction. | |||
1158 | void AMDGPUCallLowering::handleImplicitCallArguments( | |||
1159 | MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst, | |||
1160 | const GCNSubtarget &ST, const SIMachineFunctionInfo &FuncInfo, | |||
1161 | ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const { | |||
1162 | if (!ST.enableFlatScratch()) { | |||
1163 | // Insert copies for the SRD. In the HSA case, this should be an identity | |||
1164 | // copy. | |||
1165 | auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32), | |||
1166 | FuncInfo.getScratchRSrcReg()); | |||
1167 | MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); | |||
1168 | CallInst.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit); | |||
1169 | } | |||
1170 | ||||
1171 | for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) { | |||
1172 | MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second); | |||
1173 | CallInst.addReg(ArgReg.first, RegState::Implicit); | |||
1174 | } | |||
1175 | } | |||
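// On a subtarget without flat scratch this produces, roughly (illustrative
// MIR; the virtual register name is hypothetical):
//
//   %rsrc:_(<4 x s32>) = COPY <scratch rsrc register>
//   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY %rsrc
//   <call> ..., implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit <implicit arg regs>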
1176 | ||||
1177 | bool AMDGPUCallLowering::lowerTailCall( | |||
1178 | MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info, | |||
1179 | SmallVectorImpl<ArgInfo> &OutArgs) const { | |||
1180 | MachineFunction &MF = MIRBuilder.getMF(); | |||
1181 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
1182 | SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); | |||
1183 | const Function &F = MF.getFunction(); | |||
1184 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
1185 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
1186 | ||||
1187 | // True when we're tail calling, but without -tailcallopt. | |||
1188 | bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt; | |||
1189 | ||||
1190 | // Find out which ABI gets to decide where things go. | |||
1191 | CallingConv::ID CalleeCC = Info.CallConv; | |||
1192 | CCAssignFn *AssignFnFixed; | |||
1193 | CCAssignFn *AssignFnVarArg; | |||
1194 | std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI); | |||
1195 | ||||
1196 | MachineInstrBuilder CallSeqStart; | |||
1197 | if (!IsSibCall) | |||
1198 | CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP); | |||
1199 | ||||
1200 | unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true); | |||
1201 | auto MIB = MIRBuilder.buildInstrNoInsert(Opc); | |||
1202 | if (!addCallTargetOperands(MIB, MIRBuilder, Info)) | |||
1203 | return false; | |||
1204 | ||||
1205 | // Byte offset for the tail call. When we are sibcalling, this will always | |||
1206 | // be 0. | |||
1207 | MIB.addImm(0); | |||
1208 | ||||
1209 | // Tell the call which registers are clobbered. | |||
1210 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
1211 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC); | |||
1212 | MIB.addRegMask(Mask); | |||
1213 | ||||
1214 | // FPDiff is the byte offset of the call's argument area from the callee's. | |||
1215 | // Stores to callee stack arguments will be placed in FixedStackSlots offset | |||
1216 | // by this amount for a tail call. In a sibling call it must be 0 because the | |||
1217 | // caller will deallocate the entire stack and the callee still expects its | |||
1218 | // arguments to begin at SP+0. | |||
1219 | int FPDiff = 0; | |||
1220 | ||||
1221 | // This will be 0 for sibcalls, potentially nonzero for tail calls produced | |||
1222 | // by -tailcallopt. For sibcalls, the memory operands for the call are | |||
1223 | // already available in the caller's incoming argument space. | |||
1224 | unsigned NumBytes = 0; | |||
1225 | if (!IsSibCall) { | |||
1226 | // We aren't sibcalling, so we need to compute FPDiff. We need to do this | |||
1227 | // before handling assignments, because FPDiff must be known for memory | |||
1228 | // arguments. | |||
1229 | unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea(); | |||
1230 | SmallVector<CCValAssign, 16> OutLocs; | |||
1231 | CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext()); | |||
1232 | ||||
1233 | // FIXME: Not accounting for callee implicit inputs | |||
1234 | OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg); | |||
1235 | if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo)) | |||
1236 | return false; | |||
1237 | ||||
1238 | // The callee will pop the argument stack as part of the tail call. Thus, | |||
1239 | // we must keep it 16-byte aligned. | |||
1240 | NumBytes = alignTo(OutInfo.getNextStackOffset(), ST.getStackAlignment()); | |||
1241 | ||||
1242 | // FPDiff will be negative if this tail call requires more space than we | |||
1243 | // would automatically have in our incoming argument space. Positive if we | |||
1244 | // actually shrink the stack. | |||
1245 | FPDiff = NumReusableBytes - NumBytes; | |||
1246 | ||||
1247 | // The stack pointer must be 16-byte aligned at all times it's used for a | |||
1248 | // memory operation, which in practice means at *all* times and in | |||
1249 | // particular across call boundaries. Therefore our own arguments started at | |||
1250 | // a 16-byte aligned SP and the delta applied for the tail call should | |||
1251 | // satisfy the same constraint. | |||
1252 | assert(isAligned(ST.getStackAlignment(), FPDiff) && | |||
1253 | "unaligned stack on tail call"); | |||
1254 | } | |||
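// Worked example with hypothetical numbers: if the caller's incoming stack
// argument area holds NumReusableBytes = 32 while the callee needs
// NumBytes = 48 (already rounded up to the stack alignment), then
// FPDiff = 32 - 48 = -16, i.e. the tail call needs 16 bytes more than the
// incoming argument area provides; with NumBytes = 16 instead,
// FPDiff = 32 - 16 = +16 and the stack effectively shrinks.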
1255 | ||||
1256 | SmallVector<CCValAssign, 16> ArgLocs; | |||
1257 | CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext()); | |||
1258 | ||||
1259 | // We could pass MIB and directly add the implicit uses to the call | |||
1260 | // now. However, as an aesthetic choice, place implicit argument operands | |||
1261 | // after the ordinary user argument registers. | |||
1262 | SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs; | |||
1263 | ||||
1264 | if (Info.CallConv != CallingConv::AMDGPU_Gfx) { | |||
1265 | // With a fixed ABI, allocate fixed registers before user arguments. | |||
1266 | if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info)) | |||
1267 | return false; | |||
1268 | } | |||
1269 | ||||
1270 | OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg); | |||
1271 | ||||
1272 | if (!determineAssignments(Assigner, OutArgs, CCInfo)) | |||
1273 | return false; | |||
1274 | ||||
1275 | // Do the actual argument marshalling. | |||
1276 | AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff); | |||
1277 | if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder)) | |||
1278 | return false; | |||
1279 | ||||
1280 | handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, ImplicitArgRegs); | |||
1281 | ||||
1282 | // If we have -tailcallopt, we need to adjust the stack. We'll do the call | |||
1283 | // sequence start and end here. | |||
1284 | if (!IsSibCall) { | |||
1285 | MIB->getOperand(1).setImm(FPDiff); | |||
1286 | CallSeqStart.addImm(NumBytes).addImm(0); | |||
1287 | // End the call sequence *before* emitting the call. Normally, we would | |||
1288 | // tidy the frame up after the call. However, here, we've laid out the | |||
1289 | // parameters so that when SP is reset, they will be in the correct | |||
1290 | // location. | |||
1291 | MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN).addImm(NumBytes).addImm(0); | |||
1292 | } | |||
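// With -tailcallopt the emitted skeleton is therefore roughly (illustrative;
// the exact pseudo-instructions depend on the call opcode chosen above):
//
//   ADJCALLSTACKUP NumBytes, 0
//   ... outgoing argument copies and stack stores offset by FPDiff ...
//   ADJCALLSTACKDOWN NumBytes, 0
//   <tail call pseudo> @callee, FPDiff, <regmask>, implicit ...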
1293 | ||||
1294 | // Now we can add the actual call instruction to the correct basic block. | |||
1295 | MIRBuilder.insertInstr(MIB); | |||
1296 | ||||
1297 | // If Callee is a register, it is used by a target-specific instruction | |||
1298 | // and must therefore have a register class matching that instruction's | |||
1299 | // operand constraint. | |||
1300 | ||||
1301 | // FIXME: We should define regbankselectable call instructions to handle | |||
1302 | // divergent call targets. | |||
1303 | if (MIB->getOperand(0).isReg()) { | |||
1304 | MIB->getOperand(0).setReg(constrainOperandRegClass( | |||
1305 | MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB, | |||
1306 | MIB->getDesc(), MIB->getOperand(0), 0)); | |||
1307 | } | |||
1308 | ||||
1309 | MF.getFrameInfo().setHasTailCall(); | |||
1310 | Info.LoweredTailCall = true; | |||
1311 | return true; | |||
1312 | } | |||
1313 | ||||
1314 | bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, | |||
1315 | CallLoweringInfo &Info) const { | |||
1316 | if (Info.IsVarArg) { | |||
1317 | LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n"); | |||
1318 | return false; | |||
1319 | } | |||
1320 | ||||
1321 | MachineFunction &MF = MIRBuilder.getMF(); | |||
1322 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
1323 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
1324 | ||||
1325 | const Function &F = MF.getFunction(); | |||
1326 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
1327 | const SITargetLowering &TLI = *getTLI<SITargetLowering>(); | |||
1328 | const DataLayout &DL = F.getParent()->getDataLayout(); | |||
1329 | ||||
1330 | SmallVector<ArgInfo, 8> OutArgs; | |||
1331 | for (auto &OrigArg : Info.OrigArgs) | |||
1332 | splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv); | |||
1333 | ||||
1334 | SmallVector<ArgInfo, 8> InArgs; | |||
1335 | if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) | |||
1336 | splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv); | |||
1337 | ||||
1338 | // If we can lower as a tail call, do that instead. | |||
1339 | bool CanTailCallOpt = | |||
1340 | isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs); | |||
1341 | ||||
1342 | // We must emit a tail call if we have musttail. | |||
1343 | if (Info.IsMustTailCall && !CanTailCallOpt) { | |||
1344 | LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n"); | |||
1345 | return false; | |||
1346 | } | |||
1347 | ||||
1348 | Info.IsTailCall = CanTailCallOpt; | |||
1349 | if (CanTailCallOpt) | |||
1350 | return lowerTailCall(MIRBuilder, Info, OutArgs); | |||
1351 | ||||
1352 | // Find out which ABI gets to decide where things go. | |||
1353 | CCAssignFn *AssignFnFixed; | |||
1354 | CCAssignFn *AssignFnVarArg; | |||
1355 | std::tie(AssignFnFixed, AssignFnVarArg) = | |||
1356 | getAssignFnsForCC(Info.CallConv, TLI); | |||
1357 | ||||
1358 | MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP) | |||
1359 | .addImm(0) | |||
1360 | .addImm(0); | |||
1361 | ||||
1362 | // Create a temporarily floating call instruction so we can add the implicit | |||
1363 | // uses of arg registers. | |||
1364 | unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false); | |||
1365 | ||||
1366 | auto MIB = MIRBuilder.buildInstrNoInsert(Opc); | |||
1367 | MIB.addDef(TRI->getReturnAddressReg(MF)); | |||
1368 | ||||
1369 | if (!addCallTargetOperands(MIB, MIRBuilder, Info)) | |||
1370 | return false; | |||
1371 | ||||
1372 | // Tell the call which registers are clobbered. | |||
1373 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv); | |||
1374 | MIB.addRegMask(Mask); | |||
1375 | ||||
1376 | SmallVector<CCValAssign, 16> ArgLocs; | |||
1377 | CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext()); | |||
1378 | ||||
1379 | // We could pass MIB and directly add the implicit uses to the call | |||
1380 | // now. However, as an aesthetic choice, place implicit argument operands | |||
1381 | // after the ordinary user argument registers. | |||
1382 | SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs; | |||
1383 | ||||
1384 | if (Info.CallConv != CallingConv::AMDGPU_Gfx) { | |||
1385 | // With a fixed ABI, allocate fixed registers before user arguments. | |||
1386 | if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info)) | |||
1387 | return false; | |||
1388 | } | |||
1389 | ||||
1390 | // Do the actual argument marshalling. | |||
1391 | SmallVector<Register, 8> PhysRegs; | |||
1392 | ||||
1393 | OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg); | |||
1394 | if (!determineAssignments(Assigner, OutArgs, CCInfo)) | |||
1395 | return false; | |||
1396 | ||||
1397 | AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false); | |||
1398 | if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder)) | |||
1399 | return false; | |||
1400 | ||||
1401 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
1402 | ||||
1403 | handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, ImplicitArgRegs); | |||
1404 | ||||
1405 | // Get a count of how many bytes are to be pushed on the stack. | |||
1406 | unsigned NumBytes = CCInfo.getNextStackOffset(); | |||
1407 | ||||
1408 | // If Callee is a register, it is used by a target-specific instruction | |||
1409 | // and must therefore have a register class matching that instruction's | |||
1410 | // operand constraint. | |||
1411 | ||||
1412 | // FIXME: We should define regbankselectable call instructions to handle | |||
1413 | // divergent call targets. | |||
1414 | if (MIB->getOperand(1).isReg()) { | |||
1415 | MIB->getOperand(1).setReg(constrainOperandRegClass( | |||
1416 | MF, *TRI, MRI, *ST.getInstrInfo(), | |||
1417 | *ST.getRegBankInfo(), *MIB, MIB->getDesc(), MIB->getOperand(1), | |||
1418 | 1)); | |||
1419 | } | |||
1420 | ||||
1421 | // Now we can add the actual call instruction to the correct position. | |||
1422 | MIRBuilder.insertInstr(MIB); | |||
1423 | ||||
1424 | // Finally we can copy the returned value back into its virtual register. In | |||
1425 | // symmetry with the arguments, the physical register must be an | |||
1426 | // implicit-def of the call instruction. | |||
1427 | if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) { | |||
1428 | CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv, | |||
1429 | Info.IsVarArg); | |||
1430 | IncomingValueAssigner Assigner(RetAssignFn); | |||
1431 | CallReturnHandler Handler(MIRBuilder, MRI, MIB); | |||
1432 | if (!determineAndHandleAssignments(Handler, Assigner, InArgs, MIRBuilder, | |||
1433 | Info.CallConv, Info.IsVarArg)) | |||
1434 | return false; | |||
1435 | } | |||
1436 | ||||
1437 | uint64_t CalleePopBytes = NumBytes; | |||
1438 | ||||
1439 | MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN) | |||
1440 | .addImm(0) | |||
1441 | .addImm(CalleePopBytes); | |||
1442 | ||||
1443 | if (!Info.CanLowerReturn) { | |||
1444 | insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs, | |||
1445 | Info.DemoteRegister, Info.DemoteStackIndex); | |||
1446 | } | |||
1447 | ||||
1448 | return true; | |||
1449 | } |
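// For a plain (non-tail) call the overall emitted skeleton is roughly
// (illustrative; physical register names depend on the calling convention):
//
//   ADJCALLSTACKUP 0, 0
//   ... copies into argument registers and stack stores ...
//   <return-address reg> = <call pseudo> @callee, <regmask>, implicit ...
//   ... copies of return values out of the return registers ...
//   ADJCALLSTACKDOWN 0, CalleePopBytes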