//===-- RISCVCallingConv.cpp - RISC-V Custom CC Routines ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the custom routines for the RISC-V Calling Convention.
//
//===----------------------------------------------------------------------===//

#include "RISCVCallingConv.h"
#include "RISCVSubtarget.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCRegister.h"

using namespace llvm;

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
// passed in a pair of registers (fp+fp, int+fp), and both registers are
// available, then pass as two separate arguments. If either the GPRs or FPRs
// are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
// slot (as it is larger than 2*XLEN and the floating point rules don't
// apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
// word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
// not based on its size and fields. If it will be returned by reference, the
// frontend must modify the prototype so a pointer with the sret annotation is
// passed as the first argument. This is not necessary for large scalar
// returns.
// * Struct return values and varargs should be coerced to structs containing
// register-size fields in the same situations they would be for fixed
// arguments.
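//
// As a rough illustration of these rules: under LP64D, a struct such as
// { double d; int32_t i; } can be passed as two separate arguments (one FPR,
// one GPR) while both register classes still have free argument registers;
// once either class is exhausted it falls back to the integer rules, i.e.
// coercion to a 2*XLEN value, or byval if it is larger than 2*XLEN.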

static const MCPhysReg ArgFPR16s[] = {RISCV::F10_H, RISCV::F11_H, RISCV::F12_H,
                                      RISCV::F13_H, RISCV::F14_H, RISCV::F15_H,
                                      RISCV::F16_H, RISCV::F17_H};
static const MCPhysReg ArgFPR32s[] = {RISCV::F10_F, RISCV::F11_F, RISCV::F12_F,
                                      RISCV::F13_F, RISCV::F14_F, RISCV::F15_F,
                                      RISCV::F16_F, RISCV::F17_F};
static const MCPhysReg ArgFPR64s[] = {RISCV::F10_D, RISCV::F11_D, RISCV::F12_D,
                                      RISCV::F13_D, RISCV::F14_D, RISCV::F15_D,
                                      RISCV::F16_D, RISCV::F17_D};
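// F10-F17 above are the fa0-fa7 floating-point argument registers; the
// _H/_F/_D suffixes name the 16-, 32- and 64-bit views of each register,
// used for f16/bf16, f32 and f64 values respectively.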
// This is an interim calling convention and it may be changed in the future.
static const MCPhysReg ArgVRs[] = {
    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
                                     RISCV::V20M2, RISCV::V22M2};
static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
                                     RISCV::V20M4};
static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
static const MCPhysReg ArgVRN2M1s[] = {
    RISCV::V8_V9,   RISCV::V9_V10,  RISCV::V10_V11, RISCV::V11_V12,
    RISCV::V12_V13, RISCV::V13_V14, RISCV::V14_V15, RISCV::V15_V16,
    RISCV::V16_V17, RISCV::V17_V18, RISCV::V18_V19, RISCV::V19_V20,
    RISCV::V20_V21, RISCV::V21_V22, RISCV::V22_V23};
static const MCPhysReg ArgVRN3M1s[] = {
    RISCV::V8_V9_V10,   RISCV::V9_V10_V11,  RISCV::V10_V11_V12,
    RISCV::V11_V12_V13, RISCV::V12_V13_V14, RISCV::V13_V14_V15,
    RISCV::V14_V15_V16, RISCV::V15_V16_V17, RISCV::V16_V17_V18,
    RISCV::V17_V18_V19, RISCV::V18_V19_V20, RISCV::V19_V20_V21,
    RISCV::V20_V21_V22, RISCV::V21_V22_V23};
static const MCPhysReg ArgVRN4M1s[] = {
    RISCV::V8_V9_V10_V11,   RISCV::V9_V10_V11_V12,  RISCV::V10_V11_V12_V13,
    RISCV::V11_V12_V13_V14, RISCV::V12_V13_V14_V15, RISCV::V13_V14_V15_V16,
    RISCV::V14_V15_V16_V17, RISCV::V15_V16_V17_V18, RISCV::V16_V17_V18_V19,
    RISCV::V17_V18_V19_V20, RISCV::V18_V19_V20_V21, RISCV::V19_V20_V21_V22,
    RISCV::V20_V21_V22_V23};
static const MCPhysReg ArgVRN5M1s[] = {
    RISCV::V8_V9_V10_V11_V12,   RISCV::V9_V10_V11_V12_V13,
    RISCV::V10_V11_V12_V13_V14, RISCV::V11_V12_V13_V14_V15,
    RISCV::V12_V13_V14_V15_V16, RISCV::V13_V14_V15_V16_V17,
    RISCV::V14_V15_V16_V17_V18, RISCV::V15_V16_V17_V18_V19,
    RISCV::V16_V17_V18_V19_V20, RISCV::V17_V18_V19_V20_V21,
    RISCV::V18_V19_V20_V21_V22, RISCV::V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN6M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13,   RISCV::V9_V10_V11_V12_V13_V14,
    RISCV::V10_V11_V12_V13_V14_V15, RISCV::V11_V12_V13_V14_V15_V16,
    RISCV::V12_V13_V14_V15_V16_V17, RISCV::V13_V14_V15_V16_V17_V18,
    RISCV::V14_V15_V16_V17_V18_V19, RISCV::V15_V16_V17_V18_V19_V20,
    RISCV::V16_V17_V18_V19_V20_V21, RISCV::V17_V18_V19_V20_V21_V22,
    RISCV::V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN7M1s[] = {
    RISCV::V8_V9_V10_V11_V12_V13_V14,   RISCV::V9_V10_V11_V12_V13_V14_V15,
    RISCV::V10_V11_V12_V13_V14_V15_V16, RISCV::V11_V12_V13_V14_V15_V16_V17,
    RISCV::V12_V13_V14_V15_V16_V17_V18, RISCV::V13_V14_V15_V16_V17_V18_V19,
    RISCV::V14_V15_V16_V17_V18_V19_V20, RISCV::V15_V16_V17_V18_V19_V20_V21,
    RISCV::V16_V17_V18_V19_V20_V21_V22, RISCV::V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN8M1s[] = {RISCV::V8_V9_V10_V11_V12_V13_V14_V15,
                                       RISCV::V9_V10_V11_V12_V13_V14_V15_V16,
                                       RISCV::V10_V11_V12_V13_V14_V15_V16_V17,
                                       RISCV::V11_V12_V13_V14_V15_V16_V17_V18,
                                       RISCV::V12_V13_V14_V15_V16_V17_V18_V19,
                                       RISCV::V13_V14_V15_V16_V17_V18_V19_V20,
                                       RISCV::V14_V15_V16_V17_V18_V19_V20_V21,
                                       RISCV::V15_V16_V17_V18_V19_V20_V21_V22,
                                       RISCV::V16_V17_V18_V19_V20_V21_V22_V23};
static const MCPhysReg ArgVRN2M2s[] = {RISCV::V8M2_V10M2,  RISCV::V10M2_V12M2,
                                       RISCV::V12M2_V14M2, RISCV::V14M2_V16M2,
                                       RISCV::V16M2_V18M2, RISCV::V18M2_V20M2,
                                       RISCV::V20M2_V22M2};
static const MCPhysReg ArgVRN3M2s[] = {
    RISCV::V8M2_V10M2_V12M2,  RISCV::V10M2_V12M2_V14M2,
    RISCV::V12M2_V14M2_V16M2, RISCV::V14M2_V16M2_V18M2,
    RISCV::V16M2_V18M2_V20M2, RISCV::V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN4M2s[] = {
    RISCV::V8M2_V10M2_V12M2_V14M2,  RISCV::V10M2_V12M2_V14M2_V16M2,
    RISCV::V12M2_V14M2_V16M2_V18M2, RISCV::V14M2_V16M2_V18M2_V20M2,
    RISCV::V16M2_V18M2_V20M2_V22M2};
static const MCPhysReg ArgVRN2M4s[] = {RISCV::V8M4_V12M4, RISCV::V12M4_V16M4,
                                       RISCV::V16M4_V20M4};

ArrayRef<MCPhysReg> RISCV::getArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E ABI.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15,
                                       RISCV::X16, RISCV::X17};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getArgGPR16s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E ABI.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                       RISCV::X12_H, RISCV::X13_H,
                                       RISCV::X14_H, RISCV::X15_H,
                                       RISCV::X16_H, RISCV::X17_H};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                       RISCV::X12_H, RISCV::X13_H,
                                       RISCV::X14_H, RISCV::X15_H};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getArgGPR32s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
  // the ILP32E ABI.
  static const MCPhysReg ArgIGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                       RISCV::X12_W, RISCV::X13_W,
                                       RISCV::X14_W, RISCV::X15_W,
                                       RISCV::X16_W, RISCV::X17_W};
  // The GPRs used for passing arguments in the ILP32E/LP64E ABI.
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                       RISCV::X12_W, RISCV::X13_W,
                                       RISCV::X14_W, RISCV::X15_W};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(ArgEGPRs);

  return ArrayRef(ArgIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be
  // used for the save-restore libcall, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, RISCV::X15,
      RISCV::X16, RISCV::X17, RISCV::X28, RISCV::X29, RISCV::X30, RISCV::X31};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                          RISCV::X13, RISCV::X14, RISCV::X15};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRF16s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be
  // used for the save-restore libcall, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10_H, RISCV::X11_H, RISCV::X12_H, RISCV::X13_H,
      RISCV::X14_H, RISCV::X15_H, RISCV::X16_H, RISCV::X17_H,
      RISCV::X28_H, RISCV::X29_H, RISCV::X30_H, RISCV::X31_H};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
                                          RISCV::X12_H, RISCV::X13_H,
                                          RISCV::X14_H, RISCV::X15_H};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

static ArrayRef<MCPhysReg> getFastCCArgGPRF32s(const RISCVABI::ABI ABI) {
  // The GPRs used for passing arguments in the FastCC. X5 and X6 might be
  // used for the save-restore libcall, so we don't use them.
  // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10_W, RISCV::X11_W, RISCV::X12_W, RISCV::X13_W,
      RISCV::X14_W, RISCV::X15_W, RISCV::X16_W, RISCV::X17_W,
      RISCV::X28_W, RISCV::X29_W, RISCV::X30_W, RISCV::X31_W};

  // The GPRs used for passing arguments in the FastCC when using ILP32E/LP64E.
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
                                          RISCV::X12_W, RISCV::X13_W,
                                          RISCV::X14_W, RISCV::X15_W};

  if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
    return ArrayRef(FastCCEGPRs);

  return ArrayRef(FastCCIGPRs);
}

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
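// For example, an i64 argument on RV32 is legalised into two i32 halves:
// with two free argument GPRs the halves land in a register pair (e.g.
// a0/a1), with one free GPR the argument is split between that register and
// the stack, and with none both halves go on the stack.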
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2, bool EABI) {
  unsigned XLenInBytes = XLen / 8;
  const RISCVSubtarget &STI =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();
  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(STI.getTargetABI());

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    // TODO: To be compatible with GCC's behaviors, we force them to have 4-byte
    // alignment. This behavior may be changed when RV32E/ILP32E is ratified.
    Align StackAlign(XLenInBytes);
    if (!EABI || XLen != 32)
      StackAlign = std::max(StackAlign, ArgFlags1.getNonZeroOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
    return false;
  }

  if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
  }

  return false;
}
static MCRegister allocateRVVReg(MVT ValVT, unsigned ValNo, CCState &State,
                                 const RISCVTargetLowering &TLI) {
  const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
  if (RC == &RISCV::VRRegClass) {
    // Assign the first mask argument to V0.
    // This is an interim calling convention and it may be changed in the
    // future.
    if (ValVT.getVectorElementType() == MVT::i1)
      if (MCRegister Reg = State.AllocateReg(RISCV::V0))
        return Reg;
    return State.AllocateReg(ArgVRs);
  }
  if (RC == &RISCV::VRM2RegClass)
    return State.AllocateReg(ArgVRM2s);
  if (RC == &RISCV::VRM4RegClass)
    return State.AllocateReg(ArgVRM4s);
  if (RC == &RISCV::VRM8RegClass)
    return State.AllocateReg(ArgVRM8s);
  if (RC == &RISCV::VRN2M1RegClass)
    return State.AllocateReg(ArgVRN2M1s);
  if (RC == &RISCV::VRN3M1RegClass)
    return State.AllocateReg(ArgVRN3M1s);
  if (RC == &RISCV::VRN4M1RegClass)
    return State.AllocateReg(ArgVRN4M1s);
  if (RC == &RISCV::VRN5M1RegClass)
    return State.AllocateReg(ArgVRN5M1s);
  if (RC == &RISCV::VRN6M1RegClass)
    return State.AllocateReg(ArgVRN6M1s);
  if (RC == &RISCV::VRN7M1RegClass)
    return State.AllocateReg(ArgVRN7M1s);
  if (RC == &RISCV::VRN8M1RegClass)
    return State.AllocateReg(ArgVRN8M1s);
  if (RC == &RISCV::VRN2M2RegClass)
    return State.AllocateReg(ArgVRN2M2s);
  if (RC == &RISCV::VRN3M2RegClass)
    return State.AllocateReg(ArgVRN3M2s);
  if (RC == &RISCV::VRN4M2RegClass)
    return State.AllocateReg(ArgVRN4M2s);
  if (RC == &RISCV::VRN2M4RegClass)
    return State.AllocateReg(ArgVRN2M4s);
  llvm_unreachable("Unhandled register class for ValueType");
}

// Implements the RISC-V calling convention. Returns true upon failure.
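// This hook is driven once per legalised value from the argument/return
// analysis loops in RISCVISelLowering; each call either appends a
// CCValAssign describing the value's register or stack location to State,
// or returns true to report that no rule matched.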
bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State, bool IsRet, Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const DataLayout &DL = MF.getDataLayout();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();

  unsigned XLen = Subtarget.getXLen();
  MVT XLenVT = Subtarget.getXLenVT();

  if (ArgFlags.isNest()) {
    // The static chain parameter must not be passed in normal argument
    // registers, so we assign t2/t3 for it as done in GCC's
    // __builtin_call_with_static_chain.
    bool HasCFBranch =
        Subtarget.hasStdExtZicfilp() &&
        MF.getFunction().getParent()->getModuleFlag("cf-protection-branch");

    // Normal: t2, Branch control flow protection: t3
    const auto StaticChainReg = HasCFBranch ? RISCV::X28 : RISCV::X7;

    RISCVABI::ABI ABI = Subtarget.getTargetABI();
    if (HasCFBranch &&
        (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E))
      reportFatalUsageError(
          "Nested functions with control flow protection are not "
          "usable with ILP32E or LP64E ABI.");
    if (MCRegister Reg = State.AllocateReg(StaticChainReg)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
  if ((!LocVT.isVector() || Subtarget.isPExtPackedType(ValVT)) && IsRet &&
      ValNo > 1)
    return true;

  // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
  // variadic argument, or if no F16/F32 argument registers are available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
  // variadic argument, or if no F64 argument registers are available.
  bool UseGPRForF64 = true;

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64E:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF16_F32 = ArgFlags.isVarArg();
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF16_F32 = ArgFlags.isVarArg();
    UseGPRForF64 = ArgFlags.isVarArg();
    break;
  }

  if ((LocVT == MVT::f16 || LocVT == MVT::bf16) && !UseGPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR16s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && !UseGPRForF16_F32) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR32s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && !UseGPRForF64) {
    if (MCRegister Reg = State.AllocateReg(ArgFPR64s)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (ValVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) {
    if (MCRegister Reg = State.AllocateReg(getArgGPR16s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (ValVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    if (MCRegister Reg = State.AllocateReg(getArgGPR32s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }
426
428
  // Zdinx uses a GPR without a bitcast when possible.
  if (LocVT == MVT::f64 && XLen == 64 && Subtarget.hasStdExtZdinx()) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // FP values smaller than XLen use a custom GPR assignment.
  if (LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      (LocVT == MVT::f32 && XLen == 64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Bitcast FP to GPR if we can use a GPR register.
  if ((XLen == 32 && LocVT == MVT::f32) || (XLen == 64 && LocVT == MVT::f64)) {
    if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
      LocVT = XLenVT;
      LocInfo = CCValAssign::BCvt;
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  // TODO: To be compatible with GCC's behaviors, we don't align registers
  // currently if we are using ILP32E calling convention. This behavior may be
  // changed when RV32E/ILP32E is ratified.
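  // For example, on RV32 a variadic double that would otherwise start in a1
  // skips a1 and is passed in the aligned pair a2/a3, so the in-register
  // layout matches the memory layout va_arg expects.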
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (ArgFlags.isVarArg() && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
      ABI != RISCVABI::ABI_ILP32E) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft float ABI or when floating point
  // registers are exhausted.
  if (XLen == 32 && LocVT == MVT::f64) {
    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
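    // Concretely: with two free GPRs the f64 travels as a register pair
    // (e.g. a0/a1); with exactly one free GPR the low half takes that
    // register and the high half takes a 4-byte stack slot; with none, the
    // whole value takes an 8-byte-aligned 8-byte stack slot.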
    MCRegister Reg = State.AllocateReg(ArgGPRs);
    if (!Reg) {
      int64_t StackOffset = State.AllocateStack(8, Align(8));
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    LocVT = MVT::i32;
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    MCRegister HiReg = State.AllocateReg(ArgGPRs);
    if (HiReg) {
      State.addLoc(
          CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
    } else {
      int64_t StackOffset = State.AllocateStack(4, Align(4));
      State.addLoc(
          CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
    }
    return false;
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if ((ValVT.isScalarInteger() || Subtarget.isPExtPackedType(ValVT)) &&
      ArgFlags.isSplitEnd() && PendingLocs.size() <= 1) {
    assert(PendingLocs.size() == 1 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(
        XLen, State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
        ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E);
  }

  // Split arguments might be passed indirectly, so keep track of the pending
  // values. Split vectors, excluding P extension packed vectors (see
  // isPExtPackedType), are passed via a mix of registers and indirectly, so
  // treat them as we would any other argument.
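  // For example, a scalar i256 on RV64 legalises into four i64 parts: all of
  // them are queued here, and once the split end is seen every part is given
  // the same indirect location below (a single pointer in a GPR or one stack
  // slot) rather than being spread across four registers.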
  if ((ValVT.isScalarInteger() || Subtarget.isPExtPackedType(ValVT)) &&
      (ArgFlags.isSplit() || !PendingLocs.empty())) {
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // Allocate to a register if possible, or else a stack slot.
  MCRegister Reg;
  unsigned StoreSizeBytes = XLen / 8;
  Align StackAlign = Align(XLen / 8);

  // FIXME: If P extension and V extension are enabled at the same time,
  // who should go first?
  if (!Subtarget.isPExtPackedType(ValVT) &&
      (ValVT.isVector() || ValVT.isRISCVVectorTuple())) {
    Reg = allocateRVVReg(ValVT, ValNo, State, TLI);
    if (Reg) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (ValVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
    } else {
      // For return values, the vector must be passed fully via registers or
      // via the stack.
      if (IsRet)
        return true;
      // Try using a GPR to pass the address
      if ((Reg = State.AllocateReg(ArgGPRs))) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else if (ValVT.isScalableVector()) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else {
        StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful for vXi1
        // vectors.
        StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
      }
    }
  } else {
    Reg = State.AllocateReg(ArgGPRs);
  }

  int64_t StackOffset =
      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        State.addLoc(CCValAssign::getReg(It.getValNo(), It.getValVT(), Reg,
                                         XLenVT, CCValAssign::Indirect));
      else
        State.addLoc(CCValAssign::getMem(It.getValNo(), It.getValVT(),
                                         StackOffset, XLenVT,
                                         CCValAssign::Indirect));
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert(((ValVT.isFloatingPoint() && !ValVT.isVector()) || LocVT == XLenVT ||
          Subtarget.isPExtPackedType(LocVT) ||
          (TLI.getSubtarget().hasVInstructions() &&
           (ValVT.isVector() || ValVT.isRISCVVectorTuple()))) &&
         "Expected an XLenVT or vector types at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  return false;
}

// FastCC yields less than a 1% performance improvement on some particular
// benchmarks, but in theory it may benefit some cases.
bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsRet,
                           Type *OrigTy) {
  const MachineFunction &MF = State.getMachineFunction();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
  RISCVABI::ABI ABI = Subtarget.getTargetABI();

  if ((LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) ||
      (LocVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())) {
    static const MCPhysReg FPR16List[] = {
        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
    if (MCRegister Reg = State.AllocateReg(FPR16List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Check if there is an available GPRF16 before hitting the stack.
  if (LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF16s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPRF32 before hitting the stack.
  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF32s(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  // Check if there is an available GPR before hitting the stack.
  if (LocVT == MVT::f64 && Subtarget.is64Bit() && Subtarget.hasStdExtZdinx()) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
        LocVT = XLenVT;
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);

  if (LocVT.isVector()) {
    if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) {
      // Fixed-length vectors are located in the corresponding scalable-vector
      // container types.
      if (LocVT.isFixedLengthVector()) {
        LocVT = TLI.getContainerForFixedLengthVector(LocVT);
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return false;
      }
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }

    // Pass scalable vectors indirectly. Pass fixed vectors indirectly if we
    // have a free GPR.
    if (LocVT.isScalableVector() ||
        State.getFirstUnallocated(ArgGPRs) != ArgGPRs.size()) {
      LocInfo = CCValAssign::Indirect;
      LocVT = XLenVT;
    }
  }

  if (LocVT == XLenVT) {
    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == XLenVT || LocVT == MVT::f16 || LocVT == MVT::bf16 ||
      LocVT == MVT::f32 || LocVT == MVT::f64 || LocVT.isFixedLengthVector()) {
    Align StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
    int64_t Offset = State.AllocateStack(LocVT.getStoreSize(), StackAlign);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }

  return true; // CC didn't match.
}

bool llvm::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                        CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                        Type *OrigTy, CCState &State) {
  if (ArgFlags.isNest()) {
    report_fatal_error(
        "Attribute 'nest' is not supported in GHC calling convention");
  }

  static const MCPhysReg GPRList[] = {
      RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
      RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
    //                        s1   s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
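    // Note that these (s1-s11, and fs0-fs11 below) are all callee-saved
    // registers, so GHC's pinned STG virtual registers survive calls to
    // ordinary functions without being spilled.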
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  const RISCVSubtarget &Subtarget =
      State.getMachineFunction().getSubtarget<RISCVSubtarget>();

  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    // Pass in STG registers: F1, ..., F6
    //                        fs0 ... fs5
    static const MCPhysReg FPR32List[] = {RISCV::F8_F,  RISCV::F9_F,
                                          RISCV::F18_F, RISCV::F19_F,
                                          RISCV::F20_F, RISCV::F21_F};
    if (MCRegister Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    // Pass in STG registers: D1, ..., D6
    //                        fs6 ... fs11
    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
                                          RISCV::F24_D, RISCV::F25_D,
                                          RISCV::F26_D, RISCV::F27_D};
    if (MCRegister Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
    static const MCPhysReg GPR32List[] = {
        RISCV::X9_W,  RISCV::X18_W, RISCV::X19_W, RISCV::X20_W,
        RISCV::X21_W, RISCV::X22_W, RISCV::X23_W, RISCV::X24_W,
        RISCV::X25_W, RISCV::X26_W, RISCV::X27_W};
    if (MCRegister Reg = State.AllocateReg(GPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() && Subtarget.is64Bit()) {
    if (MCRegister Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  report_fatal_error("No registers left in GHC calling convention");
  return true;
}