Line data Source code
1 : //=== ARMCallingConv.h - ARM Custom Calling Convention Routines -*- C++ -*-===//
2 : //
3 : // The LLVM Compiler Infrastructure
4 : //
5 : // This file is distributed under the University of Illinois Open Source
6 : // License. See LICENSE.TXT for details.
7 : //
8 : //===----------------------------------------------------------------------===//
9 : //
10 : // This file contains the custom routines for the ARM Calling Convention that
11 : // aren't done by tablegen.
12 : //
13 : //===----------------------------------------------------------------------===//
14 :
15 : #ifndef LLVM_LIB_TARGET_ARM_ARMCALLINGCONV_H
16 : #define LLVM_LIB_TARGET_ARM_ARMCALLINGCONV_H
17 :
18 : #include "ARM.h"
19 : #include "ARMBaseInstrInfo.h"
20 : #include "ARMSubtarget.h"
21 : #include "llvm/CodeGen/CallingConvLower.h"
22 : #include "llvm/CodeGen/TargetInstrInfo.h"
23 : #include "llvm/IR/CallingConv.h"
24 :
25 : namespace llvm {
26 :
27 : // APCS f64 is in register pairs, possibly split to stack
28 888 : static bool f64AssignAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
29 : CCValAssign::LocInfo &LocInfo,
30 : CCState &State, bool CanFail) {
31 : static const MCPhysReg RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
32 :
33 : // Try to get the first register.
34 888 : if (unsigned Reg = State.AllocateReg(RegList))
35 804 : State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
36 : else {
37 : // For the 2nd half of a v2f64, do not fail.
38 486 : if (CanFail)
39 : return false;
40 :
41 : // Put the whole thing on the stack.
42 10 : State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
43 : State.AllocateStack(8, 4),
44 5 : LocVT, LocInfo));
45 5 : return true;
46 : }
47 :
48 : // Try to get the second register.
49 402 : if (unsigned Reg = State.AllocateReg(RegList))
50 728 : State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
51 : else
52 38 : State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
53 : State.AllocateStack(4, 4),
54 38 : LocVT, LocInfo));
55 : return true;
56 : }
57 :
58 0 : static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
59 : CCValAssign::LocInfo &LocInfo,
60 : ISD::ArgFlagsTy &ArgFlags,
61 : CCState &State) {
62 0 : if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
63 0 : return false;
64 0 : if (LocVT == MVT::v2f64 &&
65 0 : !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
66 0 : return false;
67 : return true; // we handled it
68 : }
69 :
70 : // AAPCS f64 is in aligned register pairs
71 1882 : static bool f64AssignAAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
72 : CCValAssign::LocInfo &LocInfo,
73 : CCState &State, bool CanFail) {
74 : static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 };
75 : static const MCPhysReg LoRegList[] = { ARM::R1, ARM::R3 };
76 : static const MCPhysReg ShadowRegList[] = { ARM::R0, ARM::R1 };
77 : static const MCPhysReg GPRArgRegs[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
78 :
79 1882 : unsigned Reg = State.AllocateReg(HiRegList, ShadowRegList);
80 1882 : if (Reg == 0) {
81 :
82 : // If we had R3 unallocated only, now we still must to waste it.
83 420 : Reg = State.AllocateReg(GPRArgRegs);
84 : assert((!Reg || Reg == ARM::R3) && "Wrong GPRs usage for f64");
85 :
86 : // For the 2nd half of a v2f64, do not just fail.
87 420 : if (CanFail)
88 : return false;
89 :
90 : // Put the whole thing on the stack.
91 30 : State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
92 : State.AllocateStack(8, 8),
93 15 : LocVT, LocInfo));
94 15 : return true;
95 : }
96 :
97 : unsigned i;
98 2070 : for (i = 0; i < 2; ++i)
99 2070 : if (HiRegList[i] == Reg)
100 : break;
101 :
102 1462 : unsigned T = State.AllocateReg(LoRegList[i]);
103 : (void)T;
104 : assert(T == LoRegList[i] && "Could not allocate register");
105 :
106 2924 : State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
107 2924 : State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
108 2924 : LocVT, LocInfo));
109 1462 : return true;
110 : }
111 :
112 0 : static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
113 : CCValAssign::LocInfo &LocInfo,
114 : ISD::ArgFlagsTy &ArgFlags,
115 : CCState &State) {
116 0 : if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
117 0 : return false;
118 0 : if (LocVT == MVT::v2f64 &&
119 0 : !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
120 0 : return false;
121 : return true; // we handled it
122 : }
123 :
124 6139 : static bool f64RetAssign(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
125 : CCValAssign::LocInfo &LocInfo, CCState &State) {
126 : static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 };
127 : static const MCPhysReg LoRegList[] = { ARM::R1, ARM::R3 };
128 :
129 6139 : unsigned Reg = State.AllocateReg(HiRegList, LoRegList);
130 6139 : if (Reg == 0)
131 : return false; // we didn't handle it
132 :
133 : unsigned i;
134 7841 : for (i = 0; i < 2; ++i)
135 7841 : if (HiRegList[i] == Reg)
136 : break;
137 :
138 12156 : State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
139 18234 : State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
140 12156 : LocVT, LocInfo));
141 6078 : return true;
142 : }
143 :
144 0 : static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
145 : CCValAssign::LocInfo &LocInfo,
146 : ISD::ArgFlagsTy &ArgFlags,
147 : CCState &State) {
148 0 : if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
149 0 : return false;
150 0 : if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
151 0 : return false;
152 : return true; // we handled it
153 : }
154 :
// Custom AAPCS handler for f64 and v2f64 return values. The register-pair
// assignment is identical to APCS, so simply delegate to that handler.
static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                       CCValAssign::LocInfo &LocInfo,
                                       ISD::ArgFlagsTy &ArgFlags,
                                       CCState &State) {
  return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                   State);
}
162 :
// Register lists used by CC_ARM_AAPCS_Custom_Aggregate below when trying to
// place a homogeneous aggregate in a contiguous block of registers.

// Core (integer) argument registers r0-r3.
static const MCPhysReg RRegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };

// VFP single-precision argument registers s0-s15.
static const MCPhysReg SRegList[] = { ARM::S0, ARM::S1, ARM::S2, ARM::S3,
                                      ARM::S4, ARM::S5, ARM::S6, ARM::S7,
                                      ARM::S8, ARM::S9, ARM::S10, ARM::S11,
                                      ARM::S12, ARM::S13, ARM::S14, ARM::S15 };
// VFP double-precision argument registers d0-d7.
static const MCPhysReg DRegList[] = { ARM::D0, ARM::D1, ARM::D2, ARM::D3,
                                      ARM::D4, ARM::D5, ARM::D6, ARM::D7 };
// Quad (128-bit) argument registers q0-q3.
static const MCPhysReg QRegList[] = { ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3 };
172 :
173 :
// Allocate part of an AAPCS HFA or HVA. We assume that each member of the HA
// has InConsecutiveRegs set, and that the last member also has
// InConsecutiveRegsLast set. We must process all members of the HA before
// we can allocate it, as we need to know the total number of registers that
// will be needed in order to (attempt to) allocate a contiguous block.
//
// Until the last member arrives, each call only appends a pending location
// and returns true; the final call performs the actual allocation for all
// accumulated members at once.
static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned &ValNo, MVT &ValVT,
                                          MVT &LocVT,
                                          CCValAssign::LocInfo &LocInfo,
                                          ISD::ArgFlagsTy &ArgFlags,
                                          CCState &State) {
  SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();

  // AAPCS HFAs must have 1-4 elements, all of the same type
  if (PendingMembers.size() > 0)
    assert(PendingMembers[0].getLocVT() == LocVT);

  // Add the argument to the list to be allocated once we know the size of the
  // aggregate. Store the type's required alignment as extra info for later: in
  // the [N x i64] case all trace has been removed by the time we actually get
  // to do allocation.
  PendingMembers.push_back(CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo,
                                                   ArgFlags.getOrigAlign()));

  // Not the last member yet: defer allocation until the whole HA is seen.
  if (!ArgFlags.isInConsecutiveRegsLast())
    return true;

  // Try to allocate a contiguous block of registers, each of the correct
  // size to hold one member.
  auto &DL = State.getMachineFunction().getDataLayout();
  unsigned StackAlign = DL.getStackAlignment();
  // The aggregate never needs more alignment than the stack provides.
  unsigned Align = std::min(PendingMembers[0].getExtraInfo(), StackAlign);

  // Select the candidate register file from the member type.
  ArrayRef<MCPhysReg> RegList;
  switch (LocVT.SimpleTy) {
  case MVT::i32: {
    RegList = RRegList;
    unsigned RegIdx = State.getFirstUnallocated(RegList);

    // First consume all registers that would give an unaligned object. Whether
    // we go on stack or in regs, no-one will be using them in future.
    unsigned RegAlign = alignTo(Align, 4) / 4;
    while (RegIdx % RegAlign != 0 && RegIdx < RegList.size())
      State.AllocateReg(RegList[RegIdx++]);

    break;
  }
  case MVT::f16:
  case MVT::f32:
    RegList = SRegList;
    break;
  case MVT::v4f16:
  case MVT::f64:
    RegList = DRegList;
    break;
  case MVT::v8f16:
  case MVT::v2f64:
    RegList = QRegList;
    break;
  default:
    llvm_unreachable("Unexpected member type for block aggregate");
    break;
  }

  // Happy path: a contiguous run of registers fits the whole aggregate.
  unsigned RegResult = State.AllocateRegBlock(RegList, PendingMembers.size());
  if (RegResult) {
    for (SmallVectorImpl<CCValAssign>::iterator It = PendingMembers.begin();
         It != PendingMembers.end(); ++It) {
      // Members occupy consecutive registers starting at RegResult.
      It->convertToReg(RegResult);
      State.addLoc(*It);
      ++RegResult;
    }
    PendingMembers.clear();
    return true;
  }

  // Register allocation failed, we'll be needing the stack
  unsigned Size = LocVT.getSizeInBits() / 8;
  if (LocVT == MVT::i32 && State.getNextStackOffset() == 0) {
    // If nothing else has used the stack until this point, a non-HFA aggregate
    // can be split between regs and stack.
    unsigned RegIdx = State.getFirstUnallocated(RegList);
    for (auto &It : PendingMembers) {
      if (RegIdx >= RegList.size())
        It.convertToMem(State.AllocateStack(Size, Size));
      else
        It.convertToReg(State.AllocateReg(RegList[RegIdx++]));

      State.addLoc(It);
    }
    PendingMembers.clear();
    return true;
  } else if (LocVT != MVT::i32)
    RegList = SRegList;

  // Mark all regs as unavailable (AAPCS rule C.2.vfp for VFP, C.6 for core)
  for (auto Reg : RegList)
    State.AllocateReg(Reg);

  // After the first item has been allocated, the rest are packed as tightly as
  // possible. (E.g. an incoming i64 would have starting Align of 8, but we'll
  // be allocating a bunch of i32 slots).
  unsigned RestAlign = std::min(Align, Size);

  for (auto &It : PendingMembers) {
    It.convertToMem(State.AllocateStack(Size, Align));
    State.addLoc(It);
    Align = RestAlign;
  }

  // All pending members have now been allocated
  PendingMembers.clear();

  // This will be allocated by the last member of the aggregate
  return true;
}
289 :
290 : } // End llvm namespace
291 :
292 : #endif
|