Source file: llvm/lib/Target/AVR/AVRISelLowering.cpp
Static-analyzer finding: line 1037, column 7 — forming a reference to a null pointer.
//===-- AVRISelLowering.cpp - AVR DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AVR uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

14 | #include "AVRISelLowering.h" | |||
15 | ||||
16 | #include "llvm/ADT/StringSwitch.h" | |||
17 | #include "llvm/CodeGen/CallingConvLower.h" | |||
18 | #include "llvm/CodeGen/MachineFrameInfo.h" | |||
19 | #include "llvm/CodeGen/MachineInstrBuilder.h" | |||
20 | #include "llvm/CodeGen/MachineRegisterInfo.h" | |||
21 | #include "llvm/CodeGen/SelectionDAG.h" | |||
22 | #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" | |||
23 | #include "llvm/IR/Function.h" | |||
24 | #include "llvm/Support/ErrorHandling.h" | |||
25 | ||||
26 | #include "AVR.h" | |||
27 | #include "AVRMachineFunctionInfo.h" | |||
28 | #include "AVRSubtarget.h" | |||
29 | #include "AVRTargetMachine.h" | |||
30 | #include "MCTargetDesc/AVRMCTargetDesc.h" | |||
31 | ||||
32 | namespace llvm { | |||
33 | ||||
34 | AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM, | |||
35 | const AVRSubtarget &STI) | |||
36 | : TargetLowering(TM), Subtarget(STI) { | |||
37 | // Set up the register classes. | |||
38 | addRegisterClass(MVT::i8, &AVR::GPR8RegClass); | |||
39 | addRegisterClass(MVT::i16, &AVR::DREGSRegClass); | |||
40 | ||||
41 | // Compute derived properties from the register classes. | |||
42 | computeRegisterProperties(Subtarget.getRegisterInfo()); | |||
43 | ||||
44 | setBooleanContents(ZeroOrOneBooleanContent); | |||
45 | setBooleanVectorContents(ZeroOrOneBooleanContent); | |||
46 | setSchedulingPreference(Sched::RegPressure); | |||
47 | setStackPointerRegisterToSaveRestore(AVR::SP); | |||
48 | setSupportsUnalignedAtomics(true); | |||
49 | ||||
50 | setOperationAction(ISD::GlobalAddress, MVT::i16, Custom); | |||
51 | setOperationAction(ISD::BlockAddress, MVT::i16, Custom); | |||
52 | ||||
53 | setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); | |||
54 | setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); | |||
55 | setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand); | |||
56 | setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand); | |||
57 | ||||
58 | for (MVT VT : MVT::integer_valuetypes()) { | |||
59 | for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) { | |||
60 | setLoadExtAction(N, VT, MVT::i1, Promote); | |||
61 | setLoadExtAction(N, VT, MVT::i8, Expand); | |||
62 | } | |||
63 | } | |||
64 | ||||
65 | setTruncStoreAction(MVT::i16, MVT::i8, Expand); | |||
66 | ||||
67 | for (MVT VT : MVT::integer_valuetypes()) { | |||
68 | setOperationAction(ISD::ADDC, VT, Legal); | |||
69 | setOperationAction(ISD::SUBC, VT, Legal); | |||
70 | setOperationAction(ISD::ADDE, VT, Legal); | |||
71 | setOperationAction(ISD::SUBE, VT, Legal); | |||
72 | } | |||
73 | ||||
74 | // sub (x, imm) gets canonicalized to add (x, -imm), so for illegal types | |||
75 | // revert into a sub since we don't have an add with immediate instruction. | |||
76 | setOperationAction(ISD::ADD, MVT::i32, Custom); | |||
77 | setOperationAction(ISD::ADD, MVT::i64, Custom); | |||
78 | ||||
79 | // our shift instructions are only able to shift 1 bit at a time, so handle | |||
80 | // this in a custom way. | |||
81 | setOperationAction(ISD::SRA, MVT::i8, Custom); | |||
82 | setOperationAction(ISD::SHL, MVT::i8, Custom); | |||
83 | setOperationAction(ISD::SRL, MVT::i8, Custom); | |||
84 | setOperationAction(ISD::SRA, MVT::i16, Custom); | |||
85 | setOperationAction(ISD::SHL, MVT::i16, Custom); | |||
86 | setOperationAction(ISD::SRL, MVT::i16, Custom); | |||
87 | setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand); | |||
88 | setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand); | |||
89 | setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand); | |||
90 | ||||
91 | setOperationAction(ISD::ROTL, MVT::i8, Custom); | |||
92 | setOperationAction(ISD::ROTL, MVT::i16, Expand); | |||
93 | setOperationAction(ISD::ROTR, MVT::i8, Custom); | |||
94 | setOperationAction(ISD::ROTR, MVT::i16, Expand); | |||
95 | ||||
96 | setOperationAction(ISD::BR_CC, MVT::i8, Custom); | |||
97 | setOperationAction(ISD::BR_CC, MVT::i16, Custom); | |||
98 | setOperationAction(ISD::BR_CC, MVT::i32, Custom); | |||
99 | setOperationAction(ISD::BR_CC, MVT::i64, Custom); | |||
100 | setOperationAction(ISD::BRCOND, MVT::Other, Expand); | |||
101 | ||||
102 | setOperationAction(ISD::SELECT_CC, MVT::i8, Custom); | |||
103 | setOperationAction(ISD::SELECT_CC, MVT::i16, Custom); | |||
104 | setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); | |||
105 | setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); | |||
106 | setOperationAction(ISD::SETCC, MVT::i8, Custom); | |||
107 | setOperationAction(ISD::SETCC, MVT::i16, Custom); | |||
108 | setOperationAction(ISD::SETCC, MVT::i32, Custom); | |||
109 | setOperationAction(ISD::SETCC, MVT::i64, Custom); | |||
110 | setOperationAction(ISD::SELECT, MVT::i8, Expand); | |||
111 | setOperationAction(ISD::SELECT, MVT::i16, Expand); | |||
112 | ||||
113 | setOperationAction(ISD::BSWAP, MVT::i16, Expand); | |||
114 | ||||
115 | // Add support for postincrement and predecrement load/stores. | |||
116 | setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal); | |||
117 | setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal); | |||
118 | setIndexedLoadAction(ISD::PRE_DEC, MVT::i8, Legal); | |||
119 | setIndexedLoadAction(ISD::PRE_DEC, MVT::i16, Legal); | |||
120 | setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal); | |||
121 | setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal); | |||
122 | setIndexedStoreAction(ISD::PRE_DEC, MVT::i8, Legal); | |||
123 | setIndexedStoreAction(ISD::PRE_DEC, MVT::i16, Legal); | |||
124 | ||||
125 | setOperationAction(ISD::BR_JT, MVT::Other, Expand); | |||
126 | ||||
127 | setOperationAction(ISD::VASTART, MVT::Other, Custom); | |||
128 | setOperationAction(ISD::VAEND, MVT::Other, Expand); | |||
129 | setOperationAction(ISD::VAARG, MVT::Other, Expand); | |||
130 | setOperationAction(ISD::VACOPY, MVT::Other, Expand); | |||
131 | ||||
132 | // Atomic operations which must be lowered to rtlib calls | |||
133 | for (MVT VT : MVT::integer_valuetypes()) { | |||
134 | setOperationAction(ISD::ATOMIC_SWAP, VT, Expand); | |||
135 | setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand); | |||
136 | setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand); | |||
137 | setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand); | |||
138 | setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand); | |||
139 | setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand); | |||
140 | setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand); | |||
141 | } | |||
142 | ||||
143 | // Division/remainder | |||
144 | setOperationAction(ISD::UDIV, MVT::i8, Expand); | |||
145 | setOperationAction(ISD::UDIV, MVT::i16, Expand); | |||
146 | setOperationAction(ISD::UREM, MVT::i8, Expand); | |||
147 | setOperationAction(ISD::UREM, MVT::i16, Expand); | |||
148 | setOperationAction(ISD::SDIV, MVT::i8, Expand); | |||
149 | setOperationAction(ISD::SDIV, MVT::i16, Expand); | |||
150 | setOperationAction(ISD::SREM, MVT::i8, Expand); | |||
151 | setOperationAction(ISD::SREM, MVT::i16, Expand); | |||
152 | ||||
153 | // Make division and modulus custom | |||
154 | for (MVT VT : MVT::integer_valuetypes()) { | |||
155 | setOperationAction(ISD::UDIVREM, VT, Custom); | |||
156 | setOperationAction(ISD::SDIVREM, VT, Custom); | |||
157 | } | |||
158 | ||||
159 | // Do not use MUL. The AVR instructions are closer to SMUL_LOHI &co. | |||
160 | setOperationAction(ISD::MUL, MVT::i8, Expand); | |||
161 | setOperationAction(ISD::MUL, MVT::i16, Expand); | |||
162 | ||||
163 | // Expand 16 bit multiplications. | |||
164 | setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand); | |||
165 | setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand); | |||
166 | ||||
167 | // Expand multiplications to libcalls when there is | |||
168 | // no hardware MUL. | |||
169 | if (!Subtarget.supportsMultiplication()) { | |||
170 | setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand); | |||
171 | setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand); | |||
172 | } | |||
173 | ||||
174 | for (MVT VT : MVT::integer_valuetypes()) { | |||
175 | setOperationAction(ISD::MULHS, VT, Expand); | |||
176 | setOperationAction(ISD::MULHU, VT, Expand); | |||
177 | } | |||
178 | ||||
179 | for (MVT VT : MVT::integer_valuetypes()) { | |||
180 | setOperationAction(ISD::CTPOP, VT, Expand); | |||
181 | setOperationAction(ISD::CTLZ, VT, Expand); | |||
182 | setOperationAction(ISD::CTTZ, VT, Expand); | |||
183 | } | |||
184 | ||||
185 | for (MVT VT : MVT::integer_valuetypes()) { | |||
186 | setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); | |||
187 | // TODO: The generated code is pretty poor. Investigate using the | |||
188 | // same "shift and subtract with carry" trick that we do for | |||
189 | // extending 8-bit to 16-bit. This may require infrastructure | |||
190 | // improvements in how we treat 16-bit "registers" to be feasible. | |||
191 | } | |||
192 | ||||
193 | // Division rtlib functions (not supported) | |||
194 | setLibcallName(RTLIB::SDIV_I8, nullptr); | |||
195 | setLibcallName(RTLIB::SDIV_I16, nullptr); | |||
196 | setLibcallName(RTLIB::SDIV_I32, nullptr); | |||
197 | setLibcallName(RTLIB::SDIV_I64, nullptr); | |||
198 | setLibcallName(RTLIB::SDIV_I128, nullptr); | |||
199 | setLibcallName(RTLIB::UDIV_I8, nullptr); | |||
200 | setLibcallName(RTLIB::UDIV_I16, nullptr); | |||
201 | setLibcallName(RTLIB::UDIV_I32, nullptr); | |||
202 | setLibcallName(RTLIB::UDIV_I64, nullptr); | |||
203 | setLibcallName(RTLIB::UDIV_I128, nullptr); | |||
204 | ||||
205 | // Modulus rtlib functions (not supported) | |||
206 | setLibcallName(RTLIB::SREM_I8, nullptr); | |||
207 | setLibcallName(RTLIB::SREM_I16, nullptr); | |||
208 | setLibcallName(RTLIB::SREM_I32, nullptr); | |||
209 | setLibcallName(RTLIB::SREM_I64, nullptr); | |||
210 | setLibcallName(RTLIB::SREM_I128, nullptr); | |||
211 | setLibcallName(RTLIB::UREM_I8, nullptr); | |||
212 | setLibcallName(RTLIB::UREM_I16, nullptr); | |||
213 | setLibcallName(RTLIB::UREM_I32, nullptr); | |||
214 | setLibcallName(RTLIB::UREM_I64, nullptr); | |||
215 | setLibcallName(RTLIB::UREM_I128, nullptr); | |||
216 | ||||
217 | // Division and modulus rtlib functions | |||
218 | setLibcallName(RTLIB::SDIVREM_I8, "__divmodqi4"); | |||
219 | setLibcallName(RTLIB::SDIVREM_I16, "__divmodhi4"); | |||
220 | setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4"); | |||
221 | setLibcallName(RTLIB::SDIVREM_I64, "__divmoddi4"); | |||
222 | setLibcallName(RTLIB::SDIVREM_I128, "__divmodti4"); | |||
223 | setLibcallName(RTLIB::UDIVREM_I8, "__udivmodqi4"); | |||
224 | setLibcallName(RTLIB::UDIVREM_I16, "__udivmodhi4"); | |||
225 | setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4"); | |||
226 | setLibcallName(RTLIB::UDIVREM_I64, "__udivmoddi4"); | |||
227 | setLibcallName(RTLIB::UDIVREM_I128, "__udivmodti4"); | |||
228 | ||||
229 | // Several of the runtime library functions use a special calling conv | |||
230 | setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::AVR_BUILTIN); | |||
231 | setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::AVR_BUILTIN); | |||
232 | setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::AVR_BUILTIN); | |||
233 | setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::AVR_BUILTIN); | |||
234 | ||||
235 | // Trigonometric rtlib functions | |||
236 | setLibcallName(RTLIB::SIN_F32, "sin"); | |||
237 | setLibcallName(RTLIB::COS_F32, "cos"); | |||
238 | ||||
239 | setMinFunctionAlignment(Align(2)); | |||
240 | setMinimumJumpTableEntries(UINT_MAX(2147483647 *2U +1U)); | |||
241 | } | |||
242 | ||||
243 | const char *AVRTargetLowering::getTargetNodeName(unsigned Opcode) const { | |||
244 | #define NODE(name) \ | |||
245 | case AVRISD::name: \ | |||
246 | return #name | |||
247 | ||||
248 | switch (Opcode) { | |||
249 | default: | |||
250 | return nullptr; | |||
251 | NODE(RET_FLAG); | |||
252 | NODE(RETI_FLAG); | |||
253 | NODE(CALL); | |||
254 | NODE(WRAPPER); | |||
255 | NODE(LSL); | |||
256 | NODE(LSR); | |||
257 | NODE(ROL); | |||
258 | NODE(ROR); | |||
259 | NODE(ASR); | |||
260 | NODE(LSLLOOP); | |||
261 | NODE(LSRLOOP); | |||
262 | NODE(ROLLOOP); | |||
263 | NODE(RORLOOP); | |||
264 | NODE(ASRLOOP); | |||
265 | NODE(BRCOND); | |||
266 | NODE(CMP); | |||
267 | NODE(CMPC); | |||
268 | NODE(TST); | |||
269 | NODE(SELECT_CC); | |||
270 | #undef NODE | |||
271 | } | |||
272 | } | |||
273 | ||||
274 | EVT AVRTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, | |||
275 | EVT VT) const { | |||
276 | assert(!VT.isVector() && "No AVR SetCC type for vectors!")((!VT.isVector() && "No AVR SetCC type for vectors!") ? static_cast<void> (0) : __assert_fail ("!VT.isVector() && \"No AVR SetCC type for vectors!\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AVR/AVRISelLowering.cpp" , 276, __PRETTY_FUNCTION__)); | |||
277 | return MVT::i8; | |||
278 | } | |||
279 | ||||
280 | SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const { | |||
281 | //:TODO: this function has to be completely rewritten to produce optimal | |||
282 | // code, for now it's producing very long but correct code. | |||
283 | unsigned Opc8; | |||
284 | const SDNode *N = Op.getNode(); | |||
285 | EVT VT = Op.getValueType(); | |||
286 | SDLoc dl(N); | |||
287 | ||||
288 | // Expand non-constant shifts to loops. | |||
289 | if (!isa<ConstantSDNode>(N->getOperand(1))) { | |||
290 | switch (Op.getOpcode()) { | |||
291 | default: | |||
292 | llvm_unreachable("Invalid shift opcode!")::llvm::llvm_unreachable_internal("Invalid shift opcode!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AVR/AVRISelLowering.cpp" , 292); | |||
293 | case ISD::SHL: | |||
294 | return DAG.getNode(AVRISD::LSLLOOP, dl, VT, N->getOperand(0), | |||
295 | N->getOperand(1)); | |||
296 | case ISD::SRL: | |||
297 | return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0), | |||
298 | N->getOperand(1)); | |||
299 | case ISD::ROTL: | |||
300 | return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0), | |||
301 | N->getOperand(1)); | |||
302 | case ISD::ROTR: | |||
303 | return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0), | |||
304 | N->getOperand(1)); | |||
305 | case ISD::SRA: | |||
306 | return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0), | |||
307 | N->getOperand(1)); | |||
308 | } | |||
309 | } | |||
310 | ||||
311 | uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); | |||
312 | SDValue Victim = N->getOperand(0); | |||
313 | ||||
314 | switch (Op.getOpcode()) { | |||
315 | case ISD::SRA: | |||
316 | Opc8 = AVRISD::ASR; | |||
317 | break; | |||
318 | case ISD::ROTL: | |||
319 | Opc8 = AVRISD::ROL; | |||
320 | break; | |||
321 | case ISD::ROTR: | |||
322 | Opc8 = AVRISD::ROR; | |||
323 | break; | |||
324 | case ISD::SRL: | |||
325 | Opc8 = AVRISD::LSR; | |||
326 | break; | |||
327 | case ISD::SHL: | |||
328 | Opc8 = AVRISD::LSL; | |||
329 | break; | |||
330 | default: | |||
331 | llvm_unreachable("Invalid shift opcode")::llvm::llvm_unreachable_internal("Invalid shift opcode", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AVR/AVRISelLowering.cpp" , 331); | |||
332 | } | |||
333 | ||||
334 | while (ShiftAmount--) { | |||
335 | Victim = DAG.getNode(Opc8, dl, VT, Victim); | |||
336 | } | |||
337 | ||||
338 | return Victim; | |||
339 | } | |||
340 | ||||
341 | SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { | |||
342 | unsigned Opcode = Op->getOpcode(); | |||
343 | assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&(((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && "Invalid opcode for Div/Rem lowering") ? static_cast<void > (0) : __assert_fail ("(Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && \"Invalid opcode for Div/Rem lowering\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AVR/AVRISelLowering.cpp" , 344, __PRETTY_FUNCTION__)) | |||
344 | "Invalid opcode for Div/Rem lowering")(((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && "Invalid opcode for Div/Rem lowering") ? static_cast<void > (0) : __assert_fail ("(Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && \"Invalid opcode for Div/Rem lowering\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AVR/AVRISelLowering.cpp" , 344, __PRETTY_FUNCTION__)); | |||
345 | bool IsSigned = (Opcode == ISD::SDIVREM); | |||
346 | EVT VT = Op->getValueType(0); | |||
347 | Type *Ty = VT.getTypeForEVT(*DAG.getContext()); | |||
348 | ||||
349 | RTLIB::Libcall LC; | |||
350 | switch (VT.getSimpleVT().SimpleTy) { | |||
351 | default: | |||
352 | llvm_unreachable("Unexpected request for libcall!")::llvm::llvm_unreachable_internal("Unexpected request for libcall!" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AVR/AVRISelLowering.cpp" , 352); | |||
353 | case MVT::i8: | |||
354 | LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; | |||
355 | break; | |||
356 | case MVT::i16: | |||
357 | LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; | |||
358 | break; | |||
359 | case MVT::i32: | |||
360 | LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; | |||
361 | break; | |||
362 | case MVT::i64: | |||
363 | LC = IsSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; | |||
364 | break; | |||
365 | case MVT::i128: | |||
366 | LC = IsSigned ? RTLIB::SDIVREM_I128 : RTLIB::UDIVREM_I128; | |||
367 | break; | |||
368 | } | |||
369 | ||||
370 | SDValue InChain = DAG.getEntryNode(); | |||
371 | ||||
372 | TargetLowering::ArgListTy Args; | |||
373 | TargetLowering::ArgListEntry Entry; | |||
374 | for (SDValue const &Value : Op->op_values()) { | |||
375 | Entry.Node = Value; | |||
376 | Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext()); | |||
377 | Entry.IsSExt = IsSigned; | |||
378 | Entry.IsZExt = !IsSigned; | |||
379 | Args.push_back(Entry); | |||
380 | } | |||
381 | ||||
382 | SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), | |||
383 | getPointerTy(DAG.getDataLayout())); | |||
384 | ||||
385 | Type *RetTy = (Type *)StructType::get(Ty, Ty); | |||
386 | ||||
387 | SDLoc dl(Op); | |||
388 | TargetLowering::CallLoweringInfo CLI(DAG); | |||
389 | CLI.setDebugLoc(dl) | |||
390 | .setChain(InChain) | |||
391 | .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)) | |||
392 | .setInRegister() | |||
393 | .setSExtResult(IsSigned) | |||
394 | .setZExtResult(!IsSigned); | |||
395 | ||||
396 | std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); | |||
397 | return CallInfo.first; | |||
398 | } | |||
399 | ||||
400 | SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op, | |||
401 | SelectionDAG &DAG) const { | |||
402 | auto DL = DAG.getDataLayout(); | |||
403 | ||||
404 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); | |||
405 | int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); | |||
406 | ||||
407 | // Create the TargetGlobalAddress node, folding in the constant offset. | |||
408 | SDValue Result = | |||
409 | DAG.getTargetGlobalAddress(GV, SDLoc(Op), getPointerTy(DL), Offset); | |||
410 | return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result); | |||
411 | } | |||
412 | ||||
413 | SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op, | |||
414 | SelectionDAG &DAG) const { | |||
415 | auto DL = DAG.getDataLayout(); | |||
416 | const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); | |||
417 | ||||
418 | SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(DL)); | |||
419 | ||||
420 | return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result); | |||
421 | } | |||
422 | ||||
423 | /// IntCCToAVRCC - Convert a DAG integer condition code to an AVR CC. | |||
424 | static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC) { | |||
425 | switch (CC) { | |||
426 | default: | |||
427 | llvm_unreachable("Unknown condition code!")::llvm::llvm_unreachable_internal("Unknown condition code!", "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AVR/AVRISelLowering.cpp" , 427); | |||
428 | case ISD::SETEQ: | |||
429 | return AVRCC::COND_EQ; | |||
430 | case ISD::SETNE: | |||
431 | return AVRCC::COND_NE; | |||
432 | case ISD::SETGE: | |||
433 | return AVRCC::COND_GE; | |||
434 | case ISD::SETLT: | |||
435 | return AVRCC::COND_LT; | |||
436 | case ISD::SETUGE: | |||
437 | return AVRCC::COND_SH; | |||
438 | case ISD::SETULT: | |||
439 | return AVRCC::COND_LO; | |||
440 | } | |||
441 | } | |||
442 | ||||
/// Returns appropriate AVR CMP/CMPC nodes and corresponding condition code for
/// the given operands.
///
/// First canonicalizes CC (by swapping LHS/RHS and/or rewriting constant RHS)
/// into one of the conditions intCCToAVRCC supports, then emits the compare as
/// a CMP followed by glued CMPC nodes over 16-bit pieces (or a single TST of
/// the sign byte when only the sign matters). On return AVRcc holds the AVR
/// condition constant to branch/select on.
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &AVRcc, SelectionDAG &DAG,
                                     SDLoc DL) const {
  SDValue Cmp;
  EVT VT = LHS.getValueType();
  // When true, only the sign bit of the high byte is tested (TST) and AVRcc
  // has already been set; no CMP/CMPC chain is needed.
  bool UseTest = false;

  switch (CC) {
  default:
    break;
  case ISD::SETLE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETGE;
    break;
  }
  case ISD::SETGT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case -1: {
        // When doing lhs > -1 use a tst instruction on the top part of lhs
        // and use brpl instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_PL, DL, MVT::i8);
        break;
      }
      case 0: {
        // Turn lhs > 0 into 0 < lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETLT;
        break;
      }
      default: {
        // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows
        // us to fold the constant into the cmp instruction.
        RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      }
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETLT;
    break;
  }
  case ISD::SETLT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case 1: {
        // Turn lhs < 1 into 0 >= lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      case 0: {
        // When doing lhs < 0 use a tst instruction on the top part of lhs
        // and use brmi instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_MI, DL, MVT::i8);
        break;
      }
      }
    }
    break;
  }
  case ISD::SETULE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETUGE;
    break;
  }
  case ISD::SETUGT: {
    // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows us to
    // fold the constant into the cmp instruction.
    // NOTE(review): C->getSExtValue() + 1 looks like it could wrap for a
    // maximal unsigned RHS — confirm upstream canonicalization rules that out.
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
      CC = ISD::SETUGE;
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETULT;
    break;
  }
  }

  // Expand 32 and 64 bit comparisons with custom CMP and CMPC nodes instead of
  // using the default and/or/xor expansion code which is much longer.
  if (VT == MVT::i32) {
    // Split each i32 operand into its low and high i16 halves.
    SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHShi,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      // Compare low halves, then chain the carry through CMPC on the high
      // halves via the glue operand.
      Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
    }
  } else if (VT == MVT::i64) {
    // Split each i64 operand twice: first into i32 halves, then into four
    // i16 quarters (LHS0 is least significant, LHS3 most significant).
    SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue LHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue LHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(1, DL));

    SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue RHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue RHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS3,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      // Compare least-significant quarters first, propagating the carry
      // upward through the glued CMPC chain.
      Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS0, RHS0);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS1, RHS1, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS2, RHS2, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS3, RHS3, Cmp);
    }
  } else if (VT == MVT::i8 || VT == MVT::i16) {
    if (UseTest) {
      // When using tst we only care about the highest part.
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue,
                        (VT == MVT::i8)
                            ? LHS
                            : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8,
                                          LHS, DAG.getIntPtrConstant(1, DL)));
    } else {
      Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS, RHS);
    }
  } else {
    llvm_unreachable("Invalid comparison size");
  }

  // When using a test instruction AVRcc is already set.
  if (!UseTest) {
    AVRcc = DAG.getConstant(intCCToAVRCC(CC), DL, MVT::i8);
  }

  return Cmp;
}
620 | ||||
621 | SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { | |||
622 | SDValue Chain = Op.getOperand(0); | |||
623 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); | |||
624 | SDValue LHS = Op.getOperand(2); | |||
625 | SDValue RHS = Op.getOperand(3); | |||
626 | SDValue Dest = Op.getOperand(4); | |||
627 | SDLoc dl(Op); | |||
628 | ||||
629 | SDValue TargetCC; | |||
630 | SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl); | |||
631 | ||||
632 | return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC, | |||
633 | Cmp); | |||
634 | } | |||
635 | ||||
636 | SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { | |||
637 | SDValue LHS = Op.getOperand(0); | |||
638 | SDValue RHS = Op.getOperand(1); | |||
639 | SDValue TrueV = Op.getOperand(2); | |||
640 | SDValue FalseV = Op.getOperand(3); | |||
641 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); | |||
642 | SDLoc dl(Op); | |||
643 | ||||
644 | SDValue TargetCC; | |||
645 | SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl); | |||
646 | ||||
647 | SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); | |||
648 | SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp}; | |||
649 | ||||
650 | return DAG.getNode(AVRISD::SELECT_CC, dl, VTs, Ops); | |||
651 | } | |||
652 | ||||
653 | SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { | |||
654 | SDValue LHS = Op.getOperand(0); | |||
655 | SDValue RHS = Op.getOperand(1); | |||
656 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); | |||
657 | SDLoc DL(Op); | |||
658 | ||||
659 | SDValue TargetCC; | |||
660 | SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, DL); | |||
661 | ||||
662 | SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType()); | |||
663 | SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType()); | |||
664 | SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); | |||
665 | SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp}; | |||
666 | ||||
667 | return DAG.getNode(AVRISD::SELECT_CC, DL, VTs, Ops); | |||
668 | } | |||
669 | ||||
670 | SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { | |||
671 | const MachineFunction &MF = DAG.getMachineFunction(); | |||
672 | const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>(); | |||
673 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); | |||
674 | auto DL = DAG.getDataLayout(); | |||
675 | SDLoc dl(Op); | |||
676 | ||||
677 | // Vastart just stores the address of the VarArgsFrameIndex slot into the | |||
678 | // memory location argument. | |||
679 | SDValue FI = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL)); | |||
680 | ||||
681 | return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1), | |||
682 | MachinePointerInfo(SV), 0); | |||
683 | } | |||
684 | ||||
/// Dispatch to the custom lowering routine for each operation marked
/// `Custom` in the constructor; anything else reaching here is a bug.
SDValue AVRTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    return LowerShifts(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return LowerDivRem(Op, DAG);
  }

  // Not reachable (every case returns); kept to satisfy the compiler.
  return SDValue();
}
714 | ||||
715 | /// Replace a node with an illegal result type | |||
716 | /// with a new node built out of custom code. | |||
void AVRTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc DL(N);

  switch (N->getOpcode()) {
  case ISD::ADD: {
    // Convert add (x, imm) into sub (x, -imm).
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      SDValue Sub = DAG.getNode(
          ISD::SUB, DL, N->getValueType(0), N->getOperand(0),
          DAG.getConstant(-C->getAPIntValue(), DL, C->getValueType(0)));
      Results.push_back(Sub);
    }
    break;
  }
  default: {
    SDValue Res = LowerOperation(SDValue(N, 0), DAG);

    // NOTE(review): assumes LowerOperation never returns a null SDValue for
    // opcodes that reach this path (its default case is unreachable) —
    // confirm before routing new opcodes through ReplaceNodeResults.
    for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
      Results.push_back(Res.getValue(I));

    break;
  }
  }
}
743 | ||||
744 | /// Return true if the addressing mode represented | |||
745 | /// by AM is legal for this target, for a load/store of the specified type. | |||
746 | bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL, | |||
747 | const AddrMode &AM, Type *Ty, | |||
748 | unsigned AS, Instruction *I) const { | |||
749 | int64_t Offs = AM.BaseOffs; | |||
750 | ||||
751 | // Allow absolute addresses. | |||
752 | if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && Offs == 0) { | |||
753 | return true; | |||
754 | } | |||
755 | ||||
756 | // Flash memory instructions only allow zero offsets. | |||
757 | if (isa<PointerType>(Ty) && AS == AVR::ProgramMemory) { | |||
758 | return false; | |||
759 | } | |||
760 | ||||
761 | // Allow reg+<6bit> offset. | |||
762 | if (Offs < 0) | |||
763 | Offs = -Offs; | |||
764 | if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 0 && isUInt<6>(Offs)) { | |||
765 | return true; | |||
766 | } | |||
767 | ||||
768 | return false; | |||
769 | } | |||
770 | ||||
771 | /// Returns true by value, base pointer and | |||
772 | /// offset pointer and addressing mode by reference if the node's address | |||
773 | /// can be legally represented as pre-indexed load / store address. | |||
bool AVRTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  EVT VT;
  const SDNode *Op;
  SDLoc DL(N);

  // Only plain (non-extending) loads and stores outside program memory are
  // candidates for pre-indexed addressing.
  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Op = LD->getBasePtr().getNode();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
    if (AVR::isProgramMemoryAccess(LD)) {
      return false;
    }
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Op = ST->getBasePtr().getNode();
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  // Only 8- and 16-bit accesses are supported.
  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  // The address must be computed as base +/- constant.
  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    // Fold a subtracted constant into a signed addend.
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    // Pre-decrement only: the displacement must be minus the access size.
    if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::PRE_DEC;

    return true;
  }

  return false;
}
826 | ||||
827 | /// Returns true by value, base pointer and | |||
828 | /// offset pointer and addressing mode by reference if this node can be | |||
829 | /// combined with a load / store to form a post-indexed load / store. | |||
bool AVRTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  EVT VT;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    // Extending loads are not combined with post-increment.
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
    // NOTE(review): unlike getPreIndexedAddressParts, loads from program
    // memory are NOT rejected here — confirm the asymmetry is intentional.
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    // Reject stores into program memory.
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  // Only 8- and 16-bit accesses are supported.
  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  // The candidate increment node must be an add or sub of the pointer.
  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    // Fold a subtracted constant into a signed addend.
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;
    // Post-increment only: the displacement must equal the access size.
    if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::POST_INC;

    return true;
  }

  return false;
}
876 | ||||
bool AVRTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  // A constant offset can always be folded into a global address operand.
  return true;
}
881 | ||||
882 | //===----------------------------------------------------------------------===// | |||
883 | // Formal Arguments Calling Convention Implementation | |||
884 | //===----------------------------------------------------------------------===// | |||
885 | ||||
886 | #include "AVRGenCallingConv.inc" | |||
887 | ||||
888 | /// For each argument in a function store the number of pieces it is composed | |||
889 | /// of. | |||
890 | static void parseFunctionArgs(const SmallVectorImpl<ISD::InputArg> &Ins, | |||
891 | SmallVectorImpl<unsigned> &Out) { | |||
892 | for (const ISD::InputArg &Arg : Ins) { | |||
893 | if(Arg.PartOffset > 0) continue; | |||
894 | unsigned Bytes = ((Arg.ArgVT.getSizeInBits()) + 7) / 8; | |||
895 | ||||
896 | Out.push_back((Bytes + 1) / 2); | |||
897 | } | |||
898 | } | |||
899 | ||||
900 | /// For external symbols there is no function prototype information so we | |||
901 | /// have to rely directly on argument sizes. | |||
902 | static void parseExternFuncCallArgs(const SmallVectorImpl<ISD::OutputArg> &In, | |||
903 | SmallVectorImpl<unsigned> &Out) { | |||
904 | for (unsigned i = 0, e = In.size(); i != e;) { | |||
905 | unsigned Size = 0; | |||
906 | unsigned Offset = 0; | |||
907 | while ((i != e) && (In[i].PartOffset == Offset)) { | |||
908 | Offset += In[i].VT.getStoreSize(); | |||
909 | ++i; | |||
910 | ++Size; | |||
911 | } | |||
912 | Out.push_back(Size); | |||
913 | } | |||
914 | } | |||
915 | ||||
916 | static StringRef getFunctionName(TargetLowering::CallLoweringInfo &CLI) { | |||
917 | SDValue Callee = CLI.Callee; | |||
918 | ||||
919 | if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee)) { | |||
920 | return G->getSymbol(); | |||
921 | } | |||
922 | ||||
923 | if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { | |||
924 | return G->getGlobal()->getName(); | |||
925 | } | |||
926 | ||||
927 | llvm_unreachable("don't know how to get the name for this callee")::llvm::llvm_unreachable_internal("don't know how to get the name for this callee" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AVR/AVRISelLowering.cpp" , 927); | |||
928 | } | |||
929 | ||||
930 | /// Analyze incoming and outgoing function arguments. We need custom C++ code | |||
931 | /// to handle special constraints in the ABI like reversing the order of the | |||
932 | /// pieces of splitted arguments. In addition, all pieces of a certain argument | |||
933 | /// have to be passed either using registers or the stack but never mixing both. | |||
static void analyzeStandardArguments(TargetLowering::CallLoweringInfo *CLI,
                                     const Function *F, const DataLayout *TD,
                                     const SmallVectorImpl<ISD::OutputArg> *Outs,
                                     const SmallVectorImpl<ISD::InputArg> *Ins,
                                     CallingConv::ID CallConv,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     CCState &CCInfo, bool IsCall, bool IsVarArg) {
  // Argument registers in allocation order; RegList16 pairs correspond
  // positionally to the RegList8 entries.
  static const MCPhysReg RegList8[] = {AVR::R24, AVR::R22, AVR::R20,
                                       AVR::R18, AVR::R16, AVR::R14,
                                       AVR::R12, AVR::R10, AVR::R8};
  static const MCPhysReg RegList16[] = {AVR::R25R24, AVR::R23R22, AVR::R21R20,
                                        AVR::R19R18, AVR::R17R16, AVR::R15R14,
                                        AVR::R13R12, AVR::R11R10, AVR::R9R8};
  if (IsVarArg) {
    // Variadic functions do not need all the analysis below.
    if (IsCall) {
      CCInfo.AnalyzeCallOperands(*Outs, ArgCC_AVR_Vararg);
    } else {
      CCInfo.AnalyzeFormalArguments(*Ins, ArgCC_AVR_Vararg);
    }
    return;
  }

  // Fill in the Args array which will contain original argument sizes
  // measured in 16-bit pieces.
  SmallVector<unsigned, 8> Args;
  if (IsCall) {
    parseExternFuncCallArgs(*Outs, Args);
  } else {
    assert(F != nullptr && "function should not be null");
    parseFunctionArgs(*Ins, Args);
  }

  unsigned RegsLeft = array_lengthof(RegList8), ValNo = 0;
  // Once an argument spills to the stack, every following argument is passed
  // on the stack too, so register and stack pieces never interleave.
  bool UsesStack = false;
  for (unsigned i = 0, pos = 0, e = Args.size(); i != e; ++i) {
    unsigned Size = Args[i];

    // If we have a zero-sized argument, don't attempt to lower it.
    // AVR-GCC does not support zero-sized arguments and so we need not
    // worry about ABI compatibility.
    if (Size == 0) continue;

    MVT LocVT = (IsCall) ? (*Outs)[pos].VT : (*Ins)[pos].VT;

    // If we have plenty of regs to pass the whole argument do it.
    if (!UsesStack && (Size <= RegsLeft)) {
      const MCPhysReg *RegList = (LocVT == MVT::i16) ? RegList16 : RegList8;

      for (unsigned j = 0; j != Size; ++j) {
        unsigned Reg = CCInfo.AllocateReg(
            ArrayRef<MCPhysReg>(RegList, array_lengthof(RegList8)));
        CCInfo.addLoc(
            CCValAssign::getReg(ValNo++, LocVT, Reg, LocVT, CCValAssign::Full));
        --RegsLeft;
      }

      // Reverse the order of the pieces to agree with the "big endian" format
      // required in the calling convention ABI.
      std::reverse(ArgLocs.begin() + pos, ArgLocs.begin() + pos + Size);
    } else {
      // Pass the rest of arguments using the stack.
      UsesStack = true;
      for (unsigned j = 0; j != Size; ++j) {
        unsigned Offset = CCInfo.AllocateStack(
            TD->getTypeAllocSize(EVT(LocVT).getTypeForEVT(CCInfo.getContext())),
            TD->getABITypeAlignment(
                EVT(LocVT).getTypeForEVT(CCInfo.getContext())));
        CCInfo.addLoc(CCValAssign::getMem(ValNo++, LocVT, Offset, LocVT,
                                          CCValAssign::Full));
      }
    }
    // Advance past this argument's pieces in Outs/Ins and ArgLocs.
    pos += Size;
  }
}
1009 | ||||
1010 | static void analyzeBuiltinArguments(TargetLowering::CallLoweringInfo &CLI, | |||
1011 | const Function *F, const DataLayout *TD, | |||
1012 | const SmallVectorImpl<ISD::OutputArg> *Outs, | |||
1013 | const SmallVectorImpl<ISD::InputArg> *Ins, | |||
1014 | CallingConv::ID CallConv, | |||
1015 | SmallVectorImpl<CCValAssign> &ArgLocs, | |||
1016 | CCState &CCInfo, bool IsCall, bool IsVarArg) { | |||
1017 | StringRef FuncName = getFunctionName(CLI); | |||
1018 | ||||
1019 | if (FuncName.startswith("__udivmod") || FuncName.startswith("__divmod")) { | |||
1020 | CCInfo.AnalyzeCallOperands(*Outs, ArgCC_AVR_BUILTIN_DIV); | |||
1021 | } else { | |||
1022 | analyzeStandardArguments(&CLI, F, TD, Outs, Ins, | |||
1023 | CallConv, ArgLocs, CCInfo, | |||
1024 | IsCall, IsVarArg); | |||
1025 | } | |||
1026 | } | |||
1027 | ||||
1028 | static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI, | |||
1029 | const Function *F, const DataLayout *TD, | |||
1030 | const SmallVectorImpl<ISD::OutputArg> *Outs, | |||
1031 | const SmallVectorImpl<ISD::InputArg> *Ins, | |||
1032 | CallingConv::ID CallConv, | |||
1033 | SmallVectorImpl<CCValAssign> &ArgLocs, | |||
1034 | CCState &CCInfo, bool IsCall, bool IsVarArg) { | |||
1035 | switch (CallConv) { | |||
1036 | case CallingConv::AVR_BUILTIN: { | |||
1037 | analyzeBuiltinArguments(*CLI, F, TD, Outs, Ins, | |||
| ||||
1038 | CallConv, ArgLocs, CCInfo, | |||
1039 | IsCall, IsVarArg); | |||
1040 | return; | |||
1041 | } | |||
1042 | default: { | |||
1043 | analyzeStandardArguments(CLI, F, TD, Outs, Ins, | |||
1044 | CallConv, ArgLocs, CCInfo, | |||
1045 | IsCall, IsVarArg); | |||
1046 | return; | |||
1047 | } | |||
1048 | } | |||
1049 | } | |||
1050 | ||||
/// Lower the incoming (formal) arguments of a function being compiled:
/// register arguments become CopyFromReg nodes, stack arguments become
/// fixed-object loads, and a vararg frame index is recorded if needed.
SDValue AVRTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto DL = DAG.getDataLayout();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // There is no call lowering info (and no outgoing args) when lowering a
  // function body, hence the null CLI and null Outs pointers.
  analyzeArguments(nullptr, &MF.getFunction(), &DL, 0, &Ins, CallConv, ArgLocs, CCInfo,
                   false, isVarArg);

  SDValue ArgValue;
  for (CCValAssign &VA : ArgLocs) {

    // Arguments stored on registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i8) {
        RC = &AVR::GPR8RegClass;
      } else if (RegVT == MVT::i16) {
        RC = &AVR::DREGSRegClass;
      } else {
        llvm_unreachable("Unknown argument type!");
      }

      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

      // :NOTE: Clang should not promote any i8 into i16 but for safety the
      // following code will handle zexts or sexts generated by other
      // front ends. Otherwise:
      // If this is an 8 bit value, it is really passed promoted
      // to 16 bits. Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default:
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      InVals.push_back(ArgValue);
    } else {
      // Sanity check.
      assert(VA.isMemLoc());

      EVT LocVT = VA.getLocVT();

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DL));
      InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
                                   MachinePointerInfo::getFixedStack(MF, FI),
                                   0));
    }
  }

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    unsigned StackSize = CCInfo.getNextStackOffset();
    AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();

    AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
  }

  return Chain;
}
1142 | ||||
1143 | //===----------------------------------------------------------------------===// | |||
1144 | // Call Calling Convention Implementation | |||
1145 | //===----------------------------------------------------------------------===// | |||
1146 | ||||
/// Lower an outgoing call: assign argument locations, copy register
/// arguments, store stack arguments in reverse order, emit the call node,
/// and collect the results via LowerCallResult.
SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();

  // AVR does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  const Function *F = nullptr;
  if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();

    F = cast<Function>(GV);
    Callee =
        DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
  } else if (const ExternalSymbolSDNode *ES =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
                                         getPointerTy(DAG.getDataLayout()));
  }

  // Ins is not needed when analyzing a call, hence the null pointer.
  analyzeArguments(&CLI, F, &DAG.getDataLayout(), &Outs, 0, CallConv, ArgLocs, CCInfo,
                   true, isVarArg);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  // First, walk the register assignments, inserting copies.
  unsigned AI, AE;
  bool HasStackArgs = false;
  for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
    CCValAssign &VA = ArgLocs[AI];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[AI];

    // Promote the value if needed. With Clang this should not happen.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, RegVT, Arg);
      break;
    }

    // Stop when we encounter a stack argument, we need to process them
    // in reverse order in the loop below.
    if (VA.isMemLoc()) {
      HasStackArgs = true;
      break;
    }

    // Arguments that can be passed on registers must be kept in the RegsToPass
    // vector.
    RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  }

  // Second, stack arguments have to be walked in reverse order by inserting
  // chained stores, this ensures their order is not changed by the scheduler
  // and that the push instruction sequence generated is correct, otherwise they
  // can be freely intermixed.
  if (HasStackArgs) {
    // AI currently indexes the first stack argument; walk from the last
    // ArgLocs entry back down to it.
    for (AE = AI, AI = ArgLocs.size(); AI != AE; --AI) {
      unsigned Loc = AI - 1;
      CCValAssign &VA = ArgLocs[Loc];
      SDValue Arg = OutVals[Loc];

      assert(VA.isMemLoc());

      // SP points to one stack slot further so add one to adjust it.
      SDValue PtrOff = DAG.getNode(
          ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
          DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
          DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));

      Chain =
          DAG.getStore(Chain, DL, Arg, PtrOff,
                       MachinePointerInfo::getStack(MF, VA.getLocMemOffset()),
                       0);
    }
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain and
  // flag operands which copy the outgoing args into registers. The InFlag is
  // necessary since all emitted instructions must be stuck together.
  SDValue InFlag;
  for (auto Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (auto Reg : RegsToPass) {
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
  }

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode()) {
    Ops.push_back(InFlag);
  }

  Chain = DAG.getNode(AVRISD::CALL, DL, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), InFlag, DL);

  if (!Ins.empty()) {
    InFlag = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, DL, DAG,
                         InVals);
}
1309 | ||||
1310 | /// Lower the result values of a call into the | |||
1311 | /// appropriate copies out of appropriate physical registers. | |||
1312 | /// | |||
SDValue AVRTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Handle runtime calling convs.
  auto CCFunction = CCAssignFnForReturn(CallConv);
  CCInfo.AnalyzeCallResult(Ins, CCFunction);

  if (CallConv != CallingConv::AVR_BUILTIN && RVLocs.size() > 1) {
    // Reverse splitted return values to get the "big endian" format required
    // to agree with the calling convention ABI.
    std::reverse(RVLocs.begin(), RVLocs.end());
  }

  // Copy all of the result registers out of their specified physreg.
  // getCopyFromReg yields (value:0, chain:1, glue:2): keep the chain in
  // Chain, push the value into InVals, and thread the glue into the next
  // copy so the copies stay contiguous.
  for (CCValAssign const &RVLoc : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
                               InFlag)
                .getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}
1344 | ||||
1345 | //===----------------------------------------------------------------------===// | |||
1346 | // Return Value Calling Convention Implementation | |||
1347 | //===----------------------------------------------------------------------===// | |||
1348 | ||||
1349 | CCAssignFn *AVRTargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const { | |||
1350 | switch (CC) { | |||
1351 | case CallingConv::AVR_BUILTIN: | |||
1352 | return RetCC_AVR_BUILTIN; | |||
1353 | default: | |||
1354 | return RetCC_AVR; | |||
1355 | } | |||
1356 | } | |||
1357 | ||||
1358 | bool | |||
1359 | AVRTargetLowering::CanLowerReturn(CallingConv::ID CallConv, | |||
1360 | MachineFunction &MF, bool isVarArg, | |||
1361 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
1362 | LLVMContext &Context) const | |||
1363 | { | |||
1364 | SmallVector<CCValAssign, 16> RVLocs; | |||
1365 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); | |||
1366 | ||||
1367 | auto CCFunction = CCAssignFnForReturn(CallConv); | |||
1368 | return CCInfo.CheckReturn(Outs, CCFunction); | |||
1369 | } | |||
1370 | ||||
/// Lower a function return: copy return values into their physical
/// registers and emit a RET_FLAG/RETI_FLAG node (or nothing for naked
/// functions).
SDValue
AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  auto CCFunction = CCAssignFnForReturn(CallConv);
  CCInfo.AnalyzeReturn(Outs, CCFunction);

  // If this is the first return lowered for this function, add the regs to
  // the liveout set for the function.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned e = RVLocs.size();

  // Reverse splitted return values to get the "big endian" format required
  // to agree with the calling convention ABI.
  if (e > 1) {
    std::reverse(RVLocs.begin(), RVLocs.end());
  }

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Copy the result values into the output registers.
  for (unsigned i = 0; i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // Don't emit the ret/reti instruction when the naked attribute is present in
  // the function being compiled.
  if (MF.getFunction().getAttributes().hasAttribute(
          AttributeList::FunctionIndex, Attribute::Naked)) {
    return Chain;
  }

  // Interrupt handler conventions return with RETI_FLAG; everything else
  // returns with RET_FLAG.
  unsigned RetOpc =
      (CallConv == CallingConv::AVR_INTR || CallConv == CallingConv::AVR_SIGNAL)
          ? AVRISD::RETI_FLAG
          : AVRISD::RET_FLAG;

  RetOps[0] = Chain; // Update chain.

  if (Flag.getNode()) {
    RetOps.push_back(Flag);
  }

  return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
}
1433 | ||||
1434 | //===----------------------------------------------------------------------===// | |||
1435 | // Custom Inserters | |||
1436 | //===----------------------------------------------------------------------===// | |||
1437 | ||||
/// Expand a pseudo shift/rotate with a non-constant shift amount into an
/// explicit loop: the source block tests the amount for zero, a loop block
/// applies the one-bit shift and decrements the counter, and a remainder
/// block receives the result through a PHI. Returns the block that now holds
/// the instructions following the shift.
MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
                                                  MachineBasicBlock *BB) const {
  unsigned Opc;
  const TargetRegisterClass *RC;
  bool HasRepeatedOperand = false;
  MachineFunction *F = BB->getParent();
  MachineRegisterInfo &RI = F->getRegInfo();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  // Pick the single-bit shift instruction and register class matching the
  // pseudo being expanded.
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Invalid shift opcode!");
  case AVR::Lsl8:
    Opc = AVR::ADDRdRr; // LSL is an alias of ADD Rd, Rd
    RC = &AVR::GPR8RegClass;
    HasRepeatedOperand = true; // ADD needs the operand twice: ADD Rd, Rd.
    break;
  case AVR::Lsl16:
    Opc = AVR::LSLWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Asr8:
    Opc = AVR::ASRRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Asr16:
    Opc = AVR::ASRWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Lsr8:
    Opc = AVR::LSRRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Lsr16:
    Opc = AVR::LSRWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Rol8:
    Opc = AVR::ROLBRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Rol16:
    Opc = AVR::ROLWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Ror8:
    Opc = AVR::RORBRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Ror16:
    Opc = AVR::RORWRd;
    RC = &AVR::DREGSRegClass;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  // Find the iterator position just after BB, so the new blocks are inserted
  // immediately following it in the function's block list.
  MachineFunction::iterator I;
  for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I);
  if (I != F->end()) ++I;

  // Create loop block.
  MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, LoopBB);
  F->insert(I, RemBB);

  // Update machine-CFG edges by transferring all successors of the current
  // block to the block containing instructions after shift.
  RemBB->splice(RemBB->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
                BB->end());
  RemBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add edges BB => LoopBB => RemBB, BB => RemBB, LoopBB => LoopBB.
  BB->addSuccessor(LoopBB);
  BB->addSuccessor(RemBB);
  LoopBB->addSuccessor(RemBB);
  LoopBB->addSuccessor(LoopBB);

  Register ShiftAmtReg = RI.createVirtualRegister(&AVR::LD8RegClass);
  Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::LD8RegClass);
  Register ShiftReg = RI.createVirtualRegister(RC);
  Register ShiftReg2 = RI.createVirtualRegister(RC);
  Register ShiftAmtSrcReg = MI.getOperand(2).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register DstReg = MI.getOperand(0).getReg();

  // BB:
  // cpi N, 0
  // breq RemBB
  // (skip the loop entirely when the shift amount is zero)
  BuildMI(BB, dl, TII.get(AVR::CPIRdK)).addReg(ShiftAmtSrcReg).addImm(0);
  BuildMI(BB, dl, TII.get(AVR::BREQk)).addMBB(RemBB);

  // LoopBB:
  // ShiftReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
  // ShiftAmt = phi [%N, BB], [%ShiftAmt2, LoopBB]
  // ShiftReg2 = shift ShiftReg
  // ShiftAmt2 = ShiftAmt - 1;
  BuildMI(LoopBB, dl, TII.get(AVR::PHI), ShiftReg)
      .addReg(SrcReg)
      .addMBB(BB)
      .addReg(ShiftReg2)
      .addMBB(LoopBB);
  BuildMI(LoopBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
      .addReg(ShiftAmtSrcReg)
      .addMBB(BB)
      .addReg(ShiftAmtReg2)
      .addMBB(LoopBB);

  auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
  if (HasRepeatedOperand)
    ShiftMI.addReg(ShiftReg);

  // SUBI sets the flags, so BRNE loops while the decremented count is
  // non-zero.
  BuildMI(LoopBB, dl, TII.get(AVR::SUBIRdK), ShiftAmtReg2)
      .addReg(ShiftAmtReg)
      .addImm(1);
  BuildMI(LoopBB, dl, TII.get(AVR::BRNEk)).addMBB(LoopBB);

  // RemBB:
  // DestReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
  BuildMI(*RemBB, RemBB->begin(), dl, TII.get(AVR::PHI), DstReg)
      .addReg(SrcReg)
      .addMBB(BB)
      .addReg(ShiftReg2)
      .addMBB(LoopBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return RemBB;
}
1569 | ||||
1570 | static bool isCopyMulResult(MachineBasicBlock::iterator const &I) { | |||
1571 | if (I->getOpcode() == AVR::COPY) { | |||
1572 | Register SrcReg = I->getOperand(1).getReg(); | |||
1573 | return (SrcReg == AVR::R0 || SrcReg == AVR::R1); | |||
1574 | } | |||
1575 | ||||
1576 | return false; | |||
1577 | } | |||
1578 | ||||
1579 | // The mul instructions wreak havock on our zero_reg R1. We need to clear it | |||
1580 | // after the result has been evacuated. This is probably not the best way to do | |||
1581 | // it, but it works for now. | |||
1582 | MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI, | |||
1583 | MachineBasicBlock *BB) const { | |||
1584 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); | |||
1585 | MachineBasicBlock::iterator I(MI); | |||
1586 | ++I; // in any case insert *after* the mul instruction | |||
1587 | if (isCopyMulResult(I)) | |||
1588 | ++I; | |||
1589 | if (isCopyMulResult(I)) | |||
1590 | ++I; | |||
1591 | BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1) | |||
1592 | .addReg(AVR::R1) | |||
1593 | .addReg(AVR::R1); | |||
1594 | return BB; | |||
1595 | } | |||
1596 | ||||
/// Dispatch custom MI-level expansion for pseudo instructions: shift/rotate
/// pseudos become loops (insertShift), multiplies get a zero-register
/// restore (insertMul), and Select8/Select16 are expanded here into a
/// branch diamond with a PHI.
MachineBasicBlock *
AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *MBB) const {
  int Opc = MI.getOpcode();

  // Pseudo shift instructions with a non constant shift amount are expanded
  // into a loop.
  switch (Opc) {
  case AVR::Lsl8:
  case AVR::Lsl16:
  case AVR::Lsr8:
  case AVR::Lsr16:
  case AVR::Rol8:
  case AVR::Rol16:
  case AVR::Ror8:
  case AVR::Ror16:
  case AVR::Asr8:
  case AVR::Asr16:
    return insertShift(MI, MBB);
  case AVR::MULRdRr:
  case AVR::MULSRdRr:
    return insertMul(MI, MBB);
  }

  // Only the select pseudos should reach the code below.
  assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
         "Unexpected instr type to insert");

  const AVRInstrInfo &TII = (const AVRInstrInfo &)*MI.getParent()
                                ->getParent()
                                ->getSubtarget()
                                .getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  // To "insert" a SELECT instruction, we insert the diamond
  // control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch
  // on, the true/false values to select between, and a branch opcode
  // to use.

  MachineFunction *MF = MBB->getParent();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineBasicBlock *FallThrough = MBB->getFallThrough();

  // If the current basic block falls through to another basic block,
  // we must insert an unconditional branch to the fallthrough destination
  // if we are to insert basic blocks at the prior fallthrough point.
  if (FallThrough != nullptr) {
    BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(FallThrough);
  }

  MachineBasicBlock *trueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *falseMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  // Locate the position just after MBB and insert the two new blocks there.
  MachineFunction::iterator I;
  for (I = MF->begin(); I != MF->end() && &(*I) != MBB; ++I);
  if (I != MF->end()) ++I;
  MF->insert(I, trueMBB);
  MF->insert(I, falseMBB);

  // Transfer remaining instructions and all successors of the current
  // block to the block which will contain the Phi node for the
  // select.
  trueMBB->splice(trueMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  trueMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Branch to trueMBB on the selected condition, otherwise fall into
  // falseMBB.
  AVRCC::CondCodes CC = (AVRCC::CondCodes)MI.getOperand(3).getImm();
  BuildMI(MBB, dl, TII.getBrCond(CC)).addMBB(trueMBB);
  BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(falseMBB);
  MBB->addSuccessor(falseMBB);
  MBB->addSuccessor(trueMBB);

  // Unconditionally flow back to the true block
  BuildMI(falseMBB, dl, TII.get(AVR::RJMPk)).addMBB(trueMBB);
  falseMBB->addSuccessor(trueMBB);

  // Set up the Phi node to determine where we came from
  BuildMI(*trueMBB, trueMBB->begin(), dl, TII.get(AVR::PHI), MI.getOperand(0).getReg())
    .addReg(MI.getOperand(1).getReg())
    .addMBB(MBB)
    .addReg(MI.getOperand(2).getReg())
    .addMBB(falseMBB) ;

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return trueMBB;
}
1683 | ||||
1684 | //===----------------------------------------------------------------------===// | |||
1685 | // Inline Asm Support | |||
1686 | //===----------------------------------------------------------------------===// | |||
1687 | ||||
1688 | AVRTargetLowering::ConstraintType | |||
1689 | AVRTargetLowering::getConstraintType(StringRef Constraint) const { | |||
1690 | if (Constraint.size() == 1) { | |||
1691 | // See http://www.nongnu.org/avr-libc/user-manual/inline_asm.html | |||
1692 | switch (Constraint[0]) { | |||
1693 | default: | |||
1694 | break; | |||
1695 | case 'a': // Simple upper registers | |||
1696 | case 'b': // Base pointer registers pairs | |||
1697 | case 'd': // Upper register | |||
1698 | case 'l': // Lower registers | |||
1699 | case 'e': // Pointer register pairs | |||
1700 | case 'q': // Stack pointer register | |||
1701 | case 'r': // Any register | |||
1702 | case 'w': // Special upper register pairs | |||
1703 | return C_RegisterClass; | |||
1704 | case 't': // Temporary register | |||
1705 | case 'x': case 'X': // Pointer register pair X | |||
1706 | case 'y': case 'Y': // Pointer register pair Y | |||
1707 | case 'z': case 'Z': // Pointer register pair Z | |||
1708 | return C_Register; | |||
1709 | case 'Q': // A memory address based on Y or Z pointer with displacement. | |||
1710 | return C_Memory; | |||
1711 | case 'G': // Floating point constant | |||
1712 | case 'I': // 6-bit positive integer constant | |||
1713 | case 'J': // 6-bit negative integer constant | |||
1714 | case 'K': // Integer constant (Range: 2) | |||
1715 | case 'L': // Integer constant (Range: 0) | |||
1716 | case 'M': // 8-bit integer constant | |||
1717 | case 'N': // Integer constant (Range: -1) | |||
1718 | case 'O': // Integer constant (Range: 8, 16, 24) | |||
1719 | case 'P': // Integer constant (Range: 1) | |||
1720 | case 'R': // Integer constant (Range: -6 to 5)x | |||
1721 | return C_Immediate; | |||
1722 | } | |||
1723 | } | |||
1724 | ||||
1725 | return TargetLowering::getConstraintType(Constraint); | |||
1726 | } | |||
1727 | ||||
1728 | unsigned | |||
1729 | AVRTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const { | |||
1730 | // Not sure if this is actually the right thing to do, but we got to do | |||
1731 | // *something* [agnat] | |||
1732 | switch (ConstraintCode[0]) { | |||
1733 | case 'Q': | |||
1734 | return InlineAsm::Constraint_Q; | |||
1735 | } | |||
1736 | return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); | |||
1737 | } | |||
1738 | ||||
1739 | AVRTargetLowering::ConstraintWeight | |||
1740 | AVRTargetLowering::getSingleConstraintMatchWeight( | |||
1741 | AsmOperandInfo &info, const char *constraint) const { | |||
1742 | ConstraintWeight weight = CW_Invalid; | |||
1743 | Value *CallOperandVal = info.CallOperandVal; | |||
1744 | ||||
1745 | // If we don't have a value, we can't do a match, | |||
1746 | // but allow it at the lowest weight. | |||
1747 | // (this behaviour has been copied from the ARM backend) | |||
1748 | if (!CallOperandVal) { | |||
1749 | return CW_Default; | |||
1750 | } | |||
1751 | ||||
1752 | // Look at the constraint type. | |||
1753 | switch (*constraint) { | |||
1754 | default: | |||
1755 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); | |||
1756 | break; | |||
1757 | case 'd': | |||
1758 | case 'r': | |||
1759 | case 'l': | |||
1760 | weight = CW_Register; | |||
1761 | break; | |||
1762 | case 'a': | |||
1763 | case 'b': | |||
1764 | case 'e': | |||
1765 | case 'q': | |||
1766 | case 't': | |||
1767 | case 'w': | |||
1768 | case 'x': case 'X': | |||
1769 | case 'y': case 'Y': | |||
1770 | case 'z': case 'Z': | |||
1771 | weight = CW_SpecificReg; | |||
1772 | break; | |||
1773 | case 'G': | |||
1774 | if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) { | |||
1775 | if (C->isZero()) { | |||
1776 | weight = CW_Constant; | |||
1777 | } | |||
1778 | } | |||
1779 | break; | |||
1780 | case 'I': | |||
1781 | if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
1782 | if (isUInt<6>(C->getZExtValue())) { | |||
1783 | weight = CW_Constant; | |||
1784 | } | |||
1785 | } | |||
1786 | break; | |||
1787 | case 'J': | |||
1788 | if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
1789 | if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) { | |||
1790 | weight = CW_Constant; | |||
1791 | } | |||
1792 | } | |||
1793 | break; | |||
1794 | case 'K': | |||
1795 | if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
1796 | if (C->getZExtValue() == 2) { | |||
1797 | weight = CW_Constant; | |||
1798 | } | |||
1799 | } | |||
1800 | break; | |||
1801 | case 'L': | |||
1802 | if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
1803 | if (C->getZExtValue() == 0) { | |||
1804 | weight = CW_Constant; | |||
1805 | } | |||
1806 | } | |||
1807 | break; | |||
1808 | case 'M': | |||
1809 | if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
1810 | if (isUInt<8>(C->getZExtValue())) { | |||
1811 | weight = CW_Constant; | |||
1812 | } | |||
1813 | } | |||
1814 | break; | |||
1815 | case 'N': | |||
1816 | if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
1817 | if (C->getSExtValue() == -1) { | |||
1818 | weight = CW_Constant; | |||
1819 | } | |||
1820 | } | |||
1821 | break; | |||
1822 | case 'O': | |||
1823 | if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
1824 | if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) || | |||
1825 | (C->getZExtValue() == 24)) { | |||
1826 | weight = CW_Constant; | |||
1827 | } | |||
1828 | } | |||
1829 | break; | |||
1830 | case 'P': | |||
1831 | if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
1832 | if (C->getZExtValue() == 1) { | |||
1833 | weight = CW_Constant; | |||
1834 | } | |||
1835 | } | |||
1836 | break; | |||
1837 | case 'R': | |||
1838 | if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
1839 | if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) { | |||
1840 | weight = CW_Constant; | |||
1841 | } | |||
1842 | } | |||
1843 | break; | |||
1844 | case 'Q': | |||
1845 | weight = CW_Memory; | |||
1846 | break; | |||
1847 | } | |||
1848 | ||||
1849 | return weight; | |||
1850 | } | |||
1851 | ||||
1852 | std::pair<unsigned, const TargetRegisterClass *> | |||
1853 | AVRTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, | |||
1854 | StringRef Constraint, | |||
1855 | MVT VT) const { | |||
1856 | // We only support i8 and i16. | |||
1857 | // | |||
1858 | //:FIXME: remove this assert for now since it gets sometimes executed | |||
1859 | // assert((VT == MVT::i16 || VT == MVT::i8) && "Wrong operand type."); | |||
1860 | ||||
1861 | if (Constraint.size() == 1) { | |||
1862 | switch (Constraint[0]) { | |||
1863 | case 'a': // Simple upper registers r16..r23. | |||
1864 | return std::make_pair(0U, &AVR::LD8loRegClass); | |||
1865 | case 'b': // Base pointer registers: y, z. | |||
1866 | return std::make_pair(0U, &AVR::PTRDISPREGSRegClass); | |||
1867 | case 'd': // Upper registers r16..r31. | |||
1868 | return std::make_pair(0U, &AVR::LD8RegClass); | |||
1869 | case 'l': // Lower registers r0..r15. | |||
1870 | return std::make_pair(0U, &AVR::GPR8loRegClass); | |||
1871 | case 'e': // Pointer register pairs: x, y, z. | |||
1872 | return std::make_pair(0U, &AVR::PTRREGSRegClass); | |||
1873 | case 'q': // Stack pointer register: SPH:SPL. | |||
1874 | return std::make_pair(0U, &AVR::GPRSPRegClass); | |||
1875 | case 'r': // Any register: r0..r31. | |||
1876 | if (VT == MVT::i8) | |||
1877 | return std::make_pair(0U, &AVR::GPR8RegClass); | |||
1878 | ||||
1879 | assert(VT == MVT::i16 && "inline asm constraint too large")((VT == MVT::i16 && "inline asm constraint too large" ) ? static_cast<void> (0) : __assert_fail ("VT == MVT::i16 && \"inline asm constraint too large\"" , "/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/AVR/AVRISelLowering.cpp" , 1879, __PRETTY_FUNCTION__)); | |||
1880 | return std::make_pair(0U, &AVR::DREGSRegClass); | |||
1881 | case 't': // Temporary register: r0. | |||
1882 | return std::make_pair(unsigned(AVR::R0), &AVR::GPR8RegClass); | |||
1883 | case 'w': // Special upper register pairs: r24, r26, r28, r30. | |||
1884 | return std::make_pair(0U, &AVR::IWREGSRegClass); | |||
1885 | case 'x': // Pointer register pair X: r27:r26. | |||
1886 | case 'X': | |||
1887 | return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass); | |||
1888 | case 'y': // Pointer register pair Y: r29:r28. | |||
1889 | case 'Y': | |||
1890 | return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass); | |||
1891 | case 'z': // Pointer register pair Z: r31:r30. | |||
1892 | case 'Z': | |||
1893 | return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass); | |||
1894 | default: | |||
1895 | break; | |||
1896 | } | |||
1897 | } | |||
1898 | ||||
1899 | return TargetLowering::getRegForInlineAsmConstraint( | |||
1900 | Subtarget.getRegisterInfo(), Constraint, VT); | |||
1901 | } | |||
1902 | ||||
1903 | void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op, | |||
1904 | std::string &Constraint, | |||
1905 | std::vector<SDValue> &Ops, | |||
1906 | SelectionDAG &DAG) const { | |||
1907 | SDValue Result(0, 0); | |||
1908 | SDLoc DL(Op); | |||
1909 | EVT Ty = Op.getValueType(); | |||
1910 | ||||
1911 | // Currently only support length 1 constraints. | |||
1912 | if (Constraint.length() != 1) { | |||
1913 | return; | |||
1914 | } | |||
1915 | ||||
1916 | char ConstraintLetter = Constraint[0]; | |||
1917 | switch (ConstraintLetter) { | |||
1918 | default: | |||
1919 | break; | |||
1920 | // Deal with integers first: | |||
1921 | case 'I': | |||
1922 | case 'J': | |||
1923 | case 'K': | |||
1924 | case 'L': | |||
1925 | case 'M': | |||
1926 | case 'N': | |||
1927 | case 'O': | |||
1928 | case 'P': | |||
1929 | case 'R': { | |||
1930 | const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); | |||
1931 | if (!C) { | |||
1932 | return; | |||
1933 | } | |||
1934 | ||||
1935 | int64_t CVal64 = C->getSExtValue(); | |||
1936 | uint64_t CUVal64 = C->getZExtValue(); | |||
1937 | switch (ConstraintLetter) { | |||
1938 | case 'I': // 0..63 | |||
1939 | if (!isUInt<6>(CUVal64)) | |||
1940 | return; | |||
1941 | Result = DAG.getTargetConstant(CUVal64, DL, Ty); | |||
1942 | break; | |||
1943 | case 'J': // -63..0 | |||
1944 | if (CVal64 < -63 || CVal64 > 0) | |||
1945 | return; | |||
1946 | Result = DAG.getTargetConstant(CVal64, DL, Ty); | |||
1947 | break; | |||
1948 | case 'K': // 2 | |||
1949 | if (CUVal64 != 2) | |||
1950 | return; | |||
1951 | Result = DAG.getTargetConstant(CUVal64, DL, Ty); | |||
1952 | break; | |||
1953 | case 'L': // 0 | |||
1954 | if (CUVal64 != 0) | |||
1955 | return; | |||
1956 | Result = DAG.getTargetConstant(CUVal64, DL, Ty); | |||
1957 | break; | |||
1958 | case 'M': // 0..255 | |||
1959 | if (!isUInt<8>(CUVal64)) | |||
1960 | return; | |||
1961 | // i8 type may be printed as a negative number, | |||
1962 | // e.g. 254 would be printed as -2, | |||
1963 | // so we force it to i16 at least. | |||
1964 | if (Ty.getSimpleVT() == MVT::i8) { | |||
1965 | Ty = MVT::i16; | |||
1966 | } | |||
1967 | Result = DAG.getTargetConstant(CUVal64, DL, Ty); | |||
1968 | break; | |||
1969 | case 'N': // -1 | |||
1970 | if (CVal64 != -1) | |||
1971 | return; | |||
1972 | Result = DAG.getTargetConstant(CVal64, DL, Ty); | |||
1973 | break; | |||
1974 | case 'O': // 8, 16, 24 | |||
1975 | if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24) | |||
1976 | return; | |||
1977 | Result = DAG.getTargetConstant(CUVal64, DL, Ty); | |||
1978 | break; | |||
1979 | case 'P': // 1 | |||
1980 | if (CUVal64 != 1) | |||
1981 | return; | |||
1982 | Result = DAG.getTargetConstant(CUVal64, DL, Ty); | |||
1983 | break; | |||
1984 | case 'R': // -6..5 | |||
1985 | if (CVal64 < -6 || CVal64 > 5) | |||
1986 | return; | |||
1987 | Result = DAG.getTargetConstant(CVal64, DL, Ty); | |||
1988 | break; | |||
1989 | } | |||
1990 | ||||
1991 | break; | |||
1992 | } | |||
1993 | case 'G': | |||
1994 | const ConstantFPSDNode *FC = dyn_cast<ConstantFPSDNode>(Op); | |||
1995 | if (!FC || !FC->isZero()) | |||
1996 | return; | |||
1997 | // Soften float to i8 0 | |||
1998 | Result = DAG.getTargetConstant(0, DL, MVT::i8); | |||
1999 | break; | |||
2000 | } | |||
2001 | ||||
2002 | if (Result.getNode()) { | |||
2003 | Ops.push_back(Result); | |||
2004 | return; | |||
2005 | } | |||
2006 | ||||
2007 | return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); | |||
2008 | } | |||
2009 | ||||
2010 | Register AVRTargetLowering::getRegisterByName(const char *RegName, LLT VT, | |||
2011 | const MachineFunction &MF) const { | |||
2012 | Register Reg; | |||
2013 | ||||
2014 | if (VT == LLT::scalar(8)) { | |||
2015 | Reg = StringSwitch<unsigned>(RegName) | |||
2016 | .Case("r0", AVR::R0).Case("r1", AVR::R1).Case("r2", AVR::R2) | |||
2017 | .Case("r3", AVR::R3).Case("r4", AVR::R4).Case("r5", AVR::R5) | |||
2018 | .Case("r6", AVR::R6).Case("r7", AVR::R7).Case("r8", AVR::R8) | |||
2019 | .Case("r9", AVR::R9).Case("r10", AVR::R10).Case("r11", AVR::R11) | |||
2020 | .Case("r12", AVR::R12).Case("r13", AVR::R13).Case("r14", AVR::R14) | |||
2021 | .Case("r15", AVR::R15).Case("r16", AVR::R16).Case("r17", AVR::R17) | |||
2022 | .Case("r18", AVR::R18).Case("r19", AVR::R19).Case("r20", AVR::R20) | |||
2023 | .Case("r21", AVR::R21).Case("r22", AVR::R22).Case("r23", AVR::R23) | |||
2024 | .Case("r24", AVR::R24).Case("r25", AVR::R25).Case("r26", AVR::R26) | |||
2025 | .Case("r27", AVR::R27).Case("r28", AVR::R28).Case("r29", AVR::R29) | |||
2026 | .Case("r30", AVR::R30).Case("r31", AVR::R31) | |||
2027 | .Case("X", AVR::R27R26).Case("Y", AVR::R29R28).Case("Z", AVR::R31R30) | |||
2028 | .Default(0); | |||
2029 | } else { | |||
2030 | Reg = StringSwitch<unsigned>(RegName) | |||
2031 | .Case("r0", AVR::R1R0).Case("r2", AVR::R3R2) | |||
2032 | .Case("r4", AVR::R5R4).Case("r6", AVR::R7R6) | |||
2033 | .Case("r8", AVR::R9R8).Case("r10", AVR::R11R10) | |||
2034 | .Case("r12", AVR::R13R12).Case("r14", AVR::R15R14) | |||
2035 | .Case("r16", AVR::R17R16).Case("r18", AVR::R19R18) | |||
2036 | .Case("r20", AVR::R21R20).Case("r22", AVR::R23R22) | |||
2037 | .Case("r24", AVR::R25R24).Case("r26", AVR::R27R26) | |||
2038 | .Case("r28", AVR::R29R28).Case("r30", AVR::R31R30) | |||
2039 | .Case("X", AVR::R27R26).Case("Y", AVR::R29R28).Case("Z", AVR::R31R30) | |||
2040 | .Default(0); | |||
2041 | } | |||
2042 | ||||
2043 | if (Reg) | |||
2044 | return Reg; | |||
2045 | ||||
2046 | report_fatal_error("Invalid register name global variable"); | |||
2047 | } | |||
2048 | ||||
2049 | } // end of namespace llvm |