1//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interfaces that Sparc uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SparcISelLowering.h"
18#include "SparcRegisterInfo.h"
19#include "SparcTargetMachine.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/Module.h"
37using namespace llvm;
38
39
40//===----------------------------------------------------------------------===//
41// Calling Convention Implementation
42//===----------------------------------------------------------------------===//
43
44static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
45 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
46 ISD::ArgFlagsTy &ArgFlags, CCState &State)
47{
48 assert (ArgFlags.isSRet());
49
50 // Assign SRet argument.
51 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
52 0,
53 LocVT, LocInfo));
54 return true;
55}
56
57static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
58 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
59 ISD::ArgFlagsTy &ArgFlags, CCState &State)
60{
61 static const MCPhysReg RegList[] = {
62 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
63 };
64 // Try to get first reg.
65 if (Register Reg = State.AllocateReg(RegList)) {
66 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
67 } else {
68 // Assign whole thing in stack.
69 State.addLoc(CCValAssign::getCustomMem(
70 ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
71 return true;
72 }
73
74 // Try to get second reg.
75 if (Register Reg = State.AllocateReg(RegList))
76 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
77 else
78 State.addLoc(CCValAssign::getCustomMem(
79 ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
80 return true;
81}
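// For illustration, assuming a hypothetical callee such as void f(double d)
// under the 32-bit ABI: the two 32-bit halves of d are assigned by the rules
// above to %i0 and %i1 (callee-window numbering). If only one of %i0-%i5 is
// still free, the first half takes that register and the second half falls
// back to a 4-byte stack slot; if none are free, the whole value is given an
// 8-byte stack slot instead.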
82
83static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
84 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
85 ISD::ArgFlagsTy &ArgFlags, CCState &State)
86{
87 static const MCPhysReg RegList[] = {
88 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
89 };
90
91 // Try to get first reg.
92 if (Register Reg = State.AllocateReg(RegList))
93 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
94 else
95 return false;
96
97 // Try to get second reg.
98 if (Register Reg = State.AllocateReg(RegList))
99 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
100 else
101 return false;
102
103 return true;
104}
105
106// Allocate a full-sized argument for the 64-bit ABI.
107static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
108 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
109 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
110 assert((LocVT == MVT::f32 || LocVT == MVT::f128
111 || LocVT.getSizeInBits() == 64) &&
112 "Can't handle non-64 bits locations");
113
114 // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
115 unsigned size = (LocVT == MVT::f128) ? 16 : 8;
116 Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
117 unsigned Offset = State.AllocateStack(size, alignment);
118 unsigned Reg = 0;
119
120 if (LocVT == MVT::i64 && Offset < 6*8)
121 // Promote integers to %i0-%i5.
122 Reg = SP::I0 + Offset/8;
123 else if (LocVT == MVT::f64 && Offset < 16*8)
124 // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
125 Reg = SP::D0 + Offset/8;
126 else if (LocVT == MVT::f32 && Offset < 16*8)
127 // Promote floats to %f1, %f3, ...
128 Reg = SP::F1 + Offset/4;
129 else if (LocVT == MVT::f128 && Offset < 16*8)
130 // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
131 Reg = SP::Q0 + Offset/16;
132
133 // Promote to register when possible, otherwise use the stack slot.
134 if (Reg) {
135 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
136 return true;
137 }
138
139 // Bail out if this is a return CC and we run out of registers to place
140 // values into.
141 if (IsReturn)
142 return false;
143
144 // This argument goes on the stack in an 8-byte slot.
145 // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
146 // the right-aligned float. The first 4 bytes of the stack slot are undefined.
147 if (LocVT == MVT::f32)
148 Offset += 4;
149
150 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
151 return true;
152}
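// For illustration, a hypothetical prototype like void f(long a, double b,
// float c) would be assigned by the rules above roughly as follows: a gets
// stack offset 0 and is promoted to %i0; b gets offset 8 and is promoted to
// the second double-precision argument register (SP::D0 + 1); c gets offset
// 16 and is promoted to the odd single-precision register %f5 (SP::F1 + 16/4).
// Only once the offset passes the 6*8 (integer) or 16*8 (FP) limits does the
// value actually stay in its reserved stack slot.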
153
154// Allocate a half-sized argument for the 64-bit ABI.
155//
156// This is used when passing { float, int } structs by value in registers.
157static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
158 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
159 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
160 assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
161 unsigned Offset = State.AllocateStack(4, Align(4));
162
163 if (LocVT == MVT::f32 && Offset < 16*8) {
164 // Promote floats to %f0-%f31.
165 State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
166 LocVT, LocInfo));
167 return true;
168 }
169
170 if (LocVT == MVT::i32 && Offset < 6*8) {
171 // Promote integers to %i0-%i5, using half the register.
172 unsigned Reg = SP::I0 + Offset/8;
173 LocVT = MVT::i64;
174 LocInfo = CCValAssign::AExt;
175
176 // Set the Custom bit if this i32 goes in the high bits of a register.
177 if (Offset % 8 == 0)
178 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
179 LocVT, LocInfo));
180 else
181 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
182 return true;
183 }
184
185 // Bail out if this is a return CC and we run out of registers to place
186 // values into.
187 if (IsReturn)
188 return false;
189
190 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
191 return true;
192}
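// For illustration, a hypothetical struct { int a; float b; } passed by value
// under CC_Sparc64_Half: a is allocated offset 0, so it travels in the high
// 32 bits of %i0 (the custom-reg case above), while b is allocated offset 4
// and lands in %f1 (SP::F0 + 4/4).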
193
194static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
195 CCValAssign::LocInfo &LocInfo,
196 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
197 return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
198 State);
199}
200
201static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
202 CCValAssign::LocInfo &LocInfo,
203 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
204 return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
205 State);
206}
207
208static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
209 CCValAssign::LocInfo &LocInfo,
210 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
211 return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
212 State);
213}
214
215static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
216 CCValAssign::LocInfo &LocInfo,
217 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
218 return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
219 State);
220}
221
222#include "SparcGenCallingConv.inc"
223
224// The calling conventions in SparcCallingConv.td are described in terms of the
225// callee's register window. This function translates registers to the
226// corresponding caller window %o register.
227static unsigned toCallerWindow(unsigned Reg) {
228 static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
229 "Unexpected enum");
230 if (Reg >= SP::I0 && Reg <= SP::I7)
231 return Reg - SP::I0 + SP::O0;
232 return Reg;
233}
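// For illustration: with this mapping an argument the callee sees in %i2 is
// set up by the caller in %o2, while registers outside %i0-%i7 (e.g. %g1 or
// %o6) pass through unchanged. A minimal sketch of how the lowering code
// below uses it:
//
//   Register CalleeSideReg = SP::I0;                        // from SparcCallingConv.td
//   Register CallerSideReg = toCallerWindow(CalleeSideReg); // == SP::O0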
234
235bool SparcTargetLowering::CanLowerReturn(
236 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
237 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
238 SmallVector<CCValAssign, 16> RVLocs;
239 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
240 return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
241 : RetCC_Sparc32);
242}
243
244SDValue
245SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
246 bool IsVarArg,
247 const SmallVectorImpl<ISD::OutputArg> &Outs,
248 const SmallVectorImpl<SDValue> &OutVals,
249 const SDLoc &DL, SelectionDAG &DAG) const {
250 if (Subtarget->is64Bit())
251 return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
252 return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
253}
254
255SDValue
256SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
257 bool IsVarArg,
258 const SmallVectorImpl<ISD::OutputArg> &Outs,
259 const SmallVectorImpl<SDValue> &OutVals,
260 const SDLoc &DL, SelectionDAG &DAG) const {
261 MachineFunction &MF = DAG.getMachineFunction();
262
263 // CCValAssign - represent the assignment of the return value to locations.
264 SmallVector<CCValAssign, 16> RVLocs;
265
266 // CCState - Info about the registers and stack slot.
267 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
268 *DAG.getContext());
269
270 // Analyze return values.
271 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
272
273 SDValue Glue;
274 SmallVector<SDValue, 4> RetOps(1, Chain);
275 // Make room for the return address offset.
276 RetOps.push_back(SDValue());
277
278 // Copy the result values into the output registers.
279 for (unsigned i = 0, realRVLocIdx = 0;
280 i != RVLocs.size();
281 ++i, ++realRVLocIdx) {
282 CCValAssign &VA = RVLocs[i];
283 assert(VA.isRegLoc() && "Can only return in registers!");
284
285 SDValue Arg = OutVals[realRVLocIdx];
286
287 if (VA.needsCustom()) {
288 assert(VA.getLocVT() == MVT::v2i32);
289 // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
290 // happen by default if this wasn't a legal type)
291
292 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
293 Arg,
294 DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
295 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
296 Arg,
297 DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
298
299 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
300 Glue = Chain.getValue(1);
301 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
302 VA = RVLocs[++i]; // skip ahead to next loc
303 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
304 Glue);
305 } else
306 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);
307
308 // Guarantee that all emitted copies are stuck together with flags.
309 Glue = Chain.getValue(1);
310 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
311 }
312
313 unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
314 // If the function returns a struct, copy the SRetReturnReg to I0
315 if (MF.getFunction().hasStructRetAttr()) {
316 SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
317 Register Reg = SFI->getSRetReturnReg();
318 if (!Reg)
319 llvm_unreachable("sret virtual register not created in the entry block");
320 auto PtrVT = getPointerTy(DAG.getDataLayout());
321 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
322 Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
323 Glue = Chain.getValue(1);
324 RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
325 RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
326 }
327
328 RetOps[0] = Chain; // Update chain.
329 RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
330
331 // Add the glue if we have it.
332 if (Glue.getNode())
333 RetOps.push_back(Glue);
334
335 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
336}
337
338// Lower return values for the 64-bit ABI.
339// Return values are passed exactly the same way as function arguments.
340SDValue
341SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
342 bool IsVarArg,
343 const SmallVectorImpl<ISD::OutputArg> &Outs,
344 const SmallVectorImpl<SDValue> &OutVals,
345 const SDLoc &DL, SelectionDAG &DAG) const {
346 // CCValAssign - represent the assignment of the return value to locations.
347 SmallVector<CCValAssign, 16> RVLocs;
348
349 // CCState - Info about the registers and stack slot.
350 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
351 *DAG.getContext());
352
353 // Analyze return values.
354 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
355
356 SDValue Glue;
357 SmallVector<SDValue, 4> RetOps(1, Chain);
358
359 // The second operand on the return instruction is the return address offset.
360 // The return address is always %i7+8 with the 64-bit ABI.
361 RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
362
363 // Copy the result values into the output registers.
364 for (unsigned i = 0; i != RVLocs.size(); ++i) {
365 CCValAssign &VA = RVLocs[i];
366 assert(VA.isRegLoc() && "Can only return in registers!");
367 SDValue OutVal = OutVals[i];
368
369 // Integer return values must be sign or zero extended by the callee.
370 switch (VA.getLocInfo()) {
371 case CCValAssign::Full: break;
372 case CCValAssign::SExt:
373 OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
374 break;
375 case CCValAssign::ZExt:
376 OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
377 break;
378 case CCValAssign::AExt:
379 OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
380 break;
381 default:
382 llvm_unreachable("Unknown loc info!");
383 }
384
385 // The custom bit on an i32 return value indicates that it should be passed
386 // in the high bits of the register.
387 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
388 OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
389 DAG.getConstant(32, DL, MVT::i32));
390
391 // The next value may go in the low bits of the same register.
392 // Handle both at once.
393 if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
394 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
395 OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
396 // Skip the next value, it's already done.
397 ++i;
398 }
399 }
400
401 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);
402
403 // Guarantee that all emitted copies are stuck together with flags.
404 Glue = Chain.getValue(1);
405 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
406 }
407
408 RetOps[0] = Chain; // Update chain.
409
410 // Add the flag if we have it.
411 if (Glue.getNode())
412 RetOps.push_back(Glue);
413
414 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
415}
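// For illustration, a hypothetical function returning { i32, i32 } in
// registers under the 64-bit ABI: the first element is shifted into the high
// 32 bits of its location register (typically %i0) by the needsCustom() path
// above, and the second element is OR'ed into the low 32 bits of the same
// register, so the pair comes back in a single 64-bit register.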
416
417SDValue SparcTargetLowering::LowerFormalArguments(
418 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
419 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
420 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
421 if (Subtarget->is64Bit())
422 return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
423 DL, DAG, InVals);
424 return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
425 DL, DAG, InVals);
426}
427
428/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
429/// passed in either one or two GPRs, including FP values. TODO: we should
430/// pass FP values in FP registers for fastcc functions.
431SDValue SparcTargetLowering::LowerFormalArguments_32(
432 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
433 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
434 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
435 MachineFunction &MF = DAG.getMachineFunction();
436 MachineRegisterInfo &RegInfo = MF.getRegInfo();
437 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
438
439 // Assign locations to all of the incoming arguments.
440 SmallVector<CCValAssign, 16> ArgLocs;
441 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
442 *DAG.getContext());
443 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
444
445 const unsigned StackOffset = 92;
446 bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
447
448 unsigned InIdx = 0;
449 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
450 CCValAssign &VA = ArgLocs[i];
451
452 if (Ins[InIdx].Flags.isSRet()) {
453 if (InIdx != 0)
454 report_fatal_error("sparc only supports sret on the first parameter");
455 // Get SRet from [%fp+64].
456 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
457 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
458 SDValue Arg =
459 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
460 InVals.push_back(Arg);
461 continue;
462 }
463
464 if (VA.isRegLoc()) {
465 if (VA.needsCustom()) {
466 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
467
468 Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
469 MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
470 SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
471
472 assert(i+1 < e);
473 CCValAssign &NextVA = ArgLocs[++i];
474
475 SDValue LoVal;
476 if (NextVA.isMemLoc()) {
477 int FrameIdx = MF.getFrameInfo().
478 CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
479 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
480 LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
481 } else {
482 Register loReg = MF.addLiveIn(NextVA.getLocReg(),
483 &SP::IntRegsRegClass);
484 LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
485 }
486
487 if (IsLittleEndian)
488 std::swap(LoVal, HiVal);
489
490 SDValue WholeValue =
491 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
492 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
493 InVals.push_back(WholeValue);
494 continue;
495 }
496 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
497 MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
498 SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
499 if (VA.getLocVT() == MVT::f32)
500 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
501 else if (VA.getLocVT() != MVT::i32) {
502 Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
503 DAG.getValueType(VA.getLocVT()));
504 Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
505 }
506 InVals.push_back(Arg);
507 continue;
508 }
509
510 assert(VA.isMemLoc());
511
512 unsigned Offset = VA.getLocMemOffset()+StackOffset;
513 auto PtrVT = getPointerTy(DAG.getDataLayout());
514
515 if (VA.needsCustom()) {
516 assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
517 // If it is double-word aligned, just load.
518 if (Offset % 8 == 0) {
519 int FI = MF.getFrameInfo().CreateFixedObject(8,
520 Offset,
521 true);
522 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
523 SDValue Load =
524 DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
525 InVals.push_back(Load);
526 continue;
527 }
528
529 int FI = MF.getFrameInfo().CreateFixedObject(4,
530 Offset,
531 true);
532 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
533 SDValue HiVal =
534 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
535 int FI2 = MF.getFrameInfo().CreateFixedObject(4,
536 Offset+4,
537 true);
538 SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
539
540 SDValue LoVal =
541 DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
542
543 if (IsLittleEndian)
544 std::swap(LoVal, HiVal);
545
546 SDValue WholeValue =
547 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
548 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
549 InVals.push_back(WholeValue);
550 continue;
551 }
552
553 int FI = MF.getFrameInfo().CreateFixedObject(4,
554 Offset,
555 true);
556 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
557 SDValue Load ;
558 if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
559 Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
560 } else if (VA.getValVT() == MVT::f128) {
561 report_fatal_error("SPARCv8 does not handle f128 in calls; "
562 "pass indirectly");
563 } else {
564 // We shouldn't see any other value types here.
565 llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
566 }
567 InVals.push_back(Load);
568 }
569
570 if (MF.getFunction().hasStructRetAttr()) {
571 // Copy the SRet Argument to SRetReturnReg.
572 SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
573 Register Reg = SFI->getSRetReturnReg();
574 if (!Reg) {
575 Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
576 SFI->setSRetReturnReg(Reg);
577 }
578 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
579 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
580 }
581
582 // Store remaining ArgRegs to the stack if this is a varargs function.
583 if (isVarArg) {
584 static const MCPhysReg ArgRegs[] = {
585 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
586 };
587 unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
588 const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
589 unsigned ArgOffset = CCInfo.getStackSize();
590 if (NumAllocated == 6)
591 ArgOffset += StackOffset;
592 else {
593 assert(!ArgOffset);
594 ArgOffset = 68+4*NumAllocated;
595 }
596
597 // Remember the vararg offset for the va_start implementation.
598 FuncInfo->setVarArgsFrameOffset(ArgOffset);
599
600 std::vector<SDValue> OutChains;
601
602 for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
603 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
604 MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
605 SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
606
607 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
608 true);
609 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
610
611 OutChains.push_back(
612 DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
613 ArgOffset += 4;
614 }
615
616 if (!OutChains.empty()) {
617 OutChains.push_back(Chain);
618 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
619 }
620 }
621
622 return Chain;
623}
624
625// Lower formal arguments for the 64 bit ABI.
626SDValue SparcTargetLowering::LowerFormalArguments_64(
627 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
628 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
629 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
630 MachineFunction &MF = DAG.getMachineFunction();
631
632 // Analyze arguments according to CC_Sparc64.
633 SmallVector<CCValAssign, 16> ArgLocs;
634 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
635 *DAG.getContext());
636 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
637
638 // The argument array begins at %fp+BIAS+128, after the register save area.
639 const unsigned ArgArea = 128;
640
641 for (const CCValAssign &VA : ArgLocs) {
642 if (VA.isRegLoc()) {
643 // This argument is passed in a register.
644 // All integer register arguments are promoted by the caller to i64.
645
646 // Create a virtual register for the promoted live-in value.
647 Register VReg = MF.addLiveIn(VA.getLocReg(),
648 getRegClassFor(VA.getLocVT()));
649 SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
650
651 // Get the high bits for i32 struct elements.
652 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
653 Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
654 DAG.getConstant(32, DL, MVT::i32));
655
656 // The caller promoted the argument, so insert an Assert?ext SDNode so we
657 // won't promote the value again in this function.
658 switch (VA.getLocInfo()) {
659 case CCValAssign::SExt:
660 Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
661 DAG.getValueType(VA.getValVT()));
662 break;
663 case CCValAssign::ZExt:
664 Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
665 DAG.getValueType(VA.getValVT()));
666 break;
667 default:
668 break;
669 }
670
671 // Truncate the register down to the argument type.
672 if (VA.isExtInLoc())
673 Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
674
675 InVals.push_back(Arg);
676 continue;
677 }
678
679 // The registers are exhausted. This argument was passed on the stack.
680 assert(VA.isMemLoc());
681 // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
682 // beginning of the arguments area at %fp+BIAS+128.
683 unsigned Offset = VA.getLocMemOffset() + ArgArea;
684 unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
685 // Adjust offset for extended arguments, SPARC is big-endian.
686 // The caller will have written the full slot with extended bytes, but we
687 // prefer our own extending loads.
688 if (VA.isExtInLoc())
689 Offset += 8 - ValSize;
690 int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
691 InVals.push_back(
692 DAG.getLoad(VA.getValVT(), DL, Chain,
693 DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
694 MachinePointerInfo::getFixedStack(MF, FI)));
695 }
696
697 if (!IsVarArg)
698 return Chain;
699
700 // This function takes variable arguments, some of which may have been passed
701 // in registers %i0-%i5. Variable floating point arguments are never passed
702 // in floating point registers. They go on %i0-%i5 or on the stack like
703 // integer arguments.
704 //
705 // The va_start intrinsic needs to know the offset to the first variable
706 // argument.
707 unsigned ArgOffset = CCInfo.getStackSize();
708 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
709 // Skip the 128 bytes of register save area.
710 FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
711 Subtarget->getStackPointerBias());
712
713 // Save the variable arguments that were passed in registers.
714 // The caller is required to reserve stack space for 6 arguments regardless
715 // of how many arguments were actually passed.
716 SmallVector<SDValue, 8> OutChains;
717 for (; ArgOffset < 6*8; ArgOffset += 8) {
718 Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
719 SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
720 int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
721 auto PtrVT = getPointerTy(MF.getDataLayout());
722 OutChains.push_back(
723 DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
724 MachinePointerInfo::getFixedStack(MF, FI)));
725 }
726
727 if (!OutChains.empty())
728 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
729
730 return Chain;
731}
732
733// Check whether any of the argument registers are reserved
734static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI,
735 const MachineFunction &MF) {
736 // The register window design means that outgoing parameters at O*
737 // will appear in the callee as I*.
738 // Be conservative and check both sides of the register names.
739 bool Outgoing =
740 llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {
741 return TRI->isReservedReg(MF, r);
742 });
743 bool Incoming =
744 llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {
745 return TRI->isReservedReg(MF, r);
746 });
747 return Outgoing || Incoming;
748}
749
750static void emitReservedArgRegCallError(const MachineFunction &MF) {
751 const Function &F = MF.getFunction();
752 F.getContext().diagnose(DiagnosticInfoUnsupported{
753 F, ("SPARC doesn't support"
754 " function calls if any of the argument registers is reserved.")});
755}
756
757SDValue
758SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
759 SmallVectorImpl<SDValue> &InVals) const {
760 if (Subtarget->is64Bit())
761 return LowerCall_64(CLI, InVals);
762 return LowerCall_32(CLI, InVals);
763}
764
765static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
766 const CallBase *Call) {
767 if (Call)
768 return Call->hasFnAttr(Attribute::ReturnsTwice);
769
770 const Function *CalleeFn = nullptr;
771 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
772 CalleeFn = dyn_cast<Function>(G->getGlobal());
773 } else if (ExternalSymbolSDNode *E =
774 dyn_cast<ExternalSymbolSDNode>(Callee)) {
775 const Function &Fn = DAG.getMachineFunction().getFunction();
776 const Module *M = Fn.getParent();
777 const char *CalleeName = E->getSymbol();
778 CalleeFn = M->getFunction(CalleeName);
779 }
780
781 if (!CalleeFn)
782 return false;
783 return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
784}
785
786/// IsEligibleForTailCallOptimization - Check whether the call is eligible
787/// for tail call optimization.
788bool SparcTargetLowering::IsEligibleForTailCallOptimization(
789 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {
790
791 auto &Outs = CLI.Outs;
792 auto &Caller = MF.getFunction();
793
794 // Do not tail call opt functions with "disable-tail-calls" attribute.
795 if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
796 return false;
797
798 // Do not tail call opt if the stack is used to pass parameters.
799 // 64-bit targets have a slightly higher limit since the ABI requires
800 // to allocate some space even when all the parameters fit inside registers.
801 unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
802 if (CCInfo.getStackSize() > StackSizeLimit)
803 return false;
804
805 // Do not tail call opt if either the callee or caller returns
806 // a struct and the other does not.
807 if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
808 return false;
809
810 // Byval parameters hand the function a pointer directly into the stack area
811 // we want to reuse during a tail call.
812 for (auto &Arg : Outs)
813 if (Arg.Flags.isByVal())
814 return false;
815
816 return true;
817}
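// For illustration, roughly speaking: on a 32-bit target a call that passes
// at most six words of arguments needs no outgoing stack space, so
// getStackSize() is 0 and the call can be emitted as a tail call; a seventh
// word forces a stack slot and disables the optimization. The 48-byte
// allowance on 64-bit targets covers the six argument slots the ABI reserves
// even when all arguments travel in registers.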
818
819// Lower a call for the 32-bit ABI.
820SDValue
821SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
822 SmallVectorImpl<SDValue> &InVals) const {
823 SelectionDAG &DAG = CLI.DAG;
824 SDLoc &dl = CLI.DL;
825 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
826 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
827 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
828 SDValue Chain = CLI.Chain;
829 SDValue Callee = CLI.Callee;
830 bool &isTailCall = CLI.IsTailCall;
831 CallingConv::ID CallConv = CLI.CallConv;
832 bool isVarArg = CLI.IsVarArg;
833 MachineFunction &MF = DAG.getMachineFunction();
834
835 // Analyze operands of the call, assigning locations to each operand.
836 SmallVector<CCValAssign, 16> ArgLocs;
837 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
838 *DAG.getContext());
839 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
840
841 isTailCall = isTailCall && IsEligibleForTailCallOptimization(
842 CCInfo, CLI, DAG.getMachineFunction());
843
844 // Get the size of the outgoing arguments stack space requirement.
845 unsigned ArgsSize = CCInfo.getStackSize();
846
847 // Keep stack frames 8-byte aligned.
848 ArgsSize = (ArgsSize+7) & ~7;
849
850 MachineFrameInfo &MFI = MF.getFrameInfo();
851
852 // Create local copies for byval args.
853 SmallVector<SDValue, 8> ByValArgs;
854 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
855 ISD::ArgFlagsTy Flags = Outs[i].Flags;
856 if (!Flags.isByVal())
857 continue;
858
859 SDValue Arg = OutVals[i];
860 unsigned Size = Flags.getByValSize();
861 Align Alignment = Flags.getNonZeroByValAlign();
862
863 if (Size > 0U) {
864 int FI = MFI.CreateStackObject(Size, Alignment, false);
865 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
866 SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
867
868 Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
869 false, // isVolatile,
870 (Size <= 32), // AlwaysInline if size <= 32,
871 /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
872 MachinePointerInfo());
873 ByValArgs.push_back(FIPtr);
874 }
875 else {
876 SDValue nullVal;
877 ByValArgs.push_back(nullVal);
878 }
879 }
880
881 assert(!isTailCall || ArgsSize == 0);
882
883 if (!isTailCall)
884 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
885
886 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
887 SmallVector<SDValue, 8> MemOpChains;
888
889 const unsigned StackOffset = 92;
890 bool hasStructRetAttr = false;
891 unsigned SRetArgSize = 0;
892 // Walk the register/memloc assignments, inserting copies/loads.
893 for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
894 i != e;
895 ++i, ++realArgIdx) {
896 CCValAssign &VA = ArgLocs[i];
897 SDValue Arg = OutVals[realArgIdx];
898
899 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
900
901 // Use local copy if it is a byval arg.
902 if (Flags.isByVal()) {
903 Arg = ByValArgs[byvalArgIdx++];
904 if (!Arg) {
905 continue;
906 }
907 }
908
909 // Promote the value if needed.
910 switch (VA.getLocInfo()) {
911 default: llvm_unreachable("Unknown loc info!");
912 case CCValAssign::Full: break;
913 case CCValAssign::SExt:
914 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
915 break;
916 case CCValAssign::ZExt:
917 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
918 break;
919 case CCValAssign::AExt:
920 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
921 break;
922 case CCValAssign::BCvt:
923 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
924 break;
924 break;
925 }
926
927 if (Flags.isSRet()) {
928 assert(VA.needsCustom());
929
930 if (isTailCall)
931 continue;
932
933 // store SRet argument in %sp+64
934 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
935 SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
936 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
937 MemOpChains.push_back(
938 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
939 hasStructRetAttr = true;
940 // sret only allowed on first argument
941 assert(Outs[realArgIdx].OrigArgIndex == 0);
942 SRetArgSize =
943 DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
944 continue;
945 }
946
947 if (VA.needsCustom()) {
948 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
949
950 if (VA.isMemLoc()) {
951 unsigned Offset = VA.getLocMemOffset() + StackOffset;
952 // if it is double-word aligned, just store.
953 if (Offset % 8 == 0) {
954 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
955 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
956 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
957 MemOpChains.push_back(
958 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
959 continue;
960 }
961 }
962
963 if (VA.getLocVT() == MVT::f64) {
964 // Move the float value from the float registers into the
965 // integer registers.
966 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
967 Arg = bitcastConstantFPToInt(C, dl, DAG);
968 else
969 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
970 }
971
972 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
973 Arg,
974 DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
975 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
976 Arg,
977 DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
978
979 if (VA.isRegLoc()) {
980 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
981 assert(i+1 != e);
982 CCValAssign &NextVA = ArgLocs[++i];
983 if (NextVA.isRegLoc()) {
984 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
985 } else {
986 // Store the second part in stack.
987 unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
988 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
989 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
990 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
991 MemOpChains.push_back(
992 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
993 }
994 } else {
995 unsigned Offset = VA.getLocMemOffset() + StackOffset;
996 // Store the first part.
997 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
998 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
999 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1000 MemOpChains.push_back(
1001 DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
1002 // Store the second part.
1003 PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
1004 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1005 MemOpChains.push_back(
1006 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
1007 }
1008 continue;
1009 }
1010
1011 // Arguments that can be passed on register must be kept at
1012 // RegsToPass vector
1013 if (VA.isRegLoc()) {
1014 if (VA.getLocVT() != MVT::f32) {
1015 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1016 continue;
1017 }
1018 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
1019 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1020 continue;
1021 }
1022
1023 assert(VA.isMemLoc());
1024
1025 // Create a store off the stack pointer for this argument.
1026 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1027 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
1028 dl);
1029 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1030 MemOpChains.push_back(
1031 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
1032 }
1033
1034
1035 // Emit all stores, make sure they occur before any copies into physregs.
1036 if (!MemOpChains.empty())
1037 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1038
1039 // Build a sequence of copy-to-reg nodes chained together with token
1040 // chain and flag operands which copy the outgoing args into registers.
1041 // The InGlue is necessary since all emitted instructions must be
1042 // stuck together.
1043 SDValue InGlue;
1044 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1045 Register Reg = RegsToPass[i].first;
1046 if (!isTailCall)
1047 Reg = toCallerWindow(Reg);
1048 Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InGlue);
1049 InGlue = Chain.getValue(1);
1050 }
1051
1052 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1053
1054 // If the callee is a GlobalAddress node (quite common, every direct call is)
1055 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1056 // Likewise ExternalSymbol -> TargetExternalSymbol.
1059 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1060 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
1061 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1062 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
1063
1064 // Returns a chain & a flag for retval copy to use
1065 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1066 SmallVector<SDValue, 8> Ops;
1067 Ops.push_back(Chain);
1068 Ops.push_back(Callee);
1069 if (hasStructRetAttr)
1070 Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
1071 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1072 Register Reg = RegsToPass[i].first;
1073 if (!isTailCall)
1074 Reg = toCallerWindow(Reg);
1075 Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
1076 }
1077
1078 // Add a register mask operand representing the call-preserved registers.
1079 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1080 const uint32_t *Mask =
1081 ((hasReturnsTwice)
1082 ? TRI->getRTCallPreservedMask(CallConv)
1083 : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
1084
1085 if (isAnyArgRegReserved(TRI, MF))
1086 emitReservedArgRegCallError(MF);
1087
1088 assert(Mask && "Missing call preserved mask for calling convention");
1089 Ops.push_back(DAG.getRegisterMask(Mask));
1090
1091 if (InGlue.getNode())
1092 Ops.push_back(InGlue);
1093
1094 if (isTailCall) {
1095 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
1096 return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
1097 }
1098
1099 Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
1100 InGlue = Chain.getValue(1);
1101
1102 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
1103 InGlue = Chain.getValue(1);
1104
1105 // Assign locations to each value returned by this call.
1106 SmallVector<CCValAssign, 16> RVLocs;
1107 CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1108 *DAG.getContext());
1109
1110 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
1111
1112 // Copy all of the result registers out of their specified physreg.
1113 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1114 assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
1115 if (RVLocs[i].getLocVT() == MVT::v2i32) {
1116 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
1117 SDValue Lo = DAG.getCopyFromReg(
1118 Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
1119 Chain = Lo.getValue(1);
1120 InGlue = Lo.getValue(2);
1121 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
1122 DAG.getConstant(0, dl, MVT::i32));
1123 SDValue Hi = DAG.getCopyFromReg(
1124 Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
1125 Chain = Hi.getValue(1);
1126 InGlue = Hi.getValue(2);
1127 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1128 DAG.getConstant(1, dl, MVT::i32));
1129 InVals.push_back(Vec);
1130 } else {
1131 Chain =
1132 DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1133 RVLocs[i].getValVT(), InGlue)
1134 .getValue(1);
1135 InGlue = Chain.getValue(2);
1136 InVals.push_back(Chain.getValue(0));
1137 }
1138 }
1139
1140 return Chain;
1141}
1142
1143// FIXME? Maybe this could be a TableGen attribute on some registers and
1144// this table could be generated automatically from RegInfo.
1145Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
1146 const MachineFunction &MF) const {
1147 Register Reg = StringSwitch<Register>(RegName)
1148 .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1149 .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1150 .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1151 .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1152 .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1153 .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1154 .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1155 .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1156 .Default(0);
1157
1158 // If we're directly referencing register names
1159 // (e.g in GCC C extension `register int r asm("g1");`),
1160 // make sure that said register is in the reserve list.
1161 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1162 if (!TRI->isReservedReg(MF, Reg))
1163 Reg = 0;
1164
1165 if (Reg)
1166 return Reg;
1167
1168 report_fatal_error("Invalid register name global variable");
1169}
1170
1171// Fixup floating point arguments in the ... part of a varargs call.
1172//
1173// The SPARC v9 ABI requires that floating point arguments are treated the same
1174// as integers when calling a varargs function. This does not apply to the
1175// fixed arguments that are part of the function's prototype.
1176//
1177// This function post-processes a CCValAssign array created by
1178// AnalyzeCallOperands().
1179static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
1180 ArrayRef<ISD::OutputArg> Outs) {
1181 for (CCValAssign &VA : ArgLocs) {
1182 MVT ValTy = VA.getLocVT();
1183 // FIXME: What about f32 arguments? C promotes them to f64 when calling
1184 // varargs functions.
1185 if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1186 continue;
1187 // The fixed arguments to a varargs function still go in FP registers.
1188 if (Outs[VA.getValNo()].IsFixed)
1189 continue;
1190
1191 // This floating point argument should be reassigned.
1192 // Determine the offset into the argument array.
1193 Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1194 unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1195 unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1196 assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1197
1198 if (Offset < 6*8) {
1199 // This argument should go in %i0-%i5.
1200 unsigned IReg = SP::I0 + Offset/8;
1201 if (ValTy == MVT::f64)
1202 // Full register, just bitconvert into i64.
1203 VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
1205 else {
1206 assert(ValTy == MVT::f128 && "Unexpected type!");
1207 // Full register, just bitconvert into i128 -- We will lower this into
1208 // two i64s in LowerCall_64.
1209 VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
1210 MVT::i128, CCValAssign::BCvt);
1211 }
1212 } else {
1213 // This needs to go to memory, we're out of integer registers.
1214 VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
1215 VA.getLocVT(), VA.getLocInfo());
1216 }
1217 }
1218}
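// For illustration, assuming a varargs call such as
//
//   double x = 3.14;
//   printf("%g\n", x);   // x travels as raw i64 bits in an integer register
//
// on SPARC v9: AnalyzeCallOperands() first hands x an FP argument register,
// and the fixup above then moves it to the matching integer register slot
// (%i1 in callee-window terms, i.e. %o1 at the call site). A fixed,
// prototyped double argument would keep its FP register.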
1219
1220// Lower a call for the 64-bit ABI.
1221SDValue
1222SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
1223 SmallVectorImpl<SDValue> &InVals) const {
1224 SelectionDAG &DAG = CLI.DAG;
1225 SDLoc DL = CLI.DL;
1226 SDValue Chain = CLI.Chain;
1227 auto PtrVT = getPointerTy(DAG.getDataLayout());
1228 MachineFunction &MF = DAG.getMachineFunction();
1229
1230 // Analyze operands of the call, assigning locations to each operand.
1231 SmallVector<CCValAssign, 16> ArgLocs;
1232 CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1233 *DAG.getContext());
1234 CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1235
1236 CLI.IsTailCall = CLI.IsTailCall && IsEligibleForTailCallOptimization(
1237 CCInfo, CLI, DAG.getMachineFunction());
1238
1239 // Get the size of the outgoing arguments stack space requirement.
1240 // The stack offset computed by CC_Sparc64 includes all arguments.
1241 // Called functions expect 6 argument words to exist in the stack frame, used
1242 // or not.
1243 unsigned StackReserved = 6 * 8u;
1244 unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());
1245
1246 // Keep stack frames 16-byte aligned.
1247 ArgsSize = alignTo(ArgsSize, 16);
1248
1249 // Varargs calls require special treatment.
1250 if (CLI.IsVarArg)
1251 fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1252
1253 assert(!CLI.IsTailCall || ArgsSize == StackReserved);
1254
1255 // Adjust the stack pointer to make room for the arguments.
1256 // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1257 // with more than 6 arguments.
1258 if (!CLI.IsTailCall)
1259 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1260
1261 // Collect the set of registers to pass to the function and their values.
1262 // This will be emitted as a sequence of CopyToReg nodes glued to the call
1263 // instruction.
1264 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
1265
1266 // Collect chains from all the memory operations that copy arguments to the
1267 // stack. They must follow the stack pointer adjustment above and precede the
1268 // call instruction itself.
1269 SmallVector<SDValue, 8> MemOpChains;
1270
1271 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1272 const CCValAssign &VA = ArgLocs[i];
1273 SDValue Arg = CLI.OutVals[i];
1274
1275 // Promote the value if needed.
1276 switch (VA.getLocInfo()) {
1277 default:
1278 llvm_unreachable("Unknown location info!");
1279 case CCValAssign::Full:
1280 break;
1281 case CCValAssign::SExt:
1282 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1283 break;
1284 case CCValAssign::ZExt:
1285 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1286 break;
1287 case CCValAssign::AExt:
1288 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1289 break;
1290 case CCValAssign::BCvt:
1291 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1292 // SPARC does not support i128 natively. Lower it into two i64, see below.
1293 if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1294 || VA.getLocVT() != MVT::i128)
1295 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1296 break;
1297 }
1298
1299 if (VA.isRegLoc()) {
1300 if (VA.needsCustom() && VA.getValVT() == MVT::f128
1301 && VA.getLocVT() == MVT::i128) {
1302 // Store and reload into the integer register reg and reg+1.
1303 unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1304 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1305 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1306 SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1307 HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1308 SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1309 LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1310
1311 // Store to %sp+BIAS+128+Offset
1312 SDValue Store =
1313 DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1314 // Load into Reg and Reg+1
1315 SDValue Hi64 =
1316 DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1317 SDValue Lo64 =
1318 DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1319
1320 Register HiReg = VA.getLocReg();
1321 Register LoReg = VA.getLocReg() + 1;
1322 if (!CLI.IsTailCall) {
1323 HiReg = toCallerWindow(HiReg);
1324 LoReg = toCallerWindow(LoReg);
1325 }
1326
1327 RegsToPass.push_back(std::make_pair(HiReg, Hi64));
1328 RegsToPass.push_back(std::make_pair(LoReg, Lo64));
1329 continue;
1330 }
1331
1332 // The custom bit on an i32 return value indicates that it should be
1333 // passed in the high bits of the register.
1334 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1335 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1336 DAG.getConstant(32, DL, MVT::i32));
1337
1338 // The next value may go in the low bits of the same register.
1339 // Handle both at once.
1340 if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1341 ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1342 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
1343 CLI.OutVals[i+1]);
1344 Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1345 // Skip the next value, it's already done.
1346 ++i;
1347 }
1348 }
1349
1350 Register Reg = VA.getLocReg();
1351 if (!CLI.IsTailCall)
1352 Reg = toCallerWindow(Reg);
1353 RegsToPass.push_back(std::make_pair(Reg, Arg));
1354 continue;
1355 }
1356
1357 assert(VA.isMemLoc());
1358
1359 // Create a store off the stack pointer for this argument.
1360 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1361 // The argument area starts at %fp+BIAS+128 in the callee frame,
1362 // %sp+BIAS+128 in ours.
1363 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1364 Subtarget->getStackPointerBias() +
1365 128, DL);
1366 PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1367 MemOpChains.push_back(
1368 DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1369 }
1370
1371 // Emit all stores, make sure they occur before the call.
1372 if (!MemOpChains.empty())
1373 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1374
1375 // Build a sequence of CopyToReg nodes glued together with token chain and
1376 // glue operands which copy the outgoing args into registers. The InGlue is
1377 // necessary since all emitted instructions must be stuck together in order
1378 // to pass the live physical registers.
1379 SDValue InGlue;
1380 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1381 Chain = DAG.getCopyToReg(Chain, DL,
1382 RegsToPass[i].first, RegsToPass[i].second, InGlue);
1383 InGlue = Chain.getValue(1);
1384 }
1385
1386 // If the callee is a GlobalAddress node (quite common, every direct call is)
1387 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1388 // Likewise ExternalSymbol -> TargetExternalSymbol.
1389 SDValue Callee = CLI.Callee;
1390 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1393 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1394 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
1395 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1396 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
1397
1398 // Build the operands for the call instruction itself.
1399 SmallVector<SDValue, 8> Ops;
1400 Ops.push_back(Chain);
1401 Ops.push_back(Callee);
1402 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1403 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1404 RegsToPass[i].second.getValueType()));
1405
1406 // Add a register mask operand representing the call-preserved registers.
1407 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1408 const uint32_t *Mask =
1409 ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1410 : TRI->getCallPreservedMask(DAG.getMachineFunction(),
1411 CLI.CallConv));
1412
1413 if (isAnyArgRegReserved(TRI, MF))
1414 emitReservedArgRegCallError(MF);
1415
1416 assert(Mask && "Missing call preserved mask for calling convention");
1417 Ops.push_back(DAG.getRegisterMask(Mask));
1418
1419 // Make sure the CopyToReg nodes are glued to the call instruction which
1420 // consumes the registers.
1421 if (InGlue.getNode())
1422 Ops.push_back(InGlue);
1423
1424 // Now the call itself.
1425 if (CLI.IsTailCall) {
1426 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
1427 return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
1428 }
1429 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1430 Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1431 InGlue = Chain.getValue(1);
1432
1433 // Revert the stack pointer immediately after the call.
1434 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
1435 InGlue = Chain.getValue(1);
1436
1437 // Now extract the return values. This is more or less the same as
1438 // LowerFormalArguments_64.
1439
1440 // Assign locations to each value returned by this call.
1441 SmallVector<CCValAssign, 16> RVLocs;
1442 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1443 *DAG.getContext());
1444
1445 // Set inreg flag manually for codegen generated library calls that
1446 // return float.
1447 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1448 CLI.Ins[0].Flags.setInReg();
1449
1450 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1451
1452 // Copy all of the result registers out of their specified physreg.
1453 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1454 CCValAssign &VA = RVLocs[i];
1455 assert(VA.isRegLoc() && "Can only return in registers!");
1456 unsigned Reg = toCallerWindow(VA.getLocReg());
1457
1458 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1459 // reside in the same register in the high and low bits. Reuse the
1460 // CopyFromReg previous node to avoid duplicate copies.
1461 SDValue RV;
1462 if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1463 if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1464 RV = Chain.getValue(0);
1465
1466 // But usually we'll create a new CopyFromReg for a different register.
1467 if (!RV.getNode()) {
1468 RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1469 Chain = RV.getValue(1);
1470 InGlue = Chain.getValue(2);
1471 }
1472
1473 // Get the high bits for i32 struct elements.
1474 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1475 RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1476 DAG.getConstant(32, DL, MVT::i32));
1477
1478 // The callee promoted the return value, so insert an Assert?ext SDNode so
1479 // we won't promote the value again in this function.
1480 switch (VA.getLocInfo()) {
1481 case CCValAssign::SExt:
1482 RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1483 DAG.getValueType(VA.getValVT()));
1484 break;
1485 case CCValAssign::ZExt:
1486 RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1487 DAG.getValueType(VA.getValVT()));
1488 break;
1489 default:
1490 break;
1491 }
1492
1493 // Truncate the register down to the return value type.
1494 if (VA.isExtInLoc())
1495 RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1496
1497 InVals.push_back(RV);
1498 }
1499
1500 return Chain;
1501}
1502
1503//===----------------------------------------------------------------------===//
1504// TargetLowering Implementation
1505//===----------------------------------------------------------------------===//
1506
1507TargetLowering::AtomicExpansionKind SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
1508 if (AI->getOperation() == AtomicRMWInst::Xchg &&
1509 AI->getType()->getPrimitiveSizeInBits() == 32)
1510 return AtomicExpansionKind::None; // Uses xchg instruction
1511
1512 return AtomicExpansionKind::CmpXChg;
1513}
1514
1515/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
1516/// rcond condition.
1517static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC) {
1518 switch (CC) {
1519 default:
1520 llvm_unreachable("Unknown/unsigned integer condition code!");
1521 case ISD::SETEQ:
1522 return SPCC::REG_Z;
1523 case ISD::SETNE:
1524 return SPCC::REG_NZ;
1525 case ISD::SETLT:
1526 return SPCC::REG_LZ;
1527 case ISD::SETGT:
1528 return SPCC::REG_GZ;
1529 case ISD::SETLE:
1530 return SPCC::REG_LEZ;
1531 case ISD::SETGE:
1532 return SPCC::REG_GEZ;
1533 }
1534}
1535
1536/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1537/// condition.
1538static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
1539 switch (CC) {
1540 default: llvm_unreachable("Unknown integer condition code!");
1541 case ISD::SETEQ: return SPCC::ICC_E;
1542 case ISD::SETNE: return SPCC::ICC_NE;
1543 case ISD::SETLT: return SPCC::ICC_L;
1544 case ISD::SETGT: return SPCC::ICC_G;
1545 case ISD::SETLE: return SPCC::ICC_LE;
1546 case ISD::SETGE: return SPCC::ICC_GE;
1547 case ISD::SETULT: return SPCC::ICC_CS;
1548 case ISD::SETULE: return SPCC::ICC_LEU;
1549 case ISD::SETUGT: return SPCC::ICC_GU;
1550 case ISD::SETUGE: return SPCC::ICC_CC;
1551 }
1552}
1553
1554/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
1555/// FCC condition.
1556static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
1557 switch (CC) {
1558 default: llvm_unreachable("Unknown fp condition code!");
1559 case ISD::SETEQ:
1560 case ISD::SETOEQ: return SPCC::FCC_E;
1561 case ISD::SETNE:
1562 case ISD::SETUNE: return SPCC::FCC_NE;
1563 case ISD::SETLT:
1564 case ISD::SETOLT: return SPCC::FCC_L;
1565 case ISD::SETGT:
1566 case ISD::SETOGT: return SPCC::FCC_G;
1567 case ISD::SETLE:
1568 case ISD::SETOLE: return SPCC::FCC_LE;
1569 case ISD::SETGE:
1570 case ISD::SETOGE: return SPCC::FCC_GE;
1571 case ISD::SETULT: return SPCC::FCC_UL;
1572 case ISD::SETULE: return SPCC::FCC_ULE;
1573 case ISD::SETUGT: return SPCC::FCC_UG;
1574 case ISD::SETUGE: return SPCC::FCC_UGE;
1575 case ISD::SETUO: return SPCC::FCC_U;
1576 case ISD::SETO: return SPCC::FCC_O;
1577 case ISD::SETONE: return SPCC::FCC_LG;
1578 case ISD::SETUEQ: return SPCC::FCC_UE;
1579 }
1580}
1581
1582SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
1583 const SparcSubtarget &STI)
1584 : TargetLowering(TM), Subtarget(&STI) {
1585 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
1586
1587 // Instructions which use registers as conditionals examine all the
1588 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1589 // matters much whether it's ZeroOrOneBooleanContent, or
1590 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1591 // former.
1594
1595 // Set up the register classes.
1596 addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1597 if (!Subtarget->useSoftFloat()) {
1598 addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1599 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1600 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1601 }
1602 if (Subtarget->is64Bit()) {
1603 addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1604 } else {
1605 // On 32bit sparc, we define a double-register 32bit register
1606 // class, as well. This is modeled in LLVM as a 2-vector of i32.
1607 addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1608
1609 // ...but almost all operations must be expanded, so set that as
1610 // the default.
1611 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1612 setOperationAction(Op, MVT::v2i32, Expand);
1613 }
1614 // Truncating/extending stores/loads are also not supported.
1615 for (MVT VT : MVT::integer_valuetypes()) {
1616 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
1617 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
1618 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
1619
1620 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
1621 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
1622 setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
1623
1624 setTruncStoreAction(VT, MVT::v2i32, Expand);
1625 setTruncStoreAction(MVT::v2i32, VT, Expand);
1626 }
1627 // However, load and store *are* legal.
1628 setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
1629 setOperationAction(ISD::STORE, MVT::v2i32, Legal);
1632
1633 // And we need to promote i64 loads/stores into vector load/store
1636
1637 // Sadly, this doesn't work:
1638 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1639 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1640 }
1641
1642 // Turn FP extload into load/fpextend
1643 for (MVT VT : MVT::fp_valuetypes()) {
1644 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1645 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1646 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
1647 }
1648
1649 // Sparc doesn't have i1 sign extending load
1650 for (MVT VT : MVT::integer_valuetypes())
1651 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1652
1653 // Turn FP truncstore into trunc + store.
1654 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1655 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1656 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1657 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
1658 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1659 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1660
1661 // Custom legalize GlobalAddress nodes into LO/HI parts.
1666
1667 // Sparc doesn't have sext_inreg, replace them with shl/sra
1671
1672 // Sparc has no REM or DIVREM operations.
1677
1678 // ... nor does SparcV9.
1679 if (Subtarget->is64Bit()) {
1684 }
1685
1686 // Custom expand fp<->sint
1691
1692 // Custom Expand fp<->uint
1697
1698 // Lower f16 conversion operations into library calls
1705
1708
1709 // Sparc has no select or setcc: expand to SELECT_CC.
1714
1719
1720 // Sparc doesn't have BRCOND either, it has BR_CC.
1722 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1723 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1728
1733
1738
1739 if (Subtarget->is64Bit()) {
1750
1752 Subtarget->usePopc() ? Legal : Expand);
1753 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
1754 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
1756 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1757 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1759 }
1760
1761 // ATOMICs.
1762 // Atomics are supported on SparcV9. 32-bit atomics are also
1763 // supported by some Leon SparcV8 variants. Otherwise, atomics
1764 // are unsupported.
1765 if (Subtarget->isV9()) {
1766 // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
1767 // but it hasn't been implemented in the backend yet.
1768 if (Subtarget->is64Bit())
1770 else
1772 } else if (Subtarget->hasLeonCasa())
1774 else
1776
1778
1780
1782
1783 // Custom Lower Atomic LOAD/STORE
1786
1787 if (Subtarget->is64Bit()) {
1792 }
1793
1794 if (!Subtarget->isV9()) {
1795 // SparcV8 does not have FNEGD and FABSD.
1798 }
1799
1800 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1801 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1803 setOperationAction(ISD::FREM , MVT::f128, Expand);
1804 setOperationAction(ISD::FMA , MVT::f128, Expand);
1805 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1806 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1808 setOperationAction(ISD::FREM , MVT::f64, Expand);
1809 setOperationAction(ISD::FMA , MVT::f64, Expand);
1810 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1811 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1813 setOperationAction(ISD::FREM , MVT::f32, Expand);
1814 setOperationAction(ISD::FMA , MVT::f32, Expand);
1815 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
1816 setOperationAction(ISD::CTLZ , MVT::i32, Expand);
1817 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1818 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1823 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1824 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1825 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1826
1830
1831 // Expands to [SU]MUL_LOHI.
1835
1836 if (Subtarget->useSoftMulDiv()) {
1837 // .umul works for both signed and unsigned
1840 setLibcallName(RTLIB::MUL_I32, ".umul");
1841
1843 setLibcallName(RTLIB::SDIV_I32, ".div");
1844
1846 setLibcallName(RTLIB::UDIV_I32, ".udiv");
1847
1848 setLibcallName(RTLIB::SREM_I32, ".rem");
1849 setLibcallName(RTLIB::UREM_I32, ".urem");
1850 }
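  // Illustrative note: with soft mul/div, an i32 multiply therefore becomes a
  // call to `.umul` (whose low 32 result bits are correct for both signed and
  // unsigned operands) and i32 sdiv/udiv become calls to `.div`/`.udiv`.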
1851
1852 if (Subtarget->is64Bit()) {
1857
1860
1864 }
1865
1866 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1867 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1868 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1869 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1870
1871 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1873
1874 // Use the default implementation.
1875 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1876 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1880
1882
1884 Subtarget->usePopc() ? Legal : Expand);
1885
1886 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1887 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1888 setOperationAction(ISD::STORE, MVT::f128, Legal);
1889 } else {
1890 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1892 }
1893
1894 if (Subtarget->hasHardQuad()) {
1895 setOperationAction(ISD::FADD, MVT::f128, Legal);
1896 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1897 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1898 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1899 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1902 if (Subtarget->isV9()) {
1903 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1904 setOperationAction(ISD::FABS, MVT::f128, Legal);
1905 } else {
1906 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1907 setOperationAction(ISD::FABS, MVT::f128, Custom);
1908 }
1909
1910 if (!Subtarget->is64Bit()) {
1911 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1912 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1913 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1914 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1915 }
1916
1917 } else {
1918 // Custom legalize f128 operations.
1919
1920 setOperationAction(ISD::FADD, MVT::f128, Custom);
1921 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1922 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1923 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1925 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1926 setOperationAction(ISD::FABS, MVT::f128, Custom);
1927
1931
1932 // Setup Runtime library names.
1933 if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1934 setLibcallName(RTLIB::ADD_F128, "_Qp_add");
1935 setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
1936 setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
1937 setLibcallName(RTLIB::DIV_F128, "_Qp_div");
1938 setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1939 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1940 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
1941 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1942 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
1943 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
1944 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
1945 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
1946 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
1947 setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1948 setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1949 setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1950 setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1951 } else if (!Subtarget->useSoftFloat()) {
1952 setLibcallName(RTLIB::ADD_F128, "_Q_add");
1953 setLibcallName(RTLIB::SUB_F128, "_Q_sub");
1954 setLibcallName(RTLIB::MUL_F128, "_Q_mul");
1955 setLibcallName(RTLIB::DIV_F128, "_Q_div");
1956 setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1957 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1958 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
1959 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1960 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
1961 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1962 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1963 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1964 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1965 setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1966 setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1967 setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
1968 setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
1969 }
1970 }
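  // Illustrative note: without hard-quad support, an f128 FADD therefore
  // becomes a call to _Qp_add (64-bit) or _Q_add (32-bit), with operands and
  // result passed indirectly as arranged by LowerF128_LibCallArg and
  // LowerF128Op below.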
1971
1972 if (Subtarget->fixAllFDIVSQRT()) {
1973 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1974 // the former instructions generate errata on LEON processors.
1977 }
1978
1979 if (Subtarget->hasNoFMULS()) {
1981 }
1982
1983 // Custom combine bitcast between f64 and v2i32
1984 if (!Subtarget->is64Bit())
1986
1987 if (Subtarget->hasLeonCycleCounter())
1989
1991
1993
1995}
1996
1997bool SparcTargetLowering::useSoftFloat() const {
1998 return Subtarget->useSoftFloat();
1999}
2000
2001const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
2002 switch ((SPISD::NodeType)Opcode) {
2003 case SPISD::FIRST_NUMBER: break;
2004 case SPISD::CMPICC: return "SPISD::CMPICC";
2005 case SPISD::CMPFCC: return "SPISD::CMPFCC";
2006 case SPISD::CMPFCC_V9:
2007 return "SPISD::CMPFCC_V9";
2008 case SPISD::BRICC: return "SPISD::BRICC";
2009 case SPISD::BPICC:
2010 return "SPISD::BPICC";
2011 case SPISD::BPXCC:
2012 return "SPISD::BPXCC";
2013 case SPISD::BRFCC: return "SPISD::BRFCC";
2014 case SPISD::BRFCC_V9:
2015 return "SPISD::BRFCC_V9";
2016 case SPISD::BR_REG:
2017 return "SPISD::BR_REG";
2018 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
2019 case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
2020 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
2021 case SPISD::SELECT_REG:
2022 return "SPISD::SELECT_REG";
2023 case SPISD::Hi: return "SPISD::Hi";
2024 case SPISD::Lo: return "SPISD::Lo";
2025 case SPISD::FTOI: return "SPISD::FTOI";
2026 case SPISD::ITOF: return "SPISD::ITOF";
2027 case SPISD::FTOX: return "SPISD::FTOX";
2028 case SPISD::XTOF: return "SPISD::XTOF";
2029 case SPISD::CALL: return "SPISD::CALL";
2030 case SPISD::RET_GLUE: return "SPISD::RET_GLUE";
2031 case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
2032 case SPISD::FLUSHW: return "SPISD::FLUSHW";
2033 case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
2034 case SPISD::TLS_LD: return "SPISD::TLS_LD";
2035 case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
2036 case SPISD::TAIL_CALL: return "SPISD::TAIL_CALL";
2037 case SPISD::LOAD_GDOP: return "SPISD::LOAD_GDOP";
2038 }
2039 return nullptr;
2040}
2041
2042EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
2043 EVT VT) const {
2044 if (!VT.isVector())
2045 return MVT::i32;
2046 return VT.changeVectorElementTypeToInteger();
2047}
2048
2049/// computeKnownBitsForTargetNode - Determine which bits of Op are known to
2050/// be zero or one. Op is expected to be a target-specific node. Used by the
2051/// DAG combiner.
2052void SparcTargetLowering::computeKnownBitsForTargetNode
2053 (const SDValue Op,
2054 KnownBits &Known,
2055 const APInt &DemandedElts,
2056 const SelectionDAG &DAG,
2057 unsigned Depth) const {
2058 KnownBits Known2;
2059 Known.resetAll();
2060
2061 switch (Op.getOpcode()) {
2062 default: break;
2063 case SPISD::SELECT_ICC:
2064 case SPISD::SELECT_XCC:
2065 case SPISD::SELECT_FCC:
2066 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
2067 Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
2068
2069 // Only known if known in both the LHS and RHS.
2070 Known = Known.intersectWith(Known2);
2071 break;
2072 }
2073}
2074
2075// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so,
2076// set LHS/RHS to the LHS/RHS of the setcc and SPCC to the condition.
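// For instance (illustrative), a pattern like
//   (brcond (setne (select_icc 1, 0, cond, (cmpicc %a, %b)), 0))
// is recognized here so that the branch can reuse (cmpicc %a, %b) and `cond`
// directly instead of testing the materialized boolean.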
2077static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
2078 ISD::CondCode CC, unsigned &SPCC) {
2079 if (isNullConstant(RHS) && CC == ISD::SETNE &&
2080 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2081 LHS.getOpcode() == SPISD::SELECT_XCC) &&
2082 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
2083 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2084 (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
2085 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
2086 isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
2087 SDValue CMPCC = LHS.getOperand(3);
2088 SPCC = LHS.getConstantOperandVal(2);
2089 LHS = CMPCC.getOperand(0);
2090 RHS = CMPCC.getOperand(1);
2091 }
2092}
2093
2094// Convert to a target node and set target flags.
2095SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
2096 SelectionDAG &DAG) const {
2097 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
2098 return DAG.getTargetGlobalAddress(GA->getGlobal(),
2099 SDLoc(GA),
2100 GA->getValueType(0),
2101 GA->getOffset(), TF);
2102
2103 if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
2104 return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
2105 CP->getAlign(), CP->getOffset(), TF);
2106
2107 if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
2108 return DAG.getTargetBlockAddress(BA->getBlockAddress(),
2109 Op.getValueType(),
2110 0,
2111 TF);
2112
2113 if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
2114 return DAG.getTargetExternalSymbol(ES->getSymbol(),
2115 ES->getValueType(0), TF);
2116
2117 llvm_unreachable("Unhandled address SDNode");
2118}
2119
2120// Split Op into high and low parts according to HiTF and LoTF.
2121// Return an ADD node combining the parts.
2122SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
2123 unsigned HiTF, unsigned LoTF,
2124 SelectionDAG &DAG) const {
2125 SDLoc DL(Op);
2126 EVT VT = Op.getValueType();
2127 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
2128 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
2129 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2130}
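// Illustrative note: with the plain %hi/%lo flags this produces
// (add (SPISD::Hi sym), (SPISD::Lo sym)), i.e. the usual
//   sethi %hi(sym), %reg
//   or    %reg, %lo(sym), %reg
// absolute-addressing idiom; the 44-bit and 64-bit code models below reuse
// the same helper with their wider relocation pairs.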
2131
2132// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2133// or ExternalSymbol SDNode.
2134SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
2135 SDLoc DL(Op);
2136 EVT VT = getPointerTy(DAG.getDataLayout());
2137
2138 // Handle PIC mode first. SPARC needs a GOT load for every variable!
2139 if (isPositionIndependent()) {
2140 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2141 PICLevel::Level picLevel = M->getPICLevel();
2142 SDValue Idx;
2143
2144 if (picLevel == PICLevel::SmallPIC) {
2145 // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2146 Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2148 } else {
2149 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2152 }
2153
2154 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2155 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2156 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2157 // function has calls.
2159 MFI.setHasCalls(true);
2160 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2162 }
2163
2164 // This is one of the absolute code models.
2165 switch(getTargetMachine().getCodeModel()) {
2166 default:
2167 llvm_unreachable("Unsupported absolute code model");
2168 case CodeModel::Small:
2169 // abs32.
2172 case CodeModel::Medium: {
2173 // abs44.
2176 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2178 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2179 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2180 }
2181 case CodeModel::Large: {
2182 // abs64.
2185 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2188 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2189 }
2190 }
2191}
2192
2193SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
2194 SelectionDAG &DAG) const {
2195 return makeAddress(Op, DAG);
2196}
2197
2199 SelectionDAG &DAG) const {
2200 return makeAddress(Op, DAG);
2201}
2202
2204 SelectionDAG &DAG) const {
2205 return makeAddress(Op, DAG);
2206}
2207
2208SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2209 SelectionDAG &DAG) const {
2210
2211 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2212 if (DAG.getTarget().useEmulatedTLS())
2213 return LowerToTLSEmulatedModel(GA, DAG);
2214
2215 SDLoc DL(GA);
2216 const GlobalValue *GV = GA->getGlobal();
2217 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2218
2220
2221 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2222 unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2225 unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2228 unsigned addTF = ((model == TLSModel::GeneralDynamic)
2231 unsigned callTF = ((model == TLSModel::GeneralDynamic)
2234
2235 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2237 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2238 withTargetFlags(Op, addTF, DAG));
2239
2240 SDValue Chain = DAG.getEntryNode();
2241 SDValue InGlue;
2242
2243 Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
2244 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
2245 InGlue = Chain.getValue(1);
2246 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2247 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2248
2249 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2250 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2252 assert(Mask && "Missing call preserved mask for calling convention");
2253 SDValue Ops[] = {Chain,
2254 Callee,
2255 Symbol,
2256 DAG.getRegister(SP::O0, PtrVT),
2257 DAG.getRegisterMask(Mask),
2258 InGlue};
2259 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2260 InGlue = Chain.getValue(1);
2261 Chain = DAG.getCALLSEQ_END(Chain, 1, 0, InGlue, DL);
2262 InGlue = Chain.getValue(1);
2263 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);
2264
2265 if (model != TLSModel::LocalDynamic)
2266 return Ret;
2267
2268 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2270 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2272 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2273 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2275 }
2276
2277 if (model == TLSModel::InitialExec) {
2278 unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2280
2282
2283 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2284 // function has calls.
2286 MFI.setHasCalls(true);
2287
2288 SDValue TGA = makeHiLoPair(Op,
2291 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2293 DL, PtrVT, Ptr,
2294 withTargetFlags(Op, ldTF, DAG));
2295 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2296 DAG.getRegister(SP::G7, PtrVT), Offset,
2299 }
2300
2301 assert(model == TLSModel::LocalExec);
2302 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2304 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2306 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2307
2308 return DAG.getNode(ISD::ADD, DL, PtrVT,
2309 DAG.getRegister(SP::G7, PtrVT), Offset);
2310}
2311
2312SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
2313 ArgListTy &Args, SDValue Arg,
2314 const SDLoc &DL,
2315 SelectionDAG &DAG) const {
2317 EVT ArgVT = Arg.getValueType();
2318 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2319
2320 ArgListEntry Entry;
2321 Entry.Node = Arg;
2322 Entry.Ty = ArgTy;
2323
2324 if (ArgTy->isFP128Ty()) {
2325 // Create a stack object and pass the pointer to the library function.
2326 int FI = MFI.CreateStackObject(16, Align(8), false);
2327 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2328 Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
2329 Align(8));
2330
2331 Entry.Node = FIPtr;
2332 Entry.Ty = PointerType::getUnqual(ArgTy);
2333 }
2334 Args.push_back(Entry);
2335 return Chain;
2336}
2337
2338SDValue
2340 const char *LibFuncName,
2341 unsigned numArgs) const {
2342
2343 ArgListTy Args;
2344
2346 auto PtrVT = getPointerTy(DAG.getDataLayout());
2347
2348 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2349 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2350 Type *RetTyABI = RetTy;
2351 SDValue Chain = DAG.getEntryNode();
2352 SDValue RetPtr;
2353
2354 if (RetTy->isFP128Ty()) {
2355 // Create a Stack Object to receive the return value of type f128.
2356 ArgListEntry Entry;
2357 int RetFI = MFI.CreateStackObject(16, Align(8), false);
2358 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2359 Entry.Node = RetPtr;
2360 Entry.Ty = PointerType::getUnqual(RetTy);
2361 if (!Subtarget->is64Bit()) {
2362 Entry.IsSRet = true;
2363 Entry.IndirectType = RetTy;
2364 }
2365 Entry.IsReturned = false;
2366 Args.push_back(Entry);
2367 RetTyABI = Type::getVoidTy(*DAG.getContext());
2368 }
2369
2370 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2371 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2372 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2373 }
2375 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2376 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2377
2378 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2379
2380 // chain is in second result.
2381 if (RetTyABI == RetTy)
2382 return CallInfo.first;
2383
2384 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2385
2386 Chain = CallInfo.second;
2387
2388 // Load RetPtr to get the return value.
2389 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2391}
2392
2393SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
2394 unsigned &SPCC, const SDLoc &DL,
2395 SelectionDAG &DAG) const {
2396
2397 const char *LibCall = nullptr;
2398 bool is64Bit = Subtarget->is64Bit();
2399 switch(SPCC) {
2400 default: llvm_unreachable("Unhandled conditional code!");
2401 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2402 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2403 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2404 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2405 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2406 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2407 case SPCC::FCC_UL :
2408 case SPCC::FCC_ULE:
2409 case SPCC::FCC_UG :
2410 case SPCC::FCC_UGE:
2411 case SPCC::FCC_U :
2412 case SPCC::FCC_O :
2413 case SPCC::FCC_LG :
2414 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2415 }
2416
2417 auto PtrVT = getPointerTy(DAG.getDataLayout());
2418 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2420 ArgListTy Args;
2421 SDValue Chain = DAG.getEntryNode();
2422 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2423 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2424
2426 CLI.setDebugLoc(DL).setChain(Chain)
2427 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2428
2429 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2430
2431 // result is in first, and chain is in second result.
2432 SDValue Result = CallInfo.first;
2433
2434 switch(SPCC) {
2435 default: {
2436 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2437 SPCC = SPCC::ICC_NE;
2438 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2439 }
2440 case SPCC::FCC_UL : {
2441 SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2442 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2443 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2444 SPCC = SPCC::ICC_NE;
2445 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2446 }
2447 case SPCC::FCC_ULE: {
2448 SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2449 SPCC = SPCC::ICC_NE;
2450 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2451 }
2452 case SPCC::FCC_UG : {
2453 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2454 SPCC = SPCC::ICC_G;
2455 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2456 }
2457 case SPCC::FCC_UGE: {
2458 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2459 SPCC = SPCC::ICC_NE;
2460 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2461 }
2462
2463 case SPCC::FCC_U : {
2464 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2465 SPCC = SPCC::ICC_E;
2466 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2467 }
2468 case SPCC::FCC_O : {
2469 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2470 SPCC = SPCC::ICC_NE;
2471 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2472 }
2473 case SPCC::FCC_LG : {
2474 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2475 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2476 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2477 SPCC = SPCC::ICC_NE;
2478 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2479 }
2480 case SPCC::FCC_UE : {
2481 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2482 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2483 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2484 SPCC = SPCC::ICC_E;
2485 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2486 }
2487 }
2488}
2489
2490static SDValue
2491LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
2492 const SparcTargetLowering &TLI) {
2493
2494 if (Op.getOperand(0).getValueType() == MVT::f64)
2495 return TLI.LowerF128Op(Op, DAG,
2496 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2497
2498 if (Op.getOperand(0).getValueType() == MVT::f32)
2499 return TLI.LowerF128Op(Op, DAG,
2500 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2501
2502 llvm_unreachable("fpextend with non-float operand!");
2503 return SDValue();
2504}
2505
2506static SDValue
2507LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
2508 const SparcTargetLowering &TLI) {
2509 // FP_ROUND on f64 and f32 are legal.
2510 if (Op.getOperand(0).getValueType() != MVT::f128)
2511 return Op;
2512
2513 if (Op.getValueType() == MVT::f64)
2514 return TLI.LowerF128Op(Op, DAG,
2515 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2516 if (Op.getValueType() == MVT::f32)
2517 return TLI.LowerF128Op(Op, DAG,
2518 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2519
2520 llvm_unreachable("fpround to non-float!");
2521 return SDValue();
2522}
2523
2524static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
2525 const SparcTargetLowering &TLI,
2526 bool hasHardQuad) {
2527 SDLoc dl(Op);
2528 EVT VT = Op.getValueType();
2529 assert(VT == MVT::i32 || VT == MVT::i64);
2530
2531 // Expand f128 operations to fp128 ABI calls.
2532 if (Op.getOperand(0).getValueType() == MVT::f128
2533 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2534 const char *libName = TLI.getLibcallName(VT == MVT::i32
2535 ? RTLIB::FPTOSINT_F128_I32
2536 : RTLIB::FPTOSINT_F128_I64);
2537 return TLI.LowerF128Op(Op, DAG, libName, 1);
2538 }
2539
2540 // Expand if the resulting type is illegal.
2541 if (!TLI.isTypeLegal(VT))
2542 return SDValue();
2543
2544 // Otherwise, convert the FP value to an integer in an FP register.
2545 if (VT == MVT::i32)
2546 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2547 else
2548 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2549
2550 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2551}
2552
2553static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2554 const SparcTargetLowering &TLI,
2555 bool hasHardQuad) {
2556 SDLoc dl(Op);
2557 EVT OpVT = Op.getOperand(0).getValueType();
2558 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2559
2560 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2561
2562 // Expand f128 operations to fp128 ABI calls.
2563 if (Op.getValueType() == MVT::f128
2564 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2565 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2566 ? RTLIB::SINTTOFP_I32_F128
2567 : RTLIB::SINTTOFP_I64_F128);
2568 return TLI.LowerF128Op(Op, DAG, libName, 1);
2569 }
2570
2571 // Expand if the operand type is illegal.
2572 if (!TLI.isTypeLegal(OpVT))
2573 return SDValue();
2574
2575 // Otherwise, convert the int value to FP in an FP register.
2576 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2577 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2578 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2579}
2580
2581static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
2582 const SparcTargetLowering &TLI,
2583 bool hasHardQuad) {
2584 SDLoc dl(Op);
2585 EVT VT = Op.getValueType();
2586
2587 // Expand if it does not involve f128 or the target has support for
2588 // quad floating point instructions and the resulting type is legal.
2589 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2590 (hasHardQuad && TLI.isTypeLegal(VT)))
2591 return SDValue();
2592
2593 assert(VT == MVT::i32 || VT == MVT::i64);
2594
2595 return TLI.LowerF128Op(Op, DAG,
2596 TLI.getLibcallName(VT == MVT::i32
2597 ? RTLIB::FPTOUINT_F128_I32
2598 : RTLIB::FPTOUINT_F128_I64),
2599 1);
2600}
2601
2602static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2603 const SparcTargetLowering &TLI,
2604 bool hasHardQuad) {
2605 SDLoc dl(Op);
2606 EVT OpVT = Op.getOperand(0).getValueType();
2607 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2608
2609 // Expand if it does not involve f128 or the target has support for
2610 // quad floating point instructions and the operand type is legal.
2611 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2612 return SDValue();
2613
2614 return TLI.LowerF128Op(Op, DAG,
2615 TLI.getLibcallName(OpVT == MVT::i32
2616 ? RTLIB::UINTTOFP_I32_F128
2617 : RTLIB::UINTTOFP_I64_F128),
2618 1);
2619}
2620
2621static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
2622 const SparcTargetLowering &TLI, bool hasHardQuad,
2623 bool isV9, bool is64Bit) {
2624 SDValue Chain = Op.getOperand(0);
2625 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2626 SDValue LHS = Op.getOperand(2);
2627 SDValue RHS = Op.getOperand(3);
2628 SDValue Dest = Op.getOperand(4);
2629 SDLoc dl(Op);
2630 unsigned Opc, SPCC = ~0U;
2631
2632 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2633 // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2634 LookThroughSetCC(LHS, RHS, CC, SPCC);
2635 assert(LHS.getValueType() == RHS.getValueType());
2636
2637 // Get the condition flag.
2638 SDValue CompareFlag;
2639 if (LHS.getValueType().isInteger()) {
2640 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2641 // and the RHS is zero we might be able to use a specialized branch.
2642 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2644 return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
2645 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
2646 LHS);
2647
2648 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2649 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2650 if (isV9)
2651 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2652 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
2653 else
2654 // Non-v9 targets don't have xcc.
2655 Opc = SPISD::BRICC;
2656 } else {
2657 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2658 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2659 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2660 Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
2661 } else {
2662 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2663 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2664 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2665 Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
2666 }
2667 }
2668 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2669 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2670}
2671
2672static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2673 const SparcTargetLowering &TLI, bool hasHardQuad,
2674 bool isV9, bool is64Bit) {
2675 SDValue LHS = Op.getOperand(0);
2676 SDValue RHS = Op.getOperand(1);
2677 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2678 SDValue TrueVal = Op.getOperand(2);
2679 SDValue FalseVal = Op.getOperand(3);
2680 SDLoc dl(Op);
2681 unsigned Opc, SPCC = ~0U;
2682
2683 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2684 // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2685 LookThroughSetCC(LHS, RHS, CC, SPCC);
2686 assert(LHS.getValueType() == RHS.getValueType());
2687
2688 SDValue CompareFlag;
2689 if (LHS.getValueType().isInteger()) {
2690 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2691 // and the RHS is zero we might be able to use a specialized select.
2692 // All SELECT_CC between any two scalar integer types are eligible for
2693 // lowering to specialized instructions. Additionally, f32 and f64 types
2694 // are also eligible, but for f128 we can only use the specialized
2695 // instruction when we have hardquad.
2696 EVT ValType = TrueVal.getValueType();
2697 bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
2698 ValType == MVT::f64 ||
2699 (ValType == MVT::f128 && hasHardQuad);
2700 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2701 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
2702 return DAG.getNode(
2703 SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2704 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);
2705
2706 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2707 Opc = LHS.getValueType() == MVT::i32 ?
2709 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2710 } else {
2711 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2712 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2713 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2714 Opc = SPISD::SELECT_ICC;
2715 } else {
2716 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2717 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2718 Opc = SPISD::SELECT_FCC;
2719 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2720 }
2721 }
2722 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2723 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2724}
2725
2726static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
2727 const SparcTargetLowering &TLI) {
2730 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2731
2732 // Need frame address to find the address of VarArgsFrameIndex.
2734
2735 // vastart just stores the address of the VarArgsFrameIndex slot into the
2736 // memory location argument.
2737 SDLoc DL(Op);
2738 SDValue Offset =
2739 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2740 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2741 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2742 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2743 MachinePointerInfo(SV));
2744}
2745
2746static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
2747 SDNode *Node = Op.getNode();
2748 EVT VT = Node->getValueType(0);
2749 SDValue InChain = Node->getOperand(0);
2750 SDValue VAListPtr = Node->getOperand(1);
2751 EVT PtrVT = VAListPtr.getValueType();
2752 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2753 SDLoc DL(Node);
2754 SDValue VAList =
2755 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2756 // Increment the pointer, VAList, to the next vaarg.
2757 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2759 DL));
2760 // Store the incremented VAList to the legalized pointer.
2761 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2762 MachinePointerInfo(SV));
2763 // Load the actual argument out of the pointer VAList.
2764 // We can't count on greater alignment than the word size.
2765 return DAG.getLoad(
2766 VT, DL, InChain, VAList, MachinePointerInfo(),
2767 Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
2768}
2769
2770static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
2771 const SparcSubtarget *Subtarget) {
2772 SDValue Chain = Op.getOperand(0); // Legalize the chain.
2773 SDValue Size = Op.getOperand(1); // Legalize the size.
2774 MaybeAlign Alignment =
2775 cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
2776 Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
2777 EVT VT = Size->getValueType(0);
2778 SDLoc dl(Op);
2779
2780 // TODO: implement over-aligned alloca. (Note: this also implies
2781 // supporting over-aligned function frames + dynamic
2782 // allocations at all, which currently isn't supported.)
2783 if (Alignment && *Alignment > StackAlign) {
2784 const MachineFunction &MF = DAG.getMachineFunction();
2785 report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
2786 "over-aligned dynamic alloca not supported.");
2787 }
2788
2789 // The resultant pointer needs to be above the register spill area
2790 // at the bottom of the stack.
2791 unsigned regSpillArea;
2792 if (Subtarget->is64Bit()) {
2793 regSpillArea = 128;
2794 } else {
2795 // On Sparc32, the size of the spill area is 92. Unfortunately,
2796 // that's only 4-byte aligned, not 8-byte aligned (the stack
2797 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2798 // aligned dynamic allocation, we actually need to add 96 to the
2799 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2800
2801 // That also means adding 4 to the size of the allocation --
2802 // before applying the 8-byte rounding. Unfortunately, the
2803 // value we get here has already had rounding applied. So, we need
2804 // to add 8, instead, wasting a bit more memory.
2805
2806 // Further, this only actually needs to be done if the required
2807 // alignment is > 4, but, we've lost that info by this point, too,
2808 // so we always apply it.
2809
2810 // (An alternative approach would be to always reserve 96 bytes
2811 // instead of the required 92, but then we'd waste 4 extra bytes
2812 // in every frame, not just those with dynamic stack allocations)
2813
2814 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2815
2816 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2817 DAG.getConstant(8, dl, VT));
2818 regSpillArea = 96;
2819 }
2820
2821 unsigned SPReg = SP::O6;
2822 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2823 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2824 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2825
2826 regSpillArea += Subtarget->getStackPointerBias();
2827
2828 SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2829 DAG.getConstant(regSpillArea, dl, VT));
2830 SDValue Ops[2] = { NewVal, Chain };
2831 return DAG.getMergeValues(Ops, dl);
2832}
2833
2834
2835static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
2836 SDLoc dl(Op);
2837 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2838 dl, MVT::Other, DAG.getEntryNode());
2839 return Chain;
2840}
2841
2842static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2843 const SparcSubtarget *Subtarget,
2844 bool AlwaysFlush = false) {
2846 MFI.setFrameAddressIsTaken(true);
2847
2848 EVT VT = Op.getValueType();
2849 SDLoc dl(Op);
2850 unsigned FrameReg = SP::I6;
2851 unsigned stackBias = Subtarget->getStackPointerBias();
2852
2853 SDValue FrameAddr;
2854 SDValue Chain;
2855
2856 // Flush first to make sure the windowed registers' values are on the stack.
2857 Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2858
2859 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2860
2861 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
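  // (Illustrative note: 56 is the offset of the saved %i6/%fp slot within
  // the 32-bit register window save area, i.e. the 15th 4-byte slot; on
  // 64-bit it is the 15th 8-byte slot, 112, plus the stack bias.)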
2862
2863 while (depth--) {
2864 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2865 DAG.getIntPtrConstant(Offset, dl));
2866 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2867 }
2868 if (Subtarget->is64Bit())
2869 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2870 DAG.getIntPtrConstant(stackBias, dl));
2871 return FrameAddr;
2872}
2873
2874
2875static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
2876 const SparcSubtarget *Subtarget) {
2877
2878 uint64_t depth = Op.getConstantOperandVal(0);
2879
2880 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2881
2882}
2883
2884static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
2885 const SparcTargetLowering &TLI,
2886 const SparcSubtarget *Subtarget) {
2888 MachineFrameInfo &MFI = MF.getFrameInfo();
2889 MFI.setReturnAddressIsTaken(true);
2890
2892 return SDValue();
2893
2894 EVT VT = Op.getValueType();
2895 SDLoc dl(Op);
2896 uint64_t depth = Op.getConstantOperandVal(0);
2897
2898 SDValue RetAddr;
2899 if (depth == 0) {
2900 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2901 Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2902 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2903 return RetAddr;
2904 }
2905
2906 // Need frame address to find return address of the caller.
2907 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2908
2909 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2911 dl, VT,
2912 FrameAddr,
2913 DAG.getIntPtrConstant(Offset, dl));
2914 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2915
2916 return RetAddr;
2917}
2918
2919static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2920 unsigned opcode) {
2921 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2922 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2923
2924 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2925 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2926 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2927
2928 // Note: in little-endian, the two f32 halves of the value are stored in
2929 // the registers in the opposite order, so the subreg with the sign
2930 // bit is the highest-numbered (odd), rather than the
2931 // lowest-numbered (even).
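  // For example (illustrative), fneg of a value held in %d0 flips the sign
  // in %f0 (sub_even) on big-endian targets but in %f1 (sub_odd) on
  // little-endian ones, while the other half is simply moved through.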
2932
2933 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2934 SrcReg64);
2935 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2936 SrcReg64);
2937
2938 if (DAG.getDataLayout().isLittleEndian())
2939 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2940 else
2941 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2942
2943 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2944 dl, MVT::f64), 0);
2945 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2946 DstReg64, Hi32);
2947 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2948 DstReg64, Lo32);
2949 return DstReg64;
2950}
2951
2952// Lower a f128 load into two f64 loads.
2953static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
2954{
2955 SDLoc dl(Op);
2956 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2957 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2958
2959 Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);
2960
2961 SDValue Hi64 =
2962 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2963 LdNode->getPointerInfo(), Alignment);
2964 EVT addrVT = LdNode->getBasePtr().getValueType();
2965 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2966 LdNode->getBasePtr(),
2967 DAG.getConstant(8, dl, addrVT));
2968 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2969 LdNode->getPointerInfo().getWithOffset(8),
2970 Alignment);
2971
2972 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2973 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2974
2975 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2976 dl, MVT::f128);
2977 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2978 MVT::f128,
2979 SDValue(InFP128, 0),
2980 Hi64,
2981 SubRegEven);
2982 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2983 MVT::f128,
2984 SDValue(InFP128, 0),
2985 Lo64,
2986 SubRegOdd);
2987 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2988 SDValue(Lo64.getNode(), 1) };
2989 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2990 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2991 return DAG.getMergeValues(Ops, dl);
2992}
2993
2994static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
2995{
2996 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2997
2998 EVT MemVT = LdNode->getMemoryVT();
2999 if (MemVT == MVT::f128)
3000 return LowerF128Load(Op, DAG);
3001
3002 return Op;
3003}
3004
3005// Lower a f128 store into two f64 stores.
3006static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
3007 SDLoc dl(Op);
3008 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
3009 assert(StNode->getOffset().isUndef() && "Unexpected node type");
3010
3011 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
3012 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
3013
3014 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3015 dl,
3016 MVT::f64,
3017 StNode->getValue(),
3018 SubRegEven);
3019 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3020 dl,
3021 MVT::f64,
3022 StNode->getValue(),
3023 SubRegOdd);
3024
3025 Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);
3026
3027 SDValue OutChains[2];
3028 OutChains[0] =
3029 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
3030 StNode->getBasePtr(), StNode->getPointerInfo(),
3031 Alignment);
3032 EVT addrVT = StNode->getBasePtr().getValueType();
3033 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
3034 StNode->getBasePtr(),
3035 DAG.getConstant(8, dl, addrVT));
3036 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
3037 StNode->getPointerInfo().getWithOffset(8),
3038 Alignment);
3039 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3040}
3041
3042static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
3043{
3044 SDLoc dl(Op);
3045 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
3046
3047 EVT MemVT = St->getMemoryVT();
3048 if (MemVT == MVT::f128)
3049 return LowerF128Store(Op, DAG);
3050
3051 if (MemVT == MVT::i64) {
3052 // Custom handling for i64 stores: turn it into a bitcast and a
3053 // v2i32 store.
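    // For example (illustrative), `store i64 %v, ptr %p` becomes a v2i32
    // store of (bitcast %v), which the IntPair register class lets the
    // selector turn into a single `std` when the address is sufficiently
    // aligned.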
3054 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
3055 SDValue Chain = DAG.getStore(
3056 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
3057 St->getOriginalAlign(), St->getMemOperand()->getFlags(),
3058 St->getAAInfo());
3059 return Chain;
3060 }
3061
3062 return SDValue();
3063}
3064
3065static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
3066 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
3067 && "invalid opcode");
3068
3069 SDLoc dl(Op);
3070
3071 if (Op.getValueType() == MVT::f64)
3072 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
3073 if (Op.getValueType() != MVT::f128)
3074 return Op;
3075
3076 // Lower fabs/fneg on f128 to fabs/fneg on f64
3077 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
3078 // (As with LowerF64Op, on little-endian, we need to negate the odd
3079 // subreg)
3080
3081 SDValue SrcReg128 = Op.getOperand(0);
3082 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
3083 SrcReg128);
3084 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
3085 SrcReg128);
3086
3087 if (DAG.getDataLayout().isLittleEndian()) {
3088 if (isV9)
3089 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
3090 else
3091 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
3092 } else {
3093 if (isV9)
3094 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
3095 else
3096 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
3097 }
3098
3099 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
3100 dl, MVT::f128), 0);
3101 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
3102 DstReg128, Hi64);
3103 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
3104 DstReg128, Lo64);
3105 return DstReg128;
3106}
3107
3108static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
3109
3110 if (Op.getValueType() != MVT::i64)
3111 return Op;
3112
3113 SDLoc dl(Op);
3114 SDValue Src1 = Op.getOperand(0);
3115 SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
3116 SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
3117 DAG.getConstant(32, dl, MVT::i64));
3118 Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
3119
3120 SDValue Src2 = Op.getOperand(1);
3121 SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
3122 SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
3123 DAG.getConstant(32, dl, MVT::i64));
3124 Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
3125
3126
3127 bool hasChain = false;
3128 unsigned hiOpc = Op.getOpcode();
3129 switch (Op.getOpcode()) {
3130 default: llvm_unreachable("Invalid opcode");
3131 case ISD::ADDC: hiOpc = ISD::ADDE; break;
3132 case ISD::ADDE: hasChain = true; break;
3133 case ISD::SUBC: hiOpc = ISD::SUBE; break;
3134 case ISD::SUBE: hasChain = true; break;
3135 }
3136 SDValue Lo;
3137 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
3138 if (hasChain) {
3139 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
3140 Op.getOperand(2));
3141 } else {
3142 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
3143 }
3144 SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
3145 SDValue Carry = Hi.getValue(1);
3146
3147 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
3148 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
3149 Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
3150 DAG.getConstant(32, dl, MVT::i64));
3151
3152 SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
3153 SDValue Ops[2] = { Dst, Carry };
3154 return DAG.getMergeValues(Ops, dl);
3155}
3156
3157// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
3158// in LegalizeDAG.cpp except the order of arguments to the library function.
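// Illustrative note: for a 64-bit [us]mulo this ends up calling the i128
// multiply libcall (commonly __multi3), passing {HiLHS, LHS, HiRHS, RHS},
// and then flags overflow when the high half of the 128-bit product does not
// match the sign/zero extension of the low half.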
3159static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
3160 const SparcTargetLowering &TLI)
3161{
3162 unsigned opcode = Op.getOpcode();
3163 assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
3164
3165 bool isSigned = (opcode == ISD::SMULO);
3166 EVT VT = MVT::i64;
3167 EVT WideVT = MVT::i128;
3168 SDLoc dl(Op);
3169 SDValue LHS = Op.getOperand(0);
3170
3171 if (LHS.getValueType() != VT)
3172 return Op;
3173
3174 SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
3175
3176 SDValue RHS = Op.getOperand(1);
3177 SDValue HiLHS, HiRHS;
3178 if (isSigned) {
3179 HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
3180 HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
3181 } else {
3182 HiLHS = DAG.getConstant(0, dl, VT);
3183 HiRHS = DAG.getConstant(0, dl, MVT::i64);
3184 }
3185
3186 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
3187
3189 CallOptions.setSExt(isSigned);
3190 SDValue MulResult = TLI.makeLibCall(DAG,
3191 RTLIB::MUL_I128, WideVT,
3192 Args, CallOptions, dl).first;
3193 SDValue BottomHalf, TopHalf;
3194 std::tie(BottomHalf, TopHalf) = DAG.SplitScalar(MulResult, dl, VT, VT);
3195 if (isSigned) {
3196 SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
3197 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
3198 } else {
3199 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
3200 ISD::SETNE);
3201 }
3202 // MulResult is a node with an illegal type. Because such things are not
3203 // generally permitted during this phase of legalization, ensure that
3204 // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
3205 // been folded.
3206 assert(MulResult->use_empty() && "Illegally typed node still in use!");
3207
3208 SDValue Ops[2] = { BottomHalf, TopHalf } ;
3209 return DAG.getMergeValues(Ops, dl);
3210}
3211
3212static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
3213 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3214 // Expand with a fence.
3215 return SDValue();
3216 }
3217
3218 // Monotonic load/stores are legal.
3219 return Op;
3220}
3221
3222SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3223 SelectionDAG &DAG) const {
3224 unsigned IntNo = Op.getConstantOperandVal(0);
3225 SDLoc dl(Op);
3226 switch (IntNo) {
3227 default: return SDValue(); // Don't custom lower most intrinsics.
3228 case Intrinsic::thread_pointer: {
3229 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3230 return DAG.getRegister(SP::G7, PtrVT);
3231 }
3232 }
3233}
3234
3235SDValue SparcTargetLowering::
3236LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3237
3238 bool hasHardQuad = Subtarget->hasHardQuad();
3239 bool isV9 = Subtarget->isV9();
3240 bool is64Bit = Subtarget->is64Bit();
3241
3242 switch (Op.getOpcode()) {
3243 default: llvm_unreachable("Should not custom lower this!");
3244
3245 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3246 Subtarget);
3247 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3248 Subtarget);
3249 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3250 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3251 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3252 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3253 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3254 hasHardQuad);
3255 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3256 hasHardQuad);
3257 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3258 hasHardQuad);
3259 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3260 hasHardQuad);
3261 case ISD::BR_CC:
3262 return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3263 case ISD::SELECT_CC:
3264 return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3265 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3266 case ISD::VAARG: return LowerVAARG(Op, DAG);
3267 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
3268 Subtarget);
3269
3270 case ISD::LOAD: return LowerLOAD(Op, DAG);
3271 case ISD::STORE: return LowerSTORE(Op, DAG);
3272 case ISD::FADD: return LowerF128Op(Op, DAG,
3273 getLibcallName(RTLIB::ADD_F128), 2);
3274 case ISD::FSUB: return LowerF128Op(Op, DAG,
3275 getLibcallName(RTLIB::SUB_F128), 2);
3276 case ISD::FMUL: return LowerF128Op(Op, DAG,
3277 getLibcallName(RTLIB::MUL_F128), 2);
3278 case ISD::FDIV: return LowerF128Op(Op, DAG,
3279 getLibcallName(RTLIB::DIV_F128), 2);
3280 case ISD::FSQRT: return LowerF128Op(Op, DAG,
3281 getLibcallName(RTLIB::SQRT_F128),1);
3282 case ISD::FABS:
3283 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3284 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3285 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3286 case ISD::ADDC:
3287 case ISD::ADDE:
3288 case ISD::SUBC:
3289 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
3290 case ISD::UMULO:
3291 case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
3292 case ISD::ATOMIC_LOAD:
3293 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3294 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3295 }
3296}
3297
3298SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
3299 const SDLoc &DL,
3300 SelectionDAG &DAG) const {
3301 APInt V = C->getValueAPF().bitcastToAPInt();
3302 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3303 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3304 if (DAG.getDataLayout().isLittleEndian())
3305 std::swap(Lo, Hi);
3306 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3307}
3308
3309SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
3310 DAGCombinerInfo &DCI) const {
3311 SDLoc dl(N);
3312 SDValue Src = N->getOperand(0);
3313
3314 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3315 Src.getSimpleValueType() == MVT::f64)
3316 return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3317
3318 return SDValue();
3319}
3320
3321SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
3322 DAGCombinerInfo &DCI) const {
3323 switch (N->getOpcode()) {
3324 default:
3325 break;
3326 case ISD::BITCAST:
3327 return PerformBITCASTCombine(N, DCI);
3328 }
3329 return SDValue();
3330}
3331
3332MachineBasicBlock *
3333SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
3334 MachineBasicBlock *BB) const {
3335 switch (MI.getOpcode()) {
3336 default: llvm_unreachable("Unknown SELECT_CC!");
3337 case SP::SELECT_CC_Int_ICC:
3338 case SP::SELECT_CC_FP_ICC:
3339 case SP::SELECT_CC_DFP_ICC:
3340 case SP::SELECT_CC_QFP_ICC:
3341 if (Subtarget->isV9())
3342 return expandSelectCC(MI, BB, SP::BPICC);
3343 return expandSelectCC(MI, BB, SP::BCOND);
3344 case SP::SELECT_CC_Int_XCC:
3345 case SP::SELECT_CC_FP_XCC:
3346 case SP::SELECT_CC_DFP_XCC:
3347 case SP::SELECT_CC_QFP_XCC:
3348 return expandSelectCC(MI, BB, SP::BPXCC);
3349 case SP::SELECT_CC_Int_FCC:
3350 case SP::SELECT_CC_FP_FCC:
3351 case SP::SELECT_CC_DFP_FCC:
3352 case SP::SELECT_CC_QFP_FCC:
3353 if (Subtarget->isV9())
3354 return expandSelectCC(MI, BB, SP::FBCOND_V9);
3355 return expandSelectCC(MI, BB, SP::FBCOND);
3356 }
3357}
3358
3359MachineBasicBlock *
3360SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
3361 unsigned BROpcode) const {
3362 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3363 DebugLoc dl = MI.getDebugLoc();
3364 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3365
3366 // To "insert" a SELECT_CC instruction, we actually have to insert the
3367 // triangle control-flow pattern. The incoming instruction knows the
3368 // destination vreg to set, the condition code register to branch on, the
3369 // true/false values to select between, and the condition code for the branch.
3370 //
3371 // We produce the following control flow:
3372 // ThisMBB
3373 // | \
3374 // | IfFalseMBB
3375 // | /
3376 // SinkMBB
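  // For example (illustrative), a SELECT_CC pseudo produced from
  // `%r = select i1 %c, i32 %a, i32 %b` becomes a conditional branch in
  // ThisMBB that skips IfFalseMBB, with a PHI in SinkMBB merging %a (from
  // ThisMBB) and %b (from IfFalseMBB).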
3377 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3379
3380 MachineBasicBlock *ThisMBB = BB;
3381 MachineFunction *F = BB->getParent();
3382 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3383 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3384 F->insert(It, IfFalseMBB);
3385 F->insert(It, SinkMBB);
3386
3387 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3388 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3389 std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3390 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3391
3392 // Set the new successors for ThisMBB.
3393 ThisMBB->addSuccessor(IfFalseMBB);
3394 ThisMBB->addSuccessor(SinkMBB);
3395
3396 BuildMI(ThisMBB, dl, TII.get(BROpcode))
3397 .addMBB(SinkMBB)
3398 .addImm(CC);
3399
3400 // IfFalseMBB just falls through to SinkMBB.
3401 IfFalseMBB->addSuccessor(SinkMBB);
3402
3403 // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3404 BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3405 MI.getOperand(0).getReg())
3406 .addReg(MI.getOperand(1).getReg())
3407 .addMBB(ThisMBB)
3408 .addReg(MI.getOperand(2).getReg())
3409 .addMBB(IfFalseMBB);
3410
3411 MI.eraseFromParent(); // The pseudo instruction is gone now.
3412 return SinkMBB;
3413}
3414
3415//===----------------------------------------------------------------------===//
3416// Sparc Inline Assembly Support
3417//===----------------------------------------------------------------------===//
3418
3419/// getConstraintType - Given a constraint letter, return the type of
3420/// constraint it is for this target.
3421SparcTargetLowering::ConstraintType
3422SparcTargetLowering::getConstraintType(StringRef Constraint) const {
3423 if (Constraint.size() == 1) {
3424 switch (Constraint[0]) {
3425 default: break;
3426 case 'r':
3427 case 'f':
3428 case 'e':
3429 return C_RegisterClass;
3430 case 'I': // SIMM13
3431 return C_Immediate;
3432 }
3433 }
3434
3435 return TargetLowering::getConstraintType(Constraint);
3436}
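// Rough usage sketch (hypothetical user code, not from this file): the letters
// above correspond to GCC-style inline-asm constraints, e.g.
//   asm("add %1, %2, %0"   : "=r"(res) : "r"(a), "I"(42));   // 'I': simm13 immediate
//   asm("fadds %1, %2, %0" : "=f"(fr)  : "f"(fa), "f"(fb));  // 'f': FP register
// 'e' behaves like 'f' but also admits the upper (V9) double/quad registers,
// as selected in getRegForInlineAsmConstraint below.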
3437
3438TargetLowering::ConstraintWeight SparcTargetLowering::
3439getSingleConstraintMatchWeight(AsmOperandInfo &info,
3440 const char *constraint) const {
3441 ConstraintWeight weight = CW_Invalid;
3442 Value *CallOperandVal = info.CallOperandVal;
3443 // If we don't have a value, we can't do a match,
3444 // but allow it at the lowest weight.
3445 if (!CallOperandVal)
3446 return CW_Default;
3447
3448 // Look at the constraint type.
3449 switch (*constraint) {
3450 default:
3451 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3452 break;
3453 case 'I': // SIMM13
3454 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3455 if (isInt<13>(C->getSExtValue()))
3456 weight = CW_Constant;
3457 }
3458 break;
3459 }
3460 return weight;
3461}
3462
3463/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3464/// vector. If it is invalid, don't add anything to Ops.
3465void SparcTargetLowering::LowerAsmOperandForConstraint(
3466 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
3467 SelectionDAG &DAG) const {
3468 SDValue Result;
3469
3470 // Only support length 1 constraints for now.
3471 if (Constraint.size() > 1)
3472 return;
3473
3474 char ConstraintLetter = Constraint[0];
3475 switch (ConstraintLetter) {
3476 default: break;
3477 case 'I':
3478 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3479 if (isInt<13>(C->getSExtValue())) {
3480 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
3481 Op.getValueType());
3482 break;
3483 }
3484 return;
3485 }
3486 }
3487
3488 if (Result.getNode()) {
3489 Ops.push_back(Result);
3490 return;
3491 }
3492 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3493}
3494
3495std::pair<unsigned, const TargetRegisterClass *>
3496SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
3497 StringRef Constraint,
3498 MVT VT) const {
3499 if (Constraint.empty())
3500 return std::make_pair(0U, nullptr);
3501
3502 if (Constraint.size() == 1) {
3503 switch (Constraint[0]) {
3504 case 'r':
3505 if (VT == MVT::v2i32)
3506 return std::make_pair(0U, &SP::IntPairRegClass);
3507 else if (Subtarget->is64Bit())
3508 return std::make_pair(0U, &SP::I64RegsRegClass);
3509 else
3510 return std::make_pair(0U, &SP::IntRegsRegClass);
3511 case 'f':
3512 if (VT == MVT::f32 || VT == MVT::i32)
3513 return std::make_pair(0U, &SP::FPRegsRegClass);
3514 else if (VT == MVT::f64 || VT == MVT::i64)
3515 return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3516 else if (VT == MVT::f128)
3517 return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3518 // This will generate an error message
3519 return std::make_pair(0U, nullptr);
3520 case 'e':
3521 if (VT == MVT::f32 || VT == MVT::i32)
3522 return std::make_pair(0U, &SP::FPRegsRegClass);
3523 else if (VT == MVT::f64 || VT == MVT::i64 )
3524 return std::make_pair(0U, &SP::DFPRegsRegClass);
3525 else if (VT == MVT::f128)
3526 return std::make_pair(0U, &SP::QFPRegsRegClass);
3527 // This will generate an error message
3528 return std::make_pair(0U, nullptr);
3529 }
3530 }
3531
3532 if (Constraint.front() != '{')
3533 return std::make_pair(0U, nullptr);
3534
3535 assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3536 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3537 if (RegName.empty())
3538 return std::make_pair(0U, nullptr);
3539
3540 unsigned long long RegNo;
3541 // Handle numbered register aliases.
3542 if (RegName[0] == 'r' &&
3543 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3544 // r0-r7 -> g0-g7
3545 // r8-r15 -> o0-o7
3546 // r16-r23 -> l0-l7
3547 // r24-r31 -> i0-i7
3548 if (RegNo > 31)
3549 return std::make_pair(0U, nullptr);
3550 const char RegTypes[] = {'g', 'o', 'l', 'i'};
3551 char RegType = RegTypes[RegNo / 8];
3552 char RegIndex = '0' + (RegNo % 8);
3553 char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3554 return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3555 }
3556
3557 // Rewrite the fN constraint according to the value type if needed.
3558 if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3559 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3560 if (VT == MVT::f64 && (RegNo % 2 == 0)) {
3561 return getRegForInlineAsmConstraint(
3562 TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3563 } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
3564 return getRegForInlineAsmConstraint(
3565 TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3566 } else {
3567 return std::make_pair(0U, nullptr);
3568 }
3569 }
3570
3571 auto ResultPair =
3572 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3573 if (!ResultPair.second)
3574 return std::make_pair(0U, nullptr);
3575
3576 // Force the use of I64Regs over IntRegs for 64-bit values.
3577 if (Subtarget->is64Bit() && VT == MVT::i64) {
3578 assert(ResultPair.second == &SP::IntRegsRegClass &&
3579 "Unexpected register class");
3580 return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3581 }
3582
3583 return ResultPair;
3584}
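// Illustrative examples of the explicit-register handling above (assumed
// constraint strings, not taken from this file):
//   "{r8}"               -> resolved as "{o0}"  (r8-r15 alias the %o registers)
//   "{f2}" with VT==f64  -> resolved as "{d1}"  (even fN pairs form a double)
//   "{f4}" with VT==f128 -> resolved as "{q1}"  (fN divisible by 4 forms a quad)
// Misaligned fN constraints for f64/f128 fall through to the error path
// (a null register class).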
3585
3586bool
3587SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3588 // The Sparc target isn't yet aware of offsets.
3589 return false;
3590}
3591
3592void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
3593 SmallVectorImpl<SDValue>& Results,
3594 SelectionDAG &DAG) const {
3595
3596 SDLoc dl(N);
3597
3598 RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3599
3600 switch (N->getOpcode()) {
3601 default:
3602 llvm_unreachable("Do not know how to custom type legalize this operation!");
3603
3604 case ISD::FP_TO_SINT:
3605 case ISD::FP_TO_UINT:
3606 // Custom lower only if it involves f128 or i64.
3607 if (N->getOperand(0).getValueType() != MVT::f128
3608 || N->getValueType(0) != MVT::i64)
3609 return;
3610 libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3611 ? RTLIB::FPTOSINT_F128_I64
3612 : RTLIB::FPTOUINT_F128_I64);
3613
3614 Results.push_back(LowerF128Op(SDValue(N, 0),
3615 DAG,
3616 getLibcallName(libCall),
3617 1));
3618 return;
3619 case ISD::READCYCLECOUNTER: {
3620 assert(Subtarget->hasLeonCycleCounter());
3621 SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3622 SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3623 SDValue Ops[] = { Lo, Hi };
3624 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3625 Results.push_back(Pair);
3626 Results.push_back(N->getOperand(0));
3627 return;
3628 }
3629 case ISD::SINT_TO_FP:
3630 case ISD::UINT_TO_FP:
3631 // Custom lower only if it involves f128 or i64.
3632 if (N->getValueType(0) != MVT::f128
3633 || N->getOperand(0).getValueType() != MVT::i64)
3634 return;
3635
3636 libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3637 ? RTLIB::SINTTOFP_I64_F128
3638 : RTLIB::UINTTOFP_I64_F128);
3639
3640 Results.push_back(LowerF128Op(SDValue(N, 0),
3641 DAG,
3642 getLibcallName(libCall),
3643 1));
3644 return;
3645 case ISD::LOAD: {
3646 LoadSDNode *Ld = cast<LoadSDNode>(N);
3647 // Custom handling only for i64: turn i64 load into a v2i32 load,
3648 // and a bitcast.
3649 if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3650 return;
3651
3652 SDLoc dl(N);
3653 SDValue LoadRes = DAG.getExtLoad(
3654 Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3655 Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
3656 Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
3657 Ld->getAAInfo());
3658
3659 SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3660 Results.push_back(Res);
3661 Results.push_back(LoadRes.getValue(1));
3662 return;
3663 }
3664 }
3665}
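// A rough before/after sketch of the custom i64-load legalization above
// (illustrative SelectionDAG nodes, not actual dump output):
//   before:  t2: i64,ch   = load t0, t1
//   after:   t3: v2i32,ch = load t0, t1
//            t4: i64      = bitcast t3
// The v2i32 form lets the two 32-bit halves be assigned to an integer
// register pair on 32-bit SPARC.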
3666
3667// Override to enable LOAD_STACK_GUARD lowering on Linux.
3668bool SparcTargetLowering::useLoadStackGuardNode() const {
3669 if (!Subtarget->isTargetLinux())
3670 return TargetLowering::useLoadStackGuardNode();
3671 return true;
3672}
3673
3674// Override to disable global variable loading on Linux.
3675void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
3676 if (!Subtarget->isTargetLinux())
3677 return TargetLowering::insertSSPDeclarations(M);
3678}
3679
3680void SparcTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3681 SDNode *Node) const {
3682 assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
3683 // If the result is dead, replace it with %g0.
3684 if (!Node->hasAnyUseOfValue(0))
3685 MI.getOperand(0).setReg(SP::G0);
3686}
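// Illustrative effect of the dead-result rewrite above (assembly sketch,
// registers chosen for the example only):
//   subcc %o0, %o1, %o2      ; result %o2 is never read
// becomes
//   subcc %o0, %o1, %g0      ; the canonical "cmp %o0, %o1" idiom,
// leaving only the integer condition codes live.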