//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "SparcISelLowering.h"
#include "MCTargetDesc/SparcMCExpr.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert (ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}

static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
                                     MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign the whole thing on the stack.
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
    return true;
  }

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(
        ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
  return true;
}

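// Illustrative sketch (not from the original source): because
// CC_Sparc_Assign_Split_64 hands out the two 32-bit halves independently, a
// 64-bit value on 32-bit SPARC can be split across a register and the stack.
// For example, with %i0-%i4 already taken, an f64 argument ends up as:
//   high half -> %i5 (the last free register)
//   low half  -> a 4-byte stack slot
// and once all of %i0-%i5 are taken, the whole value gets one 8-byte slot.
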
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT,
                                         CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags,
                                         CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };

  // Try to get first reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  // Try to get second reg.
  if (Register Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  return true;
}

// Allocate a full-sized argument for the 64-bit ABI.
static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
  Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

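// Illustrative sketch (not from the original source): for a prototype like
//   void f(long x, double d, float s)
// the sequential AllocateStack calls above give each argument one 8-byte
// slot, and the register follows directly from the slot offset:
//   x: i64 at Offset 0  -> %i0  (SP::I0 + 0/8)
//   d: f64 at Offset 8  -> %d1  (SP::D0 + 8/8)
//   s: f32 at Offset 16 -> %f5  (SP::F1 + 16/4)
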
// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, Align(4));

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // Bail out if this is a return CC and we run out of registers to place
  // values into.
  if (IsReturn)
    return false;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

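// Illustrative sketch (not from the original source): a struct passed as
// { i32, float } in registers occupies the two 32-bit halves of one 8-byte
// slot:
//   i32   at Offset 0 -> high half of %i0 (custom; callers shift it up)
//   float at Offset 4 -> %f1              (SP::F0 + 4/4)
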
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                 State);
}

#include "SparcGenCallingConv.inc"

// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
                "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}

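// Illustrative sketch (not from the original source): after the callee's SAVE
// rotates the register window, the caller's %o registers become the callee's
// %i registers, so:
//   toCallerWindow(SP::I0) == SP::O0
//   toCallerWindow(SP::I7) == SP::O7
//   toCallerWindow(SP::G1) == SP::G1   // globals are not remapped
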
bool SparcTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
                                                       : RetCC_Sparc32);
}

SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}

SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
      // happen by default if this wasn't a legal type)

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
                               Glue);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0.
  if (MF.getFunction().hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
  }

  RetOps[0] = Chain;  // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}

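// Editorial note (not from the original source): on SPARC V8, callers of a
// struct-returning function place an UNIMP word holding the struct size after
// the call's delay slot, and the callee returns past it to %i7+12 instead of
// the usual %i7+8; that is the RetAddrOffset distinction above.
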
// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
}

SDValue SparcTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}

/// LowerFormalArguments_32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue SparcTargetLowering::LowerFormalArguments_32(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  const unsigned StackOffset = 92;
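  // Editorial note (not from the original source): 92 is the fixed V8
  // prologue area in the caller's frame: 64 bytes of register window save
  // area, 4 bytes for the struct-return pointer slot at %fp+64, and 6*4 = 24
  // bytes reserved for the first six outgoing argument words, so the seventh
  // and later incoming argument words live at %fp+92 upward.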
  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();

  unsigned InIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
    CCValAssign &VA = ArgLocs[i];

    if (Ins[InIdx].Flags.isSRet()) {
      if (InIdx != 0)
        report_fatal_error("sparc only supports sret on the first parameter");
      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

        Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo().
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(), true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
        } else {
          Register loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }

        if (IsLittleEndian)
          std::swap(LoVal, HiVal);

        SDValue WholeValue =
            DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset()+StackOffset;
    auto PtrVT = getPointerTy(DAG.getDataLayout());

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
      // If it is double-word aligned, just load.
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo().CreateFixedObject(8,
                                                     Offset,
                                                     true);
        SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
        SDValue Load =
            DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo().CreateFixedObject(4,
                                                   Offset,
                                                   true);
      SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
      SDValue HiVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      int FI2 = MF.getFrameInfo().CreateFixedObject(4,
                                                    Offset+4,
                                                    true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);

      SDValue LoVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());

      if (IsLittleEndian)
        std::swap(LoVal, HiVal);

      SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo().CreateFixedObject(4,
                                                 Offset,
                                                 true);
    SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
    } else if (VA.getValVT() == MVT::f128) {
      report_fatal_error("SPARCv8 does not handle f128 in calls; "
                         "pass indirectly");
    } else {
      // We shouldn't see any other value types here.
      llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction().hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    Register Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const MCPhysReg ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
    const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getStackSize();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
                                                         true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(
          DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
    }
  }

  return Chain;
}

// Lower formal arguments for the 64-bit ABI.
SDValue SparcTargetLowering::LowerFormalArguments_64(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      Register VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, DL, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust offset for extended arguments, SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
    int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(
        DAG.getLoad(VA.getValVT(), DL, Chain,
                    DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
                    MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getStackSize();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    OutChains.push_back(
        DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
                     MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);

  return Chain;
}

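// Illustrative note (not from the original source): for `int sum(int n, ...)`
// on V9, `n` arrives promoted in %i0 and the loop above spills the remaining
// %i1-%i5 into their reserved 8-byte slots starting at %fp+BIAS+136, so
// va_arg can walk every variadic word with a single pointer.
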
SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}

static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
                                const CallBase *Call) {
  if (Call)
    return Call->hasFnAttr(Attribute::ReturnsTwice);

  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function &Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn.getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
bool SparcTargetLowering::IsEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {

  auto &Outs = CLI.Outs;
  auto &Caller = MF.getFunction();

  // Do not tail call opt functions with "disable-tail-calls" attribute.
  if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Do not tail call opt if the stack is used to pass parameters.
  // 64-bit targets have a slightly higher limit since the ABI requires some
  // space to be allocated even when all the parameters fit inside registers.
  unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
  if (CCInfo.getStackSize() > StackSizeLimit)
    return false;

  // Do not tail call opt if either the callee or caller returns
  // a struct and the other does not.
  if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
    return false;

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call.
  for (auto &Arg : Outs)
    if (Arg.Flags.isByVal())
      return false;

  return true;
}

// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  isTailCall = isTailCall && IsEligibleForTailCallOptimization(
                                 CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getStackSize();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();

    if (Size > 0U) {
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);

      Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
                            false,        // isVolatile,
                            (Size <= 32), // AlwaysInline if size <= 32,
                            false,        // isTailCall
                            MachinePointerInfo(), MachinePointerInfo());
      ByValArgs.push_back(FIPtr);
    } else {
      SDValue nullVal;
      ByValArgs.push_back(nullVal);
    }
  }

  assert(!isTailCall || ArgsSize == 0);

  if (!isTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);

  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  unsigned SRetArgSize = 0;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use local copy if it is a byval arg.
    if (Flags.isByVal()) {
      Arg = ByValArgs[byvalArgIdx++];
      if (!Arg) {
        continue;
      }
    }

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());

      if (isTailCall)
        continue;

      // store SRet argument in %sp+64
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      hasStructRetAttr = true;
      // sret only allowed on first argument
      assert(Outs[realArgIdx].OrigArgIndex == 0);
      SRetArgSize =
          DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // if it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
          continue;
        }
      }

      if (VA.getLocVT() == MVT::f64) {
        // Move the float value from float registers into the
        // integer registers.
        if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
          Arg = bitcastConstantFPToInt(C, dl, DAG);
        else
          Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
      }

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
        } else {
          // Store the second part on the stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the first part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
        // Store the second part.
        PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
      }
      continue;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
                                           dl);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InGlue is necessary since all emitted instructions must be
  // stuck together.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Register Reg = RegsToPass[i].first;
    if (!isTailCall)
      Reg = toCallerWindow(Reg);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
                                        : SparcMCExpr::VK_Sparc_WDISP30;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);

  // Returns a chain & a flag for retval copy to use
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Register Reg = RegsToPass[i].first;
    if (!isTailCall)
      Reg = toCallerWindow(Reg);
    Ops.push_back(DAG.getRegister(Reg, RegsToPass[i].second.getValueType()));
  }

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice)
           ? TRI->getRTCallPreservedMask(CallConv)
           : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  if (isTailCall) {
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
  }

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
  InGlue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
    if (RVLocs[i].getLocVT() == MVT::v2i32) {
      SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
      SDValue Lo = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
                        DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
                        DAG.getConstant(1, dl, MVT::i32));
      InVals.push_back(Vec);
    } else {
      Chain =
          DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                             RVLocs[i].getValVT(), InGlue)
              .getValue(1);
      InGlue = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    }
  }

  return Chain;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register SparcTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                                const MachineFunction &MF) const {
  Register Reg = StringSwitch<Register>(RegName)
    .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
    .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
    .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
    .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
    .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
    .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
    .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
    .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
    .Default(0);

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
    // Determine the offset into the argument array.
    Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
                                 CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
                                       MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
                               VA.getLocVT(), VA.getLocInfo());
    }
  }
}

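// Illustrative sketch (not from the original source): for a call like
//   printf("%f\n", 3.14)
// CC_Sparc64 first assigns the double to %d0, but since it is not a fixed
// argument, the loop above reassigns it to the integer register covering the
// same argument-array offset: %i0 holding bitcast<i64>(3.14), which is where
// a varargs callee will look for it.
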
// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  CLI.IsTailCall = CLI.IsTailCall && IsEligibleForTailCallOptimization(
                                         CCInfo, CLI, DAG.getMachineFunction());

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned StackReserved = 6 * 8u;
  unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  assert(!CLI.IsTailCall || ArgsSize == StackReserved);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  if (!CLI.IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store =
            DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
        // Load into Reg and Reg+1
        SDValue Hi64 =
            DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
        SDValue Lo64 =
            DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());

        Register HiReg = VA.getLocReg();
        Register LoReg = VA.getLocReg() + 1;
        if (!CLI.IsTailCall) {
          HiReg = toCallerWindow(HiReg);
          LoReg = toCallerWindow(LoReg);
        }

        RegsToPass.push_back(std::make_pair(HiReg, Hi64));
        RegsToPass.push_back(std::make_pair(LoReg, Lo64));
        continue;
      }

      // The custom bit on an i32 argument indicates that it should be passed
      // in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }

      Register Reg = VA.getLocReg();
      if (!CLI.IsTailCall)
        Reg = toCallerWindow(Reg);
      RegsToPass.push_back(std::make_pair(Reg, Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL,
                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
  unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30
                                        : SparcMCExpr::VK_Sparc_WDISP30;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                         : TRI->getCallPreservedMask(DAG.getMachineFunction(),
                                                     CLI.CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  if (CLI.IsTailCall) {
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
  }
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32}', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // CopyFromReg previous node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, DL, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

TargetLowering::AtomicExpansionKind
SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  if (AI->getOperation() == AtomicRMWInst::Xchg &&
      AI->getType()->getPrimitiveSizeInBits() == 32)
    return AtomicExpansionKind::None; // Uses xchg instruction

  return AtomicExpansionKind::CmpXChg;
}

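// Illustrative note (not from the original source): the early return above
// means `atomicrmw xchg ptr %p, i32 %v` selects directly to SPARC's SWAP
// instruction, while other operations such as `atomicrmw add` are rewritten
// by the generic AtomicExpand pass into a compare-and-swap loop.
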
/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
/// rcond condition.
static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown/unsigned integer condition code!");
  case ISD::SETEQ:
    return SPCC::REG_Z;
  case ISD::SETNE:
    return SPCC::REG_NZ;
  case ISD::SETLT:
    return SPCC::REG_LZ;
  case ISD::SETGT:
    return SPCC::REG_GZ;
  case ISD::SETLE:
    return SPCC::REG_LEZ;
  case ISD::SETGE:
    return SPCC::REG_GEZ;
  }
}

/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}

/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}

SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
                                         const SparcSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  if (!Subtarget->useSoftFloat()) {
    addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
    addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
    addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
  }
  if (Subtarget->is64Bit()) {
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
  } else {
    // On 32bit sparc, we define a double-register 32bit register
    // class, as well. This is modeled in LLVM as a 2-vector of i32.
    addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);

    // ...but almost all operations must be expanded, so set that as
    // the default.
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      setOperationAction(Op, MVT::v2i32, Expand);
    }
    // Truncating/extending stores/loads are also not supported.
    for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);

      setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);

      setTruncStoreAction(VT, MVT::v2i32, Expand);
      setTruncStoreAction(MVT::v2i32, VT, Expand);
    }
    // However, load and store *are* legal.
    setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
    setOperationAction(ISD::STORE, MVT::v2i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);

    // And we need to promote i64 loads/stores into vector load/store
    setOperationAction(ISD::LOAD, MVT::i64, Custom);
    setOperationAction(ISD::STORE, MVT::i64, Custom);

    // Sadly, this doesn't work:
    //    AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
    //    AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
  }

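  // Illustrative note (not from the original source): the v2i32 IntPair class
  // above lets an i64 or bitcast f64 live in an even/odd pair of 32-bit
  // integer registers (e.g. %i0:%i1). Only whole-pair loads, stores and
  // element accesses stay legal; everything else is expanded to i32 ops.
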
  // Turn FP extload into load/fpextend
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
  }

  // Sparc doesn't have i1 sign extending load
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

1620 // Custom legalize GlobalAddress nodes into LO/HI parts.
1625
1626 // Sparc doesn't have sext_inreg, replace them with shl/sra
1630
1631 // Sparc has no REM or DIVREM operations.
1636
1637 // ... nor does SparcV9.
1638 if (Subtarget->is64Bit()) {
1639 setOperationAction(ISD::UREM, MVT::i64, Expand);
1640 setOperationAction(ISD::SREM, MVT::i64, Expand);
1641 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1642 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1643 }
1644
1645 // Custom expand fp<->sint
1646 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1647 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1648 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
1649 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
1650
1651 // Custom Expand fp<->uint
1652 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
1653 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
1654 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
1655 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
1656
1657 // Lower f16 conversion operations into library calls
1658 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1659 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1660 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1661 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1662 setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
1663 setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
1664
1665 setOperationAction(ISD::BITCAST, MVT::f32, Expand);
1666 setOperationAction(ISD::BITCAST, MVT::i32, Expand);
1667
1668 // Sparc has no select or setcc: expand to SELECT_CC.
1669 setOperationAction(ISD::SELECT, MVT::i32, Expand);
1670 setOperationAction(ISD::SELECT, MVT::f32, Expand);
1671 setOperationAction(ISD::SELECT, MVT::f64, Expand);
1672 setOperationAction(ISD::SELECT, MVT::f128, Expand);
1673
1674 setOperationAction(ISD::SETCC, MVT::i32, Expand);
1675 setOperationAction(ISD::SETCC, MVT::f32, Expand);
1676 setOperationAction(ISD::SETCC, MVT::f64, Expand);
1677 setOperationAction(ISD::SETCC, MVT::f128, Expand);
1678
1679 // Sparc doesn't have BRCOND either, it has BR_CC.
1680 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1681 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1682 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1683 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1684 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1685 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1686 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
1687
1688 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1689 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1690 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1691 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1692
1693 setOperationAction(ISD::ADDC, MVT::i32, Custom);
1694 setOperationAction(ISD::ADDE, MVT::i32, Custom);
1695 setOperationAction(ISD::SUBC, MVT::i32, Custom);
1696 setOperationAction(ISD::SUBE, MVT::i32, Custom);
1697
1698 if (Subtarget->is64Bit()) {
1699 setOperationAction(ISD::ADDC, MVT::i64, Custom);
1700 setOperationAction(ISD::ADDE, MVT::i64, Custom);
1701 setOperationAction(ISD::SUBC, MVT::i64, Custom);
1702 setOperationAction(ISD::SUBE, MVT::i64, Custom);
1703 setOperationAction(ISD::BITCAST, MVT::f64, Expand);
1704 setOperationAction(ISD::BITCAST, MVT::i64, Expand);
1705 setOperationAction(ISD::SELECT, MVT::i64, Expand);
1706 setOperationAction(ISD::SETCC, MVT::i64, Expand);
1707 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
1708 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
1709
1710 setOperationAction(ISD::CTPOP, MVT::i64,
1711 Subtarget->usePopc() ? Legal : Expand);
1712 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
1713 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
1714 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
1715 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1716 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1717 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
1718 }
1719
1720 // ATOMICs.
1721 // Atomics are supported on SparcV9. 32-bit atomics are also
1722 // supported by some Leon SparcV8 variants. Otherwise, atomics
1723 // are unsupported.
1724 if (Subtarget->isV9())
1725 setMaxAtomicSizeInBitsSupported(64);
1726 else if (Subtarget->hasLeonCasa())
1727 setMaxAtomicSizeInBitsSupported(32);
1728 else
1729 setMaxAtomicSizeInBitsSupported(0);
1730
1731 setMinCmpXchgSizeInBits(32);
1732
1733 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
1734
1735 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Legal);
1736
1737 // Custom Lower Atomic LOAD/STORE
1738 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1739 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1740
1741 if (Subtarget->is64Bit()) {
1742 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
1743 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
1744 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
1745 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
1746 }
1747
1748 if (!Subtarget->is64Bit()) {
1749 // These libcalls are not available in 32-bit.
1750 setLibcallName(RTLIB::MULO_I64, nullptr);
1751 setLibcallName(RTLIB::MUL_I128, nullptr);
1752 setLibcallName(RTLIB::SHL_I128, nullptr);
1753 setLibcallName(RTLIB::SRL_I128, nullptr);
1754 setLibcallName(RTLIB::SRA_I128, nullptr);
1755 }
1756
1757 setLibcallName(RTLIB::MULO_I128, nullptr);
1758
1759 if (!Subtarget->isV9()) {
1760 // SparcV8 does not have FNEGD and FABSD.
1761 setOperationAction(ISD::FNEG, MVT::f64, Custom);
1762 setOperationAction(ISD::FABS, MVT::f64, Custom);
1763 }
1764
1765 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1766 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1767 setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
1768 setOperationAction(ISD::FREM , MVT::f128, Expand);
1769 setOperationAction(ISD::FMA , MVT::f128, Expand);
1770 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1771 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1772 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1773 setOperationAction(ISD::FREM , MVT::f64, Expand);
1774 setOperationAction(ISD::FMA , MVT::f64, Expand);
1775 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1776 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1777 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1778 setOperationAction(ISD::FREM , MVT::f32, Expand);
1779 setOperationAction(ISD::FMA , MVT::f32, Expand);
1780 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
1781 setOperationAction(ISD::CTLZ , MVT::i32, Expand);
1782 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1783 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1784 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1785 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1786 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1787 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1788 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1789 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1790 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1791
1792 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1793 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1794 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1795
1796 // Expands to [SU]MUL_LOHI.
1797 setOperationAction(ISD::MULHU, MVT::i32, Expand);
1798 setOperationAction(ISD::MULHS, MVT::i32, Expand);
1799 setOperationAction(ISD::MUL, MVT::i32, Expand);
1800
1801 if (Subtarget->useSoftMulDiv()) {
1802 // .umul works for both signed and unsigned
1803 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
1804 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
1805 setLibcallName(RTLIB::MUL_I32, ".umul");
1806
1807 setOperationAction(ISD::SDIV, MVT::i32, Expand);
1808 setLibcallName(RTLIB::SDIV_I32, ".div");
1809
1810 setOperationAction(ISD::UDIV, MVT::i32, Expand);
1811 setLibcallName(RTLIB::UDIV_I32, ".udiv");
1812
1813 setLibcallName(RTLIB::SREM_I32, ".rem");
1814 setLibcallName(RTLIB::UREM_I32, ".urem");
1815 }
1816
1817 if (Subtarget->is64Bit()) {
1818 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
1819 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
1820 setOperationAction(ISD::MULHU, MVT::i64, Expand);
1821 setOperationAction(ISD::MULHS, MVT::i64, Expand);
1822
1823 setOperationAction(ISD::UMULO, MVT::i64, Custom);
1824 setOperationAction(ISD::SMULO, MVT::i64, Custom);
1825
1826 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
1827 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
1828 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
1829 }
1830
1831 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1832 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1833 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1834 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1835
1836 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1837 setOperationAction(ISD::DEBUGTRAP , MVT::Other, Legal);
1838
1839 // Use the default implementation.
1840 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1841 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1842 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
1843 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
1844 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
1845
1846 setStackPointerRegisterToSaveRestore(SP::O6);
1847
1848 setOperationAction(ISD::CTPOP, MVT::i32,
1849 Subtarget->usePopc() ? Legal : Expand);
1850
1851 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1852 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1853 setOperationAction(ISD::STORE, MVT::f128, Legal);
1854 } else {
1855 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1856 setOperationAction(ISD::STORE, MVT::f128, Custom);
1857 }
1858
1859 if (Subtarget->hasHardQuad()) {
1860 setOperationAction(ISD::FADD, MVT::f128, Legal);
1861 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1862 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1863 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1864 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1865 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1866 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1867 if (Subtarget->isV9()) {
1868 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1869 setOperationAction(ISD::FABS, MVT::f128, Legal);
1870 } else {
1871 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1872 setOperationAction(ISD::FABS, MVT::f128, Custom);
1873 }
1874
1875 if (!Subtarget->is64Bit()) {
1876 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1877 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1878 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1879 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1880 }
1881
1882 } else {
1883 // Custom legalize f128 operations.
1884
1885 setOperationAction(ISD::FADD, MVT::f128, Custom);
1886 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1887 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1888 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1889 setOperationAction(ISD::FSQRT, MVT::f128, Custom);
1890 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1891 setOperationAction(ISD::FABS, MVT::f128, Custom);
1892
1893 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
1894 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
1895 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
1896
1897 // Setup Runtime library names.
1898 if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1899 setLibcallName(RTLIB::ADD_F128, "_Qp_add");
1900 setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
1901 setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
1902 setLibcallName(RTLIB::DIV_F128, "_Qp_div");
1903 setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1904 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1905 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
1906 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1907 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
1908 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
1909 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
1910 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
1911 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
1912 setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1913 setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1914 setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1915 setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1916 } else if (!Subtarget->useSoftFloat()) {
1917 setLibcallName(RTLIB::ADD_F128, "_Q_add");
1918 setLibcallName(RTLIB::SUB_F128, "_Q_sub");
1919 setLibcallName(RTLIB::MUL_F128, "_Q_mul");
1920 setLibcallName(RTLIB::DIV_F128, "_Q_div");
1921 setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1922 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1923 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
1924 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1925 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
1926 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1927 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1928 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1929 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1930 setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1931 setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1932 setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
1933 setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
1934 }
1935 }
1936
1937 if (Subtarget->fixAllFDIVSQRT()) {
1938 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1939 // the former instructions generate errata on LEON processors.
1940 setOperationAction(ISD::FDIV, MVT::f32, Promote);
1941 setOperationAction(ISD::FSQRT, MVT::f32, Promote);
1942 }
1943
1944 if (Subtarget->hasNoFMULS()) {
1945 setOperationAction(ISD::FMUL, MVT::f32, Promote);
1946 }
1947
1948 // Custom combine bitcast between f64 and v2i32
1949 if (!Subtarget->is64Bit())
1950 setTargetDAGCombine(ISD::BITCAST);
1951
1952 if (Subtarget->hasLeonCycleCounter())
1953 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1954
1955 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1956
1957 setMinFunctionAlignment(Align(4));
1958
1959 computeRegisterProperties(Subtarget->getRegisterInfo());
1960}
1961
1962bool SparcTargetLowering::useSoftFloat() const {
1963 return Subtarget->useSoftFloat();
1964}
1965
1966const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1967 switch ((SPISD::NodeType)Opcode) {
1968 case SPISD::FIRST_NUMBER: break;
1969 case SPISD::CMPICC: return "SPISD::CMPICC";
1970 case SPISD::CMPFCC: return "SPISD::CMPFCC";
1971 case SPISD::CMPFCC_V9:
1972 return "SPISD::CMPFCC_V9";
1973 case SPISD::BRICC: return "SPISD::BRICC";
1974 case SPISD::BPICC:
1975 return "SPISD::BPICC";
1976 case SPISD::BPXCC:
1977 return "SPISD::BPXCC";
1978 case SPISD::BRFCC: return "SPISD::BRFCC";
1979 case SPISD::BRFCC_V9:
1980 return "SPISD::BRFCC_V9";
1981 case SPISD::BR_REG:
1982 return "SPISD::BR_REG";
1983 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1984 case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1985 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1986 case SPISD::SELECT_REG:
1987 return "SPISD::SELECT_REG";
1988 case SPISD::Hi: return "SPISD::Hi";
1989 case SPISD::Lo: return "SPISD::Lo";
1990 case SPISD::FTOI: return "SPISD::FTOI";
1991 case SPISD::ITOF: return "SPISD::ITOF";
1992 case SPISD::FTOX: return "SPISD::FTOX";
1993 case SPISD::XTOF: return "SPISD::XTOF";
1994 case SPISD::CALL: return "SPISD::CALL";
1995 case SPISD::RET_GLUE: return "SPISD::RET_GLUE";
1996 case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1997 case SPISD::FLUSHW: return "SPISD::FLUSHW";
1998 case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1999 case SPISD::TLS_LD: return "SPISD::TLS_LD";
2000 case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
2001 case SPISD::TAIL_CALL: return "SPISD::TAIL_CALL";
2002 case SPISD::LOAD_GDOP: return "SPISD::LOAD_GDOP";
2003 }
2004 return nullptr;
2005}
2006
2007EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
2008 EVT VT) const {
2009 if (!VT.isVector())
2010 return MVT::i32;
2011 return VT.changeVectorElementTypeToInteger();
2012}
2013
2014/// computeKnownBitsForTargetNode - Determine which bits of 'Op' are known to
2015/// be zero or one. Op is expected to be a target-specific node. Used by the
2016/// DAG combiner.
2017void SparcTargetLowering::computeKnownBitsForTargetNode
2018 (const SDValue Op,
2019 KnownBits &Known,
2020 const APInt &DemandedElts,
2021 const SelectionDAG &DAG,
2022 unsigned Depth) const {
2023 KnownBits Known2;
2024 Known.resetAll();
2025
2026 switch (Op.getOpcode()) {
2027 default: break;
2028 case SPISD::SELECT_ICC:
2029 case SPISD::SELECT_XCC:
2030 case SPISD::SELECT_FCC:
2031 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
2032 Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
2033
2034 // Only known if known in both the LHS and RHS.
2035 Known = Known.intersectWith(Known2);
2036 break;
2037 }
2038}
2039
2040// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so,
2041// set LHS/RHS to the LHS/RHS of the setcc and SPCC to the condition.
2042static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
2043 ISD::CondCode CC, unsigned &SPCC) {
2044 if (isNullConstant(RHS) && CC == ISD::SETNE &&
2045 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2046 LHS.getOpcode() == SPISD::SELECT_XCC) &&
2047 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
2048 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2049 (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
2050 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
2051 isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
2052 SDValue CMPCC = LHS.getOperand(3);
2053 SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
2054 LHS = CMPCC.getOperand(0);
2055 RHS = CMPCC.getOperand(1);
2056 }
2057}
2058
2059// Convert to a target node and set target flags.
2060SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
2061 SelectionDAG &DAG) const {
2062 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
2063 return DAG.getTargetGlobalAddress(GA->getGlobal(),
2064 SDLoc(GA),
2065 GA->getValueType(0),
2066 GA->getOffset(), TF);
2067
2068 if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
2069 return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
2070 CP->getAlign(), CP->getOffset(), TF);
2071
2072 if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
2073 return DAG.getTargetBlockAddress(BA->getBlockAddress(),
2074 Op.getValueType(),
2075 0,
2076 TF);
2077
2078 if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
2079 return DAG.getTargetExternalSymbol(ES->getSymbol(),
2080 ES->getValueType(0), TF);
2081
2082 llvm_unreachable("Unhandled address SDNode");
2083}
2084
2085// Split Op into high and low parts according to HiTF and LoTF.
2086// Return an ADD node combining the parts.
2087SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
2088 unsigned HiTF, unsigned LoTF,
2089 SelectionDAG &DAG) const {
2090 SDLoc DL(Op);
2091 EVT VT = Op.getValueType();
2092 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
2093 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
2094 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2095}
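// For the common %hi/%lo flag pair, the ADD built above typically selects to
// the classic two-instruction absolute-address sequence, e.g.:
//   sethi %hi(sym), %o0
//   or    %o0, %lo(sym), %o0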
2096
2097// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2098// or ExternalSymbol SDNode.
2099SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
2100 SDLoc DL(Op);
2101 EVT VT = getPointerTy(DAG.getDataLayout());
2102
2103 // Handle PIC mode first. SPARC needs a GOT load for every variable!
2104 if (isPositionIndependent()) {
2105 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2106 PICLevel::Level picLevel = M->getPICLevel();
2107 SDValue Idx;
2108
2109 if (picLevel == PICLevel::SmallPIC) {
2110 // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2111 Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2112 withTargetFlags(Op, SparcMCExpr::VK_Sparc_GOT13, DAG));
2113 } else {
2114 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2115 Idx = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
2116 SparcMCExpr::VK_Sparc_GOT10, DAG);
2117 }
2118
2119 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2120 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2121 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2122 // function has calls.
2123 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2124 MFI.setHasCalls(true);
2125 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2126 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2127 }
2128
2129 // This is one of the absolute code models.
2130 switch(getTargetMachine().getCodeModel()) {
2131 default:
2132 llvm_unreachable("Unsupported absolute code model");
2133 case CodeModel::Small:
2134 // abs32.
2135 return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
2136 SparcMCExpr::VK_Sparc_LO, DAG);
2137 case CodeModel::Medium: {
2138 // abs44.
2139 SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
2140 SparcMCExpr::VK_Sparc_M44, DAG);
2141 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2142 SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
2143 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2144 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2145 }
2146 case CodeModel::Large: {
2147 // abs64.
2148 SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
2149 SparcMCExpr::VK_Sparc_HM, DAG);
2150 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2151 SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
2152 SparcMCExpr::VK_Sparc_LO, DAG);
2153 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2154 }
2155 }
2156}
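// For reference, the Medium (abs44) case built above roughly corresponds to:
//   sethi %h44(sym), %o0
//   or    %o0, %m44(sym), %o0
//   sllx  %o0, 12, %o0
//   or    %o0, %l44(sym), %o0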
2157
2158SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
2159 SelectionDAG &DAG) const {
2160 return makeAddress(Op, DAG);
2161}
2162
2163SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
2164 SelectionDAG &DAG) const {
2165 return makeAddress(Op, DAG);
2166}
2167
2168SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
2169 SelectionDAG &DAG) const {
2170 return makeAddress(Op, DAG);
2171}
2172
2173SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2174 SelectionDAG &DAG) const {
2175
2176 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2177 if (DAG.getTarget().useEmulatedTLS())
2178 return LowerToTLSEmulatedModel(GA, DAG);
2179
2180 SDLoc DL(GA);
2181 const GlobalValue *GV = GA->getGlobal();
2182 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2183
2184 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2185
2186 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2187 unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2188 ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
2189 : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
2190 unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2191 ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
2192 : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
2193 unsigned addTF = ((model == TLSModel::GeneralDynamic)
2194 ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
2195 : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
2196 unsigned callTF = ((model == TLSModel::GeneralDynamic)
2197 ? SparcMCExpr::VK_Sparc_TLS_GD_CALL
2198 : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);
2199
2200 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2201 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2202 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2203 withTargetFlags(Op, addTF, DAG));
2204
2205 SDValue Chain = DAG.getEntryNode();
2206 SDValue InGlue;
2207
2208 Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
2209 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
2210 InGlue = Chain.getValue(1);
2211 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2212 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2213
2214 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2215 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2216 DAG.getMachineFunction(), CallingConv::C);
2217 assert(Mask && "Missing call preserved mask for calling convention");
2218 SDValue Ops[] = {Chain,
2219 Callee,
2220 Symbol,
2221 DAG.getRegister(SP::O0, PtrVT),
2222 DAG.getRegisterMask(Mask),
2223 InGlue};
2224 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2225 InGlue = Chain.getValue(1);
2226 Chain = DAG.getCALLSEQ_END(Chain, 1, 0, InGlue, DL);
2227 InGlue = Chain.getValue(1);
2228 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);
2229
2230 if (model != TLSModel::LocalDynamic)
2231 return Ret;
2232
2233 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2234 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
2235 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2236 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
2237 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2238 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2239 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
2240 }
2241
2242 if (model == TLSModel::InitialExec) {
2243 unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2244 : SparcMCExpr::VK_Sparc_TLS_IE_LD);
2245
2246 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2247
2248 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2249 // function has calls.
2250 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2251 MFI.setHasCalls(true);
2252
2253 SDValue TGA = makeHiLoPair(Op,
2254 SparcMCExpr::VK_Sparc_TLS_IE_HI22,
2255 SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
2256 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2257 SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2258 DL, PtrVT, Ptr,
2259 withTargetFlags(Op, ldTF, DAG));
2260 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2261 DAG.getRegister(SP::G7, PtrVT), Offset,
2262 withTargetFlags(Op,
2263 SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
2264 }
2265
2266 assert(model == TLSModel::LocalExec);
2267 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2268 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
2269 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2270 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
2271 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2272
2273 return DAG.getNode(ISD::ADD, DL, PtrVT,
2274 DAG.getRegister(SP::G7, PtrVT), Offset);
2275}
2276
2277SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
2278 ArgListTy &Args, SDValue Arg,
2279 const SDLoc &DL,
2280 SelectionDAG &DAG) const {
2281 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2282 EVT ArgVT = Arg.getValueType();
2283 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2284
2285 ArgListEntry Entry;
2286 Entry.Node = Arg;
2287 Entry.Ty = ArgTy;
2288
2289 if (ArgTy->isFP128Ty()) {
2290 // Create a stack object and pass the pointer to the library function.
2291 int FI = MFI.CreateStackObject(16, Align(8), false);
2292 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2293 Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
2294 Align(8));
2295
2296 Entry.Node = FIPtr;
2297 Entry.Ty = PointerType::getUnqual(ArgTy);
2298 }
2299 Args.push_back(Entry);
2300 return Chain;
2301}
2302
2303SDValue
2304SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
2305 const char *LibFuncName,
2306 unsigned numArgs) const {
2307
2308 ArgListTy Args;
2309 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2309
2311 auto PtrVT = getPointerTy(DAG.getDataLayout());
2312
2313 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2314 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2315 Type *RetTyABI = RetTy;
2316 SDValue Chain = DAG.getEntryNode();
2317 SDValue RetPtr;
2318
2319 if (RetTy->isFP128Ty()) {
2320 // Create a Stack Object to receive the return value of type f128.
2321 ArgListEntry Entry;
2322 int RetFI = MFI.CreateStackObject(16, Align(8), false);
2323 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2324 Entry.Node = RetPtr;
2325 Entry.Ty = PointerType::getUnqual(RetTy);
2326 if (!Subtarget->is64Bit()) {
2327 Entry.IsSRet = true;
2328 Entry.IndirectType = RetTy;
2329 }
2330 Entry.IsReturned = false;
2331 Args.push_back(Entry);
2332 RetTyABI = Type::getVoidTy(*DAG.getContext());
2333 }
2334
2335 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2336 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2337 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2338 }
2339 TargetLowering::CallLoweringInfo CLI(DAG);
2340 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2341 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2342
2343 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2344
2345 // chain is in second result.
2346 if (RetTyABI == RetTy)
2347 return CallInfo.first;
2348
2349 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2350
2351 Chain = CallInfo.second;
2352
2353 // Load RetPtr to get the return value.
2354 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2355 MachinePointerInfo(), Align(8));
2356}
2357
2358SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
2359 unsigned &SPCC, const SDLoc &DL,
2360 SelectionDAG &DAG) const {
2361
2362 const char *LibCall = nullptr;
2363 bool is64Bit = Subtarget->is64Bit();
2364 switch(SPCC) {
2365 default: llvm_unreachable("Unhandled conditional code!");
2366 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2367 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2368 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2369 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2370 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2371 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2372 case SPCC::FCC_UL :
2373 case SPCC::FCC_ULE:
2374 case SPCC::FCC_UG :
2375 case SPCC::FCC_UGE:
2376 case SPCC::FCC_U :
2377 case SPCC::FCC_O :
2378 case SPCC::FCC_LG :
2379 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2380 }
2381
2382 auto PtrVT = getPointerTy(DAG.getDataLayout());
2383 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2384 Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2385 ArgListTy Args;
2386 SDValue Chain = DAG.getEntryNode();
2387 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2388 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2389
2390 TargetLowering::CallLoweringInfo CLI(DAG);
2391 CLI.setDebugLoc(DL).setChain(Chain)
2392 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2393
2394 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2395
2396 // result is in first, and chain is in second result.
2397 SDValue Result = CallInfo.first;
2398
2399 switch(SPCC) {
2400 default: {
2401 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2402 SPCC = SPCC::ICC_NE;
2403 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2404 }
2405 case SPCC::FCC_UL : {
2406 SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2407 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2408 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2409 SPCC = SPCC::ICC_NE;
2410 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2411 }
2412 case SPCC::FCC_ULE: {
2413 SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2414 SPCC = SPCC::ICC_NE;
2415 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2416 }
2417 case SPCC::FCC_UG : {
2418 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2419 SPCC = SPCC::ICC_G;
2420 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2421 }
2422 case SPCC::FCC_UGE: {
2423 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2424 SPCC = SPCC::ICC_NE;
2425 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2426 }
2427
2428 case SPCC::FCC_U : {
2429 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2430 SPCC = SPCC::ICC_E;
2431 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2432 }
2433 case SPCC::FCC_O : {
2434 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2435 SPCC = SPCC::ICC_NE;
2436 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2437 }
2438 case SPCC::FCC_LG : {
2439 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2440 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2441 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2442 SPCC = SPCC::ICC_NE;
2443 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2444 }
2445 case SPCC::FCC_UE : {
2446 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2447 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2448 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2449 SPCC = SPCC::ICC_E;
2450 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2451 }
2452 }
2453}
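// The unordered cases above assume the _Q_cmp/_Qp_cmp result encoding
// 0 = equal, 1 = less, 2 = greater, 3 = unordered. For example, FCC_UL
// (unordered-or-less) ANDs the result with 1, which is nonzero exactly for
// the 'less' (1) and 'unordered' (3) encodings, while FCC_U and FCC_O test
// the result against 3 directly.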
2454
2455static SDValue
2456LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
2457 const SparcTargetLowering &TLI) {
2458
2459 if (Op.getOperand(0).getValueType() == MVT::f64)
2460 return TLI.LowerF128Op(Op, DAG,
2461 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2462
2463 if (Op.getOperand(0).getValueType() == MVT::f32)
2464 return TLI.LowerF128Op(Op, DAG,
2465 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2466
2467 llvm_unreachable("fpextend with non-float operand!");
2468 return SDValue();
2469}
2470
2471static SDValue
2472LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
2473 const SparcTargetLowering &TLI) {
2474 // FP_ROUND on f64 and f32 are legal.
2475 if (Op.getOperand(0).getValueType() != MVT::f128)
2476 return Op;
2477
2478 if (Op.getValueType() == MVT::f64)
2479 return TLI.LowerF128Op(Op, DAG,
2480 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2481 if (Op.getValueType() == MVT::f32)
2482 return TLI.LowerF128Op(Op, DAG,
2483 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2484
2485 llvm_unreachable("fpround to non-float!");
2486 return SDValue();
2487}
2488
2489static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
2490 const SparcTargetLowering &TLI,
2491 bool hasHardQuad) {
2492 SDLoc dl(Op);
2493 EVT VT = Op.getValueType();
2494 assert(VT == MVT::i32 || VT == MVT::i64);
2495
2496 // Expand f128 operations to fp128 abi calls.
2497 if (Op.getOperand(0).getValueType() == MVT::f128
2498 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2499 const char *libName = TLI.getLibcallName(VT == MVT::i32
2500 ? RTLIB::FPTOSINT_F128_I32
2501 : RTLIB::FPTOSINT_F128_I64);
2502 return TLI.LowerF128Op(Op, DAG, libName, 1);
2503 }
2504
2505 // Expand if the resulting type is illegal.
2506 if (!TLI.isTypeLegal(VT))
2507 return SDValue();
2508
2509 // Otherwise, convert the FP value to an integer in an FP register.
2510 if (VT == MVT::i32)
2511 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2512 else
2513 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2514
2515 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2516}
2517
2518static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2519 const SparcTargetLowering &TLI,
2520 bool hasHardQuad) {
2521 SDLoc dl(Op);
2522 EVT OpVT = Op.getOperand(0).getValueType();
2523 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2524
2525 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2526
2527 // Expand f128 operations to fp128 ABI calls.
2528 if (Op.getValueType() == MVT::f128
2529 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2530 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2531 ? RTLIB::SINTTOFP_I32_F128
2532 : RTLIB::SINTTOFP_I64_F128);
2533 return TLI.LowerF128Op(Op, DAG, libName, 1);
2534 }
2535
2536 // Expand if the operand type is illegal.
2537 if (!TLI.isTypeLegal(OpVT))
2538 return SDValue();
2539
2540 // Otherwise, convert the integer value to FP in an FP register.
2541 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2542 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2543 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2544}
2545
2546static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
2547 const SparcTargetLowering &TLI,
2548 bool hasHardQuad) {
2549 SDLoc dl(Op);
2550 EVT VT = Op.getValueType();
2551
2552 // Expand if it does not involve f128 or the target has support for
2553 // quad floating point instructions and the resulting type is legal.
2554 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2555 (hasHardQuad && TLI.isTypeLegal(VT)))
2556 return SDValue();
2557
2558 assert(VT == MVT::i32 || VT == MVT::i64);
2559
2560 return TLI.LowerF128Op(Op, DAG,
2561 TLI.getLibcallName(VT == MVT::i32
2562 ? RTLIB::FPTOUINT_F128_I32
2563 : RTLIB::FPTOUINT_F128_I64),
2564 1);
2565}
2566
2567static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2568 const SparcTargetLowering &TLI,
2569 bool hasHardQuad) {
2570 SDLoc dl(Op);
2571 EVT OpVT = Op.getOperand(0).getValueType();
2572 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2573
2574 // Expand if it does not involve f128 or the target has support for
2575 // quad floating point instructions and the operand type is legal.
2576 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2577 return SDValue();
2578
2579 return TLI.LowerF128Op(Op, DAG,
2580 TLI.getLibcallName(OpVT == MVT::i32
2581 ? RTLIB::UINTTOFP_I32_F128
2582 : RTLIB::UINTTOFP_I64_F128),
2583 1);
2584}
2585
2586static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
2587 const SparcTargetLowering &TLI, bool hasHardQuad,
2588 bool isV9, bool is64Bit) {
2589 SDValue Chain = Op.getOperand(0);
2590 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2591 SDValue LHS = Op.getOperand(2);
2592 SDValue RHS = Op.getOperand(3);
2593 SDValue Dest = Op.getOperand(4);
2594 SDLoc dl(Op);
2595 unsigned Opc, SPCC = ~0U;
2596
2597 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2598 // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2599 LookThroughSetCC(LHS, RHS, CC, SPCC);
2600 assert(LHS.getValueType() == RHS.getValueType());
2601
2602 // Get the condition flag.
2603 SDValue CompareFlag;
2604 if (LHS.getValueType().isInteger()) {
2605 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2606 // and the RHS is zero we might be able to use a specialized branch.
2607 const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
2608 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 && RHSC &&
2609 RHSC->isZero() && !ISD::isUnsignedIntSetCC(CC))
2610 return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
2611 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
2612 LHS);
2613
2614 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2615 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2616 if (isV9)
2617 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2618 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
2619 else
2620 // Non-v9 targets don't have xcc.
2621 Opc = SPISD::BRICC;
2622 } else {
2623 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2624 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2625 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2626 Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
2627 } else {
2628 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2629 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2630 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2631 Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
2632 }
2633 }
2634 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2635 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2636}
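// SPISD::BR_REG models the V9 branch-on-integer-register-condition
// instructions (brz, brlz, brnz, ...), which fold the compare-with-zero into
// the branch itself; they only encode signed tests, hence the
// !ISD::isUnsignedIntSetCC(CC) guard above.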
2637
2638static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2639 const SparcTargetLowering &TLI, bool hasHardQuad,
2640 bool isV9, bool is64Bit) {
2641 SDValue LHS = Op.getOperand(0);
2642 SDValue RHS = Op.getOperand(1);
2643 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2644 SDValue TrueVal = Op.getOperand(2);
2645 SDValue FalseVal = Op.getOperand(3);
2646 SDLoc dl(Op);
2647 unsigned Opc, SPCC = ~0U;
2648
2649 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2650 // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2651 LookThroughSetCC(LHS, RHS, CC, SPCC);
2652 assert(LHS.getValueType() == RHS.getValueType());
2653
2654 SDValue CompareFlag;
2655 if (LHS.getValueType().isInteger()) {
2656 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2657 // and the RHS is zero we might be able to use a specialized select.
2658 // All SELECT_CC between any two scalar integer types are eligible for
2659 // lowering to specialized instructions. Additionally, f32 and f64 types
2660 // are also eligible, but for f128 we can only use the specialized
2661 // instruction when we have hardquad.
2662 EVT ValType = TrueVal.getValueType();
2663 bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
2664 ValType == MVT::f64 ||
2665 (ValType == MVT::f128 && hasHardQuad);
2666 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2667 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
2668 return DAG.getNode(
2669 SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2670 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);
2671
2672 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2673 Opc = LHS.getValueType() == MVT::i32 ?
2674 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
2675 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2676 } else {
2677 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2678 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2679 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2680 Opc = SPISD::SELECT_ICC;
2681 } else {
2682 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2683 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2684 Opc = SPISD::SELECT_FCC;
2685 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2686 }
2687 }
2688 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2689 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2690}
2691
2692static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
2693 const SparcTargetLowering &TLI) {
2694 MachineFunction &MF = DAG.getMachineFunction();
2695 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
2696 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2697
2698 // Need frame address to find the address of VarArgsFrameIndex.
2699 MF.getFrameInfo().setFrameAddressIsTaken(true);
2700
2701 // vastart just stores the address of the VarArgsFrameIndex slot into the
2702 // memory location argument.
2703 SDLoc DL(Op);
2704 SDValue Offset =
2705 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2706 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2707 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2708 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2709 MachinePointerInfo(SV));
2710}
2711
2712SDValue SparcTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
2713 SDNode *Node = Op.getNode();
2714 EVT VT = Node->getValueType(0);
2715 SDValue InChain = Node->getOperand(0);
2716 SDValue VAListPtr = Node->getOperand(1);
2717 EVT PtrVT = VAListPtr.getValueType();
2718 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2719 SDLoc DL(Node);
2720 SDValue VAList =
2721 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2722 // Increment the pointer, VAList, to the next vaarg.
2723 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2724 DAG.getIntPtrConstant(VT.getSizeInBits()/8,
2725 DL));
2726 // Store the incremented VAList to the legalized pointer.
2727 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2728 MachinePointerInfo(SV));
2729 // Load the actual argument out of the pointer VAList.
2730 // We can't count on greater alignment than the word size.
2731 return DAG.getLoad(
2732 VT, DL, InChain, VAList, MachinePointerInfo(),
2733 Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
2734}
2735
2736static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
2737 const SparcSubtarget *Subtarget) {
2738 SDValue Chain = Op.getOperand(0); // Legalize the chain.
2739 SDValue Size = Op.getOperand(1); // Legalize the size.
2740 MaybeAlign Alignment =
2741 cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
2742 Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
2743 EVT VT = Size->getValueType(0);
2744 SDLoc dl(Op);
2745
2746 // TODO: implement over-aligned alloca. (Note: this also implies
2747 // supporting over-aligned function frames + dynamic allocations at
2748 // all, which currently isn't supported.)
2749 if (Alignment && *Alignment > StackAlign) {
2750 const MachineFunction &MF = DAG.getMachineFunction();
2751 report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
2752 "over-aligned dynamic alloca not supported.");
2753 }
2754
2755 // The resultant pointer needs to be above the register spill area
2756 // at the bottom of the stack.
2757 unsigned regSpillArea;
2758 if (Subtarget->is64Bit()) {
2759 regSpillArea = 128;
2760 } else {
2761 // On Sparc32, the size of the spill area is 92. Unfortunately,
2762 // that's only 4-byte aligned, not 8-byte aligned (the stack
2763 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2764 // aligned dynamic allocation, we actually need to add 96 to the
2765 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2766
2767 // That also means adding 4 to the size of the allocation --
2768 // before applying the 8-byte rounding. Unfortunately, the value we
2769 // get here has already had rounding applied. So, we need to add 8
2770 // instead, wasting a bit more memory.
2771
2772 // Further, this only actually needs to be done if the required
2773 // alignment is > 4, but, we've lost that info by this point, too,
2774 // so we always apply it.
2775
2776 // (An alternative approach would be to always reserve 96 bytes
2777 // instead of the required 92, but then we'd waste 4 extra bytes
2778 // in every frame, not just those with dynamic stack allocations)
2779
2780 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2781
2782 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2783 DAG.getConstant(8, dl, VT));
2784 regSpillArea = 96;
2785 }
2786
2787 unsigned SPReg = SP::O6;
2788 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2789 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2790 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2791
2792 regSpillArea += Subtarget->getStackPointerBias();
2793
2794 SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2795 DAG.getConstant(regSpillArea, dl, VT));
2796 SDValue Ops[2] = { NewVal, Chain };
2797 return DAG.getMergeValues(Ops, dl);
2798}
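// A rough sketch of the result for `%p = alloca i8, i32 %n` (with no
// over-alignment) on 32-bit SPARC:
//   size = %n + 8        ; conservative slack, see the comment above
//   %sp  = %sp - size
//   %p   = %sp + 96      ; step over the register spill area
// On 64-bit the offset is 128 plus the stack pointer bias (2047).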
2799
2800
2801static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
2802 SDLoc dl(Op);
2803 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2804 dl, MVT::Other, DAG.getEntryNode());
2805 return Chain;
2806}
2807
2808static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2809 const SparcSubtarget *Subtarget,
2810 bool AlwaysFlush = false) {
2811 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2812 MFI.setFrameAddressIsTaken(true);
2813
2814 EVT VT = Op.getValueType();
2815 SDLoc dl(Op);
2816 unsigned FrameReg = SP::I6;
2817 unsigned stackBias = Subtarget->getStackPointerBias();
2818
2819 SDValue FrameAddr;
2820 SDValue Chain;
2821
2822 // Flush first to make sure the windowed registers' values are on the stack.
2823 Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2824
2825 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2826
2827 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2828
2829 while (depth--) {
2830 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2831 DAG.getIntPtrConstant(Offset, dl));
2832 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2833 }
2834 if (Subtarget->is64Bit())
2835 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2836 DAG.getIntPtrConstant(stackBias, dl));
2837 return FrameAddr;
2838}
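// The offsets used above (56 on 32-bit, stack-bias + 112 on 64-bit) are where
// a frame's saved %fp/%i6 lands in its register save area; %i7 (the return
// address) sits one slot later, at 60/120, which LowerRETURNADDR below relies
// on. FLUSHW first forces the in-register windows out to their save areas so
// that the loads observe valid data.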
2839
2840
2841static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
2842 const SparcSubtarget *Subtarget) {
2843
2844 uint64_t depth = Op.getConstantOperandVal(0);
2845
2846 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2847
2848}
2849
2850static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
2851 const SparcTargetLowering &TLI,
2852 const SparcSubtarget *Subtarget) {
2853 MachineFunction &MF = DAG.getMachineFunction();
2854 MachineFrameInfo &MFI = MF.getFrameInfo();
2855 MFI.setReturnAddressIsTaken(true);
2856
2857 if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
2858 return SDValue();
2859
2860 EVT VT = Op.getValueType();
2861 SDLoc dl(Op);
2862 uint64_t depth = Op.getConstantOperandVal(0);
2863
2864 SDValue RetAddr;
2865 if (depth == 0) {
2866 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2867 Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2868 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2869 return RetAddr;
2870 }
2871
2872 // Need frame address to find return address of the caller.
2873 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2874
2875 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2876 SDValue Ptr = DAG.getNode(ISD::ADD,
2877 dl, VT,
2878 FrameAddr,
2879 DAG.getIntPtrConstant(Offset, dl));
2880 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2881
2882 return RetAddr;
2883}
2884
2885static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2886 unsigned opcode) {
2887 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2888 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2889
2890 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2891 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2892 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2893
2894 // Note: in little-endian, the halves of the floating-point value are
2895 // stored in the registers in the opposite order, so the subreg with
2896 // the sign bit is the highest-numbered (odd), rather than the
2897 // lowest-numbered (even).
2898
2899 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2900 SrcReg64);
2901 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2902 SrcReg64);
2903
2904 if (DAG.getDataLayout().isLittleEndian())
2905 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2906 else
2907 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2908
2909 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2910 dl, MVT::f64), 0);
2911 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2912 DstReg64, Hi32);
2913 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2914 DstReg64, Lo32);
2915 return DstReg64;
2916}
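// A double register is an even/odd pair of f32 registers, and the IEEE sign
// bit lives in the high word, so only the half holding the sign word needs
// the real operation; roughly, for fneg on big-endian:
//   fnegs %f0, %f0   ; high (sign) word
//   fmovs %f1, %f1   ; low word is just moved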
2917
2918// Lower a f128 load into two f64 loads.
2919static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
2920{
2921 SDLoc dl(Op);
2922 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2923 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2924
2925 Align Alignment = commonAlignment(LdNode->getOriginalAlign(), 8);
2926
2927 SDValue Hi64 =
2928 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2929 LdNode->getPointerInfo(), Alignment);
2930 EVT addrVT = LdNode->getBasePtr().getValueType();
2931 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2932 LdNode->getBasePtr(),
2933 DAG.getConstant(8, dl, addrVT));
2934 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2935 LdNode->getPointerInfo().getWithOffset(8),
2936 Alignment);
2937
2938 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2939 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2940
2941 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2942 dl, MVT::f128);
2943 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2944 MVT::f128,
2945 SDValue(InFP128, 0),
2946 Hi64,
2947 SubRegEven);
2948 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2949 MVT::f128,
2950 SDValue(InFP128, 0),
2951 Lo64,
2952 SubRegOdd);
2953 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2954 SDValue(Lo64.getNode(), 1) };
2955 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2956 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2957 return DAG.getMergeValues(Ops, dl);
2958}
2959
2960static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
2961{
2962 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2963
2964 EVT MemVT = LdNode->getMemoryVT();
2965 if (MemVT == MVT::f128)
2966 return LowerF128Load(Op, DAG);
2967
2968 return Op;
2969}
2970
2971// Lower a f128 store into two f64 stores.
2972static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
2973 SDLoc dl(Op);
2974 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
2975 assert(StNode->getOffset().isUndef() && "Unexpected node type");
2976
2977 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2978 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2979
2980 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2981 dl,
2982 MVT::f64,
2983 StNode->getValue(),
2984 SubRegEven);
2985 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2986 dl,
2987 MVT::f64,
2988 StNode->getValue(),
2989 SubRegOdd);
2990
2991 Align Alignment = commonAlignment(StNode->getOriginalAlign(), 8);
2992
2993 SDValue OutChains[2];
2994 OutChains[0] =
2995 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2996 StNode->getBasePtr(), StNode->getPointerInfo(),
2997 Alignment);
2998 EVT addrVT = StNode->getBasePtr().getValueType();
2999 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
3000 StNode->getBasePtr(),
3001 DAG.getConstant(8, dl, addrVT));
3002 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
3003 StNode->getPointerInfo().getWithOffset(8),
3004 Alignment);
3005 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3006}
3007
3008static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
3009{
3010 SDLoc dl(Op);
3011 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
3012
3013 EVT MemVT = St->getMemoryVT();
3014 if (MemVT == MVT::f128)
3015 return LowerF128Store(Op, DAG);
3016
3017 if (MemVT == MVT::i64) {
3018 // Custom handling for i64 stores: turn it into a bitcast and a
3019 // v2i32 store.
3020 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
3021 SDValue Chain = DAG.getStore(
3022 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
3023 St->getOriginalAlign(), St->getMemOperand()->getFlags(),
3024 St->getAAInfo());
3025 return Chain;
3026 }
3027
3028 return SDValue();
3029}
3030
3031static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
3032 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
3033 && "invalid opcode");
3034
3035 SDLoc dl(Op);
3036
3037 if (Op.getValueType() == MVT::f64)
3038 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
3039 if (Op.getValueType() != MVT::f128)
3040 return Op;
3041
3042 // Lower fabs/fneg on f128 to fabs/fneg on f64
3043 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
3044 // (As with LowerF64Op, on little-endian, we need to negate the odd
3045 // subreg)
3046
3047 SDValue SrcReg128 = Op.getOperand(0);
3048 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
3049 SrcReg128);
3050 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
3051 SrcReg128);
3052
3053 if (DAG.getDataLayout().isLittleEndian()) {
3054 if (isV9)
3055 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
3056 else
3057 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
3058 } else {
3059 if (isV9)
3060 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
3061 else
3062 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
3063 }
3064
3065 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
3066 dl, MVT::f128), 0);
3067 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
3068 DstReg128, Hi64);
3069 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
3070 DstReg128, Lo64);
3071 return DstReg128;
3072}
3073
3074static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
3075
3076 if (Op.getValueType() != MVT::i64)
3077 return Op;
3078
3079 SDLoc dl(Op);
3080 SDValue Src1 = Op.getOperand(0);
3081 SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
3082 SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
3083 DAG.getConstant(32, dl, MVT::i64));
3084 Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
3085
3086 SDValue Src2 = Op.getOperand(1);
3087 SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
3088 SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
3089 DAG.getConstant(32, dl, MVT::i64));
3090 Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
3091
3092
3093 bool hasChain = false;
3094 unsigned hiOpc = Op.getOpcode();
3095 switch (Op.getOpcode()) {
3096 default: llvm_unreachable("Invalid opcode");
3097 case ISD::ADDC: hiOpc = ISD::ADDE; break;
3098 case ISD::ADDE: hasChain = true; break;
3099 case ISD::SUBC: hiOpc = ISD::SUBE; break;
3100 case ISD::SUBE: hasChain = true; break;
3101 }
3102 SDValue Lo;
3103 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
3104 if (hasChain) {
3105 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
3106 Op.getOperand(2));
3107 } else {
3108 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
3109 }
3110 SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
3111 SDValue Carry = Hi.getValue(1);
3112
3113 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
3114 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
3115 Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
3116 DAG.getConstant(32, dl, MVT::i64));
3117
3118 SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
3119 SDValue Ops[2] = { Dst, Carry };
3120 return DAG.getMergeValues(Ops, dl);
3121}
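// Splitting the i64 into i32 halves lets instruction selection use the V8
// carry-flag pair; e.g. a 64-bit add becomes roughly:
//   addcc  %lo1, %lo2, %lo   ; sets the icc carry
//   addxcc %hi1, %hi2, %hi   ; consumes and regenerates it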
3122
3123// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
3124// in LegalizeDAG.cpp, except for the order of arguments to the library function.
3125static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
3126 const SparcTargetLowering &TLI)
3127{
3128 unsigned opcode = Op.getOpcode();
3129 assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
3130
3131 bool isSigned = (opcode == ISD::SMULO);
3132 EVT VT = MVT::i64;
3133 EVT WideVT = MVT::i128;
3134 SDLoc dl(Op);
3135 SDValue LHS = Op.getOperand(0);
3136
3137 if (LHS.getValueType() != VT)
3138 return Op;
3139
3140 SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
3141
3142 SDValue RHS = Op.getOperand(1);
3143 SDValue HiLHS, HiRHS;
3144 if (isSigned) {
3145 HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
3146 HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
3147 } else {
3148 HiLHS = DAG.getConstant(0, dl, VT);
3149 HiRHS = DAG.getConstant(0, dl, MVT::i64);
3150 }
3151
3152 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
3153
3154 TargetLowering::MakeLibCallOptions CallOptions;
3155 CallOptions.setSExt(isSigned);
3156 SDValue MulResult = TLI.makeLibCall(DAG,
3157 RTLIB::MUL_I128, WideVT,
3158 Args, CallOptions, dl).first;
3159 SDValue BottomHalf, TopHalf;
3160 std::tie(BottomHalf, TopHalf) = DAG.SplitScalar(MulResult, dl, VT, VT);
3161 if (isSigned) {
3162 SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
3163 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
3164 } else {
3165 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
3166 ISD::SETNE);
3167 }
3168 // MulResult is a node with an illegal type. Because such things are not
3169 // generally permitted during this phase of legalization, ensure that
3170 // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
3171 // been folded.
3172 assert(MulResult->use_empty() && "Illegally typed node still in use!");
3173
3174 SDValue Ops[2] = { BottomHalf, TopHalf } ;
3175 return DAG.getMergeValues(Ops, dl);
3176}
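// Overflow detection after the 128-bit multiply libcall: for unsigned
// multiplies, any nonzero high half means overflow; for signed multiplies,
// the high half must equal the sign-extension (low >> 63) of the low half.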
3177
3178static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
3179 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3180 // Expand with a fence.
3181 return SDValue();
3182 }
3183
3184 // Monotonic load/stores are legal.
3185 return Op;
3186}
3187
3188SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3189 SelectionDAG &DAG) const {
3190 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3191 SDLoc dl(Op);
3192 switch (IntNo) {
3193 default: return SDValue(); // Don't custom lower most intrinsics.
3194 case Intrinsic::thread_pointer: {
3195 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3196 return DAG.getRegister(SP::G7, PtrVT);
3197 }
3198 }
3199}
3200
3201SDValue SparcTargetLowering::
3202LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3203
3204 bool hasHardQuad = Subtarget->hasHardQuad();
3205 bool isV9 = Subtarget->isV9();
3206 bool is64Bit = Subtarget->is64Bit();
3207
3208 switch (Op.getOpcode()) {
3209 default: llvm_unreachable("Should not custom lower this!");
3210
3211 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3212 Subtarget);
3213 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3214 Subtarget);
3215 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3216 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3217 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3218 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3219 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3220 hasHardQuad);
3221 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3222 hasHardQuad);
3223 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3224 hasHardQuad);
3225 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3226 hasHardQuad);
3227 case ISD::BR_CC:
3228 return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3229 case ISD::SELECT_CC:
3230 return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3231 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3232 case ISD::VAARG: return LowerVAARG(Op, DAG);
3233 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
3234 Subtarget);
3235
3236 case ISD::LOAD: return LowerLOAD(Op, DAG);
3237 case ISD::STORE: return LowerSTORE(Op, DAG);
3238 case ISD::FADD: return LowerF128Op(Op, DAG,
3239 getLibcallName(RTLIB::ADD_F128), 2);
3240 case ISD::FSUB: return LowerF128Op(Op, DAG,
3241 getLibcallName(RTLIB::SUB_F128), 2);
3242 case ISD::FMUL: return LowerF128Op(Op, DAG,
3243 getLibcallName(RTLIB::MUL_F128), 2);
3244 case ISD::FDIV: return LowerF128Op(Op, DAG,
3245 getLibcallName(RTLIB::DIV_F128), 2);
3246 case ISD::FSQRT: return LowerF128Op(Op, DAG,
3247 getLibcallName(RTLIB::SQRT_F128),1);
3248 case ISD::FABS:
3249 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3250 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3251 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3252 case ISD::ADDC:
3253 case ISD::ADDE:
3254 case ISD::SUBC:
3255 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
3256 case ISD::UMULO:
3257 case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
3258 case ISD::ATOMIC_LOAD:
3259 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3260 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3261 }
3262}
3263
3264SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
3265 const SDLoc &DL,
3266 SelectionDAG &DAG) const {
3267 APInt V = C->getValueAPF().bitcastToAPInt();
3268 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3269 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3270 if (DAG.getDataLayout().isLittleEndian())
3271 std::swap(Lo, Hi);
3272 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3273}
3274
3275SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
3276 DAGCombinerInfo &DCI) const {
3277 SDLoc dl(N);
3278 SDValue Src = N->getOperand(0);
3279
3280 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3281 Src.getSimpleValueType() == MVT::f64)
3282 return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3283
3284 return SDValue();
3285}
3286
3287SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
3288 DAGCombinerInfo &DCI) const {
3289 switch (N->getOpcode()) {
3290 default:
3291 break;
3292 case ISD::BITCAST:
3293 return PerformBITCASTCombine(N, DCI);
3294 }
3295 return SDValue();
3296}
3297
3298MachineBasicBlock *
3299SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
3300 MachineBasicBlock *BB) const {
3301 switch (MI.getOpcode()) {
3302 default: llvm_unreachable("Unknown SELECT_CC!");
3303 case SP::SELECT_CC_Int_ICC:
3304 case SP::SELECT_CC_FP_ICC:
3305 case SP::SELECT_CC_DFP_ICC:
3306 case SP::SELECT_CC_QFP_ICC:
3307 if (Subtarget->isV9())
3308 return expandSelectCC(MI, BB, SP::BPICC);
3309 return expandSelectCC(MI, BB, SP::BCOND);
3310 case SP::SELECT_CC_Int_XCC:
3311 case SP::SELECT_CC_FP_XCC:
3312 case SP::SELECT_CC_DFP_XCC:
3313 case SP::SELECT_CC_QFP_XCC:
3314 return expandSelectCC(MI, BB, SP::BPXCC);
3315 case SP::SELECT_CC_Int_FCC:
3316 case SP::SELECT_CC_FP_FCC:
3317 case SP::SELECT_CC_DFP_FCC:
3318 case SP::SELECT_CC_QFP_FCC:
3319 if (Subtarget->isV9())
3320 return expandSelectCC(MI, BB, SP::FBCOND_V9);
3321 return expandSelectCC(MI, BB, SP::FBCOND);
3322 }
3323}
3324
3325MachineBasicBlock *
3326SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
3327 unsigned BROpcode) const {
3328 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3329 DebugLoc dl = MI.getDebugLoc();
3330 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3331
3332 // To "insert" a SELECT_CC instruction, we actually have to insert the
3333 // triangle control-flow pattern. The incoming instruction knows the
3334 // destination vreg to set, the condition code register to branch on, the
3335 // true/false values to select between, and the condition code for the branch.
3336 //
3337 // We produce the following control flow:
3338 // ThisMBB
3339 // | \
3340 // | IfFalseMBB
3341 // | /
3342 // SinkMBB
3343 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3344 MachineFunction::iterator It = ++BB->getIterator();
3345
3346 MachineBasicBlock *ThisMBB = BB;
3347 MachineFunction *F = BB->getParent();
3348 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3349 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3350 F->insert(It, IfFalseMBB);
3351 F->insert(It, SinkMBB);
3352
3353 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3354 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3355 std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3356 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3357
3358 // Set the new successors for ThisMBB.
3359 ThisMBB->addSuccessor(IfFalseMBB);
3360 ThisMBB->addSuccessor(SinkMBB);
3361
3362 BuildMI(ThisMBB, dl, TII.get(BROpcode))
3363 .addMBB(SinkMBB)
3364 .addImm(CC);
3365
3366 // IfFalseMBB just falls through to SinkMBB.
3367 IfFalseMBB->addSuccessor(SinkMBB);
3368
3369 // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3370 BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3371 MI.getOperand(0).getReg())
3372 .addReg(MI.getOperand(1).getReg())
3373 .addMBB(ThisMBB)
3374 .addReg(MI.getOperand(2).getReg())
3375 .addMBB(IfFalseMBB);
3376
3377 MI.eraseFromParent(); // The pseudo instruction is gone now.
3378 return SinkMBB;
3379}
3380
3381//===----------------------------------------------------------------------===//
3382// Sparc Inline Assembly Support
3383//===----------------------------------------------------------------------===//
3384
3385/// getConstraintType - Given a constraint letter, return the type of
3386/// constraint it is for this target.
3387SparcTargetLowering::ConstraintType
3388SparcTargetLowering::getConstraintType(StringRef Constraint) const {
3389 if (Constraint.size() == 1) {
3390 switch (Constraint[0]) {
3391 default: break;
3392 case 'r':
3393 case 'f':
3394 case 'e':
3395 return C_RegisterClass;
3396 case 'I': // SIMM13
3397 return C_Immediate;
3398 }
3399 }
3400
3401 return TargetLowering::getConstraintType(Constraint);
3402}
3403
3404TargetLowering::ConstraintWeight SparcTargetLowering::
3405getSingleConstraintMatchWeight(AsmOperandInfo &info,
3406 const char *constraint) const {
3407 ConstraintWeight weight = CW_Invalid;
3408 Value *CallOperandVal = info.CallOperandVal;
3409 // If we don't have a value, we can't do a match,
3410 // but allow it at the lowest weight.
3411 if (!CallOperandVal)
3412 return CW_Default;
3413
3414 // Look at the constraint type.
3415 switch (*constraint) {
3416 default:
3417 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3418 break;
3419 case 'I': // SIMM13
3420 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3421 if (isInt<13>(C->getSExtValue()))
3422 weight = CW_Constant;
3423 }
3424 break;
3425 }
3426 return weight;
3427}
3428
3429/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3430/// vector. If it is invalid, don't add anything to Ops.
3431void SparcTargetLowering::
3432LowerAsmOperandForConstraint(SDValue Op,
3433 std::string &Constraint,
3434 std::vector<SDValue> &Ops,
3435 SelectionDAG &DAG) const {
3436 SDValue Result;
3437
3438 // Only support length 1 constraints for now.
3439 if (Constraint.length() > 1)
3440 return;
3441
3442 char ConstraintLetter = Constraint[0];
3443 switch (ConstraintLetter) {
3444 default: break;
3445 case 'I':
3446 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3447 if (isInt<13>(C->getSExtValue())) {
3448 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
3449 Op.getValueType());
3450 break;
3451 }
3452 return;
3453 }
3454 }
3455
3456 if (Result.getNode()) {
3457 Ops.push_back(Result);
3458 return;
3459 }
3460 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3461}
3462
std::pair<unsigned, const TargetRegisterClass *>
SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.empty())
    return std::make_pair(0U, nullptr);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT == MVT::v2i32)
        return std::make_pair(0U, &SP::IntPairRegClass);
      else if (Subtarget->is64Bit())
        return std::make_pair(0U, &SP::I64RegsRegClass);
      else
        return std::make_pair(0U, &SP::IntRegsRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::LowDFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::LowQFPRegsRegClass);
      // This will generate an error message.
      return std::make_pair(0U, nullptr);
    case 'e':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::DFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::QFPRegsRegClass);
      // This will generate an error message.
      return std::make_pair(0U, nullptr);
    }
  }

  if (Constraint.front() != '{')
    return std::make_pair(0U, nullptr);

  assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
  StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
  if (RegName.empty())
    return std::make_pair(0U, nullptr);

  unsigned long long RegNo;
  // Handle numbered register aliases.
  if (RegName[0] == 'r' &&
      getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
    // r0-r7   -> g0-g7
    // r8-r15  -> o0-o7
    // r16-r23 -> l0-l7
    // r24-r31 -> i0-i7
    if (RegNo > 31)
      return std::make_pair(0U, nullptr);
    const char RegTypes[] = {'g', 'o', 'l', 'i'};
    char RegType = RegTypes[RegNo / 8];
    char RegIndex = '0' + (RegNo % 8);
    char Tmp[] = {'{', RegType, RegIndex, '}', 0};
    return getRegForInlineAsmConstraint(TRI, Tmp, VT);
  }

  // Rewrite the fN constraint according to the value type if needed.
  if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
      getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
    if (VT == MVT::f64 && (RegNo % 2 == 0)) {
      return getRegForInlineAsmConstraint(
          TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
    } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
      return getRegForInlineAsmConstraint(
          TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
    } else {
      return std::make_pair(0U, nullptr);
    }
  }

  auto ResultPair =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
  if (!ResultPair.second)
    return std::make_pair(0U, nullptr);

  // Force the use of I64Regs over IntRegs for 64-bit values.
  if (Subtarget->is64Bit() && VT == MVT::i64) {
    assert(ResultPair.second == &SP::IntRegsRegClass &&
           "Unexpected register class");
    return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
  }

  return ResultPair;
}

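// Illustrative sketch (added for exposition; not in the upstream file): at
// the IR level, explicit register constraints take the forms handled above,
// e.g.
//
//   call i32 asm "mov $1, $0", "=r,{r8}"(i32 %b)  ; {r8} is rewritten to {o0}
//
// and an "{f2}" constraint on an f64 operand is rewritten to "{d1}", since a
// double occupies an even/odd pair of single-precision registers.
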
bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}

void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {

  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves f128 or i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::READCYCLECOUNTER: {
    assert(Subtarget->hasLeonCycleCounter());
    SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
    SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
    SDValue Ops[] = { Lo, Hi };
    SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
    Results.push_back(Pair);
    Results.push_back(N->getOperand(0));
    return;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves f128 or i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::LOAD: {
    LoadSDNode *Ld = cast<LoadSDNode>(N);
    // Custom handling only for i64: turn i64 load into a v2i32 load,
    // and a bitcast.
    if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
      return;

    SDLoc dl(N);
    SDValue LoadRes = DAG.getExtLoad(
        Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
        Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32,
        Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags(),
        Ld->getAAInfo());

    SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
    Results.push_back(Res);
    Results.push_back(LoadRes.getValue(1));
    return;
  }
  }
}

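// Illustrative sketch (added for exposition; not in the upstream file): on
// 32-bit SPARC, where i64 is not a legal type but v2i32 is, the ISD::LOAD
// case above rewrites
//
//   t1: i64,ch = load t0, %ptr
// into
//   t1: v2i32,ch = load t0, %ptr
//   t2: i64      = bitcast t1
//
// so the 64-bit result is materialized as a pair of 32-bit registers.
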
// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool SparcTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}
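// Editorial note on the two overrides above (an assumption, not upstream
// text): on Linux, glibc keeps the stack-protector canary in the thread
// control block reachable from the thread pointer (%g7), so the guard is
// loaded via the LOAD_STACK_GUARD pseudo rather than through a
// __stack_chk_guard global, and the default SSP declarations are
// deliberately not inserted.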