LLVM  7.0.0svn
SparcISelLowering.cpp
Go to the documentation of this file.
1 //===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the interfaces that Sparc uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "SparcISelLowering.h"
18 #include "SparcRegisterInfo.h"
19 #include "SparcTargetMachine.h"
20 #include "SparcTargetObjectFile.h"
21 #include "llvm/ADT/StringSwitch.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/Module.h"
33 #include "llvm/Support/KnownBits.h"
34 using namespace llvm;
35 
36 
37 //===----------------------------------------------------------------------===//
38 // Calling Convention Implementation
39 //===----------------------------------------------------------------------===//
40 
41 static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
42  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
43  ISD::ArgFlagsTy &ArgFlags, CCState &State)
44 {
45  assert (ArgFlags.isSRet());
46 
47  // Assign SRet argument.
48  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
49  0,
50  LocVT, LocInfo));
51  return true;
52 }
53 
54 static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
55  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
56  ISD::ArgFlagsTy &ArgFlags, CCState &State)
57 {
58  static const MCPhysReg RegList[] = {
59  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
60  };
61  // Try to get first reg.
62  if (unsigned Reg = State.AllocateReg(RegList)) {
63  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
64  } else {
65  // Assign whole thing in stack.
66  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
67  State.AllocateStack(8,4),
68  LocVT, LocInfo));
69  return true;
70  }
71 
72  // Try to get second reg.
73  if (unsigned Reg = State.AllocateReg(RegList))
74  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
75  else
76  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
77  State.AllocateStack(4,4),
78  LocVT, LocInfo));
79  return true;
80 }
81 
82 static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
83  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
84  ISD::ArgFlagsTy &ArgFlags, CCState &State)
85 {
86  static const MCPhysReg RegList[] = {
87  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
88  };
89 
90  // Try to get first reg.
91  if (unsigned Reg = State.AllocateReg(RegList))
92  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
93  else
94  return false;
95 
96  // Try to get second reg.
97  if (unsigned Reg = State.AllocateReg(RegList))
98  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
99  else
100  return false;
101 
102  return true;
103 }
104 
105 // Allocate a full-sized argument for the 64-bit ABI.
106 static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
107  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
108  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
109  assert((LocVT == MVT::f32 || LocVT == MVT::f128
110  || LocVT.getSizeInBits() == 64) &&
111  "Can't handle non-64 bits locations");
112 
113  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
114  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
115  unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
116  unsigned Offset = State.AllocateStack(size, alignment);
117  unsigned Reg = 0;
118 
119  if (LocVT == MVT::i64 && Offset < 6*8)
120  // Promote integers to %i0-%i5.
121  Reg = SP::I0 + Offset/8;
122  else if (LocVT == MVT::f64 && Offset < 16*8)
123  // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
124  Reg = SP::D0 + Offset/8;
125  else if (LocVT == MVT::f32 && Offset < 16*8)
126  // Promote floats to %f1, %f3, ...
127  Reg = SP::F1 + Offset/4;
128  else if (LocVT == MVT::f128 && Offset < 16*8)
129  // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
130  Reg = SP::Q0 + Offset/16;
131 
132  // Promote to register when possible, otherwise use the stack slot.
133  if (Reg) {
134  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
135  return true;
136  }
137 
138  // This argument goes on the stack in an 8-byte slot.
139  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
140  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
141  if (LocVT == MVT::f32)
142  Offset += 4;
143 
144  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
145  return true;
146 }
147 
148 // Allocate a half-sized argument for the 64-bit ABI.
149 //
150 // This is used when passing { float, int } structs by value in registers.
151 static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
152  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
153  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
154  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
155  unsigned Offset = State.AllocateStack(4, 4);
156 
157  if (LocVT == MVT::f32 && Offset < 16*8) {
158  // Promote floats to %f0-%f31.
159  State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
160  LocVT, LocInfo));
161  return true;
162  }
163 
164  if (LocVT == MVT::i32 && Offset < 6*8) {
165  // Promote integers to %i0-%i5, using half the register.
166  unsigned Reg = SP::I0 + Offset/8;
167  LocVT = MVT::i64;
168  LocInfo = CCValAssign::AExt;
169 
170  // Set the Custom bit if this i32 goes in the high bits of a register.
171  if (Offset % 8 == 0)
172  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
173  LocVT, LocInfo));
174  else
175  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
176  return true;
177  }
178 
179  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
180  return true;
181 }
182 
183 #include "SparcGenCallingConv.inc"
184 
185 // The calling conventions in SparcCallingConv.td are described in terms of the
186 // callee's register window. This function translates registers to the
187 // corresponding caller window %o register.
188 static unsigned toCallerWindow(unsigned Reg) {
189  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
190  "Unexpected enum");
191  if (Reg >= SP::I0 && Reg <= SP::I7)
192  return Reg - SP::I0 + SP::O0;
193  return Reg;
194 }
195 
196 SDValue
198  bool IsVarArg,
200  const SmallVectorImpl<SDValue> &OutVals,
201  const SDLoc &DL, SelectionDAG &DAG) const {
202  if (Subtarget->is64Bit())
203  return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
204  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
205 }
206 
207 SDValue
209  bool IsVarArg,
211  const SmallVectorImpl<SDValue> &OutVals,
212  const SDLoc &DL, SelectionDAG &DAG) const {
214 
215  // CCValAssign - represent the assignment of the return value to locations.
217 
218  // CCState - Info about the registers and stack slot.
219  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
220  *DAG.getContext());
221 
222  // Analyze return values.
223  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
224 
225  SDValue Flag;
226  SmallVector<SDValue, 4> RetOps(1, Chain);
227  // Make room for the return address offset.
228  RetOps.push_back(SDValue());
229 
230  // Copy the result values into the output registers.
231  for (unsigned i = 0, realRVLocIdx = 0;
232  i != RVLocs.size();
233  ++i, ++realRVLocIdx) {
234  CCValAssign &VA = RVLocs[i];
235  assert(VA.isRegLoc() && "Can only return in registers!");
236 
237  SDValue Arg = OutVals[realRVLocIdx];
238 
239  if (VA.needsCustom()) {
240  assert(VA.getLocVT() == MVT::v2i32);
241  // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
242  // happen by default if this wasn't a legal type)
243 
245  Arg,
246  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
248  Arg,
249  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
250 
251  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
252  Flag = Chain.getValue(1);
253  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
254  VA = RVLocs[++i]; // skip ahead to next loc
255  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
256  Flag);
257  } else
258  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
259 
260  // Guarantee that all emitted copies are stuck together with flags.
261  Flag = Chain.getValue(1);
262  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
263  }
264 
265  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
266  // If the function returns a struct, copy the SRetReturnReg to I0
267  if (MF.getFunction().hasStructRetAttr()) {
269  unsigned Reg = SFI->getSRetReturnReg();
270  if (!Reg)
271  llvm_unreachable("sret virtual register not created in the entry block");
272  auto PtrVT = getPointerTy(DAG.getDataLayout());
273  SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
274  Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
275  Flag = Chain.getValue(1);
276  RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
277  RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
278  }
279 
280  RetOps[0] = Chain; // Update chain.
281  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
282 
283  // Add the flag if we have it.
284  if (Flag.getNode())
285  RetOps.push_back(Flag);
286 
287  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
288 }
289 
290 // Lower return values for the 64-bit ABI.
291 // Return values are passed the exactly the same way as function arguments.
292 SDValue
294  bool IsVarArg,
296  const SmallVectorImpl<SDValue> &OutVals,
297  const SDLoc &DL, SelectionDAG &DAG) const {
298  // CCValAssign - represent the assignment of the return value to locations.
300 
301  // CCState - Info about the registers and stack slot.
302  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
303  *DAG.getContext());
304 
305  // Analyze return values.
306  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
307 
308  SDValue Flag;
309  SmallVector<SDValue, 4> RetOps(1, Chain);
310 
311  // The second operand on the return instruction is the return address offset.
312  // The return address is always %i7+8 with the 64-bit ABI.
313  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
314 
315  // Copy the result values into the output registers.
316  for (unsigned i = 0; i != RVLocs.size(); ++i) {
317  CCValAssign &VA = RVLocs[i];
318  assert(VA.isRegLoc() && "Can only return in registers!");
319  SDValue OutVal = OutVals[i];
320 
321  // Integer return values must be sign or zero extended by the callee.
322  switch (VA.getLocInfo()) {
323  case CCValAssign::Full: break;
324  case CCValAssign::SExt:
325  OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
326  break;
327  case CCValAssign::ZExt:
328  OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
329  break;
330  case CCValAssign::AExt:
331  OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
332  break;
333  default:
334  llvm_unreachable("Unknown loc info!");
335  }
336 
337  // The custom bit on an i32 return value indicates that it should be passed
338  // in the high bits of the register.
339  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
340  OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
341  DAG.getConstant(32, DL, MVT::i32));
342 
343  // The next value may go in the low bits of the same register.
344  // Handle both at once.
345  if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
346  SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
347  OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
348  // Skip the next value, it's already done.
349  ++i;
350  }
351  }
352 
353  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
354 
355  // Guarantee that all emitted copies are stuck together with flags.
356  Flag = Chain.getValue(1);
357  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
358  }
359 
360  RetOps[0] = Chain; // Update chain.
361 
362  // Add the flag if we have it.
363  if (Flag.getNode())
364  RetOps.push_back(Flag);
365 
366  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
367 }
368 
370  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
371  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
372  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
373  if (Subtarget->is64Bit())
374  return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
375  DL, DAG, InVals);
376  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
377  DL, DAG, InVals);
378 }
379 
380 /// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
381 /// passed in either one or two GPRs, including FP values. TODO: we should
382 /// pass FP values in FP registers for fastcc functions.
384  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
385  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
386  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
388  MachineRegisterInfo &RegInfo = MF.getRegInfo();
390 
391  // Assign locations to all of the incoming arguments.
393  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
394  *DAG.getContext());
395  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
396 
397  const unsigned StackOffset = 92;
398  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
399 
400  unsigned InIdx = 0;
401  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
402  CCValAssign &VA = ArgLocs[i];
403 
404  if (Ins[InIdx].Flags.isSRet()) {
405  if (InIdx != 0)
406  report_fatal_error("sparc only supports sret on the first parameter");
407  // Get SRet from [%fp+64].
408  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
409  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
410  SDValue Arg =
411  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
412  InVals.push_back(Arg);
413  continue;
414  }
415 
416  if (VA.isRegLoc()) {
417  if (VA.needsCustom()) {
418  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
419 
420  unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
421  MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
422  SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
423 
424  assert(i+1 < e);
425  CCValAssign &NextVA = ArgLocs[++i];
426 
427  SDValue LoVal;
428  if (NextVA.isMemLoc()) {
429  int FrameIdx = MF.getFrameInfo().
430  CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
431  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
432  LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
433  } else {
434  unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
435  &SP::IntRegsRegClass);
436  LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
437  }
438 
439  if (IsLittleEndian)
440  std::swap(LoVal, HiVal);
441 
442  SDValue WholeValue =
443  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
444  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
445  InVals.push_back(WholeValue);
446  continue;
447  }
448  unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
449  MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
450  SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
451  if (VA.getLocVT() == MVT::f32)
452  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
453  else if (VA.getLocVT() != MVT::i32) {
454  Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
455  DAG.getValueType(VA.getLocVT()));
456  Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
457  }
458  InVals.push_back(Arg);
459  continue;
460  }
461 
462  assert(VA.isMemLoc());
463 
464  unsigned Offset = VA.getLocMemOffset()+StackOffset;
465  auto PtrVT = getPointerTy(DAG.getDataLayout());
466 
467  if (VA.needsCustom()) {
468  assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
469  // If it is double-word aligned, just load.
470  if (Offset % 8 == 0) {
471  int FI = MF.getFrameInfo().CreateFixedObject(8,
472  Offset,
473  true);
474  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
475  SDValue Load =
476  DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
477  InVals.push_back(Load);
478  continue;
479  }
480 
481  int FI = MF.getFrameInfo().CreateFixedObject(4,
482  Offset,
483  true);
484  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
485  SDValue HiVal =
486  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
487  int FI2 = MF.getFrameInfo().CreateFixedObject(4,
488  Offset+4,
489  true);
490  SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
491 
492  SDValue LoVal =
493  DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
494 
495  if (IsLittleEndian)
496  std::swap(LoVal, HiVal);
497 
498  SDValue WholeValue =
499  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
500  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
501  InVals.push_back(WholeValue);
502  continue;
503  }
504 
505  int FI = MF.getFrameInfo().CreateFixedObject(4,
506  Offset,
507  true);
508  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
509  SDValue Load ;
510  if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
511  Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
512  } else if (VA.getValVT() == MVT::f128) {
513  report_fatal_error("SPARCv8 does not handle f128 in calls; "
514  "pass indirectly");
515  } else {
516  // We shouldn't see any other value types here.
517  llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
518  }
519  InVals.push_back(Load);
520  }
521 
522  if (MF.getFunction().hasStructRetAttr()) {
523  // Copy the SRet Argument to SRetReturnReg.
525  unsigned Reg = SFI->getSRetReturnReg();
526  if (!Reg) {
527  Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
528  SFI->setSRetReturnReg(Reg);
529  }
530  SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
531  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
532  }
533 
534  // Store remaining ArgRegs to the stack if this is a varargs function.
535  if (isVarArg) {
536  static const MCPhysReg ArgRegs[] = {
537  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
538  };
539  unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
540  const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
541  unsigned ArgOffset = CCInfo.getNextStackOffset();
542  if (NumAllocated == 6)
543  ArgOffset += StackOffset;
544  else {
545  assert(!ArgOffset);
546  ArgOffset = 68+4*NumAllocated;
547  }
548 
549  // Remember the vararg offset for the va_start implementation.
550  FuncInfo->setVarArgsFrameOffset(ArgOffset);
551 
552  std::vector<SDValue> OutChains;
553 
554  for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
555  unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
556  MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
557  SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
558 
559  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
560  true);
561  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
562 
563  OutChains.push_back(
564  DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
565  ArgOffset += 4;
566  }
567 
568  if (!OutChains.empty()) {
569  OutChains.push_back(Chain);
570  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
571  }
572  }
573 
574  return Chain;
575 }
576 
577 // Lower formal arguments for the 64 bit ABI.
579  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
580  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
581  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
583 
584  // Analyze arguments according to CC_Sparc64.
586  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
587  *DAG.getContext());
588  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
589 
590  // The argument array begins at %fp+BIAS+128, after the register save area.
591  const unsigned ArgArea = 128;
592 
593  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
594  CCValAssign &VA = ArgLocs[i];
595  if (VA.isRegLoc()) {
596  // This argument is passed in a register.
597  // All integer register arguments are promoted by the caller to i64.
598 
599  // Create a virtual register for the promoted live-in value.
600  unsigned VReg = MF.addLiveIn(VA.getLocReg(),
601  getRegClassFor(VA.getLocVT()));
602  SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
603 
604  // Get the high bits for i32 struct elements.
605  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
606  Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
607  DAG.getConstant(32, DL, MVT::i32));
608 
609  // The caller promoted the argument, so insert an Assert?ext SDNode so we
610  // won't promote the value again in this function.
611  switch (VA.getLocInfo()) {
612  case CCValAssign::SExt:
613  Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
614  DAG.getValueType(VA.getValVT()));
615  break;
616  case CCValAssign::ZExt:
617  Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
618  DAG.getValueType(VA.getValVT()));
619  break;
620  default:
621  break;
622  }
623 
624  // Truncate the register down to the argument type.
625  if (VA.isExtInLoc())
626  Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
627 
628  InVals.push_back(Arg);
629  continue;
630  }
631 
632  // The registers are exhausted. This argument was passed on the stack.
633  assert(VA.isMemLoc());
634  // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
635  // beginning of the arguments area at %fp+BIAS+128.
636  unsigned Offset = VA.getLocMemOffset() + ArgArea;
637  unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
638  // Adjust offset for extended arguments, SPARC is big-endian.
639  // The caller will have written the full slot with extended bytes, but we
640  // prefer our own extending loads.
641  if (VA.isExtInLoc())
642  Offset += 8 - ValSize;
643  int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
644  InVals.push_back(
645  DAG.getLoad(VA.getValVT(), DL, Chain,
648  }
649 
650  if (!IsVarArg)
651  return Chain;
652 
653  // This function takes variable arguments, some of which may have been passed
654  // in registers %i0-%i5. Variable floating point arguments are never passed
655  // in floating point registers. They go on %i0-%i5 or on the stack like
656  // integer arguments.
657  //
658  // The va_start intrinsic needs to know the offset to the first variable
659  // argument.
660  unsigned ArgOffset = CCInfo.getNextStackOffset();
662  // Skip the 128 bytes of register save area.
663  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
664  Subtarget->getStackPointerBias());
665 
666  // Save the variable arguments that were passed in registers.
667  // The caller is required to reserve stack space for 6 arguments regardless
668  // of how many arguments were actually passed.
669  SmallVector<SDValue, 8> OutChains;
670  for (; ArgOffset < 6*8; ArgOffset += 8) {
671  unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
672  SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
673  int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
674  auto PtrVT = getPointerTy(MF.getDataLayout());
675  OutChains.push_back(
676  DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
678  }
679 
680  if (!OutChains.empty())
681  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
682 
683  return Chain;
684 }
685 
686 SDValue
688  SmallVectorImpl<SDValue> &InVals) const {
689  if (Subtarget->is64Bit())
690  return LowerCall_64(CLI, InVals);
691  return LowerCall_32(CLI, InVals);
692 }
693 
695  ImmutableCallSite CS) {
696  if (CS)
697  return CS.hasFnAttr(Attribute::ReturnsTwice);
698 
699  const Function *CalleeFn = nullptr;
700  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
701  CalleeFn = dyn_cast<Function>(G->getGlobal());
702  } else if (ExternalSymbolSDNode *E =
703  dyn_cast<ExternalSymbolSDNode>(Callee)) {
704  const Function &Fn = DAG.getMachineFunction().getFunction();
705  const Module *M = Fn.getParent();
706  const char *CalleeName = E->getSymbol();
707  CalleeFn = M->getFunction(CalleeName);
708  }
709 
710  if (!CalleeFn)
711  return false;
712  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
713 }
714 
715 // Lower a call for the 32-bit ABI.
716 SDValue
718  SmallVectorImpl<SDValue> &InVals) const {
719  SelectionDAG &DAG = CLI.DAG;
720  SDLoc &dl = CLI.DL;
722  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
724  SDValue Chain = CLI.Chain;
725  SDValue Callee = CLI.Callee;
726  bool &isTailCall = CLI.IsTailCall;
727  CallingConv::ID CallConv = CLI.CallConv;
728  bool isVarArg = CLI.IsVarArg;
729 
730  // Sparc target does not yet support tail call optimization.
731  isTailCall = false;
732 
733  // Analyze operands of the call, assigning locations to each operand.
735  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
736  *DAG.getContext());
737  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
738 
739  // Get the size of the outgoing arguments stack space requirement.
740  unsigned ArgsSize = CCInfo.getNextStackOffset();
741 
742  // Keep stack frames 8-byte aligned.
743  ArgsSize = (ArgsSize+7) & ~7;
744 
746 
747  // Create local copies for byval args.
748  SmallVector<SDValue, 8> ByValArgs;
749  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
750  ISD::ArgFlagsTy Flags = Outs[i].Flags;
751  if (!Flags.isByVal())
752  continue;
753 
754  SDValue Arg = OutVals[i];
755  unsigned Size = Flags.getByValSize();
756  unsigned Align = Flags.getByValAlign();
757 
758  if (Size > 0U) {
759  int FI = MFI.CreateStackObject(Size, Align, false);
760  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
761  SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
762 
763  Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
764  false, // isVolatile,
765  (Size <= 32), // AlwaysInline if size <= 32,
766  false, // isTailCall
768  ByValArgs.push_back(FIPtr);
769  }
770  else {
771  SDValue nullVal;
772  ByValArgs.push_back(nullVal);
773  }
774  }
775 
776  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
777 
779  SmallVector<SDValue, 8> MemOpChains;
780 
781  const unsigned StackOffset = 92;
782  bool hasStructRetAttr = false;
783  // Walk the register/memloc assignments, inserting copies/loads.
784  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
785  i != e;
786  ++i, ++realArgIdx) {
787  CCValAssign &VA = ArgLocs[i];
788  SDValue Arg = OutVals[realArgIdx];
789 
790  ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
791 
792  // Use local copy if it is a byval arg.
793  if (Flags.isByVal()) {
794  Arg = ByValArgs[byvalArgIdx++];
795  if (!Arg) {
796  continue;
797  }
798  }
799 
800  // Promote the value if needed.
801  switch (VA.getLocInfo()) {
802  default: llvm_unreachable("Unknown loc info!");
803  case CCValAssign::Full: break;
804  case CCValAssign::SExt:
805  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
806  break;
807  case CCValAssign::ZExt:
808  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
809  break;
810  case CCValAssign::AExt:
811  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
812  break;
813  case CCValAssign::BCvt:
814  Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
815  break;
816  }
817 
818  if (Flags.isSRet()) {
819  assert(VA.needsCustom());
820  // store SRet argument in %sp+64
821  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
822  SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
823  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
824  MemOpChains.push_back(
825  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
826  hasStructRetAttr = true;
827  continue;
828  }
829 
830  if (VA.needsCustom()) {
831  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
832 
833  if (VA.isMemLoc()) {
834  unsigned Offset = VA.getLocMemOffset() + StackOffset;
835  // if it is double-word aligned, just store.
836  if (Offset % 8 == 0) {
837  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
838  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
839  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
840  MemOpChains.push_back(
841  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
842  continue;
843  }
844  }
845 
846  if (VA.getLocVT() == MVT::f64) {
847  // Move from the float value from float registers into the
848  // integer registers.
849 
850  // TODO: The f64 -> v2i32 conversion is super-inefficient for
851  // constants: it sticks them in the constant pool, then loads
852  // to a fp register, then stores to temp memory, then loads to
853  // integer registers.
854  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
855  }
856 
858  Arg,
859  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
861  Arg,
862  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
863 
864  if (VA.isRegLoc()) {
865  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
866  assert(i+1 != e);
867  CCValAssign &NextVA = ArgLocs[++i];
868  if (NextVA.isRegLoc()) {
869  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
870  } else {
871  // Store the second part in stack.
872  unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
873  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
874  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
875  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
876  MemOpChains.push_back(
877  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
878  }
879  } else {
880  unsigned Offset = VA.getLocMemOffset() + StackOffset;
881  // Store the first part.
882  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
883  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
884  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
885  MemOpChains.push_back(
886  DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
887  // Store the second part.
888  PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
889  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
890  MemOpChains.push_back(
891  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
892  }
893  continue;
894  }
895 
896  // Arguments that can be passed on register must be kept at
897  // RegsToPass vector
898  if (VA.isRegLoc()) {
899  if (VA.getLocVT() != MVT::f32) {
900  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
901  continue;
902  }
903  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
904  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
905  continue;
906  }
907 
908  assert(VA.isMemLoc());
909 
910  // Create a store off the stack pointer for this argument.
911  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
912  SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
913  dl);
914  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
915  MemOpChains.push_back(
916  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
917  }
918 
919 
920  // Emit all stores, make sure the occur before any copies into physregs.
921  if (!MemOpChains.empty())
922  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
923 
924  // Build a sequence of copy-to-reg nodes chained together with token
925  // chain and flag operands which copy the outgoing args into registers.
926  // The InFlag in necessary since all emitted instructions must be
927  // stuck together.
928  SDValue InFlag;
929  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
930  unsigned Reg = toCallerWindow(RegsToPass[i].first);
931  Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
932  InFlag = Chain.getValue(1);
933  }
934 
935  unsigned SRetArgSize = (hasStructRetAttr)? getSRetArgSize(DAG, Callee):0;
936  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
937 
938  // If the callee is a GlobalAddress node (quite common, every direct call is)
939  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
940  // Likewise ExternalSymbol -> TargetExternalSymbol.
942  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
943  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
944  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
945  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
946 
947  // Returns a chain & a flag for retval copy to use
948  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
950  Ops.push_back(Chain);
951  Ops.push_back(Callee);
952  if (hasStructRetAttr)
953  Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
954  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
955  Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
956  RegsToPass[i].second.getValueType()));
957 
958  // Add a register mask operand representing the call-preserved registers.
959  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
960  const uint32_t *Mask =
961  ((hasReturnsTwice)
962  ? TRI->getRTCallPreservedMask(CallConv)
963  : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
964  assert(Mask && "Missing call preserved mask for calling convention");
965  Ops.push_back(DAG.getRegisterMask(Mask));
966 
967  if (InFlag.getNode())
968  Ops.push_back(InFlag);
969 
970  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
971  InFlag = Chain.getValue(1);
972 
973  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
974  DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
975  InFlag = Chain.getValue(1);
976 
977  // Assign locations to each value returned by this call.
979  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
980  *DAG.getContext());
981 
982  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
983 
984  // Copy all of the result registers out of their specified physreg.
985  for (unsigned i = 0; i != RVLocs.size(); ++i) {
986  if (RVLocs[i].getLocVT() == MVT::v2i32) {
987  SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
988  SDValue Lo = DAG.getCopyFromReg(
989  Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
990  Chain = Lo.getValue(1);
991  InFlag = Lo.getValue(2);
992  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
993  DAG.getConstant(0, dl, MVT::i32));
994  SDValue Hi = DAG.getCopyFromReg(
995  Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
996  Chain = Hi.getValue(1);
997  InFlag = Hi.getValue(2);
998  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
999  DAG.getConstant(1, dl, MVT::i32));
1000  InVals.push_back(Vec);
1001  } else {
1002  Chain =
1003  DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1004  RVLocs[i].getValVT(), InFlag)
1005  .getValue(1);
1006  InFlag = Chain.getValue(2);
1007  InVals.push_back(Chain.getValue(0));
1008  }
1009  }
1010 
1011  return Chain;
1012 }
1013 
1014 // FIXME? Maybe this could be a TableGen attribute on some registers and
1015 // this table could be generated automatically from RegInfo.
1016 unsigned SparcTargetLowering::getRegisterByName(const char* RegName, EVT VT,
1017  SelectionDAG &DAG) const {
1018  unsigned Reg = StringSwitch<unsigned>(RegName)
1019  .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1020  .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1021  .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1022  .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1023  .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1024  .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1025  .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1026  .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1027  .Default(0);
1028 
1029  if (Reg)
1030  return Reg;
1031 
1032  report_fatal_error("Invalid register name global variable");
1033 }
1034 
// This function returns true if CalleeName is one of the soft-float ABI
// library functions that return a long double (fp128).
static bool isFP128ABICall(const char *CalleeName)
{
  // The _Q_* libcalls of the 32-bit SPARC soft-quad ABI that produce an
  // fp128 result.
  static const char *const ABICalls[] = {
      "_Q_add",  "_Q_sub",  "_Q_mul",  "_Q_div",
      "_Q_sqrt", "_Q_neg",
      "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq",
      "_Q_lltoq", "_Q_ulltoq"
  };
  for (const char *Name : ABICalls)
    if (strcmp(CalleeName, Name) == 0)
      return true;
  return false;
}
1051 
1052 unsigned
1054 {
1055  const Function *CalleeFn = nullptr;
1056  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1057  CalleeFn = dyn_cast<Function>(G->getGlobal());
1058  } else if (ExternalSymbolSDNode *E =
1059  dyn_cast<ExternalSymbolSDNode>(Callee)) {
1060  const Function &F = DAG.getMachineFunction().getFunction();
1061  const Module *M = F.getParent();
1062  const char *CalleeName = E->getSymbol();
1063  CalleeFn = M->getFunction(CalleeName);
1064  if (!CalleeFn && isFP128ABICall(CalleeName))
1065  return 16; // Return sizeof(fp128)
1066  }
1067 
1068  if (!CalleeFn)
1069  return 0;
1070 
1071  // It would be nice to check for the sret attribute on CalleeFn here,
1072  // but since it is not part of the function type, any check will misfire.
1073 
1074  PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
1075  Type *ElementTy = Ty->getElementType();
1076  return DAG.getDataLayout().getTypeAllocSize(ElementTy);
1077 }
1078 
1079 
1080 // Fixup floating point arguments in the ... part of a varargs call.
1081 //
1082 // The SPARC v9 ABI requires that floating point arguments are treated the same
1083 // as integers when calling a varargs function. This does not apply to the
1084 // fixed arguments that are part of the function's prototype.
1085 //
1086 // This function post-processes a CCValAssign array created by
1087 // AnalyzeCallOperands().
1089  ArrayRef<ISD::OutputArg> Outs) {
1090  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1091  const CCValAssign &VA = ArgLocs[i];
1092  MVT ValTy = VA.getLocVT();
1093  // FIXME: What about f32 arguments? C promotes them to f64 when calling
1094  // varargs functions.
1095  if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1096  continue;
1097  // The fixed arguments to a varargs function still go in FP registers.
1098  if (Outs[VA.getValNo()].IsFixed)
1099  continue;
1100 
1101  // This floating point argument should be reassigned.
1102  CCValAssign NewVA;
1103 
1104  // Determine the offset into the argument array.
1105  unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1106  unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1107  unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1108  assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1109 
1110  if (Offset < 6*8) {
1111  // This argument should go in %i0-%i5.
1112  unsigned IReg = SP::I0 + Offset/8;
1113  if (ValTy == MVT::f64)
1114  // Full register, just bitconvert into i64.
1115  NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
1116  IReg, MVT::i64, CCValAssign::BCvt);
1117  else {
1118  assert(ValTy == MVT::f128 && "Unexpected type!");
1119  // Full register, just bitconvert into i128 -- We will lower this into
1120  // two i64s in LowerCall_64.
1121  NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
1122  IReg, MVT::i128, CCValAssign::BCvt);
1123  }
1124  } else {
1125  // This needs to go to memory, we're out of integer registers.
1126  NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
1127  Offset, VA.getLocVT(), VA.getLocInfo());
1128  }
1129  ArgLocs[i] = NewVA;
1130  }
1131 }
1132 
1133 // Lower a call for the 64-bit ABI.
1134 SDValue
1136  SmallVectorImpl<SDValue> &InVals) const {
1137  SelectionDAG &DAG = CLI.DAG;
1138  SDLoc DL = CLI.DL;
1139  SDValue Chain = CLI.Chain;
1140  auto PtrVT = getPointerTy(DAG.getDataLayout());
1141 
1142  // Sparc target does not yet support tail call optimization.
1143  CLI.IsTailCall = false;
1144 
1145  // Analyze operands of the call, assigning locations to each operand.
1147  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1148  *DAG.getContext());
1149  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1150 
1151  // Get the size of the outgoing arguments stack space requirement.
1152  // The stack offset computed by CC_Sparc64 includes all arguments.
1153  // Called functions expect 6 argument words to exist in the stack frame, used
1154  // or not.
1155  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
1156 
1157  // Keep stack frames 16-byte aligned.
1158  ArgsSize = alignTo(ArgsSize, 16);
1159 
1160  // Varargs calls require special treatment.
1161  if (CLI.IsVarArg)
1162  fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1163 
1164  // Adjust the stack pointer to make room for the arguments.
1165  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1166  // with more than 6 arguments.
1167  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1168 
1169  // Collect the set of registers to pass to the function and their values.
1170  // This will be emitted as a sequence of CopyToReg nodes glued to the call
1171  // instruction.
1173 
1174  // Collect chains from all the memory opeations that copy arguments to the
1175  // stack. They must follow the stack pointer adjustment above and precede the
1176  // call instruction itself.
1177  SmallVector<SDValue, 8> MemOpChains;
1178 
1179  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1180  const CCValAssign &VA = ArgLocs[i];
1181  SDValue Arg = CLI.OutVals[i];
1182 
1183  // Promote the value if needed.
1184  switch (VA.getLocInfo()) {
1185  default:
1186  llvm_unreachable("Unknown location info!");
1187  case CCValAssign::Full:
1188  break;
1189  case CCValAssign::SExt:
1190  Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1191  break;
1192  case CCValAssign::ZExt:
1193  Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1194  break;
1195  case CCValAssign::AExt:
1196  Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1197  break;
1198  case CCValAssign::BCvt:
1199  // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1200  // SPARC does not support i128 natively. Lower it into two i64, see below.
1201  if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1202  || VA.getLocVT() != MVT::i128)
1203  Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1204  break;
1205  }
1206 
1207  if (VA.isRegLoc()) {
1208  if (VA.needsCustom() && VA.getValVT() == MVT::f128
1209  && VA.getLocVT() == MVT::i128) {
1210  // Store and reload into the integer register reg and reg+1.
1211  unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1212  unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1213  SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1214  SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1215  HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1216  SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1217  LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1218 
1219  // Store to %sp+BIAS+128+Offset
1220  SDValue Store =
1221  DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1222  // Load into Reg and Reg+1
1223  SDValue Hi64 =
1224  DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1225  SDValue Lo64 =
1226  DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1227  RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
1228  Hi64));
1229  RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
1230  Lo64));
1231  continue;
1232  }
1233 
1234  // The custom bit on an i32 return value indicates that it should be
1235  // passed in the high bits of the register.
1236  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1237  Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1238  DAG.getConstant(32, DL, MVT::i32));
1239 
1240  // The next value may go in the low bits of the same register.
1241  // Handle both at once.
1242  if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1243  ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1245  CLI.OutVals[i+1]);
1246  Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1247  // Skip the next value, it's already done.
1248  ++i;
1249  }
1250  }
1251  RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
1252  continue;
1253  }
1254 
1255  assert(VA.isMemLoc());
1256 
1257  // Create a store off the stack pointer for this argument.
1258  SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1259  // The argument area starts at %fp+BIAS+128 in the callee frame,
1260  // %sp+BIAS+128 in ours.
1261  SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1262  Subtarget->getStackPointerBias() +
1263  128, DL);
1264  PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1265  MemOpChains.push_back(
1266  DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1267  }
1268 
1269  // Emit all stores, make sure they occur before the call.
1270  if (!MemOpChains.empty())
1271  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1272 
1273  // Build a sequence of CopyToReg nodes glued together with token chain and
1274  // glue operands which copy the outgoing args into registers. The InGlue is
1275  // necessary since all emitted instructions must be stuck together in order
1276  // to pass the live physical registers.
1277  SDValue InGlue;
1278  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1279  Chain = DAG.getCopyToReg(Chain, DL,
1280  RegsToPass[i].first, RegsToPass[i].second, InGlue);
1281  InGlue = Chain.getValue(1);
1282  }
1283 
1284  // If the callee is a GlobalAddress node (quite common, every direct call is)
1285  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1286  // Likewise ExternalSymbol -> TargetExternalSymbol.
1287  SDValue Callee = CLI.Callee;
1288  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
1290  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1291  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
1292  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1293  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
1294 
1295  // Build the operands for the call instruction itself.
1297  Ops.push_back(Chain);
1298  Ops.push_back(Callee);
1299  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1300  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1301  RegsToPass[i].second.getValueType()));
1302 
1303  // Add a register mask operand representing the call-preserved registers.
1304  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1305  const uint32_t *Mask =
1306  ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1308  CLI.CallConv));
1309  assert(Mask && "Missing call preserved mask for calling convention");
1310  Ops.push_back(DAG.getRegisterMask(Mask));
1311 
1312  // Make sure the CopyToReg nodes are glued to the call instruction which
1313  // consumes the registers.
1314  if (InGlue.getNode())
1315  Ops.push_back(InGlue);
1316 
1317  // Now the call itself.
1318  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1319  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1320  InGlue = Chain.getValue(1);
1321 
1322  // Revert the stack pointer immediately after the call.
1323  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
1324  DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
1325  InGlue = Chain.getValue(1);
1326 
1327  // Now extract the return values. This is more or less the same as
1328  // LowerFormalArguments_64.
1329 
1330  // Assign locations to each value returned by this call.
1332  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1333  *DAG.getContext());
1334 
1335  // Set inreg flag manually for codegen generated library calls that
1336  // return float.
1337  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CS)
1338  CLI.Ins[0].Flags.setInReg();
1339 
1340  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1341 
1342  // Copy all of the result registers out of their specified physreg.
1343  for (unsigned i = 0; i != RVLocs.size(); ++i) {
1344  CCValAssign &VA = RVLocs[i];
1345  unsigned Reg = toCallerWindow(VA.getLocReg());
1346 
1347  // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1348  // reside in the same register in the high and low bits. Reuse the
1349  // CopyFromReg previous node to avoid duplicate copies.
1350  SDValue RV;
1351  if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1352  if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1353  RV = Chain.getValue(0);
1354 
1355  // But usually we'll create a new CopyFromReg for a different register.
1356  if (!RV.getNode()) {
1357  RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1358  Chain = RV.getValue(1);
1359  InGlue = Chain.getValue(2);
1360  }
1361 
1362  // Get the high bits for i32 struct elements.
1363  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1364  RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1365  DAG.getConstant(32, DL, MVT::i32));
1366 
1367  // The callee promoted the return value, so insert an Assert?ext SDNode so
1368  // we won't promote the value again in this function.
1369  switch (VA.getLocInfo()) {
1370  case CCValAssign::SExt:
1371  RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1372  DAG.getValueType(VA.getValVT()));
1373  break;
1374  case CCValAssign::ZExt:
1375  RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1376  DAG.getValueType(VA.getValVT()));
1377  break;
1378  default:
1379  break;
1380  }
1381 
1382  // Truncate the register down to the return value type.
1383  if (VA.isExtInLoc())
1384  RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1385 
1386  InVals.push_back(RV);
1387  }
1388 
1389  return Chain;
1390 }
1391 
1392 //===----------------------------------------------------------------------===//
1393 // TargetLowering Implementation
1394 //===----------------------------------------------------------------------===//
1395 
1397  if (AI->getOperation() == AtomicRMWInst::Xchg &&
1398  AI->getType()->getPrimitiveSizeInBits() == 32)
1399  return AtomicExpansionKind::None; // Uses xchg instruction
1400 
1402 }
1403 
1404 /// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1405 /// condition.
1407  switch (CC) {
1408  default: llvm_unreachable("Unknown integer condition code!");
1409  case ISD::SETEQ: return SPCC::ICC_E;
1410  case ISD::SETNE: return SPCC::ICC_NE;
1411  case ISD::SETLT: return SPCC::ICC_L;
1412  case ISD::SETGT: return SPCC::ICC_G;
1413  case ISD::SETLE: return SPCC::ICC_LE;
1414  case ISD::SETGE: return SPCC::ICC_GE;
1415  case ISD::SETULT: return SPCC::ICC_CS;
1416  case ISD::SETULE: return SPCC::ICC_LEU;
1417  case ISD::SETUGT: return SPCC::ICC_GU;
1418  case ISD::SETUGE: return SPCC::ICC_CC;
1419  }
1420 }
1421 
1422 /// FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC
1423 /// FCC condition.
1425  switch (CC) {
1426  default: llvm_unreachable("Unknown fp condition code!");
1427  case ISD::SETEQ:
1428  case ISD::SETOEQ: return SPCC::FCC_E;
1429  case ISD::SETNE:
1430  case ISD::SETUNE: return SPCC::FCC_NE;
1431  case ISD::SETLT:
1432  case ISD::SETOLT: return SPCC::FCC_L;
1433  case ISD::SETGT:
1434  case ISD::SETOGT: return SPCC::FCC_G;
1435  case ISD::SETLE:
1436  case ISD::SETOLE: return SPCC::FCC_LE;
1437  case ISD::SETGE:
1438  case ISD::SETOGE: return SPCC::FCC_GE;
1439  case ISD::SETULT: return SPCC::FCC_UL;
1440  case ISD::SETULE: return SPCC::FCC_ULE;
1441  case ISD::SETUGT: return SPCC::FCC_UG;
1442  case ISD::SETUGE: return SPCC::FCC_UGE;
1443  case ISD::SETUO: return SPCC::FCC_U;
1444  case ISD::SETO: return SPCC::FCC_O;
1445  case ISD::SETONE: return SPCC::FCC_LG;
1446  case ISD::SETUEQ: return SPCC::FCC_UE;
1447  }
1448 }
1449 
// NOTE(review): SparcTargetLowering constructor. The opening declarator
// (upstream line 1450) is one of MANY lines this HTML extraction dropped:
// the numbering embedded in each line jumps repeatedly (1459->1462,
// 1479->1481, 1483->1487, 1495->1500, ...), and every jump marks one or more
// dropped lines -- mostly setOperationAction / setLoadExtAction /
// setTruncStoreAction calls that doxygen rendered as pure hyperlinks. This
// text is NOT compilable as-is; recover the dropped lines from the upstream
// file before changing anything here.
1451  const SparcSubtarget &STI)
1452  : TargetLowering(TM), Subtarget(&STI) {
1453  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());
1454 
1455  // Instructions which use registers as conditionals examine all the
1456  // bits (as does the pseudo SELECT_CC expansion). I don't think it
1457  // matters much whether it's ZeroOrOneBooleanContent, or
1458  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1459  // former.
// NOTE(review): lines 1460-1461 (presumably the setBooleanContents calls the
// comment above refers to) were dropped by the extraction.
1462 
1463  // Set up the register classes.
1464  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1465  if (!Subtarget->useSoftFloat()) {
1466  addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1467  addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1468  addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1469  }
1470  if (Subtarget->is64Bit()) {
1471  addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1472  } else {
1473  // On 32bit sparc, we define a double-register 32bit register
1474  // class, as well. This is modeled in LLVM as a 2-vector of i32.
1475  addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1476 
1477  // ...but almost all operations must be expanded, so set that as
1478  // the default.
1479  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
// NOTE(review): the loop body (line 1480) was dropped -- presumably the
// setOperationAction(Op, MVT::v2i32, Expand) call the comment describes.
1481  }
1482  // Truncating/extending stores/loads are also not supported.
1483  for (MVT VT : MVT::integer_vector_valuetypes()) {
1487 
1491 
1494  }
1495  // However, load and store *are* legal.
1500 
1501  // And we need to promote i64 loads/stores into vector load/store
1504 
1505  // Sadly, this doesn't work:
1506  // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1507  // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1508  }
1509 
1510  // Turn FP extload into load/fpextend
1511  for (MVT VT : MVT::fp_valuetypes()) {
1514  }
1515 
1516  // Sparc doesn't have i1 sign extending load
1517  for (MVT VT : MVT::integer_valuetypes())
1519 
1520  // Turn FP truncstore into trunc + store.
1524 
1525  // Custom legalize GlobalAddress nodes into LO/HI parts.
1530 
1531  // Sparc doesn't have sext_inreg, replace them with shl/sra
1535 
1536  // Sparc has no REM or DIVREM operations.
1541 
1542  // ... nor does SparcV9.
1543  if (Subtarget->is64Bit()) {
1548  }
1549 
1550  // Custom expand fp<->sint
1555 
1556  // Custom Expand fp<->uint
1561 
1564 
1565  // Sparc has no select or setcc: expand to SELECT_CC.
1570 
1575 
1576  // Sparc doesn't have BRCOND either, it has BR_CC.
1584 
1589 
1592 
1593  if (Subtarget->is64Bit()) {
1604 
1606  Subtarget->usePopc() ? Legal : Expand);
1613  }
1614 
1615  // ATOMICs.
1616  // Atomics are supported on SparcV9. 32-bit atomics are also
1617  // supported by some Leon SparcV8 variants. Otherwise, atomics
1618  // are unsupported.
1619  if (Subtarget->isV9())
// NOTE(review): the bodies of all three branches below (lines 1620, 1622,
// 1624) were dropped by the extraction.
1621  else if (Subtarget->hasLeonCasa())
1623  else
1625 
1627 
1629 
1631 
1632  // Custom Lower Atomic LOAD/STORE
1635 
1636  if (Subtarget->is64Bit()) {
1641  }
1642 
1643  if (!Subtarget->is64Bit()) {
1644  // These libcalls are not available in 32-bit.
1645  setLibcallName(RTLIB::SHL_I128, nullptr);
1646  setLibcallName(RTLIB::SRL_I128, nullptr);
1647  setLibcallName(RTLIB::SRA_I128, nullptr);
1648  }
1649 
1650  if (!Subtarget->isV9()) {
1651  // SparcV8 does not have FNEGD and FABSD.
1654  }
1655 
1682 
1686 
1687  // Expands to [SU]MUL_LOHI.
1691 
1692  if (Subtarget->useSoftMulDiv()) {
1693  // .umul works for both signed and unsigned
1696  setLibcallName(RTLIB::MUL_I32, ".umul");
1697 
1699  setLibcallName(RTLIB::SDIV_I32, ".div");
1700 
1702  setLibcallName(RTLIB::UDIV_I32, ".udiv");
1703  }
1704 
1705  if (Subtarget->is64Bit()) {
1710 
1713 
1717  }
1718 
1719  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1721  // VAARG needs to be lowered to not do unaligned accesses for doubles.
1723 
1725 
1726  // Use the default implementation.
1732 
1734 
1736  Subtarget->usePopc() ? Legal : Expand);
1737 
1738  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1741  } else {
1744  }
1745 
1746  if (Subtarget->hasHardQuad()) {
1754  if (Subtarget->isV9()) {
1757  } else {
1760  }
1761 
1762  if (!Subtarget->is64Bit()) {
1763  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1764  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1765  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1766  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1767  }
1768 
1769  } else {
1770  // Custom legalize f128 operations.
1771 
1779 
1783 
1784  // Setup Runtime library names.
1785  if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1786  setLibcallName(RTLIB::ADD_F128, "_Qp_add");
1787  setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
1788  setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
1789  setLibcallName(RTLIB::DIV_F128, "_Qp_div");
1790  setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1791  setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1792  setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
1793  setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1794  setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
1795  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
1796  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
1797  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
1798  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
1799  setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1800  setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1801  setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1802  setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1803  } else if (!Subtarget->useSoftFloat()) {
1804  setLibcallName(RTLIB::ADD_F128, "_Q_add");
1805  setLibcallName(RTLIB::SUB_F128, "_Q_sub");
1806  setLibcallName(RTLIB::MUL_F128, "_Q_mul");
1807  setLibcallName(RTLIB::DIV_F128, "_Q_div");
1808  setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1809  setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1810  setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
1811  setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1812  setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
1813  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1814  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1815  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1816  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1817  setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1818  setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1819  setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
1820  setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
1821  }
1822  }
1823 
1824  if (Subtarget->fixAllFDIVSQRT()) {
1825  // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1826  // the former instructions generate errata on LEON processors.
1829  }
1830 
1831  if (Subtarget->hasNoFMULS()) {
1833  }
1834 
1836 
1838 
1840 }
1841 
1843  return Subtarget->useSoftFloat();
1844 }
1845 
1846 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1847  switch ((SPISD::NodeType)Opcode) {
1848  case SPISD::FIRST_NUMBER: break;
1849  case SPISD::CMPICC: return "SPISD::CMPICC";
1850  case SPISD::CMPFCC: return "SPISD::CMPFCC";
1851  case SPISD::BRICC: return "SPISD::BRICC";
1852  case SPISD::BRXCC: return "SPISD::BRXCC";
1853  case SPISD::BRFCC: return "SPISD::BRFCC";
1854  case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1855  case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1856  case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1857  case SPISD::EH_SJLJ_SETJMP: return "SPISD::EH_SJLJ_SETJMP";
1858  case SPISD::EH_SJLJ_LONGJMP: return "SPISD::EH_SJLJ_LONGJMP";
1859  case SPISD::Hi: return "SPISD::Hi";
1860  case SPISD::Lo: return "SPISD::Lo";
1861  case SPISD::FTOI: return "SPISD::FTOI";
1862  case SPISD::ITOF: return "SPISD::ITOF";
1863  case SPISD::FTOX: return "SPISD::FTOX";
1864  case SPISD::XTOF: return "SPISD::XTOF";
1865  case SPISD::CALL: return "SPISD::CALL";
1866  case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
1867  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1868  case SPISD::FLUSHW: return "SPISD::FLUSHW";
1869  case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1870  case SPISD::TLS_LD: return "SPISD::TLS_LD";
1871  case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
1872  }
1873  return nullptr;
1874 }
1875 
1877  EVT VT) const {
1878  if (!VT.isVector())
1879  return MVT::i32;
1881 }
1882 
1883 /// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
1884 /// be zero. Op is expected to be a target specific node. Used by DAG
1885 /// combiner.
1887  (const SDValue Op,
1888  KnownBits &Known,
1889  const APInt &DemandedElts,
1890  const SelectionDAG &DAG,
1891  unsigned Depth) const {
1892  KnownBits Known2;
1893  Known.resetAll();
1894 
1895  switch (Op.getOpcode()) {
1896  default: break;
1897  case SPISD::SELECT_ICC:
1898  case SPISD::SELECT_XCC:
1899  case SPISD::SELECT_FCC:
1900  DAG.computeKnownBits(Op.getOperand(1), Known, Depth+1);
1901  DAG.computeKnownBits(Op.getOperand(0), Known2, Depth+1);
1902 
1903  // Only known if known in both the LHS and RHS.
1904  Known.One &= Known2.One;
1905  Known.Zero &= Known2.Zero;
1906  break;
1907  }
1908 }
1909 
1910 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
1911 // set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
1912 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
1913  ISD::CondCode CC, unsigned &SPCC) {
1914  if (isNullConstant(RHS) &&
1915  CC == ISD::SETNE &&
1916  (((LHS.getOpcode() == SPISD::SELECT_ICC ||
1917  LHS.getOpcode() == SPISD::SELECT_XCC) &&
1918  LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
1919  (LHS.getOpcode() == SPISD::SELECT_FCC &&
1920  LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
1921  isOneConstant(LHS.getOperand(0)) &&
1922  isNullConstant(LHS.getOperand(1))) {
1923  SDValue CMPCC = LHS.getOperand(3);
1924  SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
1925  LHS = CMPCC.getOperand(0);
1926  RHS = CMPCC.getOperand(1);
1927  }
1928 }
1929 
1930 // Convert to a target node and set target flags.
1932  SelectionDAG &DAG) const {
1933  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1934  return DAG.getTargetGlobalAddress(GA->getGlobal(),
1935  SDLoc(GA),
1936  GA->getValueType(0),
1937  GA->getOffset(), TF);
1938 
1939  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
1940  return DAG.getTargetConstantPool(CP->getConstVal(),
1941  CP->getValueType(0),
1942  CP->getAlignment(),
1943  CP->getOffset(), TF);
1944 
1945  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
1946  return DAG.getTargetBlockAddress(BA->getBlockAddress(),
1947  Op.getValueType(),
1948  0,
1949  TF);
1950 
1951  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
1952  return DAG.getTargetExternalSymbol(ES->getSymbol(),
1953  ES->getValueType(0), TF);
1954 
1955  llvm_unreachable("Unhandled address SDNode");
1956 }
1957 
// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
// HiTF/LoTF select the target flags (e.g. %hi/%lo-style relocations) used
// for the two halves.
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view; the visible body is reproduced unchanged.
                                         unsigned HiTF, unsigned LoTF,
                                         SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}
1969 
// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
// NOTE(review): several lines of this function are missing from this
// extracted view (the signature, the makeHiLoPair/HiLo construction with
// its SparcMCExpr::VK_* flags, the MFI declaration, the GOT pointer-info
// argument, and the abs32/abs44/abs64 flag selections). Do not treat this
// text as a complete definition.
  SDLoc DL(Op);
  EVT VT = getPointerTy(DAG.getDataLayout());

  // Handle PIC mode first. SPARC needs a got load for every variable!
  if (isPositionIndependent()) {
    // This is the pic32 code model, the GOT is known to be smaller than 4GB.
    SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo);
    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MFI.setHasCalls(true);
    // The actual address is loaded out of the GOT.
    return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
  }

  // This is one of the absolute code models.
  switch(getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
    // abs32.
  case CodeModel::Medium: {
    // abs44: a 44-bit absolute address built from a shifted high part plus
    // a low part.
    H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
    L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
    return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
  }
  case CodeModel::Large: {
    // abs64: full 64-bit address, upper half shifted into place.
    Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
    return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
  }
  }
}
2019 
// Three thin lowering wrappers (presumably LowerGlobalAddress,
// LowerConstantPool and LowerBlockAddress — TODO confirm: their signature
// lines are missing from this extracted view). Each simply delegates to
// makeAddress().
                                                SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

                                                SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

                                                SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}
2034 
// Lower a TLS global address for the four TLS models (General/Local
// Dynamic, Initial Exec, Local Exec), or fall back to the emulated-TLS
// lowering when requested.
// NOTE(review): many lines are missing from this extracted view (the
// signature, the TLS-model query, and the SparcMCExpr::VK_Sparc_TLS_* flag
// selections feeding the Hi/Lo/add/call flags). Do not treat this text as a
// complete definition.
                                                SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().Options.EmulatedTLS)
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc DL(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());


  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
    // Pick GD- or LD-specific relocation flags for each piece of the
    // sequence (flag expressions lost in extraction).
    unsigned HiTF = ((model == TLSModel::GeneralDynamic)
    unsigned LoTF = ((model == TLSModel::GeneralDynamic)
    unsigned addTF = ((model == TLSModel::GeneralDynamic)
    unsigned callTF = ((model == TLSModel::GeneralDynamic)

    // Build the argument for __tls_get_addr: GOT base plus the flagged
    // Hi/Lo pair, combined with a TLS_ADD carrying the add-relocation flag.
    SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
    SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
                                   withTargetFlags(Op, addTF, DAG));

    // Emit an explicit call sequence to __tls_get_addr with the argument
    // in %o0; the result also comes back in %o0.
    SDValue Chain = DAG.getEntryNode();
    SDValue InFlag;

    Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
    Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
    InFlag = Chain.getValue(1);
    SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
    SDValue Symbol = withTargetFlags(Op, callTF, DAG);

    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
    assert(Mask && "Missing call preserved mask for calling convention");
    SDValue Ops[] = {Chain,
                     Callee,
                     Symbol,
                     DAG.getRegister(SP::O0, PtrVT),
                     DAG.getRegisterMask(Mask),
                     InFlag};
    Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
    InFlag = Chain.getValue(1);
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
                               DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
    InFlag = Chain.getValue(1);
    SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);

    // General Dynamic: the call result is the final address.
    if (model != TLSModel::LocalDynamic)
      return Ret;

    // Local Dynamic: add the module-relative Hi/Lo offset to the call
    // result (flag operands lost in extraction).
    SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
    SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
    HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
  }

  if (model == TLSModel::InitialExec) {
    // 64-bit uses the LDX form of the IE load (32-bit alternative lost in
    // extraction).
    unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX

    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);

    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MFI.setHasCalls(true);

    // Load the TP-relative offset out of the GOT, then add it to the
    // thread pointer (%g7).
    SDValue TGA = makeHiLoPair(Op,
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
                              DL, PtrVT, Ptr,
                              withTargetFlags(Op, ldTF, DAG));
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
                       DAG.getRegister(SP::G7, PtrVT), Offset,
                       withTargetFlags(Op,
  }

  // Local Exec: the offset is a link-time constant; add it directly to the
  // thread pointer (%g7).
  assert(model == TLSModel::LocalExec);
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);

  return DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getRegister(SP::G7, PtrVT), Offset);
}
2139 
// Prepare one argument for an f128 soft-float library call and append it to
// Args. f128 values cannot be passed directly: they are spilled to a 16-byte
// stack slot and passed by pointer; all other types are passed as-is.
// Returns the (possibly updated) chain.
// NOTE(review): the signature lines and the MachineFrameInfo declaration
// are missing from this extracted view.
                                     const SDLoc &DL,
                                     SelectionDAG &DAG) const {
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty = ArgTy;

  if (ArgTy->isFP128Ty()) {
    // Create a stack object and pass the pointer to the library function.
    int FI = MFI.CreateStackObject(16, 8, false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
                         /* Alignment = */ 8);

    // Replace the value argument with the pointer to the spill slot.
    Entry.Node = FIPtr;
    Entry.Ty = PointerType::getUnqual(ArgTy);
  }
  Args.push_back(Entry);
  return Chain;
}
2165 
SDValue
// Lower an f128 operation by calling the named soft-float library function
// with the node's first numArgs operands. An f128 return value is passed
// indirectly: a 16-byte stack slot is allocated, passed as a (32-bit: sret)
// pointer argument, and loaded back after the call.
// NOTE(review): the signature line, the MachineFrameInfo declaration and
// the CallLoweringInfo declaration are missing from this extracted view.
                                const char *LibFuncName,
                                unsigned numArgs) const {

  ArgListTy Args;

  auto PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
  Type *RetTyABI = RetTy;
  SDValue Chain = DAG.getEntryNode();
  SDValue RetPtr;

  if (RetTy->isFP128Ty()) {
    // Create a Stack Object to receive the return value of type f128.
    ArgListEntry Entry;
    int RetFI = MFI.CreateStackObject(16, 8, false);
    RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
    Entry.Node = RetPtr;
    Entry.Ty = PointerType::getUnqual(RetTy);
    if (!Subtarget->is64Bit())
      Entry.IsSRet = true;   // 32-bit ABI passes the result slot as sret.
    Entry.IsReturned = false;
    Args.push_back(Entry);
    // The call itself then returns void; the value comes back via RetPtr.
    RetTyABI = Type::getVoidTy(*DAG.getContext());
  }

  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
  for (unsigned i = 0, e = numArgs; i != e; ++i) {
    Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
  }
  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
    .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // chain is in second result.
  if (RetTyABI == RetTy)
    return CallInfo.first;

  assert (RetTy->isFP128Ty() && "Unexpected return type!");

  Chain = CallInfo.second;

  // Load RetPtr to get the return value.
  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
                     MachinePointerInfo(), /* Alignment = */ 8);
}
2218 
// Lower an f128 comparison to a soft-float library call and translate the
// integer result back into a CMPICC against a constant, updating SPCC to
// the integer condition the caller should branch/select on.
// NOTE(review): the signature line and the CallLoweringInfo declaration are
// missing from this extracted view. The unordered cases below test bit
// masks of the _Q_cmp/_Qp_cmp result — presumably 0/1/2/3 mean
// eq/lt/gt/unordered; confirm against the SPARC software FP ABI.
                   unsigned &SPCC, const SDLoc &DL,
                   SelectionDAG &DAG) const {

  const char *LibCall = nullptr;
  bool is64Bit = Subtarget->is64Bit();
  // Ordered predicates have dedicated boolean-returning libcalls; all
  // unordered-involving predicates share the three-way compare call.
  switch(SPCC) {
  default: llvm_unreachable("Unhandled conditional code!");
  case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
  case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
  case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
  case SPCC::FCC_UL :
  case SPCC::FCC_ULE:
  case SPCC::FCC_UG :
  case SPCC::FCC_UGE:
  case SPCC::FCC_U :
  case SPCC::FCC_O :
  case SPCC::FCC_LG :
  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
  ArgListTy Args;
  SDValue Chain = DAG.getEntryNode();
  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);

  CLI.setDebugLoc(DL).setChain(Chain)
    .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // result is in first, and chain is in second result.
  SDValue Result = CallInfo.first;

  // Translate the integer libcall result into a CMPICC + integer condition.
  switch(SPCC) {
  default: {
    // Boolean libcalls: nonzero means the predicate held.
    SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UL : {
    SDValue Mask = DAG.getTargetConstant(1, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_ULE: {
    SDValue RHS = DAG.getTargetConstant(2, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UG : {
    SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_G;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UGE: {
    SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }

  case SPCC::FCC_U : {
    SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_O : {
    SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_LG : {
    SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UE : {
    SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  }
}
2315 
static SDValue
// Lower an fpextend to f128 via the appropriate RTLIB extension libcall,
// based on the source type (f64 or f32).
// NOTE(review): the line naming this function and its first parameters is
// missing from this extracted view.
                    const SparcTargetLowering &TLI) {

  if (Op.getOperand(0).getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);

  if (Op.getOperand(0).getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);

  llvm_unreachable("fpextend with non-float operand!");
  return SDValue();
}
2331 
static SDValue
// Lower an fpround from f128 via the appropriate RTLIB rounding libcall,
// based on the destination type (f64 or f32). Rounds not involving f128
// are already legal and returned untouched.
// NOTE(review): the line naming this function and its first parameters is
// missing from this extracted view.
                    const SparcTargetLowering &TLI) {
  // FP_ROUND on f64 and f32 are legal.
  if (Op.getOperand(0).getValueType() != MVT::f128)
    return Op;

  if (Op.getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
  if (Op.getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);

  llvm_unreachable("fpround to non-float!");
  return SDValue();
}
2349 
// Lower FP_TO_SINT: f128 sources (without usable hard-quad support) become
// RTLIB calls; otherwise convert in an FP register via FTOI/FTOX and
// bitcast the result to the integer type.
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  assert(VT == MVT::i32 || VT == MVT::i64);

  // Expand f128 operations to fp128 abi calls.
  if (Op.getOperand(0).getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
    const char *libName = TLI.getLibcallName(VT == MVT::i32
                                             ? RTLIB::FPTOSINT_F128_I32
                                             : RTLIB::FPTOSINT_F128_I64);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the resulting type is illegal.
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Otherwise, Convert the fp value to integer in an FP register.
  if (VT == MVT::i32)
    Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
  else
    Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));

  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
2378 
// Lower SINT_TO_FP: f128 results (without usable hard-quad support) become
// RTLIB calls; otherwise bitcast the integer into an FP register and
// convert with ITOF/XTOF.
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));

  // The FP register type matching the integer operand's width.
  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;

  // Expand f128 operations to fp128 ABI calls.
  if (Op.getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
    const char *libName = TLI.getLibcallName(OpVT == MVT::i32
                                             ? RTLIB::SINTTOFP_I32_F128
                                             : RTLIB::SINTTOFP_I64_F128);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the operand type is illegal.
  if (!TLI.isTypeLegal(OpVT))
    return SDValue();

  // Otherwise, Convert the int value to FP in an FP register.
  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
}
2406 
// Lower FP_TO_UINT: only the f128-source case (without usable hard-quad
// support) is handled here, via an RTLIB call; everything else is left for
// generic expansion by returning SDValue().
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the resulting type is legal.
  if (Op.getOperand(0).getValueType() != MVT::f128 ||
      (hasHardQuad && TLI.isTypeLegal(VT)))
    return SDValue();

  assert(VT == MVT::i32 || VT == MVT::i64);

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(VT == MVT::i32
                                            ? RTLIB::FPTOUINT_F128_I32
                                            : RTLIB::FPTOUINT_F128_I64),
                         1);
}
2427 
// Lower UINT_TO_FP: only the f128-result case (without usable hard-quad
// support) is handled here, via an RTLIB call; everything else is left for
// generic expansion by returning SDValue().
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || OpVT == MVT::i64);

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the operand type is legal.
  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
    return SDValue();

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(OpVT == MVT::i32
                                            ? RTLIB::UINTTOFP_I32_F128
                                            : RTLIB::UINTTOFP_I64_F128),
                         1);
}
2446 
// Lower BR_CC to a Sparc conditional branch: compute the condition flags
// (CMPICC/CMPFCC, or an f128 compare libcall), then emit BRICC/BRXCC/BRFCC
// on the resulting glue with the Sparc condition code as an operand.
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
                          const SparcTargetLowering &TLI,
                          bool hasHardQuad) {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;    // ~0U: condition not yet determined.

  // If this is a br_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  // Get the condition flag.
  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
    // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
    Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      // f128 without hard quad: compare via libcall; the libcall converts
      // the FP condition into an integer condition, so branch on icc.
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::BRICC;
    } else {
      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      Opc = SPISD::BRFCC;
    }
  }
  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
2483 
// Lower SELECT_CC to a Sparc conditional move: compute the condition flags
// (CMPICC/CMPFCC, or an f128 compare libcall), then emit a SELECT_*CC node
// choosing between TrueVal and FalseVal.
// NOTE(review): the opening signature line and the integer-opcode selection
// line (the ?: alternatives after "MVT::i32 ?") are missing from this
// extracted view.
                              const SparcTargetLowering &TLI,
                              bool hasHardQuad) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;    // ~0U: condition not yet determined.

  // If this is a select_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    Opc = LHS.getValueType() == MVT::i32 ?
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      // f128 without hard quad: the compare libcall yields integer flags.
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::SELECT_ICC;
    } else {
      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
      Opc = SPISD::SELECT_FCC;
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
    }
  }
  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
2519 
// Lower the SjLj EH setjmp intrinsic to the target EH_SJLJ_SETJMP node
// (returns an i32 plus the chain).
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
                                                     const SparcTargetLowering &TLI) const {
  SDLoc DL(Op);
  return DAG.getNode(SPISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), Op.getOperand(1));

}
2527 
// Lower the SjLj EH longjmp intrinsic to the target EH_SJLJ_LONGJMP node
// (chain-only result).
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
                                                      const SparcTargetLowering &TLI) const {
  SDLoc DL(Op);
  return DAG.getNode(SPISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0), Op.getOperand(1));
}
2533 
// Lower VASTART: store the address of the varargs area (frame pointer %i6
// plus the function's varargs frame offset) into the va_list slot.
// NOTE(review): the opening signature line and the line declaring FuncInfo
// (source of getVarArgsFrameOffset) are missing from this extracted view.
                           const SparcTargetLowering &TLI) {
  MachineFunction &MF = DAG.getMachineFunction();
  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());

  // Need frame address to find the address of VarArgsFrameIndex.

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc DL(Op);
  SDValue Offset =
      DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
                  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
2553 
// Lower VAARG: load the current va_list pointer, bump it past one argument
// of the requested type, store it back, then load the argument itself.
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vaarg.
  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits()/8,
                                                      DL));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList.
  // We can't count on greater alignment than the word size.
  return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
                     std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8);
}
2576 
// Lower DYNAMIC_STACKALLOC: bump %sp down by the (possibly padded) size and
// return a pointer above the register spill area at the bottom of the new
// stack region. Over-aligned allocations are rejected with a fatal error.
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
                                     const SparcSubtarget *Subtarget) {
  SDValue Chain = Op.getOperand(0);  // Legalize the chain.
  SDValue Size  = Op.getOperand(1);  // Legalize the size.
  unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  unsigned StackAlign = Subtarget->getFrameLowering()->getStackAlignment();
  EVT VT = Size->getValueType(0);
  SDLoc dl(Op);

  // TODO: implement over-aligned alloca. (Note: also implies
  // supporting support for overaligned function frames + dynamic
  // allocations, at all, which currently isn't supported)
  if (Align > StackAlign) {
    const MachineFunction &MF = DAG.getMachineFunction();
    report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
                       "over-aligned dynamic alloca not supported.");
  }

  // The resultant pointer needs to be above the register spill area
  // at the bottom of the stack.
  unsigned regSpillArea;
  if (Subtarget->is64Bit()) {
    regSpillArea = 128;
  } else {
    // On Sparc32, the size of the spill area is 92. Unfortunately,
    // that's only 4-byte aligned, not 8-byte aligned (the stack
    // pointer is 8-byte aligned). So, if the user asked for an 8-byte
    // aligned dynamic allocation, we actually need to add 96 to the
    // bottom of the stack, instead of 92, to ensure 8-byte alignment.

    // That also means adding 4 to the size of the allocation --
    // before applying the 8-byte rounding. Unfortunately, we the
    // value we get here has already had rounding applied. So, we need
    // to add 8, instead, wasting a bit more memory.

    // Further, this only actually needs to be done if the required
    // alignment is > 4, but, we've lost that info by this point, too,
    // so we always apply it.

    // (An alternative approach would be to always reserve 96 bytes
    // instead of the required 92, but then we'd waste 4 extra bytes
    // in every frame, not just those with dynamic stack allocations)

    // TODO: modify code in SelectionDAGBuilder to make this less sad.

    Size = DAG.getNode(ISD::ADD, dl, VT, Size,
                       DAG.getConstant(8, dl, VT));
    regSpillArea = 96;
  }

  unsigned SPReg = SP::O6;
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain

  // On 64-bit, %sp is biased; account for it in the returned pointer.
  regSpillArea += Subtarget->getStackPointerBias();

  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
                               DAG.getConstant(regSpillArea, dl, VT));
  SDValue Ops[2] = { NewVal, Chain };
  return DAG.getMergeValues(Ops, dl);
}
2639 
2640 
// Emit a FLUSHW node (flush register windows to the stack) chained off the
// entry node, returning the new chain.
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
  SDLoc dl(Op);
  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
                              dl, MVT::Other, DAG.getEntryNode());
  return Chain;
}
2647 
// Compute the frame address `depth` frames up. Depth 0 reads %i6 directly
// (plus the 64-bit stack bias); deeper frames first flush the register
// windows, then chase saved frame pointers through the stack.
// NOTE(review): the line declaring MFI (between the signature and
// setFrameAddressIsTaken) is missing from this extracted view.
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
                            const SparcSubtarget *Subtarget) {
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned FrameReg = SP::I6;
  unsigned stackBias = Subtarget->getStackPointerBias();

  SDValue FrameAddr;

  if (depth == 0) {
    FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
    if (Subtarget->is64Bit())
      FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(stackBias, dl));
    return FrameAddr;
  }

  // flush first to make sure the windowed registers' values are in stack
  SDValue Chain = getFLUSHW(Op, DAG);
  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);

  // Offset of the saved frame pointer within a frame.
  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;

  while (depth--) {
    SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, dl));
    FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
  }
  if (Subtarget->is64Bit())
    FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                            DAG.getIntPtrConstant(stackBias, dl));
  return FrameAddr;
}
2684 
2685 
// Lower FRAMEADDR by delegating to getFRAMEADDR with the requested depth
// (constant operand 0).
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
                              const SparcSubtarget *Subtarget) {

  uint64_t depth = Op.getConstantOperandVal(0);

  return getFRAMEADDR(depth, Op, DAG, Subtarget);

}
2694 
// Lower RETURNADDR. Depth 0 reads the live-in return-address register %i7;
// deeper frames locate the caller's frame via getFRAMEADDR and load the
// saved return address from it.
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
                               const SparcTargetLowering &TLI,
                               const SparcSubtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue RetAddr;
  if (depth == 0) {
    auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
    return RetAddr;
  }

  // Need frame address to find return address of the caller.
  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget);

  // Offset of the saved return address within a frame.
  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
  SDValue Ptr = DAG.getNode(ISD::ADD,
                            dl, VT,
                            FrameAddr,
                            DAG.getIntPtrConstant(Offset, dl));
  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());

  return RetAddr;
}
2729 
2730 static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2731  unsigned opcode) {
2732  assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2733  assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2734 
2735  // Lower fneg/fabs on f64 to fneg/fabs on f32.
2736  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2737  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2738 
2739  // Note: in little-endian, the floating-point value is stored in the
2740  // registers are in the opposite order, so the subreg with the sign
2741  // bit is the highest-numbered (odd), rather than the
2742  // lowest-numbered (even).
2743 
2744  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2745  SrcReg64);
2746  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2747  SrcReg64);
2748 
2749  if (DAG.getDataLayout().isLittleEndian())
2750  Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2751  else
2752  Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2753 
2754  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2755  dl, MVT::f64), 0);
2756  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2757  DstReg64, Hi32);
2758  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2759  DstReg64, Lo32);
2760  return DstReg64;
2761 }
2762 
// Lower a f128 load into two f64 loads.
// The two halves are loaded at base and base+8 (alignment clamped to 8) and
// reassembled into an f128 via INSERT_SUBREG; the result chain is a
// TokenFactor of both load chains.
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
{
  SDLoc dl(Op);
  LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
  assert(LdNode && LdNode->getOffset().isUndef()
         && "Unexpected node type");

  // An f128 slot can only guarantee 8-byte alignment for each f64 half.
  unsigned alignment = LdNode->getAlignment();
  if (alignment > 8)
    alignment = 8;

  SDValue Hi64 =
      DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
                  LdNode->getPointerInfo(), alignment);
  EVT addrVT = LdNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              LdNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
                             LdNode->getPointerInfo(), alignment);

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  // Build the f128 by inserting both halves into an IMPLICIT_DEF.
  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                       dl, MVT::f128);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Hi64,
                               SubRegEven);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Lo64,
                               SubRegOdd);
  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
                           SDValue(Lo64.getNode(), 1) };
  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
  return DAG.getMergeValues(Ops, dl);
}
2806 
// Custom LOAD lowering: only f128 loads need splitting; everything else is
// returned unchanged.
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
{
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());

  EVT MemVT = LdNode->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Load(Op, DAG);

  return Op;
}
2817 
// Lower a f128 store into two f64 stores.
// The even/odd f64 subregs of the value are stored at base and base+8
// (alignment clamped to 8); the result is a TokenFactor of both chains.
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
  SDLoc dl(Op);
  StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
  assert(StNode && StNode->getOffset().isUndef()
         && "Unexpected node type");
  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegEven);
  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegOdd);

  // An f128 slot can only guarantee 8-byte alignment for each f64 half.
  unsigned alignment = StNode->getAlignment();
  if (alignment > 8)
    alignment = 8;

  SDValue OutChains[2];
  OutChains[0] =
      DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
                   StNode->getBasePtr(), MachinePointerInfo(), alignment);
  EVT addrVT = StNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              StNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
                              MachinePointerInfo(), alignment);
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
2854 
// Custom STORE lowering: f128 stores are split into two f64 stores; i64
// stores become a bitcast-to-v2i32 store; everything else falls through to
// generic handling (SDValue()).
// NOTE(review): the opening signature line of this definition is missing
// from this extracted view.
{
  SDLoc dl(Op);
  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());

  EVT MemVT = St->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Store(Op, DAG);

  if (MemVT == MVT::i64) {
    // Custom handling for i64 stores: turn it into a bitcast and a
    // v2i32 store.
    SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
    SDValue Chain = DAG.getStore(
        St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
        St->getAlignment(), St->getMemOperand()->getFlags(), St->getAAInfo());
    return Chain;
  }

  return SDValue();
}
2876 
2877 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
2878  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
2879  && "invalid opcode");
2880 
2881  SDLoc dl(Op);
2882 
2883  if (Op.getValueType() == MVT::f64)
2884  return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
2885  if (Op.getValueType() != MVT::f128)
2886  return Op;
2887 
2888  // Lower fabs/fneg on f128 to fabs/fneg on f64
2889  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
2890  // (As with LowerF64Op, on little-endian, we need to negate the odd
2891  // subreg)
2892 
2893  SDValue SrcReg128 = Op.getOperand(0);
2894  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
2895  SrcReg128);
2896  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
2897  SrcReg128);
2898 
2899  if (DAG.getDataLayout().isLittleEndian()) {
2900  if (isV9)
2901  Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
2902  else
2903  Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
2904  } else {
2905  if (isV9)
2906  Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
2907  else
2908  Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
2909  }
2910 
2911  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2912  dl, MVT::f128), 0);
2913  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
2914  DstReg128, Hi64);
2915  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
2916  DstReg128, Lo64);
2917  return DstReg128;
2918 }
2919 
2921 
2922  if (Op.getValueType() != MVT::i64)
2923  return Op;
2924 
2925  SDLoc dl(Op);
2926  SDValue Src1 = Op.getOperand(0);
2927  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
2928  SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
2929  DAG.getConstant(32, dl, MVT::i64));
2930  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
2931 
2932  SDValue Src2 = Op.getOperand(1);
2933  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
2934  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
2935  DAG.getConstant(32, dl, MVT::i64));
2936  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
2937 
2938 
2939  bool hasChain = false;
2940  unsigned hiOpc = Op.getOpcode();
2941  switch (Op.getOpcode()) {
2942  default: llvm_unreachable("Invalid opcode");
2943  case ISD::ADDC: hiOpc = ISD::ADDE; break;
2944  case ISD::ADDE: hasChain = true; break;
2945  case ISD::SUBC: hiOpc = ISD::SUBE; break;
2946  case ISD::SUBE: hasChain = true; break;
2947  }
2948  SDValue Lo;
2949  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
2950  if (hasChain) {
2951  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
2952  Op.getOperand(2));
2953  } else {
2954  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
2955  }
2956  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
2957  SDValue Carry = Hi.getValue(1);
2958 
2959  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
2960  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
2961  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
2962  DAG.getConstant(32, dl, MVT::i64));
2963 
2964  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
2965  SDValue Ops[2] = { Dst, Carry };
2966  return DAG.getMergeValues(Ops, dl);
2967 }
2968 
2969 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
2970 // in LegalizeDAG.cpp except the order of arguments to the library function.
2972  const SparcTargetLowering &TLI)
2973 {
2974  unsigned opcode = Op.getOpcode();
2975  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
2976 
2977  bool isSigned = (opcode == ISD::SMULO);
2978  EVT VT = MVT::i64;
2979  EVT WideVT = MVT::i128;
2980  SDLoc dl(Op);
2981  SDValue LHS = Op.getOperand(0);
2982 
2983  if (LHS.getValueType() != VT)
2984  return Op;
2985 
2986  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
2987 
2988  SDValue RHS = Op.getOperand(1);
2989  SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
2990  SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
2991  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
2992 
2993  SDValue MulResult = TLI.makeLibCall(DAG,
2994  RTLIB::MUL_I128, WideVT,
2995  Args, isSigned, dl).first;
2996  SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
2997  MulResult, DAG.getIntPtrConstant(0, dl));
2998  SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
2999  MulResult, DAG.getIntPtrConstant(1, dl));
3000  if (isSigned) {
3001  SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
3002  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
3003  } else {
3004  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
3005  ISD::SETNE);
3006  }
3007  // MulResult is a node with an illegal type. Because such things are not
3008  // generally permitted during this phase of legalization, ensure that
3009  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
3010  // been folded.
3011  assert(MulResult->use_empty() && "Illegally typed node still in use!");
3012 
3013  SDValue Ops[2] = { BottomHalf, TopHalf } ;
3014  return DAG.getMergeValues(Ops, dl);
3015 }
3016 
3018  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
3019  // Expand with a fence.
3020  return SDValue();
3021 
3022  // Monotonic load/stores are legal.
3023  return Op;
3024 }
3025 
3027  SelectionDAG &DAG) const {
3028  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3029  SDLoc dl(Op);
3030  switch (IntNo) {
3031  default: return SDValue(); // Don't custom lower most intrinsics.
3032  case Intrinsic::thread_pointer: {
3033  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3034  return DAG.getRegister(SP::G7, PtrVT);
3035  }
3036  }
3037 }
3038 
3041 
3042  bool hasHardQuad = Subtarget->hasHardQuad();
3043  bool isV9 = Subtarget->isV9();
3044 
3045  switch (Op.getOpcode()) {
3046  default: llvm_unreachable("Should not custom lower this!");
3047 
3048  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3049  Subtarget);
3050  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3051  Subtarget);
3052  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3053  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3054  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3055  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3056  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3057  hasHardQuad);
3058  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3059  hasHardQuad);
3060  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3061  hasHardQuad);
3062  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3063  hasHardQuad);
3064  case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this,
3065  hasHardQuad);
3066  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
3067  hasHardQuad);
3068  case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG, *this);
3069  case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG, *this);
3070  case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3071  case ISD::VAARG: return LowerVAARG(Op, DAG);
3072  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
3073  Subtarget);
3074 
3075  case ISD::LOAD: return LowerLOAD(Op, DAG);
3076  case ISD::STORE: return LowerSTORE(Op, DAG);
3077  case ISD::FADD: return LowerF128Op(Op, DAG,
3078  getLibcallName(RTLIB::ADD_F128), 2);
3079  case ISD::FSUB: return LowerF128Op(Op, DAG,
3080  getLibcallName(RTLIB::SUB_F128), 2);
3081  case ISD::FMUL: return LowerF128Op(Op, DAG,
3082  getLibcallName(RTLIB::MUL_F128), 2);
3083  case ISD::FDIV: return LowerF128Op(Op, DAG,
3084  getLibcallName(RTLIB::DIV_F128), 2);
3085  case ISD::FSQRT: return LowerF128Op(Op, DAG,
3086  getLibcallName(RTLIB::SQRT_F128),1);
3087  case ISD::FABS:
3088  case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3089  case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3090  case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3091  case ISD::ADDC:
3092  case ISD::ADDE:
3093  case ISD::SUBC:
3094  case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
3095  case ISD::UMULO:
3096  case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
3097  case ISD::ATOMIC_LOAD:
3098  case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3099  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3100  }
3101 }
3102 
3105  MachineBasicBlock *BB) const {
3106  switch (MI.getOpcode()) {
3107  default: llvm_unreachable("Unknown SELECT_CC!");
3108  case SP::SELECT_CC_Int_ICC:
3109  case SP::SELECT_CC_FP_ICC:
3110  case SP::SELECT_CC_DFP_ICC:
3111  case SP::SELECT_CC_QFP_ICC:
3112  return expandSelectCC(MI, BB, SP::BCOND);
3113  case SP::SELECT_CC_Int_FCC:
3114  case SP::SELECT_CC_FP_FCC:
3115  case SP::SELECT_CC_DFP_FCC:
3116  case SP::SELECT_CC_QFP_FCC:
3117  return expandSelectCC(MI, BB, SP::FBCOND);
3118  case SP::EH_SJLJ_SETJMP32ri:
3119  case SP::EH_SJLJ_SETJMP32rr:
3120  return emitEHSjLjSetJmp(MI, BB);
3121  case SP::EH_SJLJ_LONGJMP32rr:
3122  case SP::EH_SJLJ_LONGJMP32ri:
3123  return emitEHSjLjLongJmp(MI, BB);
3124 
3125  }
3126 }
3127 
3130  unsigned BROpcode) const {
3131  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3132  DebugLoc dl = MI.getDebugLoc();
3133  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3134 
3135  // To "insert" a SELECT_CC instruction, we actually have to insert the
3136  // triangle control-flow pattern. The incoming instruction knows the
3137  // destination vreg to set, the condition code register to branch on, the
3138  // true/false values to select between, and the condition code for the branch.
3139  //
3140  // We produce the following control flow:
3141  // ThisMBB
3142  // | \
3143  // | IfFalseMBB
3144  // | /
3145  // SinkMBB
3146  const BasicBlock *LLVM_BB = BB->getBasicBlock();
3148 
3149  MachineBasicBlock *ThisMBB = BB;
3150  MachineFunction *F = BB->getParent();
3151  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3152  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3153  F->insert(It, IfFalseMBB);
3154  F->insert(It, SinkMBB);
3155 
3156  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3157  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3158  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3159  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3160 
3161  // Set the new successors for ThisMBB.
3162  ThisMBB->addSuccessor(IfFalseMBB);
3163  ThisMBB->addSuccessor(SinkMBB);
3164 
3165  BuildMI(ThisMBB, dl, TII.get(BROpcode))
3166  .addMBB(SinkMBB)
3167  .addImm(CC);
3168 
3169  // IfFalseMBB just falls through to SinkMBB.
3170  IfFalseMBB->addSuccessor(SinkMBB);
3171 
3172  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3173  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3174  MI.getOperand(0).getReg())
3175  .addReg(MI.getOperand(1).getReg())
3176  .addMBB(ThisMBB)
3177  .addReg(MI.getOperand(2).getReg())
3178  .addMBB(IfFalseMBB);
3179 
3180  MI.eraseFromParent(); // The pseudo instruction is gone now.
3181  return SinkMBB;
3182 }
3183 
3186  MachineBasicBlock *MBB) const {
3187  DebugLoc DL = MI.getDebugLoc();
3188  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
3189 
3190  MachineFunction *MF = MBB->getParent();
3192  MachineInstrBuilder MIB;
3193 
3194  MVT PVT = getPointerTy(MF->getDataLayout());
3195  unsigned RegSize = PVT.getStoreSize();
3196  assert(PVT == MVT::i32 && "Invalid Pointer Size!");
3197 
3198  unsigned Buf = MI.getOperand(0).getReg();
3199  unsigned JmpLoc = MRI.createVirtualRegister(&SP::IntRegsRegClass);
3200 
3201  // TO DO: If we do 64-bit handling, this perhaps should be FLUSHW, not TA 3
3202  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::TRAPri), SP::G0).addImm(3).addImm(SPCC::ICC_A);
3203 
3204  // Instruction to restore FP
3205  const unsigned FP = SP::I6;
3206  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3207  .addReg(FP)
3208  .addReg(Buf)
3209  .addImm(0);
3210 
3211  // Instruction to load jmp location
3212  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3213  .addReg(JmpLoc, RegState::Define)
3214  .addReg(Buf)
3215  .addImm(RegSize);
3216 
3217  // Instruction to restore SP
3218  const unsigned SP = SP::O6;
3219  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3220  .addReg(SP)
3221  .addReg(Buf)
3222  .addImm(2 * RegSize);
3223 
3224  // Instruction to restore I7
3225  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3226  .addReg(SP::I7)
3227  .addReg(Buf, RegState::Kill)
3228  .addImm(3 * RegSize);
3229 
3230  // Jump to JmpLoc
3231  BuildMI(*MBB, MI, DL, TII->get(SP::JMPLrr)).addReg(SP::G0).addReg(JmpLoc, RegState::Kill).addReg(SP::G0);
3232 
3233  MI.eraseFromParent();
3234  return MBB;
3235 }
3236 
3239  MachineBasicBlock *MBB) const {
3240  DebugLoc DL = MI.getDebugLoc();
3241  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
3242  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3243 
3244  MachineFunction *MF = MBB->getParent();
3246  MachineInstrBuilder MIB;
3247 
3248  MVT PVT = getPointerTy(MF->getDataLayout());
3249  unsigned RegSize = PVT.getStoreSize();
3250  assert(PVT == MVT::i32 && "Invalid Pointer Size!");
3251 
3252  unsigned DstReg = MI.getOperand(0).getReg();
3253  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
3254  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
3255  (void)TRI;
3256  unsigned mainDstReg = MRI.createVirtualRegister(RC);
3257  unsigned restoreDstReg = MRI.createVirtualRegister(RC);
3258 
3259  // For v = setjmp(buf), we generate
3260  //
3261  // thisMBB:
3262  // buf[0] = FP
3263  // buf[RegSize] = restoreMBB <-- takes address of restoreMBB
3264  // buf[RegSize * 2] = O6
3265  // buf[RegSize * 3] = I7
3266  // Ensure restoreMBB remains in the relocations list (done using a bn instruction)
3267  // b mainMBB
3268  //
3269  // mainMBB:
3270  // v_main = 0
3271  // b sinkMBB
3272  //
3273  // restoreMBB:
3274  // v_restore = 1
3275  // --fall through--
3276  //
3277  // sinkMBB:
3278  // v = phi(main, restore)
3279 
3280  const BasicBlock *BB = MBB->getBasicBlock();
3281  MachineFunction::iterator It = ++MBB->getIterator();
3282  MachineBasicBlock *thisMBB = MBB;
3283  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
3284  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
3285  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
3286 
3287  MF->insert(It, mainMBB);
3288  MF->insert(It, restoreMBB);
3289  MF->insert(It, sinkMBB);
3290  restoreMBB->setHasAddressTaken();
3291 
3292  // Transfer the remainder of BB and its successor edges to sinkMBB.
3293  sinkMBB->splice(sinkMBB->begin(), MBB,
3294  std::next(MachineBasicBlock::iterator(MI)),
3295  MBB->end());
3296  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
3297 
3298  unsigned LabelReg = MRI.createVirtualRegister(&SP::IntRegsRegClass);
3299  unsigned LabelReg2 = MRI.createVirtualRegister(&SP::IntRegsRegClass);
3300  unsigned BufReg = MI.getOperand(1).getReg();
3301 
3302  // Instruction to store FP
3303  const unsigned FP = SP::I6;
3304  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3305  .addReg(BufReg)
3306  .addImm(0)
3307  .addReg(FP);
3308 
3309  // Instructions to store jmp location
3310  MIB = BuildMI(thisMBB, DL, TII->get(SP::SETHIi))
3311  .addReg(LabelReg, RegState::Define)
3312  .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_HI);
3313 
3314  MIB = BuildMI(thisMBB, DL, TII->get(SP::ORri))
3315  .addReg(LabelReg2, RegState::Define)
3316  .addReg(LabelReg, RegState::Kill)
3317  .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_LO);
3318 
3319  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3320  .addReg(BufReg)
3321  .addImm(RegSize)
3322  .addReg(LabelReg2, RegState::Kill);
3323 
3324  // Instruction to store SP
3325  const unsigned SP = SP::O6;
3326  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3327  .addReg(BufReg)
3328  .addImm(2 * RegSize)
3329  .addReg(SP);
3330 
3331  // Instruction to store I7
3332  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3333  .addReg(BufReg)
3334  .addImm(3 * RegSize)
3335  .addReg(SP::I7);
3336 
3337 
3338  // FIX ME: This next instruction ensures that the restoreMBB block address remains
3339  // valid through optimization passes and serves no other purpose. The ICC_N ensures
3340  // that the branch is never taken. This commented-out code here was an alternative
3341  // attempt to achieve this which brought myriad problems.
3342  //MIB = BuildMI(thisMBB, DL, TII->get(SP::EH_SjLj_Setup)).addMBB(restoreMBB, SparcMCExpr::VK_Sparc_None);
3343  MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
3344  .addMBB(restoreMBB)
3345  .addImm(SPCC::ICC_N);
3346 
3347  MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
3348  .addMBB(mainMBB)
3349  .addImm(SPCC::ICC_A);
3350 
3351  thisMBB->addSuccessor(mainMBB);
3352  thisMBB->addSuccessor(restoreMBB);
3353 
3354 
3355  // mainMBB:
3356  MIB = BuildMI(mainMBB, DL, TII->get(SP::ORrr))
3357  .addReg(mainDstReg, RegState::Define)
3358  .addReg(SP::G0)
3359  .addReg(SP::G0);
3360  MIB = BuildMI(mainMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
3361 
3362  mainMBB->addSuccessor(sinkMBB);
3363 
3364 
3365  // restoreMBB:
3366  MIB = BuildMI(restoreMBB, DL, TII->get(SP::ORri))
3367  .addReg(restoreDstReg, RegState::Define)
3368  .addReg(SP::G0)
3369  .addImm(1);
3370  //MIB = BuildMI(restoreMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
3371  restoreMBB->addSuccessor(sinkMBB);
3372 
3373  // sinkMBB:
3374  MIB = BuildMI(*sinkMBB, sinkMBB->begin(), DL,
3375  TII->get(SP::PHI), DstReg)
3376  .addReg(mainDstReg).addMBB(mainMBB)
3377  .addReg(restoreDstReg).addMBB(restoreMBB);
3378 
3379  MI.eraseFromParent();
3380  return sinkMBB;
3381 }
3382 
3383 //===----------------------------------------------------------------------===//
3384 // Sparc Inline Assembly Support
3385 //===----------------------------------------------------------------------===//
3386 
3387 /// getConstraintType - Given a constraint letter, return the type of
3388 /// constraint it is for this target.
3391  if (Constraint.size() == 1) {
3392  switch (Constraint[0]) {
3393  default: break;
3394  case 'r':
3395  case 'f':
3396  case 'e':
3397  return C_RegisterClass;
3398  case 'I': // SIMM13
3399  return C_Other;
3400  }
3401  }
3402 
3403  return TargetLowering::getConstraintType(Constraint);
3404 }
3405 
3408  const char *constraint) const {
3409  ConstraintWeight weight = CW_Invalid;
3410  Value *CallOperandVal = info.CallOperandVal;
3411  // If we don't have a value, we can't do a match,
3412  // but allow it at the lowest weight.
3413  if (!CallOperandVal)
3414  return CW_Default;
3415 
3416  // Look at the constraint type.
3417  switch (*constraint) {
3418  default:
3419  weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3420  break;
3421  case 'I': // SIMM13
3422  if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3423  if (isInt<13>(C->getSExtValue()))
3424  weight = CW_Constant;
3425  }
3426  break;
3427  }
3428  return weight;
3429 }
3430 
3431 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3432 /// vector. If it is invalid, don't add anything to Ops.
3435  std::string &Constraint,
3436  std::vector<SDValue> &Ops,
3437  SelectionDAG &DAG) const {
3438  SDValue Result(nullptr, 0);
3439 
3440  // Only support length 1 constraints for now.
3441  if (Constraint.length() > 1)
3442  return;
3443 
3444  char ConstraintLetter = Constraint[0];
3445  switch (ConstraintLetter) {
3446  default: break;
3447  case 'I':
3448  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3449  if (isInt<13>(C->getSExtValue())) {
3450  Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
3451  Op.getValueType());
3452  break;
3453  }
3454  return;
3455  }
3456  }
3457 
3458  if (Result.getNode()) {
3459  Ops.push_back(Result);
3460  return;
3461  }
3462  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3463 }
3464 
3465 std::pair<unsigned, const TargetRegisterClass *>
3467  StringRef Constraint,
3468  MVT VT) const {
3469  if (Constraint.size() == 1) {
3470  switch (Constraint[0]) {
3471  case 'r':
3472  if (VT == MVT::v2i32)
3473  return std::make_pair(0U, &SP::IntPairRegClass);
3474  else
3475  return std::make_pair(0U, &SP::IntRegsRegClass);
3476  case 'f':
3477  if (VT == MVT::f32)
3478  return std::make_pair(0U, &SP::FPRegsRegClass);
3479  else if (VT == MVT::f64)
3480  return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3481  else if (VT == MVT::f128)
3482  return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3483  llvm_unreachable("Unknown ValueType for f-register-type!");
3484  break;
3485  case 'e':
3486  if (VT == MVT::f32)
3487  return std::make_pair(0U, &SP::FPRegsRegClass);
3488  else if (VT == MVT::f64)
3489  return std::make_pair(0U, &SP::DFPRegsRegClass);
3490  else if (VT == MVT::f128)
3491  return std::make_pair(0U, &SP::QFPRegsRegClass);
3492  llvm_unreachable("Unknown ValueType for e-register-type!");
3493  break;
3494  }
3495  } else if (!Constraint.empty() && Constraint.size() <= 5
3496  && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
3497  // constraint = '{r<d>}'
3498  // Remove the braces from around the name.
3499  StringRef name(Constraint.data()+1, Constraint.size()-2);
3500  // Handle register aliases:
3501  // r0-r7 -> g0-g7
3502  // r8-r15 -> o0-o7
3503  // r16-r23 -> l0-l7
3504  // r24-r31 -> i0-i7
3505  uint64_t intVal = 0;
3506  if (name.substr(0, 1).equals("r")
3507  && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
3508  const char regTypes[] = { 'g', 'o', 'l', 'i' };
3509  char regType = regTypes[intVal/8];
3510  char regIdx = '0' + (intVal % 8);
3511  char tmp[] = { '{', regType, regIdx, '}', 0 };
3512  std::string newConstraint = std::string(tmp);
3513  return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
3514  VT);
3515  }
3516  }
3517 
3518  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3519 }
3520 
3521 bool
3523  // The Sparc target isn't yet aware of offsets.
3524  return false;
3525 }
3526 
3529  SelectionDAG &DAG) const {
3530 
3531  SDLoc dl(N);
3532 
3533  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3534 
3535  switch (N->getOpcode()) {
3536  default:
3537  llvm_unreachable("Do not know how to custom type legalize this operation!");
3538 
3539  case ISD::FP_TO_SINT:
3540  case ISD::FP_TO_UINT:
3541  // Custom lower only if it involves f128 or i64.
3542  if (N->getOperand(0).getValueType() != MVT::f128
3543  || N->getValueType(0) != MVT::i64)
3544  return;
3545  libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3546  ? RTLIB::FPTOSINT_F128_I64
3547  : RTLIB::FPTOUINT_F128_I64);
3548 
3549  Results.push_back(LowerF128Op(SDValue(N, 0),
3550  DAG,
3551  getLibcallName(libCall),
3552  1));
3553  return;
3554 
3555  case ISD::SINT_TO_FP:
3556  case ISD::UINT_TO_FP:
3557  // Custom lower only if it involves f128 or i64.
3558  if (N->getValueType(0) != MVT::f128
3559  || N->getOperand(0).getValueType() != MVT::i64)
3560  return;
3561 
3562  libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3563  ? RTLIB::SINTTOFP_I64_F128
3564  : RTLIB::UINTTOFP_I64_F128);
3565 
3566  Results.push_back(LowerF128Op(SDValue(N, 0),
3567  DAG,
3568  getLibcallName(libCall),
3569  1));
3570  return;
3571  case ISD::LOAD: {
3572  LoadSDNode *Ld = cast<LoadSDNode>(N);
3573  // Custom handling only for i64: turn i64 load into a v2i32 load,
3574  // and a bitcast.
3575  if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3576  return;
3577 
3578  SDLoc dl(N);
3579  SDValue LoadRes = DAG.getExtLoad(
3580  Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3581  Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getAlignment(),
3582  Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
3583 
3584  SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3585  Results.push_back(Res);
3586  Results.push_back(LoadRes.getValue(1));
3587  return;
3588  }
3589  }
3590 }
3591 
3592 // Override to enable LOAD_STACK_GUARD lowering on Linux.
3594  if (!Subtarget->isTargetLinux())
3596  return true;
3597 }
3598 
3599 // Override to disable global variable loading on Linux.
3601  if (!Subtarget->isTargetLinux())
3603 }
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
unsigned getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
void setFrameAddressIsTaken(bool T)
uint64_t CallInst * C
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:546
X = FP_ROUND(Y, TRUNC) - Rounding &#39;Y&#39; from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:513
static MVT getIntegerVT(unsigned BitWidth)
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:836
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
EVT getValueType() const
Return the ValueType of the referenced return value.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
bool usePopc() const
const SDValue & getOffset() const
bool isUndef() const
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
const GlobalValue * getGlobal() const
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
Definition: ISDOpcodes.h:184
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
DiagnosticInfoOptimizationBase::Argument NV
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC&#39;s if you just have an ISD::CondCode instead of an SD...
Definition: SelectionDAG.h:923
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it&#39;s not CSE&#39;d)...
Definition: SelectionDAG.h:839
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC)
IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC condition.
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:618
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:115
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:63
unsigned getPointerSize() const
Get the pointer size for this target.
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
C - The default llvm calling convention, compatible with C.
Definition: CallingConv.h:35
SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
const SDValue & getBasePtr() const
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:223
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:271
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
bool isFP128Ty() const
Return true if this is &#39;fp128&#39;.
Definition: Type.h:156
const SDValue & getValue() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:667
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
const SDValue & getChain() const
Function Alias Analysis Results
unsigned getValNo() const
unsigned getAlignment() const
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:302
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
unsigned second
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
const uint32_t * getRTCallPreservedMask(CallingConv::ID CC) const
bool hasHardQuad() const
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
A debug info location.
Definition: DebugLoc.h:34
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:141
F(f)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
Definition: MathExtras.h:677
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:677
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
const SparcInstrInfo * getInstrInfo() const override
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:749
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:405
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:128
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
bool useSoftFloat() const override
SDValue getExternalSymbol(const char *Sym, EVT VT)
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
bool isMemLoc() const
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool needsCustom() const
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic...
Definition: ISDOpcodes.h:114
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:557
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:210
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:434
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
bool isTargetLinux() const
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9)
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
const HexagonInstrInfo * TII
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
Shift and rotation operations.
Definition: ISDOpcodes.h:380
MachineBasicBlock * expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB, unsigned BROpcode) const
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:205
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
BinOp getOperation() const
Definition: Instructions.h:730
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:191
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void addLoc(const CCValAssign &V)
uint64_t getConstantOperandVal(unsigned i) const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
Reg
All possible values of the reg field in the ModR/M byte.
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
Definition: ISDOpcodes.h:778
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:293
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:449
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:390
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:73
unsigned getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
LocInfo getLocInfo() const
bool useSoftFloat() const
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SmallVector< ISD::InputArg, 32 > Ins
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:663
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
unsigned getSizeInBits() const
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
bool fixAllFDIVSQRT() const
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch & Case(const char(&S)[N], const T &Value)
Definition: StringSwitch.h:74
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:387
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:456
SDValue getRegisterMask(const uint32_t *RegMask)
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:391
SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID CC) const override
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:201
SDValue LowerF128Op(SDValue Op, SelectionDAG &DAG, const char *LibFuncName, unsigned numArgs) const
bool useSoftMulDiv() const
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:133
int64_t getStackPointerBias() const
The 64-bit ABI uses biased stack and frame pointers, so the stack frame of the current function is th...
SmallVector< ISD::OutputArg, 32 > Outs
SDValue LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
LowerFormalArguments32 - V8 uses a very simple ABI, where all values are passed in either one or two ...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:219
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
static mvt_range integer_vector_valuetypes()
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:918
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:561
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
bool isStrongerThanMonotonic(AtomicOrdering ao)
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:151
amdgpu Simplify well known AMD library false Value * Callee
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Class to represent pointers.
Definition: DerivedTypes.h:467
unsigned getByValSize() const
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
UNDEF - An undefined node.
Definition: ISDOpcodes.h:178
This class is used to represent ISD::STORE nodes.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:499
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:121
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Definition: ISDOpcodes.h:303
TargetInstrInfo - Interface to description of machine instruction set.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool hasNoFMULS() const
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
const SDValue & getBasePtr() const
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:43
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
unsigned const MachineRegisterInfo * MRI
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
Machine Value Type.
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:69
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
Simple binary floating point operators.
Definition: ISDOpcodes.h:260
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")