// Extracted from the doxygen rendering of LLVM 6.0.0svn, SparcISelLowering.cpp.
//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
14 
#include "SparcISelLowering.h"
#include "MCTargetDesc/SparcMCExpr.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetObjectFile.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
34 using namespace llvm;
35 
36 
37 //===----------------------------------------------------------------------===//
38 // Calling Convention Implementation
39 //===----------------------------------------------------------------------===//
40 
41 static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
42  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
43  ISD::ArgFlagsTy &ArgFlags, CCState &State)
44 {
45  assert (ArgFlags.isSRet());
46 
47  // Assign SRet argument.
48  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
49  0,
50  LocVT, LocInfo));
51  return true;
52 }
53 
54 static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
55  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
56  ISD::ArgFlagsTy &ArgFlags, CCState &State)
57 {
58  static const MCPhysReg RegList[] = {
59  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
60  };
61  // Try to get first reg.
62  if (unsigned Reg = State.AllocateReg(RegList)) {
63  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
64  } else {
65  // Assign whole thing in stack.
66  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
67  State.AllocateStack(8,4),
68  LocVT, LocInfo));
69  return true;
70  }
71 
72  // Try to get second reg.
73  if (unsigned Reg = State.AllocateReg(RegList))
74  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
75  else
76  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
77  State.AllocateStack(4,4),
78  LocVT, LocInfo));
79  return true;
80 }
81 
82 static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
83  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
84  ISD::ArgFlagsTy &ArgFlags, CCState &State)
85 {
86  static const MCPhysReg RegList[] = {
87  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
88  };
89 
90  // Try to get first reg.
91  if (unsigned Reg = State.AllocateReg(RegList))
92  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
93  else
94  return false;
95 
96  // Try to get second reg.
97  if (unsigned Reg = State.AllocateReg(RegList))
98  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
99  else
100  return false;
101 
102  return true;
103 }
104 
105 // Allocate a full-sized argument for the 64-bit ABI.
106 static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
107  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
108  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
109  assert((LocVT == MVT::f32 || LocVT == MVT::f128
110  || LocVT.getSizeInBits() == 64) &&
111  "Can't handle non-64 bits locations");
112 
113  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
114  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
115  unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
116  unsigned Offset = State.AllocateStack(size, alignment);
117  unsigned Reg = 0;
118 
119  if (LocVT == MVT::i64 && Offset < 6*8)
120  // Promote integers to %i0-%i5.
121  Reg = SP::I0 + Offset/8;
122  else if (LocVT == MVT::f64 && Offset < 16*8)
123  // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
124  Reg = SP::D0 + Offset/8;
125  else if (LocVT == MVT::f32 && Offset < 16*8)
126  // Promote floats to %f1, %f3, ...
127  Reg = SP::F1 + Offset/4;
128  else if (LocVT == MVT::f128 && Offset < 16*8)
129  // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
130  Reg = SP::Q0 + Offset/16;
131 
132  // Promote to register when possible, otherwise use the stack slot.
133  if (Reg) {
134  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
135  return true;
136  }
137 
138  // This argument goes on the stack in an 8-byte slot.
139  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
140  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
141  if (LocVT == MVT::f32)
142  Offset += 4;
143 
144  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
145  return true;
146 }
147 
148 // Allocate a half-sized argument for the 64-bit ABI.
149 //
150 // This is used when passing { float, int } structs by value in registers.
151 static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
152  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
153  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
154  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
155  unsigned Offset = State.AllocateStack(4, 4);
156 
157  if (LocVT == MVT::f32 && Offset < 16*8) {
158  // Promote floats to %f0-%f31.
159  State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
160  LocVT, LocInfo));
161  return true;
162  }
163 
164  if (LocVT == MVT::i32 && Offset < 6*8) {
165  // Promote integers to %i0-%i5, using half the register.
166  unsigned Reg = SP::I0 + Offset/8;
167  LocVT = MVT::i64;
168  LocInfo = CCValAssign::AExt;
169 
170  // Set the Custom bit if this i32 goes in the high bits of a register.
171  if (Offset % 8 == 0)
172  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
173  LocVT, LocInfo));
174  else
175  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
176  return true;
177  }
178 
179  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
180  return true;
181 }
182 
183 #include "SparcGenCallingConv.inc"
184 
185 // The calling conventions in SparcCallingConv.td are described in terms of the
186 // callee's register window. This function translates registers to the
187 // corresponding caller window %o register.
188 static unsigned toCallerWindow(unsigned Reg) {
189  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
190  "Unexpected enum");
191  if (Reg >= SP::I0 && Reg <= SP::I7)
192  return Reg - SP::I0 + SP::O0;
193  return Reg;
194 }
195 
196 SDValue
198  bool IsVarArg,
200  const SmallVectorImpl<SDValue> &OutVals,
201  const SDLoc &DL, SelectionDAG &DAG) const {
202  if (Subtarget->is64Bit())
203  return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
204  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
205 }
206 
207 SDValue
209  bool IsVarArg,
211  const SmallVectorImpl<SDValue> &OutVals,
212  const SDLoc &DL, SelectionDAG &DAG) const {
214 
215  // CCValAssign - represent the assignment of the return value to locations.
217 
218  // CCState - Info about the registers and stack slot.
219  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
220  *DAG.getContext());
221 
222  // Analyze return values.
223  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
224 
225  SDValue Flag;
226  SmallVector<SDValue, 4> RetOps(1, Chain);
227  // Make room for the return address offset.
228  RetOps.push_back(SDValue());
229 
230  // Copy the result values into the output registers.
231  for (unsigned i = 0, realRVLocIdx = 0;
232  i != RVLocs.size();
233  ++i, ++realRVLocIdx) {
234  CCValAssign &VA = RVLocs[i];
235  assert(VA.isRegLoc() && "Can only return in registers!");
236 
237  SDValue Arg = OutVals[realRVLocIdx];
238 
239  if (VA.needsCustom()) {
240  assert(VA.getLocVT() == MVT::v2i32);
241  // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
242  // happen by default if this wasn't a legal type)
243 
245  Arg,
246  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
248  Arg,
249  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
250 
251  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
252  Flag = Chain.getValue(1);
253  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
254  VA = RVLocs[++i]; // skip ahead to next loc
255  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
256  Flag);
257  } else
258  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
259 
260  // Guarantee that all emitted copies are stuck together with flags.
261  Flag = Chain.getValue(1);
262  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
263  }
264 
265  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
266  // If the function returns a struct, copy the SRetReturnReg to I0
267  if (MF.getFunction()->hasStructRetAttr()) {
269  unsigned Reg = SFI->getSRetReturnReg();
270  if (!Reg)
271  llvm_unreachable("sret virtual register not created in the entry block");
272  auto PtrVT = getPointerTy(DAG.getDataLayout());
273  SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
274  Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
275  Flag = Chain.getValue(1);
276  RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
277  RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
278  }
279 
280  RetOps[0] = Chain; // Update chain.
281  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
282 
283  // Add the flag if we have it.
284  if (Flag.getNode())
285  RetOps.push_back(Flag);
286 
287  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
288 }
289 
290 // Lower return values for the 64-bit ABI.
291 // Return values are passed the exactly the same way as function arguments.
292 SDValue
294  bool IsVarArg,
296  const SmallVectorImpl<SDValue> &OutVals,
297  const SDLoc &DL, SelectionDAG &DAG) const {
298  // CCValAssign - represent the assignment of the return value to locations.
300 
301  // CCState - Info about the registers and stack slot.
302  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
303  *DAG.getContext());
304 
305  // Analyze return values.
306  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
307 
308  SDValue Flag;
309  SmallVector<SDValue, 4> RetOps(1, Chain);
310 
311  // The second operand on the return instruction is the return address offset.
312  // The return address is always %i7+8 with the 64-bit ABI.
313  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
314 
315  // Copy the result values into the output registers.
316  for (unsigned i = 0; i != RVLocs.size(); ++i) {
317  CCValAssign &VA = RVLocs[i];
318  assert(VA.isRegLoc() && "Can only return in registers!");
319  SDValue OutVal = OutVals[i];
320 
321  // Integer return values must be sign or zero extended by the callee.
322  switch (VA.getLocInfo()) {
323  case CCValAssign::Full: break;
324  case CCValAssign::SExt:
325  OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
326  break;
327  case CCValAssign::ZExt:
328  OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
329  break;
330  case CCValAssign::AExt:
331  OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
332  break;
333  default:
334  llvm_unreachable("Unknown loc info!");
335  }
336 
337  // The custom bit on an i32 return value indicates that it should be passed
338  // in the high bits of the register.
339  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
340  OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
341  DAG.getConstant(32, DL, MVT::i32));
342 
343  // The next value may go in the low bits of the same register.
344  // Handle both at once.
345  if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
346  SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
347  OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
348  // Skip the next value, it's already done.
349  ++i;
350  }
351  }
352 
353  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
354 
355  // Guarantee that all emitted copies are stuck together with flags.
356  Flag = Chain.getValue(1);
357  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
358  }
359 
360  RetOps[0] = Chain; // Update chain.
361 
362  // Add the flag if we have it.
363  if (Flag.getNode())
364  RetOps.push_back(Flag);
365 
366  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
367 }
368 
370  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
371  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
372  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
373  if (Subtarget->is64Bit())
374  return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
375  DL, DAG, InVals);
376  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
377  DL, DAG, InVals);
378 }
379 
380 /// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
381 /// passed in either one or two GPRs, including FP values. TODO: we should
382 /// pass FP values in FP registers for fastcc functions.
384  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
385  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
386  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
388  MachineRegisterInfo &RegInfo = MF.getRegInfo();
390 
391  // Assign locations to all of the incoming arguments.
393  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
394  *DAG.getContext());
395  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
396 
397  const unsigned StackOffset = 92;
398  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
399 
400  unsigned InIdx = 0;
401  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
402  CCValAssign &VA = ArgLocs[i];
403 
404  if (Ins[InIdx].Flags.isSRet()) {
405  if (InIdx != 0)
406  report_fatal_error("sparc only supports sret on the first parameter");
407  // Get SRet from [%fp+64].
408  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
409  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
410  SDValue Arg =
411  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
412  InVals.push_back(Arg);
413  continue;
414  }
415 
416  if (VA.isRegLoc()) {
417  if (VA.needsCustom()) {
418  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
419 
420  unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
421  MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
422  SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
423 
424  assert(i+1 < e);
425  CCValAssign &NextVA = ArgLocs[++i];
426 
427  SDValue LoVal;
428  if (NextVA.isMemLoc()) {
429  int FrameIdx = MF.getFrameInfo().
430  CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
431  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
432  LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
433  } else {
434  unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
435  &SP::IntRegsRegClass);
436  LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
437  }
438 
439  if (IsLittleEndian)
440  std::swap(LoVal, HiVal);
441 
442  SDValue WholeValue =
443  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
444  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
445  InVals.push_back(WholeValue);
446  continue;
447  }
448  unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
449  MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
450  SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
451  if (VA.getLocVT() == MVT::f32)
452  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
453  else if (VA.getLocVT() != MVT::i32) {
454  Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
455  DAG.getValueType(VA.getLocVT()));
456  Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
457  }
458  InVals.push_back(Arg);
459  continue;
460  }
461 
462  assert(VA.isMemLoc());
463 
464  unsigned Offset = VA.getLocMemOffset()+StackOffset;
465  auto PtrVT = getPointerTy(DAG.getDataLayout());
466 
467  if (VA.needsCustom()) {
468  assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
469  // If it is double-word aligned, just load.
470  if (Offset % 8 == 0) {
471  int FI = MF.getFrameInfo().CreateFixedObject(8,
472  Offset,
473  true);
474  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
475  SDValue Load =
476  DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
477  InVals.push_back(Load);
478  continue;
479  }
480 
481  int FI = MF.getFrameInfo().CreateFixedObject(4,
482  Offset,
483  true);
484  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
485  SDValue HiVal =
486  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
487  int FI2 = MF.getFrameInfo().CreateFixedObject(4,
488  Offset+4,
489  true);
490  SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
491 
492  SDValue LoVal =
493  DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
494 
495  if (IsLittleEndian)
496  std::swap(LoVal, HiVal);
497 
498  SDValue WholeValue =
499  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
500  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
501  InVals.push_back(WholeValue);
502  continue;
503  }
504 
505  int FI = MF.getFrameInfo().CreateFixedObject(4,
506  Offset,
507  true);
508  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
509  SDValue Load ;
510  if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
511  Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
512  } else if (VA.getValVT() == MVT::f128) {
513  report_fatal_error("SPARCv8 does not handle f128 in calls; "
514  "pass indirectly");
515  } else {
516  // We shouldn't see any other value types here.
517  llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
518  }
519  InVals.push_back(Load);
520  }
521 
522  if (MF.getFunction()->hasStructRetAttr()) {
523  // Copy the SRet Argument to SRetReturnReg.
525  unsigned Reg = SFI->getSRetReturnReg();
526  if (!Reg) {
527  Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
528  SFI->setSRetReturnReg(Reg);
529  }
530  SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
531  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
532  }
533 
534  // Store remaining ArgRegs to the stack if this is a varargs function.
535  if (isVarArg) {
536  static const MCPhysReg ArgRegs[] = {
537  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
538  };
539  unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
540  const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
541  unsigned ArgOffset = CCInfo.getNextStackOffset();
542  if (NumAllocated == 6)
543  ArgOffset += StackOffset;
544  else {
545  assert(!ArgOffset);
546  ArgOffset = 68+4*NumAllocated;
547  }
548 
549  // Remember the vararg offset for the va_start implementation.
550  FuncInfo->setVarArgsFrameOffset(ArgOffset);
551 
552  std::vector<SDValue> OutChains;
553 
554  for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
555  unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
556  MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
557  SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
558 
559  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
560  true);
561  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
562 
563  OutChains.push_back(
564  DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
565  ArgOffset += 4;
566  }
567 
568  if (!OutChains.empty()) {
569  OutChains.push_back(Chain);
570  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
571  }
572  }
573 
574  return Chain;
575 }
576 
577 // Lower formal arguments for the 64 bit ABI.
579  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
580  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
581  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
583 
584  // Analyze arguments according to CC_Sparc64.
586  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
587  *DAG.getContext());
588  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
589 
590  // The argument array begins at %fp+BIAS+128, after the register save area.
591  const unsigned ArgArea = 128;
592 
593  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
594  CCValAssign &VA = ArgLocs[i];
595  if (VA.isRegLoc()) {
596  // This argument is passed in a register.
597  // All integer register arguments are promoted by the caller to i64.
598 
599  // Create a virtual register for the promoted live-in value.
600  unsigned VReg = MF.addLiveIn(VA.getLocReg(),
601  getRegClassFor(VA.getLocVT()));
602  SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
603 
604  // Get the high bits for i32 struct elements.
605  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
606  Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
607  DAG.getConstant(32, DL, MVT::i32));
608 
609  // The caller promoted the argument, so insert an Assert?ext SDNode so we
610  // won't promote the value again in this function.
611  switch (VA.getLocInfo()) {
612  case CCValAssign::SExt:
613  Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
614  DAG.getValueType(VA.getValVT()));
615  break;
616  case CCValAssign::ZExt:
617  Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
618  DAG.getValueType(VA.getValVT()));
619  break;
620  default:
621  break;
622  }
623 
624  // Truncate the register down to the argument type.
625  if (VA.isExtInLoc())
626  Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
627 
628  InVals.push_back(Arg);
629  continue;
630  }
631 
632  // The registers are exhausted. This argument was passed on the stack.
633  assert(VA.isMemLoc());
634  // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
635  // beginning of the arguments area at %fp+BIAS+128.
636  unsigned Offset = VA.getLocMemOffset() + ArgArea;
637  unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
638  // Adjust offset for extended arguments, SPARC is big-endian.
639  // The caller will have written the full slot with extended bytes, but we
640  // prefer our own extending loads.
641  if (VA.isExtInLoc())
642  Offset += 8 - ValSize;
643  int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
644  InVals.push_back(
645  DAG.getLoad(VA.getValVT(), DL, Chain,
648  }
649 
650  if (!IsVarArg)
651  return Chain;
652 
653  // This function takes variable arguments, some of which may have been passed
654  // in registers %i0-%i5. Variable floating point arguments are never passed
655  // in floating point registers. They go on %i0-%i5 or on the stack like
656  // integer arguments.
657  //
658  // The va_start intrinsic needs to know the offset to the first variable
659  // argument.
660  unsigned ArgOffset = CCInfo.getNextStackOffset();
662  // Skip the 128 bytes of register save area.
663  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
664  Subtarget->getStackPointerBias());
665 
666  // Save the variable arguments that were passed in registers.
667  // The caller is required to reserve stack space for 6 arguments regardless
668  // of how many arguments were actually passed.
669  SmallVector<SDValue, 8> OutChains;
670  for (; ArgOffset < 6*8; ArgOffset += 8) {
671  unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
672  SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
673  int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
674  auto PtrVT = getPointerTy(MF.getDataLayout());
675  OutChains.push_back(
676  DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
678  }
679 
680  if (!OutChains.empty())
681  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
682 
683  return Chain;
684 }
685 
686 SDValue
688  SmallVectorImpl<SDValue> &InVals) const {
689  if (Subtarget->is64Bit())
690  return LowerCall_64(CLI, InVals);
691  return LowerCall_32(CLI, InVals);
692 }
693 
695  ImmutableCallSite CS) {
696  if (CS)
697  return CS.hasFnAttr(Attribute::ReturnsTwice);
698 
699  const Function *CalleeFn = nullptr;
700  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
701  CalleeFn = dyn_cast<Function>(G->getGlobal());
702  } else if (ExternalSymbolSDNode *E =
703  dyn_cast<ExternalSymbolSDNode>(Callee)) {
704  const Function *Fn = DAG.getMachineFunction().getFunction();
705  const Module *M = Fn->getParent();
706  const char *CalleeName = E->getSymbol();
707  CalleeFn = M->getFunction(CalleeName);
708  }
709 
710  if (!CalleeFn)
711  return false;
712  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
713 }
714 
715 // Lower a call for the 32-bit ABI.
716 SDValue
718  SmallVectorImpl<SDValue> &InVals) const {
719  SelectionDAG &DAG = CLI.DAG;
720  SDLoc &dl = CLI.DL;
722  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
724  SDValue Chain = CLI.Chain;
725  SDValue Callee = CLI.Callee;
726  bool &isTailCall = CLI.IsTailCall;
727  CallingConv::ID CallConv = CLI.CallConv;
728  bool isVarArg = CLI.IsVarArg;
729 
730  // Sparc target does not yet support tail call optimization.
731  isTailCall = false;
732 
733  // Analyze operands of the call, assigning locations to each operand.
735  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
736  *DAG.getContext());
737  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
738 
739  // Get the size of the outgoing arguments stack space requirement.
740  unsigned ArgsSize = CCInfo.getNextStackOffset();
741 
742  // Keep stack frames 8-byte aligned.
743  ArgsSize = (ArgsSize+7) & ~7;
744 
746 
747  // Create local copies for byval args.
748  SmallVector<SDValue, 8> ByValArgs;
749  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
750  ISD::ArgFlagsTy Flags = Outs[i].Flags;
751  if (!Flags.isByVal())
752  continue;
753 
754  SDValue Arg = OutVals[i];
755  unsigned Size = Flags.getByValSize();
756  unsigned Align = Flags.getByValAlign();
757 
758  if (Size > 0U) {
759  int FI = MFI.CreateStackObject(Size, Align, false);
760  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
761  SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
762 
763  Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
764  false, // isVolatile,
765  (Size <= 32), // AlwaysInline if size <= 32,
766  false, // isTailCall
768  ByValArgs.push_back(FIPtr);
769  }
770  else {
771  SDValue nullVal;
772  ByValArgs.push_back(nullVal);
773  }
774  }
775 
776  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
777 
779  SmallVector<SDValue, 8> MemOpChains;
780 
781  const unsigned StackOffset = 92;
782  bool hasStructRetAttr = false;
783  // Walk the register/memloc assignments, inserting copies/loads.
784  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
785  i != e;
786  ++i, ++realArgIdx) {
787  CCValAssign &VA = ArgLocs[i];
788  SDValue Arg = OutVals[realArgIdx];
789 
790  ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
791 
792  // Use local copy if it is a byval arg.
793  if (Flags.isByVal()) {
794  Arg = ByValArgs[byvalArgIdx++];
795  if (!Arg) {
796  continue;
797  }
798  }
799 
800  // Promote the value if needed.
801  switch (VA.getLocInfo()) {
802  default: llvm_unreachable("Unknown loc info!");
803  case CCValAssign::Full: break;
804  case CCValAssign::SExt:
805  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
806  break;
807  case CCValAssign::ZExt:
808  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
809  break;
810  case CCValAssign::AExt:
811  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
812  break;
813  case CCValAssign::BCvt:
814  Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
815  break;
816  }
817 
818  if (Flags.isSRet()) {
819  assert(VA.needsCustom());
820  // store SRet argument in %sp+64
821  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
822  SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
823  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
824  MemOpChains.push_back(
825  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
826  hasStructRetAttr = true;
827  continue;
828  }
829 
830  if (VA.needsCustom()) {
831  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
832 
833  if (VA.isMemLoc()) {
834  unsigned Offset = VA.getLocMemOffset() + StackOffset;
835  // if it is double-word aligned, just store.
836  if (Offset % 8 == 0) {
837  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
838  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
839  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
840  MemOpChains.push_back(
841  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
842  continue;
843  }
844  }
845 
846  if (VA.getLocVT() == MVT::f64) {
847  // Move from the float value from float registers into the
848  // integer registers.
849 
850  // TODO: The f64 -> v2i32 conversion is super-inefficient for
851  // constants: it sticks them in the constant pool, then loads
852  // to a fp register, then stores to temp memory, then loads to
853  // integer registers.
854  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
855  }
856 
858  Arg,
859  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
861  Arg,
862  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
863 
864  if (VA.isRegLoc()) {
865  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
866  assert(i+1 != e);
867  CCValAssign &NextVA = ArgLocs[++i];
868  if (NextVA.isRegLoc()) {
869  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
870  } else {
871  // Store the second part in stack.
872  unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
873  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
874  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
875  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
876  MemOpChains.push_back(
877  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
878  }
879  } else {
880  unsigned Offset = VA.getLocMemOffset() + StackOffset;
881  // Store the first part.
882  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
883  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
884  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
885  MemOpChains.push_back(
886  DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
887  // Store the second part.
888  PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
889  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
890  MemOpChains.push_back(
891  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
892  }
893  continue;
894  }
895 
896  // Arguments that can be passed on register must be kept at
897  // RegsToPass vector
898  if (VA.isRegLoc()) {
899  if (VA.getLocVT() != MVT::f32) {
900  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
901  continue;
902  }
903  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
904  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
905  continue;
906  }
907 
908  assert(VA.isMemLoc());
909 
910  // Create a store off the stack pointer for this argument.
911  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
912  SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
913  dl);
914  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
915  MemOpChains.push_back(
916  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
917  }
918 
919 
920  // Emit all stores, make sure the occur before any copies into physregs.
921  if (!MemOpChains.empty())
922  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
923 
924  // Build a sequence of copy-to-reg nodes chained together with token
925  // chain and flag operands which copy the outgoing args into registers.
926  // The InFlag in necessary since all emitted instructions must be
927  // stuck together.
928  SDValue InFlag;
929  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
930  unsigned Reg = toCallerWindow(RegsToPass[i].first);
931  Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
932  InFlag = Chain.getValue(1);
933  }
934 
935  unsigned SRetArgSize = (hasStructRetAttr)? getSRetArgSize(DAG, Callee):0;
936  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
937 
938  // If the callee is a GlobalAddress node (quite common, every direct call is)
939  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
940  // Likewise ExternalSymbol -> TargetExternalSymbol.
942  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
943  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
944  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
945  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
946 
947  // Returns a chain & a flag for retval copy to use
948  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
950  Ops.push_back(Chain);
951  Ops.push_back(Callee);
952  if (hasStructRetAttr)
953  Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
954  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
955  Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
956  RegsToPass[i].second.getValueType()));
957 
958  // Add a register mask operand representing the call-preserved registers.
959  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
960  const uint32_t *Mask =
961  ((hasReturnsTwice)
962  ? TRI->getRTCallPreservedMask(CallConv)
963  : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
964  assert(Mask && "Missing call preserved mask for calling convention");
965  Ops.push_back(DAG.getRegisterMask(Mask));
966 
967  if (InFlag.getNode())
968  Ops.push_back(InFlag);
969 
970  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
971  InFlag = Chain.getValue(1);
972 
973  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
974  DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
975  InFlag = Chain.getValue(1);
976 
977  // Assign locations to each value returned by this call.
979  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
980  *DAG.getContext());
981 
982  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
983 
984  // Copy all of the result registers out of their specified physreg.
985  for (unsigned i = 0; i != RVLocs.size(); ++i) {
986  if (RVLocs[i].getLocVT() == MVT::v2i32) {
987  SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
988  SDValue Lo = DAG.getCopyFromReg(
989  Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
990  Chain = Lo.getValue(1);
991  InFlag = Lo.getValue(2);
992  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
993  DAG.getConstant(0, dl, MVT::i32));
994  SDValue Hi = DAG.getCopyFromReg(
995  Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
996  Chain = Hi.getValue(1);
997  InFlag = Hi.getValue(2);
998  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
999  DAG.getConstant(1, dl, MVT::i32));
1000  InVals.push_back(Vec);
1001  } else {
1002  Chain =
1003  DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1004  RVLocs[i].getValVT(), InFlag)
1005  .getValue(1);
1006  InFlag = Chain.getValue(2);
1007  InVals.push_back(Chain.getValue(0));
1008  }
1009  }
1010 
1011  return Chain;
1012 }
1013 
1014 // FIXME? Maybe this could be a TableGen attribute on some registers and
1015 // this table could be generated automatically from RegInfo.
1016 unsigned SparcTargetLowering::getRegisterByName(const char* RegName, EVT VT,
1017  SelectionDAG &DAG) const {
1018  unsigned Reg = StringSwitch<unsigned>(RegName)
1019  .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1020  .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1021  .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1022  .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1023  .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1024  .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1025  .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1026  .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1027  .Default(0);
1028 
1029  if (Reg)
1030  return Reg;
1031 
1032  report_fatal_error("Invalid register name global variable");
1033 }
1034 
// This function returns true if CalleeName is an ABI function that returns
// a long double (fp128).
static bool isFP128ABICall(const char *CalleeName)
{
  // Soft-float quad-precision runtime entry points that produce an fp128
  // result.
  static const char *const ABICalls[] =
    { "_Q_add", "_Q_sub", "_Q_mul",  "_Q_div",
      "_Q_sqrt", "_Q_neg",
      "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq",
      "_Q_lltoq", "_Q_ulltoq"
    };
  // Range-for over the table; no nullptr sentinel or manual pointer walk
  // needed.
  for (const char *Name : ABICalls)
    if (strcmp(CalleeName, Name) == 0)
      return true;
  return false;
}
1051 
// NOTE(review): the original two-line signature is not fully present in this
// listing; from the call site in LowerCall_32 this is
// getSRetArgSize(SelectionDAG &DAG, SDValue Callee) — confirm against
// upstream. It returns the byte size of the sret (struct-return) value.
unsigned
{
  // Try to resolve the callee SDNode to the IR function it names.
  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    // External symbol: look the name up in the current module. Well-known
    // fp128 runtime calls return a 16-byte value even without a declaration.
    const Function *Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn->getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
    if (!CalleeFn && isFP128ABICall(CalleeName))
      return 16; // Return sizeof(fp128)
  }

  // Unknown callee: report no sret size.
  if (!CalleeFn)
    return 0;

  // It would be nice to check for the sret attribute on CalleeFn here,
  // but since it is not part of the function type, any check will misfire.

  // The sret pointer is the first argument; the pointee's alloc size is the
  // number of bytes the callee writes through it.
  PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
  Type *ElementTy = Ty->getElementType();
  return DAG.getDataLayout().getTypeAllocSize(ElementTy);
}
1078 
1079 
// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
    CCValAssign NewVA;

    // Determine the offset into the argument array.
    // An f64 occupies one 8-byte slot, an f128 two.
    unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                    IReg, MVT::i64, CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
                                          IReg, MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                                  Offset, VA.getLocVT(), VA.getLocInfo());
    }
    ArgLocs[i] = NewVA;
  }
}
1132 
// Lower a call for the 64-bit ABI.
SDValue
                                 SmallVectorImpl<SDValue> &InVals) const {
  // NOTE(review): this listing is missing several original lines (the first
  // signature line and the SmallVector declarations for ArgLocs, RegsToPass,
  // Ops and RVLocs among them) — verify against upstream before editing.
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Sparc target does not yet support tail call optimization.
  CLI.IsTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        // %o6 is the stack pointer.
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store =
            DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
        // Load into Reg and Reg+1
        SDValue Hi64 =
            DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
        SDValue Lo64 =
            DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
                                            Hi64));
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
                                            Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }
      RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL,
                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);

  // Build the operands for the call instruction itself.
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                                             CLI.CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CS)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // CopyFromReg previous node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, DL, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}
1391 
1392 //===----------------------------------------------------------------------===//
1393 // TargetLowering Implementation
1394 //===----------------------------------------------------------------------===//
1395 
  // A 32-bit atomic exchange can be selected directly (no IR expansion).
  if (AI->getOperation() == AtomicRMWInst::Xchg &&
      AI->getType()->getPrimitiveSizeInBits() == 32)
    return AtomicExpansionKind::None; // Uses xchg instruction

}
1403 
/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  // Signed comparisons.
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  // Unsigned comparisons map onto the carry-based / unsigned conditions.
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}
1421 
/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  // Plain and ordered DAG codes share the same FCC test; unordered variants
  // get their own U-flavored conditions below.
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}
1449 
                                         const SparcSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());

  // NOTE(review): this constructor's listing is missing many original lines
  // (most setOperationAction / setTruncStoreAction / setLoadExtAction calls)
  // — verify against upstream before editing.

  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  if (!Subtarget->useSoftFloat()) {
    addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
    addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
    addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
  }
  if (Subtarget->is64Bit()) {
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
  } else {
    // On 32bit sparc, we define a double-register 32bit register
    // class, as well. This is modeled in LLVM as a 2-vector of i32.
    addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);

    // ...but almost all operations must be expanded, so set that as
    // the default.
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
    }
    // Truncating/extending stores/loads are also not supported.
    for (MVT VT : MVT::integer_vector_valuetypes()) {


    }
    // However, load and store *are* legal.

    // And we need to promote i64 loads/stores into vector load/store

    // Sadly, this doesn't work:
    // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
    // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
  }

  // Turn FP extload into load/fpextend
  for (MVT VT : MVT::fp_valuetypes()) {
  }

  // Sparc doesn't have i1 sign extending load
  for (MVT VT : MVT::integer_valuetypes())

  // Turn FP truncstore into trunc + store.

  // Custom legalize GlobalAddress nodes into LO/HI parts.

  // Sparc doesn't have sext_inreg, replace them with shl/sra

  // Sparc has no REM or DIVREM operations.

  // ... nor does SparcV9.
  if (Subtarget->is64Bit()) {
  }

  // Custom expand fp<->sint

  // Custom Expand fp<->uint

  // Sparc has no select or setcc: expand to SELECT_CC.


  // Sparc doesn't have BRCOND either, it has BR_CC.




  if (Subtarget->is64Bit()) {

                     Subtarget->usePopc() ? Legal : Expand);

  }

  // ATOMICs.
  // Atomics are supported on SparcV9. 32-bit atomics are also
  // supported by some Leon SparcV8 variants. Otherwise, atomics
  // are unsupported.
  if (Subtarget->isV9())
  else if (Subtarget->hasLeonCasa())
  else




  // Custom Lower Atomic LOAD/STORE

  if (Subtarget->is64Bit()) {
  }

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  if (!Subtarget->isV9()) {
    // SparcV8 does not have FNEGD and FABSD.
  }



  // Expands to [SU]MUL_LOHI.

  if (Subtarget->useSoftMulDiv()) {
    // .umul works for both signed and unsigned
    setLibcallName(RTLIB::MUL_I32, ".umul");

    setLibcallName(RTLIB::SDIV_I32, ".div");

    setLibcallName(RTLIB::UDIV_I32, ".udiv");
  }

  if (Subtarget->is64Bit()) {


  }

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  // VAARG needs to be lowered to not do unaligned accesses for doubles.


  // Use the default implementation.



                     Subtarget->usePopc() ? Legal : Expand);

  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
  } else {
  }

  if (Subtarget->hasHardQuad()) {
    if (Subtarget->isV9()) {
    } else {
    }

    // Hardware quad but 32-bit: the 64-bit int <-> f128 conversions still go
    // through the _Q_* soft routines.
    if (!Subtarget->is64Bit()) {
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
    }

  } else {
    // Custom legalize f128 operations.



    // Setup Runtime library names.
    if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
      setLibcallName(RTLIB::ADD_F128, "_Qp_add");
      setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
      setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
      setLibcallName(RTLIB::DIV_F128, "_Qp_div");
      setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
      setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
      setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
      setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
      setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
      setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
      setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
    } else if (!Subtarget->useSoftFloat()) {
      setLibcallName(RTLIB::ADD_F128, "_Q_add");
      setLibcallName(RTLIB::SUB_F128, "_Q_sub");
      setLibcallName(RTLIB::MUL_F128, "_Q_mul");
      setLibcallName(RTLIB::DIV_F128, "_Q_div");
      setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
      setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
      setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
      setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
      setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
      setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
      setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
    }
  }

  if (Subtarget->fixAllFDIVSQRT()) {
    // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
    // the former instructions generate errata on LEON processors.
  }

  if (Subtarget->hasNoFMULS()) {
  }



}
1842 
  // Defer entirely to the subtarget's soft-float setting.
  return Subtarget->useSoftFloat();
}
1846 
1847 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1848  switch ((SPISD::NodeType)Opcode) {
1849  case SPISD::FIRST_NUMBER: break;
1850  case SPISD::CMPICC: return "SPISD::CMPICC";
1851  case SPISD::CMPFCC: return "SPISD::CMPFCC";
1852  case SPISD::BRICC: return "SPISD::BRICC";
1853  case SPISD::BRXCC: return "SPISD::BRXCC";
1854  case SPISD::BRFCC: return "SPISD::BRFCC";
1855  case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1856  case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1857  case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1858  case SPISD::EH_SJLJ_SETJMP: return "SPISD::EH_SJLJ_SETJMP";
1859  case SPISD::EH_SJLJ_LONGJMP: return "SPISD::EH_SJLJ_LONGJMP";
1860  case SPISD::Hi: return "SPISD::Hi";
1861  case SPISD::Lo: return "SPISD::Lo";
1862  case SPISD::FTOI: return "SPISD::FTOI";
1863  case SPISD::ITOF: return "SPISD::ITOF";
1864  case SPISD::FTOX: return "SPISD::FTOX";
1865  case SPISD::XTOF: return "SPISD::XTOF";
1866  case SPISD::CALL: return "SPISD::CALL";
1867  case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
1868  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1869  case SPISD::FLUSHW: return "SPISD::FLUSHW";
1870  case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1871  case SPISD::TLS_LD: return "SPISD::TLS_LD";
1872  case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
1873  }
1874  return nullptr;
1875 }
1876 
                                            EVT VT) const {
  // Scalar setcc results are i32 on SPARC.
  if (!VT.isVector())
    return MVT::i32;
}
1883 
/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
/// be zero. Op is expected to be a target specific node. Used by DAG
/// combiner.
                                         (const SDValue Op,
                                          KnownBits &Known,
                                          const APInt &DemandedElts,
                                          const SelectionDAG &DAG,
                                          unsigned Depth) const {
  KnownBits Known2;
  // Start with nothing known.
  Known.resetAll();

  switch (Op.getOpcode()) {
  default: break;
  case SPISD::SELECT_ICC:
  case SPISD::SELECT_XCC:
  case SPISD::SELECT_FCC:
    // A select may produce either operand 0 or operand 1, so intersect what
    // is known about both.
    DAG.computeKnownBits(Op.getOperand(1), Known, Depth+1);
    DAG.computeKnownBits(Op.getOperand(0), Known2, Depth+1);

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  }
}
1910 
1911 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
1912 // set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
1913 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
1914  ISD::CondCode CC, unsigned &SPCC) {
1915  if (isNullConstant(RHS) &&
1916  CC == ISD::SETNE &&
1917  (((LHS.getOpcode() == SPISD::SELECT_ICC ||
1918  LHS.getOpcode() == SPISD::SELECT_XCC) &&
1919  LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
1920  (LHS.getOpcode() == SPISD::SELECT_FCC &&
1921  LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
1922  isOneConstant(LHS.getOperand(0)) &&
1923  isNullConstant(LHS.getOperand(1))) {
1924  SDValue CMPCC = LHS.getOperand(3);
1925  SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
1926  LHS = CMPCC.getOperand(0);
1927  RHS = CMPCC.getOperand(1);
1928  }
1929 }
1930 
// Convert to a target node and set target flags.
                                          SelectionDAG &DAG) const {
  // Each supported address-like node kind has a matching Target* constructor
  // that carries the target flags TF.
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(),
                                      SDLoc(GA),
                                      GA->getValueType(0),
                                      GA->getOffset(), TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(),
                                     CP->getValueType(0),
                                     CP->getAlignment(),
                                     CP->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                     Op.getValueType(),
                                     0,
                                     TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(),
                                       ES->getValueType(0), TF);

  llvm_unreachable("Unhandled address SDNode");
}
1958 
// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
                                       unsigned HiTF, unsigned LoTF,
                                       SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  // High part of the address, tagged with HiTF.
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
  // Low part, tagged with LoTF; the sum is the full address.
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}
1970 
1971 // Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
1972 // or ExternalSymbol SDNode.
// PIC: form a GOT slot address relative to GLOBAL_BASE_REG and load through
// it. Non-PIC: materialize an absolute address whose width depends on the
// code model (abs32 / abs44 / abs64).
// NOTE(review): several original lines were lost in this extraction — the
// declaration line (1973), the makeHiLoPair calls that define HiLo/H44/L44/
// Hi/Lo (1980-81, 1998-99, 2002-03, 2005, 2011-12, 2014-15), the MFI
// definition (1986) and the load's MachinePointerInfo argument (1989).
// The surviving lines are kept byte-for-byte.
1974  SDLoc DL(Op);
1975  EVT VT = getPointerTy(DAG.getDataLayout());
1976 
1977  // Handle PIC mode first. SPARC needs a got load for every variable!
1978  if (isPositionIndependent()) {
1979  // This is the pic32 code model, the GOT is known to be smaller than 4GB.
1982  SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
1983  SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo);
1984  // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
1985  // function has calls.
1987  MFI.setHasCalls(true);
1988  return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
1990  }
1991 
1992  // This is one of the absolute code models.
1993  switch(getTargetMachine().getCodeModel()) {
1994  default:
1995  llvm_unreachable("Unsupported absolute code model");
1996  case CodeModel::Small:
1997  // abs32.
2000  case CodeModel::Medium: {
2001  // abs44.
// abs44: a 22-bit high part shifted left by 12, plus a low part.
2004  H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2006  L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2007  return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2008  }
2009  case CodeModel::Large: {
2010  // abs64.
// abs64: upper 32 bits shifted into place, then added to the lower 32.
2013  Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2016  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2017  }
2018  }
2019 }
2020 
// Thin wrapper: lowering of this address kind delegates entirely to
// makeAddress. NOTE(review): the declaration line (original 2021) was lost;
// by position this is presumably LowerGlobalAddress — confirm upstream.
2022  SelectionDAG &DAG) const {
2023  return makeAddress(Op, DAG);
2024 }
2025 
// Thin wrapper: delegates to makeAddress. NOTE(review): declaration line
// (original 2026) lost; by position presumably LowerConstantPool.
2027  SelectionDAG &DAG) const {
2028  return makeAddress(Op, DAG);
2029 }
2030 
// Thin wrapper: delegates to makeAddress. NOTE(review): declaration line
// (original 2031) lost; by position presumably LowerBlockAddress.
2032  SelectionDAG &DAG) const {
2033  return makeAddress(Op, DAG);
2034 }
2035 
// Lower a thread-local global address for the four TLS models:
//  - General/Local Dynamic: build the TLS argument, call __tls_get_addr,
//    and (Local Dynamic only) add the module-relative Hi/Lo offset.
//  - Initial Exec: load the TP-relative offset through the GOT and add %g7.
//  - Local Exec: materialize the offset directly and add %g7 (thread ptr).
// NOTE(review): this extraction lost the declaration line (2036), the TLS
// model query defining 'model' (2047), the SparcMCExpr flag selections for
// HiTF/LoTF/addTF/callTF/ldTF (2051-2061, 2108), the getCallPreservedMask
// argument line (2079), several withTargetFlags operand lines
// (2098, 2100, 2103, 2118-2119, 2127, 2132, 2134), the MFI definition (2114)
// and the load defining Offset (2121). Surviving lines kept byte-for-byte.
2037  SelectionDAG &DAG) const {
2038 
2039  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
// Emulated TLS (e.g. for targets without native TLS) goes through the
// generic __emutls machinery instead.
2040  if (DAG.getTarget().Options.EmulatedTLS)
2041  return LowerToTLSEmulatedModel(GA, DAG);
2042 
2043  SDLoc DL(GA);
2044  const GlobalValue *GV = GA->getGlobal();
2045  EVT PtrVT = getPointerTy(DAG.getDataLayout());
2046 
2048 
2049  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2050  unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2053  unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2056  unsigned addTF = ((model == TLSModel::GeneralDynamic)
2059  unsigned callTF = ((model == TLSModel::GeneralDynamic)
2062 
2063  SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2064  SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2065  SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2066  withTargetFlags(Op, addTF, DAG));
2067 
// Hand-built call sequence to __tls_get_addr: the argument goes in %o0
// and the result comes back in %o0.
2068  SDValue Chain = DAG.getEntryNode();
2069  SDValue InFlag;
2070 
2071  Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
2072  Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
2073  InFlag = Chain.getValue(1);
2074  SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2075  SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2076 
2077  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2078  const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2080  assert(Mask && "Missing call preserved mask for calling convention");
2081  SDValue Ops[] = {Chain,
2082  Callee,
2083  Symbol,
2084  DAG.getRegister(SP::O0, PtrVT),
2085  DAG.getRegisterMask(Mask),
2086  InFlag};
2087  Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2088  InFlag = Chain.getValue(1);
2089  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
2090  DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
2091  InFlag = Chain.getValue(1);
2092  SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
2093 
2094  if (model != TLSModel::LocalDynamic)
2095  return Ret;
2096 
// Local Dynamic: add the symbol's module-relative offset to the module
// base returned by the call.
2097  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2099  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2101  HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2102  return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2104  }
2105 
2106  if (model == TLSModel::InitialExec) {
2107  unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2109 
2110  SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2111 
2112  // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2113  // function has calls.
2115  MFI.setHasCalls(true);
2116 
2117  SDValue TGA = makeHiLoPair(Op,
2120  SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2122  DL, PtrVT, Ptr,
2123  withTargetFlags(Op, ldTF, DAG));
// %g7 is the SPARC thread pointer register.
2124  return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2125  DAG.getRegister(SP::G7, PtrVT), Offset,
2126  withTargetFlags(Op,
2128  }
2129 
2130  assert(model == TLSModel::LocalExec);
2131  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2133  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2135  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2136 
2137  return DAG.getNode(ISD::ADD, DL, PtrVT,
2138  DAG.getRegister(SP::G7, PtrVT), Offset);
2139 }
2140 
// Append one argument for an f128 soft-float libcall. Non-f128 values are
// passed directly; f128 values are spilled to a 16-byte stack slot and
// passed by pointer (the _Q_*/_Qp_* ABI takes long doubles indirectly).
// Returns the possibly-updated chain.
// NOTE(review): the declaration line(s) (original 2141-2142,
// LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, SDValue Arg, ...))
// and the MachineFrameInfo definition for MFI (2145) were lost in this
// extraction.
2143  const SDLoc &DL,
2144  SelectionDAG &DAG) const {
2146  EVT ArgVT = Arg.getValueType();
2147  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2148 
2149  ArgListEntry Entry;
2150  Entry.Node = Arg;
2151  Entry.Ty = ArgTy;
2152 
2153  if (ArgTy->isFP128Ty()) {
2154  // Create a stack object and pass the pointer to the library function.
2155  int FI = MFI.CreateStackObject(16, 8, false);
2156  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2157  Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
2158  /* Alignment = */ 8);
2159 
// Replace the by-value f128 entry with the pointer to its stack copy.
2160  Entry.Node = FIPtr;
2161  Entry.Ty = PointerType::getUnqual(ArgTy);
2162  }
2163  Args.push_back(Entry);
2164  return Chain;
2165 }
2166 
2167 SDValue
// Lower an f128 operation to a call to the named soft-float library
// function, marshalling up to numArgs operands via LowerF128_LibCallArg.
// An f128 result is returned indirectly: a 16-byte stack slot is passed
// (as sret on 32-bit) and loaded back after the call.
// NOTE(review): this extraction lost the rest of the declaration (original
// 2168, ...LowerF128Op(SDValue Op, SelectionDAG &DAG, ...), the
// MachineFrameInfo definition for MFI (2174), and the CallLoweringInfo
// declaration for CLI (2201).
2169  const char *LibFuncName,
2170  unsigned numArgs) const {
2171 
2172  ArgListTy Args;
2173 
2175  auto PtrVT = getPointerTy(DAG.getDataLayout());
2176 
2177  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2178  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2179  Type *RetTyABI = RetTy;
2180  SDValue Chain = DAG.getEntryNode();
2181  SDValue RetPtr;
2182 
2183  if (RetTy->isFP128Ty()) {
2184  // Create a Stack Object to receive the return value of type f128.
2185  ArgListEntry Entry;
2186  int RetFI = MFI.CreateStackObject(16, 8, false);
2187  RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2188  Entry.Node = RetPtr;
2189  Entry.Ty = PointerType::getUnqual(RetTy);
// Only the 32-bit ABI marks the result pointer as sret.
2190  if (!Subtarget->is64Bit())
2191  Entry.IsSRet = true;
2192  Entry.IsReturned = false;
2193  Args.push_back(Entry);
// The call itself then returns void; the value is read back from RetPtr.
2194  RetTyABI = Type::getVoidTy(*DAG.getContext());
2195  }
2196 
2197  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2198  for (unsigned i = 0, e = numArgs; i != e; ++i) {
2199  Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2200  }
2202  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2203  .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2204 
2205  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2206 
2207  // chain is in second result.
2208  if (RetTyABI == RetTy)
2209  return CallInfo.first;
2210 
2211  assert (RetTy->isFP128Ty() && "Unexpected return type!");
2212 
2213  Chain = CallInfo.second;
2214 
2215  // Load RetPtr to get the return value.
2216  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2217  MachinePointerInfo(), /* Alignment = */ 8);
2218 }
2219 
// Lower an f128 comparison to a soft-float libcall and rewrite SPCC to an
// integer condition on the call's result. Ordinary orderings use the
// dedicated _Q(p)_f* predicates (nonzero == true); the unordered-aware
// conditions go through the general _Q(p)_cmp and decode its result below
// (presumably 0=eq, 1=lt, 2=gt, 3=unordered — confirm against the SPARC
// soft-float library documentation).
// NOTE(review): the declaration line (original 2220,
// LowerF128Compare(SDValue LHS, SDValue RHS, ...) and the CallLoweringInfo
// declaration for CLI (2252) were lost in this extraction.
2221  unsigned &SPCC, const SDLoc &DL,
2222  SelectionDAG &DAG) const {
2223 
2224  const char *LibCall = nullptr;
2225  bool is64Bit = Subtarget->is64Bit();
2226  switch(SPCC) {
2227  default: llvm_unreachable("Unhandled conditional code!");
2228  case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2229  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2230  case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2231  case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2232  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2233  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2234  case SPCC::FCC_UL :
2235  case SPCC::FCC_ULE:
2236  case SPCC::FCC_UG :
2237  case SPCC::FCC_UGE:
2238  case SPCC::FCC_U :
2239  case SPCC::FCC_O :
2240  case SPCC::FCC_LG :
2241  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2242  }
2243 
2244  auto PtrVT = getPointerTy(DAG.getDataLayout());
2245  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2246  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2247  ArgListTy Args;
2248  SDValue Chain = DAG.getEntryNode();
2249  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2250  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2251 
2253  CLI.setDebugLoc(DL).setChain(Chain)
2254  .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2255 
2256  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2257 
2258  // result is in first, and chain is in second result.
2259  SDValue Result = CallInfo.first;
2260 
// Translate each original FP condition into an integer compare against the
// libcall result, updating SPCC to the integer condition to branch on.
2261  switch(SPCC) {
2262  default: {
// Predicate calls (_Q_f*): true iff the result is nonzero.
2263  SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2264  SPCC = SPCC::ICC_NE;
2265  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2266  }
2267  case SPCC::FCC_UL : {
2268  SDValue Mask = DAG.getTargetConstant(1, DL, Result.getValueType());
2269  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2270  SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2271  SPCC = SPCC::ICC_NE;
2272  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2273  }
2274  case SPCC::FCC_ULE: {
2275  SDValue RHS = DAG.getTargetConstant(2, DL, Result.getValueType());
2276  SPCC = SPCC::ICC_NE;
2277  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2278  }
2279  case SPCC::FCC_UG : {
2280  SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
2281  SPCC = SPCC::ICC_G;
2282  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2283  }
2284  case SPCC::FCC_UGE: {
2285  SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
2286  SPCC = SPCC::ICC_NE;
2287  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2288  }
2289 
2290  case SPCC::FCC_U : {
2291  SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
2292  SPCC = SPCC::ICC_E;
2293  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2294  }
2295  case SPCC::FCC_O : {
2296  SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
2297  SPCC = SPCC::ICC_NE;
2298  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2299  }
2300  case SPCC::FCC_LG : {
2301  SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
2302  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2303  SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2304  SPCC = SPCC::ICC_NE;
2305  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2306  }
2307  case SPCC::FCC_UE : {
2308  SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
2309  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2310  SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2311  SPCC = SPCC::ICC_E;
2312  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2313  }
2314  }
2315 }
2316 
2317 static SDValue
// Lower an fpextend with an f128 result to the matching soft-float
// extension libcall (f64->f128 or f32->f128).
// NOTE(review): the rest of the declaration (original line 2318,
// LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, ...) was lost.
2319  const SparcTargetLowering &TLI) {
2320 
2321  if (Op.getOperand(0).getValueType() == MVT::f64)
2322  return TLI.LowerF128Op(Op, DAG,
2323  TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2324 
2325  if (Op.getOperand(0).getValueType() == MVT::f32)
2326  return TLI.LowerF128Op(Op, DAG,
2327  TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2328 
2329  llvm_unreachable("fpextend with non-float operand!");
2330  return SDValue();
2331 }
2332 
2333 static SDValue
// Lower an fpround from f128 to the matching soft-float truncation libcall
// (f128->f64 or f128->f32); non-f128 sources are already legal.
// NOTE(review): the rest of the declaration (original line 2334,
// LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, ...) was lost.
2335  const SparcTargetLowering &TLI) {
2336  // FP_ROUND on f64 and f32 are legal.
2337  if (Op.getOperand(0).getValueType() != MVT::f128)
2338  return Op;
2339 
2340  if (Op.getValueType() == MVT::f64)
2341  return TLI.LowerF128Op(Op, DAG,
2342  TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2343  if (Op.getValueType() == MVT::f32)
2344  return TLI.LowerF128Op(Op, DAG,
2345  TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2346 
2347  llvm_unreachable("fpround to non-float!");
2348  return SDValue();
2349 }
2350 
// Lower FP_TO_SINT: f128 sources without usable hard-quad support become
// libcalls; otherwise convert in an FP register via FTOI/FTOX and bitcast
// the FP register bits to the integer result.
// NOTE(review): the declaration line (original 2351, presumably
// static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, ...) was
// lost in this extraction.
2352  const SparcTargetLowering &TLI,
2353  bool hasHardQuad) {
2354  SDLoc dl(Op);
2355  EVT VT = Op.getValueType();
2356  assert(VT == MVT::i32 || VT == MVT::i64);
2357 
2358  // Expand f128 operations to fp128 abi calls.
2359  if (Op.getOperand(0).getValueType() == MVT::f128
2360  && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2361  const char *libName = TLI.getLibcallName(VT == MVT::i32
2362  ? RTLIB::FPTOSINT_F128_I32
2363  : RTLIB::FPTOSINT_F128_I64);
2364  return TLI.LowerF128Op(Op, DAG, libName, 1);
2365  }
2366 
2367  // Expand if the resulting type is illegal.
2368  if (!TLI.isTypeLegal(VT))
2369  return SDValue();
2370 
2371  // Otherwise, Convert the fp value to integer in an FP register.
2372  if (VT == MVT::i32)
2373  Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2374  else
2375  Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2376 
// The conversion result lives in an FP register; reinterpret its bits.
2377  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2378 }
2379 
// Lower SINT_TO_FP: f128 results without usable hard-quad support become
// libcalls; otherwise move the integer bits into an FP register via BITCAST
// and convert with ITOF/XTOF.
// NOTE(review): the declaration line (original 2380, presumably
// static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG, ...) was
// lost in this extraction.
2381  const SparcTargetLowering &TLI,
2382  bool hasHardQuad) {
2383  SDLoc dl(Op);
2384  EVT OpVT = Op.getOperand(0).getValueType();
2385  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2386 
2387  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2388 
2389  // Expand f128 operations to fp128 ABI calls.
2390  if (Op.getValueType() == MVT::f128
2391  && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2392  const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2393  ? RTLIB::SINTTOFP_I32_F128
2394  : RTLIB::SINTTOFP_I64_F128);
2395  return TLI.LowerF128Op(Op, DAG, libName, 1);
2396  }
2397 
2398  // Expand if the operand type is illegal.
2399  if (!TLI.isTypeLegal(OpVT))
2400  return SDValue();
2401 
2402  // Otherwise, Convert the int value to FP in an FP register.
2403  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2404  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2405  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2406 }
2407 
// Lower FP_TO_UINT: only f128 sources (without usable hard quad) are
// handled here, as libcalls; everything else returns SDValue() so the
// legalizer expands it.
// NOTE(review): the declaration line (original 2408, presumably
// static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG, ...) was
// lost in this extraction.
2409  const SparcTargetLowering &TLI,
2410  bool hasHardQuad) {
2411  SDLoc dl(Op);
2412  EVT VT = Op.getValueType();
2413 
2414  // Expand if it does not involve f128 or the target has support for
2415  // quad floating point instructions and the resulting type is legal.
2416  if (Op.getOperand(0).getValueType() != MVT::f128 ||
2417  (hasHardQuad && TLI.isTypeLegal(VT)))
2418  return SDValue();
2419 
2420  assert(VT == MVT::i32 || VT == MVT::i64);
2421 
2422  return TLI.LowerF128Op(Op, DAG,
2423  TLI.getLibcallName(VT == MVT::i32
2424  ? RTLIB::FPTOUINT_F128_I32
2425  : RTLIB::FPTOUINT_F128_I64),
2426  1);
2427 }
2428 
// Decide legality of int<->fp register moves when the VIS3 extension is
// available: i32<->f32 moves are legal everywhere with VIS3; 64-bit moves
// are legal only on 64-bit subtargets. Returning Op keeps the node as-is
// (legal); returning SDValue() asks the legalizer to expand.
// NOTE(review): the declaration line (original 2429) was lost in this
// extraction, so the function's name and exact parameters are unknown —
// from the body it is presumably a BITCAST-style lowering hook; confirm
// against the upstream source.
2430  SDLoc dl(Op);
2431  EVT SrcVT = Op.getOperand(0).getValueType();
2432 
2433  EVT DstVT = Op.getValueType();
2434 
2435  if (Subtarget->isVIS3()) {
2436  if (DstVT == MVT::f32 && SrcVT == MVT::i32) {
2437  return Op; // Legal
2438  } else if (DstVT == MVT::f64 && SrcVT == MVT::i64) {
2439  return (Subtarget->is64Bit())
2440  ? Op
2441  : SDValue(); // Legal on 64 bit, otherwise Expand
2442  } else if (DstVT == MVT::i64 && SrcVT == MVT::f64) {
2443  return (Subtarget->is64Bit())
2444  ? Op
2445  : SDValue(); // Legal on 64 bit, otherwise Expand
2446  }
2447  }
2448 
2449  // Expand
2450  return SDValue();
2451 }
2452 
// Lower UINT_TO_FP: f128 results become libcalls; a source with a known-zero
// sign bit is converted as signed (ITOF/XTOF); an i32 source on a 64-bit
// subtarget is zero-extended and converted via XTOF. Anything else returns
// SDValue() for expansion.
// NOTE(review): the declaration line (original 2453,
// SparcTargetLowering::LowerUINT_TO_FP(SDValue Op, ...) was lost in this
// extraction.
2454  SelectionDAG &DAG) const {
2455  SDLoc dl(Op);
2456  EVT OpVT = Op.getOperand(0).getValueType();
2457  assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2458 
2459  // Expand f128 operations to fp128 ABI calls.
2460  if (Op.getValueType() == MVT::f128 &&
2461  (!Subtarget->hasHardQuad() || !isTypeLegal(OpVT))) {
2462  return LowerF128Op(Op, DAG,
2463  getLibcallName(OpVT == MVT::i32
2464  ? RTLIB::UINTTOFP_I32_F128
2465  : RTLIB::UINTTOFP_I64_F128),
2466  1);
2467  }
2468 
2469  // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
2470  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
2471  // the optimization here.
2472  if (DAG.SignBitIsZero(Op.getOperand(0))) {
2473 
2474  EVT floatVT = MVT::f32;
2475  unsigned IntToFloatOpcode = SPISD::ITOF;
2476 
2477  if (OpVT == MVT::i64) {
2478  floatVT = MVT::f64;
2479  IntToFloatOpcode = SPISD::XTOF;
2480  }
2481 
2482  // Convert the int value to FP in an FP register.
2483  SDValue FloatTmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2484 
2485  return DAG.getNode(IntToFloatOpcode, dl, Op.getValueType(), FloatTmp);
2486  }
2487 
2488  if (OpVT == MVT::i32 && Subtarget->is64Bit()) {
2489 
// Widen to i64 (zero-extension makes the value non-negative as i64),
// then convert with the 64-bit path.
2490  SDValue Int64Tmp =
2491  DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Op.getOperand(0));
2492 
2493  SDValue Float64Tmp = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Int64Tmp);
2494 
2495  return DAG.getNode(SPISD::XTOF, dl, Op.getValueType(), Float64Tmp);
2496  }
2497 
2498  return SDValue();
2499 }
2500 
// Lower BR_CC to a Sparc compare + conditional-branch pair: CMPICC/BRICC or
// BRXCC for integers, CMPFCC/BRFCC for floats, and an f128 compare libcall
// (then an integer branch) when hard quad is unavailable.
// NOTE(review): the declaration line (original 2501, presumably
// static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, ...) was lost in
// this extraction.
2502  const SparcTargetLowering &TLI,
2503  bool hasHardQuad) {
2504  SDValue Chain = Op.getOperand(0);
2505  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2506  SDValue LHS = Op.getOperand(2);
2507  SDValue RHS = Op.getOperand(3);
2508  SDValue Dest = Op.getOperand(4);
2509  SDLoc dl(Op);
2510  unsigned Opc, SPCC = ~0U;
2511 
2512  // If this is a br_cc of a "setcc", and if the setcc got lowered into
2513  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2514  LookThroughSetCC(LHS, RHS, CC, SPCC);
2515 
2516  // Get the condition flag.
2517  SDValue CompareFlag;
2518  if (LHS.getValueType().isInteger()) {
2519  CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2520  if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2521  // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2522  Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
2523  } else {
2524  if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
// Soft f128: the libcall leaves an integer flag, so branch on icc.
2525  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2526  CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2527  Opc = SPISD::BRICC;
2528  } else {
2529  CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2530  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2531  Opc = SPISD::BRFCC;
2532  }
2533  }
2534  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2535  DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2536 }
2537 
// Lower SELECT_CC to a Sparc compare + SELECT_[IXF]CC pair, with the same
// integer/float/soft-f128 split as LowerBR_CC above.
// NOTE(review): this extraction lost the declaration line (original 2538,
// presumably static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
// ...) and the second half of the ?: choosing the opcode (original 2557,
// presumably "SPISD::SELECT_ICC : SPISD::SELECT_XCC;") — the dangling
// "Opc = ... ?" below reflects that missing line.
2539  const SparcTargetLowering &TLI,
2540  bool hasHardQuad) {
2541  SDValue LHS = Op.getOperand(0);
2542  SDValue RHS = Op.getOperand(1);
2543  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2544  SDValue TrueVal = Op.getOperand(2);
2545  SDValue FalseVal = Op.getOperand(3);
2546  SDLoc dl(Op);
2547  unsigned Opc, SPCC = ~0U;
2548 
2549  // If this is a select_cc of a "setcc", and if the setcc got lowered into
2550  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2551  LookThroughSetCC(LHS, RHS, CC, SPCC);
2552 
2553  SDValue CompareFlag;
2554  if (LHS.getValueType().isInteger()) {
2555  CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2556  Opc = LHS.getValueType() == MVT::i32 ?
2558  if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2559  } else {
2560  if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2561  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2562  CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2563  Opc = SPISD::SELECT_ICC;
2564  } else {
2565  CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2566  Opc = SPISD::SELECT_FCC;
2567  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2568  }
2569  }
2570  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2571  DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2572 }
2573 
// Lower the setjmp half of SjLj exception handling to the target node;
// produces (i32 result, chain).
// NOTE(review): the declaration line (original 2574, presumably
// SparcTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, ...) was lost.
2575  const SparcTargetLowering &TLI) const {
2576  SDLoc DL(Op);
2577  return DAG.getNode(SPISD::EH_SJLJ_SETJMP, DL,
2578  DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), Op.getOperand(1));
2579 
2580 }
2581 
// Lower the longjmp half of SjLj exception handling to the target node;
// chain-only (it never returns normally).
// NOTE(review): the declaration line (original 2582, presumably
// SparcTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, ...) was lost.
2583  const SparcTargetLowering &TLI) const {
2584  SDLoc DL(Op);
2585  return DAG.getNode(SPISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0), Op.getOperand(1));
2586 }
2587 
// Lower vastart: compute the address of the first variadic argument as
// frame pointer (%i6) plus the function's varargs frame offset, and store
// that pointer into the va_list object given by operand 1.
// NOTE(review): this extraction lost the declaration line (original 2588,
// presumably static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
// ...) and the lines defining FuncInfo (2591, presumably
// MF.getInfo<SparcMachineFunctionInfo>()) and setting frame-address-taken
// (2595).
2589  const SparcTargetLowering &TLI) {
2590  MachineFunction &MF = DAG.getMachineFunction();
2592  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2593 
2594  // Need frame address to find the address of VarArgsFrameIndex.
2596 
2597  // vastart just stores the address of the VarArgsFrameIndex slot into the
2598  // memory location argument.
2599  SDLoc DL(Op);
2600  SDValue Offset =
2601  DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2602  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2603  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2604  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2605  MachinePointerInfo(SV));
2606 }
2607 
// Lower va_arg: load the current pointer from the va_list, bump it past the
// argument being fetched, store it back, then load the argument itself.
// NOTE(review): the declaration line (original 2608, presumably
// static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)) was lost.
2609  SDNode *Node = Op.getNode();
2610  EVT VT = Node->getValueType(0);
2611  SDValue InChain = Node->getOperand(0);
2612  SDValue VAListPtr = Node->getOperand(1);
2613  EVT PtrVT = VAListPtr.getValueType();
2614  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2615  SDLoc DL(Node);
2616  SDValue VAList =
2617  DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2618  // Increment the pointer, VAList, to the next vaarg.
2619  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2620  DAG.getIntPtrConstant(VT.getSizeInBits()/8,
2621  DL));
2622  // Store the incremented VAList to the legalized pointer.
2623  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2624  MachinePointerInfo(SV));
2625  // Load the actual argument out of the pointer VAList.
2626  // We can't count on greater alignment than the word size.
2627  return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
2628  std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8)
;
2630 
// Lower dynamic stack allocation: drop %sp by the (padded) size and return
// a pointer just above the register-spill area at the bottom of the new
// frame. Over-aligned allocations are not supported and abort compilation.
// NOTE(review): the declaration line (original 2631, presumably
// static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
// ...) was lost in this extraction.
2632  const SparcSubtarget *Subtarget) {
2633  SDValue Chain = Op.getOperand(0); // Legalize the chain.
2634  SDValue Size = Op.getOperand(1); // Legalize the size.
2635  unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2636  unsigned StackAlign = Subtarget->getFrameLowering()->getStackAlignment();
2637  EVT VT = Size->getValueType(0);
2638  SDLoc dl(Op);
2639 
2640  // TODO: implement over-aligned alloca. (Note: also implies
2641  // supporting support for overaligned function frames + dynamic
2642  // allocations, at all, which currently isn't supported)
2643  if (Align > StackAlign) {
2644  const MachineFunction &MF = DAG.getMachineFunction();
2645  report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
2646  "over-aligned dynamic alloca not supported.");
2647  }
2648 
2649  // The resultant pointer needs to be above the register spill area
2650  // at the bottom of the stack.
2651  unsigned regSpillArea;
2652  if (Subtarget->is64Bit()) {
2653  regSpillArea = 128;
2654  } else {
2655  // On Sparc32, the size of the spill area is 92. Unfortunately,
2656  // that's only 4-byte aligned, not 8-byte aligned (the stack
2657  // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2658  // aligned dynamic allocation, we actually need to add 96 to the
2659  // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2660 
2661  // That also means adding 4 to the size of the allocation --
2662  // before applying the 8-byte rounding. Unfortunately, we the
2663  // value we get here has already had rounding applied. So, we need
2664  // to add 8, instead, wasting a bit more memory.
2665 
2666  // Further, this only actually needs to be done if the required
2667  // alignment is > 4, but, we've lost that info by this point, too,
2668  // so we always apply it.
2669 
2670  // (An alternative approach would be to always reserve 96 bytes
2671  // instead of the required 92, but then we'd waste 4 extra bytes
2672  // in every frame, not just those with dynamic stack allocations)
2673 
2674  // TODO: modify code in SelectionDAGBuilder to make this less sad.
2675 
2676  Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2677  DAG.getConstant(8, dl, VT));
2678  regSpillArea = 96;
2679  }
2680 
2681  unsigned SPReg = SP::O6;
2682  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2683  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2684  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2685 
// On Sparc64 the stack pointer is biased; fold the bias into the offset.
2686  regSpillArea += Subtarget->getStackPointerBias();
2687 
2688  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2689  DAG.getConstant(regSpillArea, dl, VT));
2690  SDValue Ops[2] = { NewVal, Chain };
2691  return DAG.getMergeValues(Ops, dl);
2692 }
2693 
2694 
// Emit a FLUSHW node, which forces all in-flight register windows out to
// their stack save areas; used before walking frames via saved pointers.
// NOTE(review): the declaration line (original 2695, presumably
// static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {) was lost.
2696  SDLoc dl(Op);
2697  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2698  dl, MVT::Other, DAG.getEntryNode());
2699  return Chain;
2700 }
2701 
2702 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
// Compute the frame address 'depth' levels up the call stack. Depth 0 reads
// %i6 directly; deeper frames flush the register windows first and then
// chase the saved frame pointers through the stack. On 64-bit the stack
// bias is added so the result is a usable pointer.
// NOTE(review): the line defining MFI (original 2704, presumably
// MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();) was
// lost in this extraction.
2703  const SparcSubtarget *Subtarget) {
2705  MFI.setFrameAddressIsTaken(true);
2706 
2707  EVT VT = Op.getValueType();
2708  SDLoc dl(Op);
2709  unsigned FrameReg = SP::I6;
2710  unsigned stackBias = Subtarget->getStackPointerBias();
2711 
2712  SDValue FrameAddr;
2713 
2714  if (depth == 0) {
2715  FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
2716  if (Subtarget->is64Bit())
2717  FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2718  DAG.getIntPtrConstant(stackBias, dl));
2719  return FrameAddr;
2720  }
2721 
2722  // flush first to make sure the windowed registers' values are in stack
2723  SDValue Chain = getFLUSHW(Op, DAG);
2724  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2725 
// Offset of the saved frame pointer within a frame's register save area.
2726  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2727 
2728  while (depth--) {
2729  SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2730  DAG.getIntPtrConstant(Offset, dl));
2731  FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2732  }
2733  if (Subtarget->is64Bit())
2734  FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2735  DAG.getIntPtrConstant(stackBias, dl));
2736  return FrameAddr;
2737 }
2738 
2739 
// Lower ISD::FRAMEADDR by delegating to getFRAMEADDR with the requested
// constant depth (operand 0).
// NOTE(review): the declaration line (original 2740, presumably
// static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,) was lost.
2741  const SparcSubtarget *Subtarget) {
2742 
2743  uint64_t depth = Op.getConstantOperandVal(0);
2744 
2745  return getFRAMEADDR(depth, Op, DAG, Subtarget);
2746 
2747 }
2748 
// Lower ISD::RETURNADDR: depth 0 reads the live-in return-address register
// (%i7); deeper frames locate the caller's frame via getFRAMEADDR and load
// the saved return address from its slot in that frame.
// NOTE(review): the declaration line (original 2749, presumably
// static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,) was lost.
2750  const SparcTargetLowering &TLI,
2751  const SparcSubtarget *Subtarget) {
2752  MachineFunction &MF = DAG.getMachineFunction();
2753  MachineFrameInfo &MFI = MF.getFrameInfo();
2754  MFI.setReturnAddressIsTaken(true);
2755 
2756  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
2757  return SDValue();
2758 
2759  EVT VT = Op.getValueType();
2760  SDLoc dl(Op);
2761  uint64_t depth = Op.getConstantOperandVal(0);
2762 
2763  SDValue RetAddr;
2764  if (depth == 0) {
2765  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2766  unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2767  RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2768  return RetAddr;
2769  }
2770 
2771  // Need frame address to find return address of the caller.
2772  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget);
2773 
// Offset of the saved return address within the caller's save area.
2774  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2775  SDValue Ptr = DAG.getNode(ISD::ADD,
2776  dl, VT,
2777  FrameAddr,
2778  DAG.getIntPtrConstant(Offset, dl));
2779  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2780 
2781  return RetAddr;
2782 }
2783 
2784 static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2785  unsigned opcode) {
2786  assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2787  assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2788 
2789  // Lower fneg/fabs on f64 to fneg/fabs on f32.
2790  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2791  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2792 
2793  // Note: in little-endian, the floating-point value is stored in the
2794  // registers are in the opposite order, so the subreg with the sign
2795  // bit is the highest-numbered (odd), rather than the
2796  // lowest-numbered (even).
2797 
2798  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2799  SrcReg64);
2800  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2801  SrcReg64);
2802 
2803  if (DAG.getDataLayout().isLittleEndian())
2804  Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2805  else
2806  Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2807 
2808  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2809  dl, MVT::f64), 0);
2810  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2811  DstReg64, Hi32);
2812  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2813  DstReg64, Lo32);
2814  return DstReg64;
2815 }
2816 
2817 // Lower a f128 load into two f64 loads.
// The two halves are loaded at base and base+8 (alignment capped at 8) and
// reassembled into an f128 via INSERT_SUBREG on sub_even64/sub_odd64; the
// two load chains are merged with a TokenFactor.
// NOTE(review): the declaration line (original 2818, presumably
// static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)) was lost.
2819 {
2820  SDLoc dl(Op);
2821  LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
2822  assert(LdNode && LdNode->getOffset().isUndef()
2823  && "Unexpected node type");
2824 
// Each half is only 8 bytes, so never claim more than 8-byte alignment.
2825  unsigned alignment = LdNode->getAlignment();
2826  if (alignment > 8)
2827  alignment = 8;
2828 
2829  SDValue Hi64 =
2830  DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2831  LdNode->getPointerInfo(), alignment);
2832  EVT addrVT = LdNode->getBasePtr().getValueType();
2833  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2834  LdNode->getBasePtr(),
2835  DAG.getConstant(8, dl, addrVT));
2836  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2837  LdNode->getPointerInfo(), alignment);
2838 
2839  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2840  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2841 
2842  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2843  dl, MVT::f128);
2844  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2845  MVT::f128,
2846  SDValue(InFP128, 0),
2847  Hi64,
2848  SubRegEven);
2849  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2850  MVT::f128,
2851  SDValue(InFP128, 0),
2852  Lo64,
2853  SubRegOdd);
2854  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2855  SDValue(Lo64.getNode(), 1) };
2856  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2857  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2858  return DAG.getMergeValues(Ops, dl);
2859 }
2860 
// Dispatch for custom-lowered loads: only f128 needs splitting (see
// LowerF128Load above); every other load is left untouched.
// NOTE(review): the declaration line (original 2861, presumably
// static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)) was lost.
2862 {
2863  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2864 
2865  EVT MemVT = LdNode->getMemoryVT();
2866  if (MemVT == MVT::f128)
2867  return LowerF128Load(Op, DAG);
2868 
2869  return Op;
2870 }
2871 
2872 // Lower a f128 store into two f64 stores.
// The even/odd f64 sub-registers of the value are extracted and stored at
// base and base+8 (alignment capped at 8); the chains join in a TokenFactor.
// NOTE(review): the declaration line (original 2873, presumably
// static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {) was lost.
2874  SDLoc dl(Op);
2875  StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
2876  assert(StNode && StNode->getOffset().isUndef()
2877  && "Unexpected node type");
2878  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2879  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2880 
2881  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2882  dl,
2883  MVT::f64,
2884  StNode->getValue(),
2885  SubRegEven);
2886  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2887  dl,
2888  MVT::f64,
2889  StNode->getValue(),
2890  SubRegOdd);
2891 
// Each half is only 8 bytes, so never claim more than 8-byte alignment.
2892  unsigned alignment = StNode->getAlignment();
2893  if (alignment > 8)
2894  alignment = 8;
2895 
2896  SDValue OutChains[2];
2897  OutChains[0] =
2898  DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2899  StNode->getBasePtr(), MachinePointerInfo(), alignment);
2900  EVT addrVT = StNode->getBasePtr().getValueType();
2901  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2902  StNode->getBasePtr(),
2903  DAG.getConstant(8, dl, addrVT));
2904  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
2905  MachinePointerInfo(), alignment);
2906  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2907 }
2908 
2910 {
2911  SDLoc dl(Op);
2912  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
2913 
2914  EVT MemVT = St->getMemoryVT();
2915  if (MemVT == MVT::f128)
2916  return LowerF128Store(Op, DAG);
2917 
2918  if (MemVT == MVT::i64) {
2919  // Custom handling for i64 stores: turn it into a bitcast and a
2920  // v2i32 store.
2921  SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
2922  SDValue Chain = DAG.getStore(
2923  St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
2924  St->getAlignment(), St->getMemOperand()->getFlags(), St->getAAInfo());
2925  return Chain;
2926  }
2927 
2928  return SDValue();
2929 }
2930 
2931 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
2932  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
2933  && "invalid opcode");
2934 
2935  SDLoc dl(Op);
2936 
2937  if (Op.getValueType() == MVT::f64)
2938  return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
2939  if (Op.getValueType() != MVT::f128)
2940  return Op;
2941 
2942  // Lower fabs/fneg on f128 to fabs/fneg on f64
2943  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
2944  // (As with LowerF64Op, on little-endian, we need to negate the odd
2945  // subreg)
2946 
2947  SDValue SrcReg128 = Op.getOperand(0);
2948  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
2949  SrcReg128);
2950  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
2951  SrcReg128);
2952 
2953  if (DAG.getDataLayout().isLittleEndian()) {
2954  if (isV9)
2955  Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
2956  else
2957  Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
2958  } else {
2959  if (isV9)
2960  Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
2961  else
2962  Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
2963  }
2964 
2965  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2966  dl, MVT::f128), 0);
2967  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
2968  DstReg128, Hi64);
2969  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
2970  DstReg128, Lo64);
2971  return DstReg128;
2972 }
2973 
2975 
2976  if (Op.getValueType() != MVT::i64)
2977  return Op;
2978 
2979  SDLoc dl(Op);
2980  SDValue Src1 = Op.getOperand(0);
2981  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
2982  SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
2983  DAG.getConstant(32, dl, MVT::i64));
2984  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
2985 
2986  SDValue Src2 = Op.getOperand(1);
2987  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
2988  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
2989  DAG.getConstant(32, dl, MVT::i64));
2990  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
2991 
2992 
2993  bool hasChain = false;
2994  unsigned hiOpc = Op.getOpcode();
2995  switch (Op.getOpcode()) {
2996  default: llvm_unreachable("Invalid opcode");
2997  case ISD::ADDC: hiOpc = ISD::ADDE; break;
2998  case ISD::ADDE: hasChain = true; break;
2999  case ISD::SUBC: hiOpc = ISD::SUBE; break;
3000  case ISD::SUBE: hasChain = true; break;
3001  }
3002  SDValue Lo;
3003  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
3004  if (hasChain) {
3005  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
3006  Op.getOperand(2));
3007  } else {
3008  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
3009  }
3010  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
3011  SDValue Carry = Hi.getValue(1);
3012 
3013  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
3014  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
3015  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
3016  DAG.getConstant(32, dl, MVT::i64));
3017 
3018  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
3019  SDValue Ops[2] = { Dst, Carry };
3020  return DAG.getMergeValues(Ops, dl);
3021 }
3022 
3023 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
3024 // in LegalizeDAG.cpp except the order of arguments to the library function.
3026  const SparcTargetLowering &TLI)
3027 {
3028  unsigned opcode = Op.getOpcode();
3029  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
3030 
3031  bool isSigned = (opcode == ISD::SMULO);
3032  EVT VT = MVT::i64;
3033  EVT WideVT = MVT::i128;
3034  SDLoc dl(Op);
3035  SDValue LHS = Op.getOperand(0);
3036 
3037  if (LHS.getValueType() != VT)
3038  return Op;
3039 
3040  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
3041 
3042  SDValue RHS = Op.getOperand(1);
3043  SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
3044  SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
3045  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
3046 
3047  SDValue MulResult = TLI.makeLibCall(DAG,
3048  RTLIB::MUL_I128, WideVT,
3049  Args, isSigned, dl).first;
3050  SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
3051  MulResult, DAG.getIntPtrConstant(0, dl));
3052  SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
3053  MulResult, DAG.getIntPtrConstant(1, dl));
3054  if (isSigned) {
3055  SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
3056  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
3057  } else {
3058  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
3059  ISD::SETNE);
3060  }
3061  // MulResult is a node with an illegal type. Because such things are not
3062  // generally permitted during this phase of legalization, ensure that
3063  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
3064  // been folded.
3065  assert(MulResult->use_empty() && "Illegally typed node still in use!");
3066 
3067  SDValue Ops[2] = { BottomHalf, TopHalf } ;
3068  return DAG.getMergeValues(Ops, dl);
3069 }
3070 
3072  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
3073  // Expand with a fence.
3074  return SDValue();
3075 
3076  // Monotonic load/stores are legal.
3077  return Op;
3078 }
3079 
3081  SelectionDAG &DAG) const {
3082  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3083  SDLoc dl(Op);
3084  switch (IntNo) {
3085  default: return SDValue(); // Don't custom lower most intrinsics.
3086  case Intrinsic::thread_pointer: {
3087  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3088  return DAG.getRegister(SP::G7, PtrVT);
3089  }
3090  }
3091 }
3092 
3095 
3096  bool hasHardQuad = Subtarget->hasHardQuad();
3097  bool isV9 = Subtarget->isV9();
3098 
3099  switch (Op.getOpcode()) {
3100  default: llvm_unreachable("Should not custom lower this!");
3101 
3102  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3103  Subtarget);
3104  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3105  Subtarget);
3106  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3107  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3108  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3109  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3110  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3111  hasHardQuad);
3112  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3113  hasHardQuad);
3114  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3115  hasHardQuad);
3116  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
3117  case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this,
3118  hasHardQuad);
3119  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
3120  hasHardQuad);
3121  case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG, *this);
3122  case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG, *this);
3123  case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3124  case ISD::VAARG: return LowerVAARG(Op, DAG);
3125  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
3126  Subtarget);
3127 
3128  case ISD::LOAD: return LowerLOAD(Op, DAG);
3129  case ISD::STORE: return LowerSTORE(Op, DAG);
3130  case ISD::FADD: return LowerF128Op(Op, DAG,
3131  getLibcallName(RTLIB::ADD_F128), 2);
3132  case ISD::FSUB: return LowerF128Op(Op, DAG,
3133  getLibcallName(RTLIB::SUB_F128), 2);
3134  case ISD::FMUL: return LowerF128Op(Op, DAG,
3135  getLibcallName(RTLIB::MUL_F128), 2);
3136  case ISD::FDIV: return LowerF128Op(Op, DAG,
3137  getLibcallName(RTLIB::DIV_F128), 2);
3138  case ISD::FSQRT: return LowerF128Op(Op, DAG,
3139  getLibcallName(RTLIB::SQRT_F128),1);
3140  case ISD::FABS:
3141  case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3142  case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3143  case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3144  case ISD::ADDC:
3145  case ISD::ADDE:
3146  case ISD::SUBC:
3147  case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
3148  case ISD::UMULO:
3149  case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
3150  case ISD::ATOMIC_LOAD:
3151  case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3152  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3153  case ISD::BITCAST: return LowerBITCAST(Op, DAG);
3154  }
3155 }
3156 
3159  MachineBasicBlock *BB) const {
3160  switch (MI.getOpcode()) {
3161  default: llvm_unreachable("Unknown SELECT_CC!");
3162  case SP::SELECT_CC_Int_ICC:
3163  case SP::SELECT_CC_FP_ICC:
3164  case SP::SELECT_CC_DFP_ICC:
3165  case SP::SELECT_CC_QFP_ICC:
3166  return expandSelectCC(MI, BB, SP::BCOND);
3167  case SP::SELECT_CC_Int_FCC:
3168  case SP::SELECT_CC_FP_FCC:
3169  case SP::SELECT_CC_DFP_FCC:
3170  case SP::SELECT_CC_QFP_FCC:
3171  return expandSelectCC(MI, BB, SP::FBCOND);
3172  case SP::EH_SJLJ_SETJMP32ri:
3173  case SP::EH_SJLJ_SETJMP32rr:
3174  return emitEHSjLjSetJmp(MI, BB);
3175  case SP::EH_SJLJ_LONGJMP32rr:
3176  case SP::EH_SJLJ_LONGJMP32ri:
3177  return emitEHSjLjLongJmp(MI, BB);
3178 
3179  }
3180 }
3181 
3184  unsigned BROpcode) const {
3185  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3186  DebugLoc dl = MI.getDebugLoc();
3187  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3188 
3189  // To "insert" a SELECT_CC instruction, we actually have to insert the
3190  // triangle control-flow pattern. The incoming instruction knows the
3191  // destination vreg to set, the condition code register to branch on, the
3192  // true/false values to select between, and the condition code for the branch.
3193  //
3194  // We produce the following control flow:
3195  // ThisMBB
3196  // | \
3197  // | IfFalseMBB
3198  // | /
3199  // SinkMBB
3200  const BasicBlock *LLVM_BB = BB->getBasicBlock();
3202 
3203  MachineBasicBlock *ThisMBB = BB;
3204  MachineFunction *F = BB->getParent();
3205  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3206  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3207  F->insert(It, IfFalseMBB);
3208  F->insert(It, SinkMBB);
3209 
3210  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3211  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3212  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3213  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3214 
3215  // Set the new successors for ThisMBB.
3216  ThisMBB->addSuccessor(IfFalseMBB);
3217  ThisMBB->addSuccessor(SinkMBB);
3218 
3219  BuildMI(ThisMBB, dl, TII.get(BROpcode))
3220  .addMBB(SinkMBB)
3221  .addImm(CC);
3222 
3223  // IfFalseMBB just falls through to SinkMBB.
3224  IfFalseMBB->addSuccessor(SinkMBB);
3225 
3226  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3227  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3228  MI.getOperand(0).getReg())
3229  .addReg(MI.getOperand(1).getReg())
3230  .addMBB(ThisMBB)
3231  .addReg(MI.getOperand(2).getReg())
3232  .addMBB(IfFalseMBB);
3233 
3234  MI.eraseFromParent(); // The pseudo instruction is gone now.
3235  return SinkMBB;
3236 }
3237 
3240  MachineBasicBlock *MBB) const {
3241  DebugLoc DL = MI.getDebugLoc();
3242  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
3243 
3244  MachineFunction *MF = MBB->getParent();
3246  MachineInstrBuilder MIB;
3247 
3248  MVT PVT = getPointerTy(MF->getDataLayout());
3249  unsigned RegSize = PVT.getStoreSize();
3250  assert(PVT == MVT::i32 && "Invalid Pointer Size!");
3251 
3252  unsigned Buf = MI.getOperand(0).getReg();
3253  unsigned JmpLoc = MRI.createVirtualRegister(&SP::IntRegsRegClass);
3254 
3255  // TO DO: If we do 64-bit handling, this perhaps should be FLUSHW, not TA 3
3256  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::TRAPri), SP::G0).addImm(3).addImm(SPCC::ICC_A);
3257 
3258  // Instruction to restore FP
3259  const unsigned FP = SP::I6;
3260  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3261  .addReg(FP)
3262  .addReg(Buf)
3263  .addImm(0);
3264 
3265  // Instruction to load jmp location
3266  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3267  .addReg(JmpLoc, RegState::Define)
3268  .addReg(Buf)
3269  .addImm(RegSize);
3270 
3271  // Instruction to restore SP
3272  const unsigned SP = SP::O6;
3273  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3274  .addReg(SP)
3275  .addReg(Buf)
3276  .addImm(2 * RegSize);
3277 
3278  // Instruction to restore I7
3279  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3280  .addReg(SP::I7)
3281  .addReg(Buf, RegState::Kill)
3282  .addImm(3 * RegSize);
3283 
3284  // Jump to JmpLoc
3285  BuildMI(*MBB, MI, DL, TII->get(SP::JMPLrr)).addReg(SP::G0).addReg(JmpLoc, RegState::Kill).addReg(SP::G0);
3286 
3287  MI.eraseFromParent();
3288  return MBB;
3289 }
3290 
3293  MachineBasicBlock *MBB) const {
3294  DebugLoc DL = MI.getDebugLoc();
3295  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
3296  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3297 
3298  MachineFunction *MF = MBB->getParent();
3300  MachineInstrBuilder MIB;
3301 
3302  MVT PVT = getPointerTy(MF->getDataLayout());
3303  unsigned RegSize = PVT.getStoreSize();
3304  assert(PVT == MVT::i32 && "Invalid Pointer Size!");
3305 
3306  unsigned DstReg = MI.getOperand(0).getReg();
3307  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
3308  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
3309  (void)TRI;
3310  unsigned mainDstReg = MRI.createVirtualRegister(RC);
3311  unsigned restoreDstReg = MRI.createVirtualRegister(RC);
3312 
3313  // For v = setjmp(buf), we generate
3314  //
3315  // thisMBB:
3316  // buf[0] = FP
3317  // buf[RegSize] = restoreMBB <-- takes address of restoreMBB
3318  // buf[RegSize * 2] = O6
3319  // buf[RegSize * 3] = I7
3320  // Ensure restoreMBB remains in the relocations list (done using a bn instruction)
3321  // b mainMBB
3322  //
3323  // mainMBB:
3324  // v_main = 0
3325  // b sinkMBB
3326  //
3327  // restoreMBB:
3328  // v_restore = 1
3329  // --fall through--
3330  //
3331  // sinkMBB:
3332  // v = phi(main, restore)
3333 
3334  const BasicBlock *BB = MBB->getBasicBlock();
3335  MachineFunction::iterator It = ++MBB->getIterator();
3336  MachineBasicBlock *thisMBB = MBB;
3337  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
3338  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
3339  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
3340 
3341  MF->insert(It, mainMBB);
3342  MF->insert(It, restoreMBB);
3343  MF->insert(It, sinkMBB);
3344  restoreMBB->setHasAddressTaken();
3345 
3346  // Transfer the remainder of BB and its successor edges to sinkMBB.
3347  sinkMBB->splice(sinkMBB->begin(), MBB,
3348  std::next(MachineBasicBlock::iterator(MI)),
3349  MBB->end());
3350  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
3351 
3352  unsigned LabelReg = MRI.createVirtualRegister(&SP::IntRegsRegClass);
3353  unsigned LabelReg2 = MRI.createVirtualRegister(&SP::IntRegsRegClass);
3354  unsigned BufReg = MI.getOperand(1).getReg();
3355 
3356  // Instruction to store FP
3357  const unsigned FP = SP::I6;
3358  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3359  .addReg(BufReg)
3360  .addImm(0)
3361  .addReg(FP);
3362 
3363  // Instructions to store jmp location
3364  MIB = BuildMI(thisMBB, DL, TII->get(SP::SETHIi))
3365  .addReg(LabelReg, RegState::Define)
3366  .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_HI);
3367 
3368  MIB = BuildMI(thisMBB, DL, TII->get(SP::ORri))
3369  .addReg(LabelReg2, RegState::Define)
3370  .addReg(LabelReg, RegState::Kill)
3371  .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_LO);
3372 
3373  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3374  .addReg(BufReg)
3375  .addImm(RegSize)
3376  .addReg(LabelReg2, RegState::Kill);
3377 
3378  // Instruction to store SP
3379  const unsigned SP = SP::O6;
3380  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3381  .addReg(BufReg)
3382  .addImm(2 * RegSize)
3383  .addReg(SP);
3384 
3385  // Instruction to store I7
3386  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3387  .addReg(BufReg)
3388  .addImm(3 * RegSize)
3389  .addReg(SP::I7);
3390 
3391 
3392  // FIX ME: This next instruction ensures that the restoreMBB block address remains
3393  // valid through optimization passes and serves no other purpose. The ICC_N ensures
3394  // that the branch is never taken. This commented-out code here was an alternative
3395  // attempt to achieve this which brought myriad problems.
3396  //MIB = BuildMI(thisMBB, DL, TII->get(SP::EH_SjLj_Setup)).addMBB(restoreMBB, SparcMCExpr::VK_Sparc_None);
3397  MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
3398  .addMBB(restoreMBB)
3399  .addImm(SPCC::ICC_N);
3400 
3401  MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
3402  .addMBB(mainMBB)
3403  .addImm(SPCC::ICC_A);
3404 
3405  thisMBB->addSuccessor(mainMBB);
3406  thisMBB->addSuccessor(restoreMBB);
3407 
3408 
3409  // mainMBB:
3410  MIB = BuildMI(mainMBB, DL, TII->get(SP::ORrr))
3411  .addReg(mainDstReg, RegState::Define)
3412  .addReg(SP::G0)
3413  .addReg(SP::G0);
3414  MIB = BuildMI(mainMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
3415 
3416  mainMBB->addSuccessor(sinkMBB);
3417 
3418 
3419  // restoreMBB:
3420  MIB = BuildMI(restoreMBB, DL, TII->get(SP::ORri))
3421  .addReg(restoreDstReg, RegState::Define)
3422  .addReg(SP::G0)
3423  .addImm(1);
3424  //MIB = BuildMI(restoreMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
3425  restoreMBB->addSuccessor(sinkMBB);
3426 
3427  // sinkMBB:
3428  MIB = BuildMI(*sinkMBB, sinkMBB->begin(), DL,
3429  TII->get(SP::PHI), DstReg)
3430  .addReg(mainDstReg).addMBB(mainMBB)
3431  .addReg(restoreDstReg).addMBB(restoreMBB);
3432 
3433  MI.eraseFromParent();
3434  return sinkMBB;
3435 }
3436 
3437 //===----------------------------------------------------------------------===//
3438 // Sparc Inline Assembly Support
3439 //===----------------------------------------------------------------------===//
3440 
3441 /// getConstraintType - Given a constraint letter, return the type of
3442 /// constraint it is for this target.
3445  if (Constraint.size() == 1) {
3446  switch (Constraint[0]) {
3447  default: break;
3448  case 'r':
3449  case 'f':
3450  case 'e':
3451  return C_RegisterClass;
3452  case 'I': // SIMM13
3453  return C_Other;
3454  }
3455  }
3456 
3457  return TargetLowering::getConstraintType(Constraint);
3458 }
3459 
3462  const char *constraint) const {
3463  ConstraintWeight weight = CW_Invalid;
3464  Value *CallOperandVal = info.CallOperandVal;
3465  // If we don't have a value, we can't do a match,
3466  // but allow it at the lowest weight.
3467  if (!CallOperandVal)
3468  return CW_Default;
3469 
3470  // Look at the constraint type.
3471  switch (*constraint) {
3472  default:
3473  weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3474  break;
3475  case 'I': // SIMM13
3476  if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3477  if (isInt<13>(C->getSExtValue()))
3478  weight = CW_Constant;
3479  }
3480  break;
3481  }
3482  return weight;
3483 }
3484 
3485 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3486 /// vector. If it is invalid, don't add anything to Ops.
3489  std::string &Constraint,
3490  std::vector<SDValue> &Ops,
3491  SelectionDAG &DAG) const {
3492  SDValue Result(nullptr, 0);
3493 
3494  // Only support length 1 constraints for now.
3495  if (Constraint.length() > 1)
3496  return;
3497 
3498  char ConstraintLetter = Constraint[0];
3499  switch (ConstraintLetter) {
3500  default: break;
3501  case 'I':
3502  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3503  if (isInt<13>(C->getSExtValue())) {
3504  Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
3505  Op.getValueType());
3506  break;
3507  }
3508  return;
3509  }
3510  }
3511 
3512  if (Result.getNode()) {
3513  Ops.push_back(Result);
3514  return;
3515  }
3516  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3517 }
3518 
3519 std::pair<unsigned, const TargetRegisterClass *>
3521  StringRef Constraint,
3522  MVT VT) const {
3523  if (Constraint.size() == 1) {
3524  switch (Constraint[0]) {
3525  case 'r':
3526  if (VT == MVT::v2i32)
3527  return std::make_pair(0U, &SP::IntPairRegClass);
3528  else
3529  return std::make_pair(0U, &SP::IntRegsRegClass);
3530  case 'f':
3531  if (VT == MVT::f32)
3532  return std::make_pair(0U, &SP::FPRegsRegClass);
3533  else if (VT == MVT::f64)
3534  return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3535  else if (VT == MVT::f128)
3536  return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3537  llvm_unreachable("Unknown ValueType for f-register-type!");
3538  break;
3539  case 'e':
3540  if (VT == MVT::f32)
3541  return std::make_pair(0U, &SP::FPRegsRegClass);
3542  else if (VT == MVT::f64)
3543  return std::make_pair(0U, &SP::DFPRegsRegClass);
3544  else if (VT == MVT::f128)
3545  return std::make_pair(0U, &SP::QFPRegsRegClass);
3546  llvm_unreachable("Unknown ValueType for e-register-type!");
3547  break;
3548  }
3549  } else if (!Constraint.empty() && Constraint.size() <= 5
3550  && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
3551  // constraint = '{r<d>}'
3552  // Remove the braces from around the name.
3553  StringRef name(Constraint.data()+1, Constraint.size()-2);
3554  // Handle register aliases:
3555  // r0-r7 -> g0-g7
3556  // r8-r15 -> o0-o7
3557  // r16-r23 -> l0-l7
3558  // r24-r31 -> i0-i7
3559  uint64_t intVal = 0;
3560  if (name.substr(0, 1).equals("r")
3561  && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
3562  const char regTypes[] = { 'g', 'o', 'l', 'i' };
3563  char regType = regTypes[intVal/8];
3564  char regIdx = '0' + (intVal % 8);
3565  char tmp[] = { '{', regType, regIdx, '}', 0 };
3566  std::string newConstraint = std::string(tmp);
3567  return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
3568  VT);
3569  }
3570  }
3571 
3572  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3573 }
3574 
3575 bool
3577  // The Sparc target isn't yet aware of offsets.
3578  return false;
3579 }
3580 
3583  SelectionDAG &DAG) const {
3584 
3585  SDLoc dl(N);
3586 
3587  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3588 
3589  switch (N->getOpcode()) {
3590  default:
3591  llvm_unreachable("Do not know how to custom type legalize this operation!");
3592 
3593  case ISD::FP_TO_SINT:
3594  case ISD::FP_TO_UINT:
3595  // Custom lower only if it involves f128 or i64.
3596  if (N->getOperand(0).getValueType() != MVT::f128
3597  || N->getValueType(0) != MVT::i64)
3598  return;
3599  libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3600  ? RTLIB::FPTOSINT_F128_I64
3601  : RTLIB::FPTOUINT_F128_I64);
3602 
3603  Results.push_back(LowerF128Op(SDValue(N, 0),
3604  DAG,
3605  getLibcallName(libCall),
3606  1));
3607  return;
3608 
3609  case ISD::SINT_TO_FP:
3610  case ISD::UINT_TO_FP:
3611  // Custom lower only if it involves f128 or i64.
3612  if (N->getValueType(0) != MVT::f128
3613  || N->getOperand(0).getValueType() != MVT::i64)
3614  return;
3615 
3616  libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3617  ? RTLIB::SINTTOFP_I64_F128
3618  : RTLIB::UINTTOFP_I64_F128);
3619 
3620  Results.push_back(LowerF128Op(SDValue(N, 0),
3621  DAG,
3622  getLibcallName(libCall),
3623  1));
3624  return;
3625  case ISD::LOAD: {
3626  LoadSDNode *Ld = cast<LoadSDNode>(N);
3627  // Custom handling only for i64: turn i64 load into a v2i32 load,
3628  // and a bitcast.
3629  if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3630  return;
3631 
3632  SDLoc dl(N);
3633  SDValue LoadRes = DAG.getExtLoad(
3634  Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3635  Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getAlignment(),
3636  Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
3637 
3638  SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3639  Results.push_back(Res);
3640  Results.push_back(LoadRes.getValue(1));
3641  return;
3642  }
3643  }
3644 }
3645 
3646 // Override to enable LOAD_STACK_GUARD lowering on Linux.
3648  if (!Subtarget->isTargetLinux())
3650  return true;
3651 }
3652 
3653 // Override to disable global variable loading on Linux.
3655  if (!Subtarget->isTargetLinux())
3657 }
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
unsigned getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
void setFrameAddressIsTaken(bool T)
uint64_t CallInst * C
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:545
X = FP_ROUND(Y, TRUNC) - Rounding &#39;Y&#39; from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:512
static MVT getIntegerVT(unsigned BitWidth)
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:834
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:109
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
EVT getValueType() const
Return the ValueType of the referenced return value.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
bool usePopc() const
const SDValue & getOffset() const
bool isUndef() const
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
const GlobalValue * getGlobal() const
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
Definition: ISDOpcodes.h:184
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
DiagnosticInfoOptimizationBase::Argument NV
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC&#39;s if you just have an ISD::CondCode instead of an SD...
Definition: SelectionDAG.h:898
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it&#39;s not CSE&#39;d)...
Definition: SelectionDAG.h:814
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC)
IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC condition.
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:617
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:115
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:63
unsigned getPointerSize() const
Get the pointer size for this target.
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
const SDValue & getBasePtr() const
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:222
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:268
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
bool isFP128Ty() const
Return true if this is &#39;fp128&#39;.
Definition: Type.h:156
const SDValue & getValue() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:666
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
const SDValue & getChain() const
Function Alias Analysis Results
unsigned getValNo() const
unsigned getAlignment() const
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:262
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
unsigned second
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
const uint32_t * getRTCallPreservedMask(CallingConv::ID CC) const
bool hasHardQuad() const
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
A debug info location.
Definition: DebugLoc.h:34
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:141
F(f)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
Definition: MathExtras.h:677
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:677
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
const SparcInstrInfo * getInstrInfo() const override
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:748
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:404
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:128
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
bool useSoftFloat() const override
SDValue getExternalSymbol(const char *Sym, EVT VT)
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
bool isMemLoc() const
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool needsCustom() const
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic...
Definition: ISDOpcodes.h:114
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:511
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:209
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:433
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
bool isTargetLinux() const
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9)
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
const HexagonInstrInfo * TII
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
Shift and rotation operations.
Definition: ISDOpcodes.h:379
MachineBasicBlock * expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB, unsigned BROpcode) const
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:205
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
BinOp getOperation() const
Definition: Instructions.h:730
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:190
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void addLoc(const CCValAssign &V)
uint64_t getConstantOperandVal(unsigned i) const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
Reg
All possible values of the reg field in the ModR/M byte.
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
Definition: ISDOpcodes.h:777
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:290
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:446
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:388
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:73
unsigned getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
LocInfo getLocInfo() const
bool useSoftFloat() const
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SmallVector< ISD::InputArg, 32 > Ins
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:662
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
unsigned getSizeInBits() const
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
bool fixAllFDIVSQRT() const
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch & Case(const char(&S)[N], const T &Value)
Definition: StringSwitch.h:74
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:385
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:455
SDValue getRegisterMask(const uint32_t *RegMask)
C - The default llvm calling convention, compatible with C.
Definition: CallingConv.h:35
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:389
SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID CC) const override
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:200
SDValue LowerF128Op(SDValue Op, SelectionDAG &DAG, const char *LibFuncName, unsigned numArgs) const
bool useSoftMulDiv() const
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:133
int64_t getStackPointerBias() const
The 64-bit ABI uses biased stack and frame pointers, so the stack frame of the current function is th...
SmallVector< ISD::OutputArg, 32 > Outs
SDValue LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
LowerFormalArguments32 - V8 uses a very simple ABI, where all values are passed in either one or two ...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:216
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
static mvt_range integer_vector_valuetypes()
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:916
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:558
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
bool isStrongerThanMonotonic(AtomicOrdering ao)
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:151
Value * Callee
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Class to represent pointers.
Definition: DerivedTypes.h:467
unsigned getByValSize() const
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
UNDEF - An undefined node.
Definition: ISDOpcodes.h:178
This class is used to represent ISD::STORE nodes.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:498
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:121
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Definition: ISDOpcodes.h:302
TargetInstrInfo - Interface to description of machine instruction set.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const
bool hasNoFMULS() const
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.