LLVM  8.0.0svn
SparcISelLowering.cpp
Go to the documentation of this file.
1 //===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the interfaces that Sparc uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "SparcISelLowering.h"
18 #include "SparcRegisterInfo.h"
19 #include "SparcTargetMachine.h"
20 #include "SparcTargetObjectFile.h"
21 #include "llvm/ADT/StringSwitch.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/Module.h"
33 #include "llvm/Support/KnownBits.h"
34 using namespace llvm;
35 
36 
37 //===----------------------------------------------------------------------===//
38 // Calling Convention Implementation
39 //===----------------------------------------------------------------------===//
40 
41 static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
42  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
43  ISD::ArgFlagsTy &ArgFlags, CCState &State)
44 {
45  assert (ArgFlags.isSRet());
46 
47  // Assign SRet argument.
48  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
49  0,
50  LocVT, LocInfo));
51  return true;
52 }
53 
54 static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
55  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
56  ISD::ArgFlagsTy &ArgFlags, CCState &State)
57 {
58  static const MCPhysReg RegList[] = {
59  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
60  };
61  // Try to get first reg.
62  if (unsigned Reg = State.AllocateReg(RegList)) {
63  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
64  } else {
65  // Assign whole thing in stack.
66  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
67  State.AllocateStack(8,4),
68  LocVT, LocInfo));
69  return true;
70  }
71 
72  // Try to get second reg.
73  if (unsigned Reg = State.AllocateReg(RegList))
74  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
75  else
76  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
77  State.AllocateStack(4,4),
78  LocVT, LocInfo));
79  return true;
80 }
81 
82 static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
83  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
84  ISD::ArgFlagsTy &ArgFlags, CCState &State)
85 {
86  static const MCPhysReg RegList[] = {
87  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
88  };
89 
90  // Try to get first reg.
91  if (unsigned Reg = State.AllocateReg(RegList))
92  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
93  else
94  return false;
95 
96  // Try to get second reg.
97  if (unsigned Reg = State.AllocateReg(RegList))
98  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
99  else
100  return false;
101 
102  return true;
103 }
104 
105 // Allocate a full-sized argument for the 64-bit ABI.
106 static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
107  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
108  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
109  assert((LocVT == MVT::f32 || LocVT == MVT::f128
110  || LocVT.getSizeInBits() == 64) &&
111  "Can't handle non-64 bits locations");
112 
113  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
114  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
115  unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
116  unsigned Offset = State.AllocateStack(size, alignment);
117  unsigned Reg = 0;
118 
119  if (LocVT == MVT::i64 && Offset < 6*8)
120  // Promote integers to %i0-%i5.
121  Reg = SP::I0 + Offset/8;
122  else if (LocVT == MVT::f64 && Offset < 16*8)
123  // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
124  Reg = SP::D0 + Offset/8;
125  else if (LocVT == MVT::f32 && Offset < 16*8)
126  // Promote floats to %f1, %f3, ...
127  Reg = SP::F1 + Offset/4;
128  else if (LocVT == MVT::f128 && Offset < 16*8)
129  // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
130  Reg = SP::Q0 + Offset/16;
131 
132  // Promote to register when possible, otherwise use the stack slot.
133  if (Reg) {
134  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
135  return true;
136  }
137 
138  // This argument goes on the stack in an 8-byte slot.
139  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
140  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
141  if (LocVT == MVT::f32)
142  Offset += 4;
143 
144  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
145  return true;
146 }
147 
148 // Allocate a half-sized argument for the 64-bit ABI.
149 //
150 // This is used when passing { float, int } structs by value in registers.
151 static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
152  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
153  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
154  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
155  unsigned Offset = State.AllocateStack(4, 4);
156 
157  if (LocVT == MVT::f32 && Offset < 16*8) {
158  // Promote floats to %f0-%f31.
159  State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
160  LocVT, LocInfo));
161  return true;
162  }
163 
164  if (LocVT == MVT::i32 && Offset < 6*8) {
165  // Promote integers to %i0-%i5, using half the register.
166  unsigned Reg = SP::I0 + Offset/8;
167  LocVT = MVT::i64;
168  LocInfo = CCValAssign::AExt;
169 
170  // Set the Custom bit if this i32 goes in the high bits of a register.
171  if (Offset % 8 == 0)
172  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
173  LocVT, LocInfo));
174  else
175  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
176  return true;
177  }
178 
179  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
180  return true;
181 }
182 
183 #include "SparcGenCallingConv.inc"
184 
185 // The calling conventions in SparcCallingConv.td are described in terms of the
186 // callee's register window. This function translates registers to the
187 // corresponding caller window %o register.
188 static unsigned toCallerWindow(unsigned Reg) {
189  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
190  "Unexpected enum");
191  if (Reg >= SP::I0 && Reg <= SP::I7)
192  return Reg - SP::I0 + SP::O0;
193  return Reg;
194 }
195 
196 SDValue
198  bool IsVarArg,
200  const SmallVectorImpl<SDValue> &OutVals,
201  const SDLoc &DL, SelectionDAG &DAG) const {
202  if (Subtarget->is64Bit())
203  return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
204  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
205 }
206 
207 SDValue
209  bool IsVarArg,
211  const SmallVectorImpl<SDValue> &OutVals,
212  const SDLoc &DL, SelectionDAG &DAG) const {
214 
215  // CCValAssign - represent the assignment of the return value to locations.
217 
218  // CCState - Info about the registers and stack slot.
219  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
220  *DAG.getContext());
221 
222  // Analyze return values.
223  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
224 
225  SDValue Flag;
226  SmallVector<SDValue, 4> RetOps(1, Chain);
227  // Make room for the return address offset.
228  RetOps.push_back(SDValue());
229 
230  // Copy the result values into the output registers.
231  for (unsigned i = 0, realRVLocIdx = 0;
232  i != RVLocs.size();
233  ++i, ++realRVLocIdx) {
234  CCValAssign &VA = RVLocs[i];
235  assert(VA.isRegLoc() && "Can only return in registers!");
236 
237  SDValue Arg = OutVals[realRVLocIdx];
238 
239  if (VA.needsCustom()) {
240  assert(VA.getLocVT() == MVT::v2i32);
241  // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
242  // happen by default if this wasn't a legal type)
243 
245  Arg,
246  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
248  Arg,
249  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
250 
251  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
252  Flag = Chain.getValue(1);
253  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
254  VA = RVLocs[++i]; // skip ahead to next loc
255  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
256  Flag);
257  } else
258  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
259 
260  // Guarantee that all emitted copies are stuck together with flags.
261  Flag = Chain.getValue(1);
262  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
263  }
264 
265  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
266  // If the function returns a struct, copy the SRetReturnReg to I0
267  if (MF.getFunction().hasStructRetAttr()) {
269  unsigned Reg = SFI->getSRetReturnReg();
270  if (!Reg)
271  llvm_unreachable("sret virtual register not created in the entry block");
272  auto PtrVT = getPointerTy(DAG.getDataLayout());
273  SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
274  Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
275  Flag = Chain.getValue(1);
276  RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
277  RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
278  }
279 
280  RetOps[0] = Chain; // Update chain.
281  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
282 
283  // Add the flag if we have it.
284  if (Flag.getNode())
285  RetOps.push_back(Flag);
286 
287  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
288 }
289 
290 // Lower return values for the 64-bit ABI.
291 // Return values are passed the exactly the same way as function arguments.
292 SDValue
294  bool IsVarArg,
296  const SmallVectorImpl<SDValue> &OutVals,
297  const SDLoc &DL, SelectionDAG &DAG) const {
298  // CCValAssign - represent the assignment of the return value to locations.
300 
301  // CCState - Info about the registers and stack slot.
302  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
303  *DAG.getContext());
304 
305  // Analyze return values.
306  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
307 
308  SDValue Flag;
309  SmallVector<SDValue, 4> RetOps(1, Chain);
310 
311  // The second operand on the return instruction is the return address offset.
312  // The return address is always %i7+8 with the 64-bit ABI.
313  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
314 
315  // Copy the result values into the output registers.
316  for (unsigned i = 0; i != RVLocs.size(); ++i) {
317  CCValAssign &VA = RVLocs[i];
318  assert(VA.isRegLoc() && "Can only return in registers!");
319  SDValue OutVal = OutVals[i];
320 
321  // Integer return values must be sign or zero extended by the callee.
322  switch (VA.getLocInfo()) {
323  case CCValAssign::Full: break;
324  case CCValAssign::SExt:
325  OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
326  break;
327  case CCValAssign::ZExt:
328  OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
329  break;
330  case CCValAssign::AExt:
331  OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
332  break;
333  default:
334  llvm_unreachable("Unknown loc info!");
335  }
336 
337  // The custom bit on an i32 return value indicates that it should be passed
338  // in the high bits of the register.
339  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
340  OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
341  DAG.getConstant(32, DL, MVT::i32));
342 
343  // The next value may go in the low bits of the same register.
344  // Handle both at once.
345  if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
346  SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
347  OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
348  // Skip the next value, it's already done.
349  ++i;
350  }
351  }
352 
353  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
354 
355  // Guarantee that all emitted copies are stuck together with flags.
356  Flag = Chain.getValue(1);
357  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
358  }
359 
360  RetOps[0] = Chain; // Update chain.
361 
362  // Add the flag if we have it.
363  if (Flag.getNode())
364  RetOps.push_back(Flag);
365 
366  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
367 }
368 
370  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
371  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
372  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
373  if (Subtarget->is64Bit())
374  return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
375  DL, DAG, InVals);
376  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
377  DL, DAG, InVals);
378 }
379 
380 /// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
381 /// passed in either one or two GPRs, including FP values. TODO: we should
382 /// pass FP values in FP registers for fastcc functions.
384  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
385  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
386  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
388  MachineRegisterInfo &RegInfo = MF.getRegInfo();
390 
391  // Assign locations to all of the incoming arguments.
393  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
394  *DAG.getContext());
395  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
396 
397  const unsigned StackOffset = 92;
398  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
399 
400  unsigned InIdx = 0;
401  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
402  CCValAssign &VA = ArgLocs[i];
403 
404  if (Ins[InIdx].Flags.isSRet()) {
405  if (InIdx != 0)
406  report_fatal_error("sparc only supports sret on the first parameter");
407  // Get SRet from [%fp+64].
408  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
409  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
410  SDValue Arg =
411  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
412  InVals.push_back(Arg);
413  continue;
414  }
415 
416  if (VA.isRegLoc()) {
417  if (VA.needsCustom()) {
418  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
419 
420  unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
421  MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
422  SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
423 
424  assert(i+1 < e);
425  CCValAssign &NextVA = ArgLocs[++i];
426 
427  SDValue LoVal;
428  if (NextVA.isMemLoc()) {
429  int FrameIdx = MF.getFrameInfo().
430  CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
431  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
432  LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
433  } else {
434  unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
435  &SP::IntRegsRegClass);
436  LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
437  }
438 
439  if (IsLittleEndian)
440  std::swap(LoVal, HiVal);
441 
442  SDValue WholeValue =
443  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
444  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
445  InVals.push_back(WholeValue);
446  continue;
447  }
448  unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
449  MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
450  SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
451  if (VA.getLocVT() == MVT::f32)
452  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
453  else if (VA.getLocVT() != MVT::i32) {
454  Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
455  DAG.getValueType(VA.getLocVT()));
456  Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
457  }
458  InVals.push_back(Arg);
459  continue;
460  }
461 
462  assert(VA.isMemLoc());
463 
464  unsigned Offset = VA.getLocMemOffset()+StackOffset;
465  auto PtrVT = getPointerTy(DAG.getDataLayout());
466 
467  if (VA.needsCustom()) {
468  assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
469  // If it is double-word aligned, just load.
470  if (Offset % 8 == 0) {
471  int FI = MF.getFrameInfo().CreateFixedObject(8,
472  Offset,
473  true);
474  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
475  SDValue Load =
476  DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
477  InVals.push_back(Load);
478  continue;
479  }
480 
481  int FI = MF.getFrameInfo().CreateFixedObject(4,
482  Offset,
483  true);
484  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
485  SDValue HiVal =
486  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
487  int FI2 = MF.getFrameInfo().CreateFixedObject(4,
488  Offset+4,
489  true);
490  SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
491 
492  SDValue LoVal =
493  DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
494 
495  if (IsLittleEndian)
496  std::swap(LoVal, HiVal);
497 
498  SDValue WholeValue =
499  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
500  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
501  InVals.push_back(WholeValue);
502  continue;
503  }
504 
505  int FI = MF.getFrameInfo().CreateFixedObject(4,
506  Offset,
507  true);
508  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
509  SDValue Load ;
510  if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
511  Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
512  } else if (VA.getValVT() == MVT::f128) {
513  report_fatal_error("SPARCv8 does not handle f128 in calls; "
514  "pass indirectly");
515  } else {
516  // We shouldn't see any other value types here.
517  llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
518  }
519  InVals.push_back(Load);
520  }
521 
522  if (MF.getFunction().hasStructRetAttr()) {
523  // Copy the SRet Argument to SRetReturnReg.
525  unsigned Reg = SFI->getSRetReturnReg();
526  if (!Reg) {
527  Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
528  SFI->setSRetReturnReg(Reg);
529  }
530  SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
531  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
532  }
533 
534  // Store remaining ArgRegs to the stack if this is a varargs function.
535  if (isVarArg) {
536  static const MCPhysReg ArgRegs[] = {
537  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
538  };
539  unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
540  const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
541  unsigned ArgOffset = CCInfo.getNextStackOffset();
542  if (NumAllocated == 6)
543  ArgOffset += StackOffset;
544  else {
545  assert(!ArgOffset);
546  ArgOffset = 68+4*NumAllocated;
547  }
548 
549  // Remember the vararg offset for the va_start implementation.
550  FuncInfo->setVarArgsFrameOffset(ArgOffset);
551 
552  std::vector<SDValue> OutChains;
553 
554  for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
555  unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
556  MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
557  SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
558 
559  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
560  true);
561  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
562 
563  OutChains.push_back(
564  DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
565  ArgOffset += 4;
566  }
567 
568  if (!OutChains.empty()) {
569  OutChains.push_back(Chain);
570  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
571  }
572  }
573 
574  return Chain;
575 }
576 
577 // Lower formal arguments for the 64 bit ABI.
579  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
580  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
581  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
583 
584  // Analyze arguments according to CC_Sparc64.
586  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
587  *DAG.getContext());
588  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
589 
590  // The argument array begins at %fp+BIAS+128, after the register save area.
591  const unsigned ArgArea = 128;
592 
593  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
594  CCValAssign &VA = ArgLocs[i];
595  if (VA.isRegLoc()) {
596  // This argument is passed in a register.
597  // All integer register arguments are promoted by the caller to i64.
598 
599  // Create a virtual register for the promoted live-in value.
600  unsigned VReg = MF.addLiveIn(VA.getLocReg(),
601  getRegClassFor(VA.getLocVT()));
602  SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
603 
604  // Get the high bits for i32 struct elements.
605  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
606  Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
607  DAG.getConstant(32, DL, MVT::i32));
608 
609  // The caller promoted the argument, so insert an Assert?ext SDNode so we
610  // won't promote the value again in this function.
611  switch (VA.getLocInfo()) {
612  case CCValAssign::SExt:
613  Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
614  DAG.getValueType(VA.getValVT()));
615  break;
616  case CCValAssign::ZExt:
617  Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
618  DAG.getValueType(VA.getValVT()));
619  break;
620  default:
621  break;
622  }
623 
624  // Truncate the register down to the argument type.
625  if (VA.isExtInLoc())
626  Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
627 
628  InVals.push_back(Arg);
629  continue;
630  }
631 
632  // The registers are exhausted. This argument was passed on the stack.
633  assert(VA.isMemLoc());
634  // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
635  // beginning of the arguments area at %fp+BIAS+128.
636  unsigned Offset = VA.getLocMemOffset() + ArgArea;
637  unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
638  // Adjust offset for extended arguments, SPARC is big-endian.
639  // The caller will have written the full slot with extended bytes, but we
640  // prefer our own extending loads.
641  if (VA.isExtInLoc())
642  Offset += 8 - ValSize;
643  int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
644  InVals.push_back(
645  DAG.getLoad(VA.getValVT(), DL, Chain,
648  }
649 
650  if (!IsVarArg)
651  return Chain;
652 
653  // This function takes variable arguments, some of which may have been passed
654  // in registers %i0-%i5. Variable floating point arguments are never passed
655  // in floating point registers. They go on %i0-%i5 or on the stack like
656  // integer arguments.
657  //
658  // The va_start intrinsic needs to know the offset to the first variable
659  // argument.
660  unsigned ArgOffset = CCInfo.getNextStackOffset();
662  // Skip the 128 bytes of register save area.
663  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
664  Subtarget->getStackPointerBias());
665 
666  // Save the variable arguments that were passed in registers.
667  // The caller is required to reserve stack space for 6 arguments regardless
668  // of how many arguments were actually passed.
669  SmallVector<SDValue, 8> OutChains;
670  for (; ArgOffset < 6*8; ArgOffset += 8) {
671  unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
672  SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
673  int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
674  auto PtrVT = getPointerTy(MF.getDataLayout());
675  OutChains.push_back(
676  DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
678  }
679 
680  if (!OutChains.empty())
681  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
682 
683  return Chain;
684 }
685 
686 SDValue
688  SmallVectorImpl<SDValue> &InVals) const {
689  if (Subtarget->is64Bit())
690  return LowerCall_64(CLI, InVals);
691  return LowerCall_32(CLI, InVals);
692 }
693 
695  ImmutableCallSite CS) {
696  if (CS)
697  return CS.hasFnAttr(Attribute::ReturnsTwice);
698 
699  const Function *CalleeFn = nullptr;
700  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
701  CalleeFn = dyn_cast<Function>(G->getGlobal());
702  } else if (ExternalSymbolSDNode *E =
703  dyn_cast<ExternalSymbolSDNode>(Callee)) {
704  const Function &Fn = DAG.getMachineFunction().getFunction();
705  const Module *M = Fn.getParent();
706  const char *CalleeName = E->getSymbol();
707  CalleeFn = M->getFunction(CalleeName);
708  }
709 
710  if (!CalleeFn)
711  return false;
712  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
713 }
714 
715 // Lower a call for the 32-bit ABI.
716 SDValue
718  SmallVectorImpl<SDValue> &InVals) const {
719  SelectionDAG &DAG = CLI.DAG;
720  SDLoc &dl = CLI.DL;
722  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
724  SDValue Chain = CLI.Chain;
725  SDValue Callee = CLI.Callee;
726  bool &isTailCall = CLI.IsTailCall;
727  CallingConv::ID CallConv = CLI.CallConv;
728  bool isVarArg = CLI.IsVarArg;
729 
730  // Sparc target does not yet support tail call optimization.
731  isTailCall = false;
732 
733  // Analyze operands of the call, assigning locations to each operand.
735  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
736  *DAG.getContext());
737  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
738 
739  // Get the size of the outgoing arguments stack space requirement.
740  unsigned ArgsSize = CCInfo.getNextStackOffset();
741 
742  // Keep stack frames 8-byte aligned.
743  ArgsSize = (ArgsSize+7) & ~7;
744 
746 
747  // Create local copies for byval args.
748  SmallVector<SDValue, 8> ByValArgs;
749  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
750  ISD::ArgFlagsTy Flags = Outs[i].Flags;
751  if (!Flags.isByVal())
752  continue;
753 
754  SDValue Arg = OutVals[i];
755  unsigned Size = Flags.getByValSize();
756  unsigned Align = Flags.getByValAlign();
757 
758  if (Size > 0U) {
759  int FI = MFI.CreateStackObject(Size, Align, false);
760  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
761  SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
762 
763  Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
764  false, // isVolatile,
765  (Size <= 32), // AlwaysInline if size <= 32,
766  false, // isTailCall
768  ByValArgs.push_back(FIPtr);
769  }
770  else {
771  SDValue nullVal;
772  ByValArgs.push_back(nullVal);
773  }
774  }
775 
776  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
777 
779  SmallVector<SDValue, 8> MemOpChains;
780 
781  const unsigned StackOffset = 92;
782  bool hasStructRetAttr = false;
783  unsigned SRetArgSize = 0;
784  // Walk the register/memloc assignments, inserting copies/loads.
785  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
786  i != e;
787  ++i, ++realArgIdx) {
788  CCValAssign &VA = ArgLocs[i];
789  SDValue Arg = OutVals[realArgIdx];
790 
791  ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
792 
793  // Use local copy if it is a byval arg.
794  if (Flags.isByVal()) {
795  Arg = ByValArgs[byvalArgIdx++];
796  if (!Arg) {
797  continue;
798  }
799  }
800 
801  // Promote the value if needed.
802  switch (VA.getLocInfo()) {
803  default: llvm_unreachable("Unknown loc info!");
804  case CCValAssign::Full: break;
805  case CCValAssign::SExt:
806  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
807  break;
808  case CCValAssign::ZExt:
809  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
810  break;
811  case CCValAssign::AExt:
812  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
813  break;
814  case CCValAssign::BCvt:
815  Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
816  break;
817  }
818 
819  if (Flags.isSRet()) {
820  assert(VA.needsCustom());
821  // store SRet argument in %sp+64
822  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
823  SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
824  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
825  MemOpChains.push_back(
826  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
827  hasStructRetAttr = true;
828  // sret only allowed on first argument
829  assert(Outs[realArgIdx].OrigArgIndex == 0);
830  PointerType *Ty = cast<PointerType>(CLI.getArgs()[0].Ty);
831  Type *ElementTy = Ty->getElementType();
832  SRetArgSize = DAG.getDataLayout().getTypeAllocSize(ElementTy);
833  continue;
834  }
835 
836  if (VA.needsCustom()) {
837  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
838 
839  if (VA.isMemLoc()) {
840  unsigned Offset = VA.getLocMemOffset() + StackOffset;
841  // if it is double-word aligned, just store.
842  if (Offset % 8 == 0) {
843  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
844  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
845  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
846  MemOpChains.push_back(
847  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
848  continue;
849  }
850  }
851 
852  if (VA.getLocVT() == MVT::f64) {
853  // Move from the float value from float registers into the
854  // integer registers.
855  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
856  Arg = bitcastConstantFPToInt(C, dl, DAG);
857  else
858  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
859  }
860 
862  Arg,
863  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
865  Arg,
866  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
867 
868  if (VA.isRegLoc()) {
869  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
870  assert(i+1 != e);
871  CCValAssign &NextVA = ArgLocs[++i];
872  if (NextVA.isRegLoc()) {
873  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
874  } else {
875  // Store the second part in stack.
876  unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
877  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
878  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
879  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
880  MemOpChains.push_back(
881  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
882  }
883  } else {
884  unsigned Offset = VA.getLocMemOffset() + StackOffset;
885  // Store the first part.
886  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
887  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
888  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
889  MemOpChains.push_back(
890  DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
891  // Store the second part.
892  PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
893  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
894  MemOpChains.push_back(
895  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
896  }
897  continue;
898  }
899 
900  // Arguments that can be passed on register must be kept at
901  // RegsToPass vector
902  if (VA.isRegLoc()) {
903  if (VA.getLocVT() != MVT::f32) {
904  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
905  continue;
906  }
907  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
908  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
909  continue;
910  }
911 
912  assert(VA.isMemLoc());
913 
914  // Create a store off the stack pointer for this argument.
915  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
916  SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
917  dl);
918  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
919  MemOpChains.push_back(
920  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
921  }
922 
923 
924  // Emit all stores, make sure the occur before any copies into physregs.
925  if (!MemOpChains.empty())
926  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
927 
928  // Build a sequence of copy-to-reg nodes chained together with token
929  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
931  // stuck together.
932  SDValue InFlag;
933  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
934  unsigned Reg = toCallerWindow(RegsToPass[i].first);
935  Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
936  InFlag = Chain.getValue(1);
937  }
938 
939  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
940 
941  // If the callee is a GlobalAddress node (quite common, every direct call is)
942  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
943  // Likewise ExternalSymbol -> TargetExternalSymbol.
945  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
946  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
947  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
948  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
949 
950  // Returns a chain & a flag for retval copy to use
951  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
953  Ops.push_back(Chain);
954  Ops.push_back(Callee);
955  if (hasStructRetAttr)
956  Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
957  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
958  Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
959  RegsToPass[i].second.getValueType()));
960 
961  // Add a register mask operand representing the call-preserved registers.
962  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
963  const uint32_t *Mask =
964  ((hasReturnsTwice)
965  ? TRI->getRTCallPreservedMask(CallConv)
966  : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
967  assert(Mask && "Missing call preserved mask for calling convention");
968  Ops.push_back(DAG.getRegisterMask(Mask));
969 
970  if (InFlag.getNode())
971  Ops.push_back(InFlag);
972 
973  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
974  InFlag = Chain.getValue(1);
975 
976  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
977  DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
978  InFlag = Chain.getValue(1);
979 
980  // Assign locations to each value returned by this call.
982  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
983  *DAG.getContext());
984 
985  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
986 
987  // Copy all of the result registers out of their specified physreg.
988  for (unsigned i = 0; i != RVLocs.size(); ++i) {
989  if (RVLocs[i].getLocVT() == MVT::v2i32) {
990  SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
991  SDValue Lo = DAG.getCopyFromReg(
992  Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
993  Chain = Lo.getValue(1);
994  InFlag = Lo.getValue(2);
995  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
996  DAG.getConstant(0, dl, MVT::i32));
997  SDValue Hi = DAG.getCopyFromReg(
998  Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
999  Chain = Hi.getValue(1);
1000  InFlag = Hi.getValue(2);
1001  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1002  DAG.getConstant(1, dl, MVT::i32));
1003  InVals.push_back(Vec);
1004  } else {
1005  Chain =
1006  DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1007  RVLocs[i].getValVT(), InFlag)
1008  .getValue(1);
1009  InFlag = Chain.getValue(2);
1010  InVals.push_back(Chain.getValue(0));
1011  }
1012  }
1013 
1014  return Chain;
1015 }
1016 
1017 // FIXME? Maybe this could be a TableGen attribute on some registers and
1018 // this table could be generated automatically from RegInfo.
1019 unsigned SparcTargetLowering::getRegisterByName(const char* RegName, EVT VT,
1020  SelectionDAG &DAG) const {
1021  unsigned Reg = StringSwitch<unsigned>(RegName)
1022  .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1023  .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1024  .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1025  .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1026  .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1027  .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1028  .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1029  .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1030  .Default(0);
1031 
1032  if (Reg)
1033  return Reg;
1034 
1035  report_fatal_error("Invalid register name global variable");
1036 }
1037 
1038 // Fixup floating point arguments in the ... part of a varargs call.
1039 //
1040 // The SPARC v9 ABI requires that floating point arguments are treated the same
1041 // as integers when calling a varargs function. This does not apply to the
1042 // fixed arguments that are part of the function's prototype.
1043 //
1044 // This function post-processes a CCValAssign array created by
1045 // AnalyzeCallOperands().
1047  ArrayRef<ISD::OutputArg> Outs) {
1048  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1049  const CCValAssign &VA = ArgLocs[i];
1050  MVT ValTy = VA.getLocVT();
1051  // FIXME: What about f32 arguments? C promotes them to f64 when calling
1052  // varargs functions.
1053  if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1054  continue;
1055  // The fixed arguments to a varargs function still go in FP registers.
1056  if (Outs[VA.getValNo()].IsFixed)
1057  continue;
1058 
1059  // This floating point argument should be reassigned.
1060  CCValAssign NewVA;
1061 
1062  // Determine the offset into the argument array.
1063  unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1064  unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1065  unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1066  assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1067 
1068  if (Offset < 6*8) {
1069  // This argument should go in %i0-%i5.
1070  unsigned IReg = SP::I0 + Offset/8;
1071  if (ValTy == MVT::f64)
1072  // Full register, just bitconvert into i64.
1073  NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
1074  IReg, MVT::i64, CCValAssign::BCvt);
1075  else {
1076  assert(ValTy == MVT::f128 && "Unexpected type!");
1077  // Full register, just bitconvert into i128 -- We will lower this into
1078  // two i64s in LowerCall_64.
1079  NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
1080  IReg, MVT::i128, CCValAssign::BCvt);
1081  }
1082  } else {
1083  // This needs to go to memory, we're out of integer registers.
1084  NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
1085  Offset, VA.getLocVT(), VA.getLocInfo());
1086  }
1087  ArgLocs[i] = NewVA;
1088  }
1089 }
1090 
1091 // Lower a call for the 64-bit ABI.
SDValue
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Sparc target does not yet support tail call optimization.
  CLI.IsTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store =
            DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
        // Load into Reg and Reg+1
        SDValue Hi64 =
            DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
        SDValue Lo64 =
            DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
                                            Hi64));
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
                                            Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }
      RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL,
                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);

  // Build the operands for the call instruction itself.
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                           CLI.CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself. The call node produces (chain, glue).
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CS)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // CopyFromReg previous node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, DL, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}
1349 
1350 //===----------------------------------------------------------------------===//
1351 // TargetLowering Implementation
1352 //===----------------------------------------------------------------------===//
1353 
1355  if (AI->getOperation() == AtomicRMWInst::Xchg &&
1356  AI->getType()->getPrimitiveSizeInBits() == 32)
1357  return AtomicExpansionKind::None; // Uses xchg instruction
1358 
1360 }
1361 
1362 /// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1363 /// condition.
1365  switch (CC) {
1366  default: llvm_unreachable("Unknown integer condition code!");
1367  case ISD::SETEQ: return SPCC::ICC_E;
1368  case ISD::SETNE: return SPCC::ICC_NE;
1369  case ISD::SETLT: return SPCC::ICC_L;
1370  case ISD::SETGT: return SPCC::ICC_G;
1371  case ISD::SETLE: return SPCC::ICC_LE;
1372  case ISD::SETGE: return SPCC::ICC_GE;
1373  case ISD::SETULT: return SPCC::ICC_CS;
1374  case ISD::SETULE: return SPCC::ICC_LEU;
1375  case ISD::SETUGT: return SPCC::ICC_GU;
1376  case ISD::SETUGE: return SPCC::ICC_CC;
1377  }
1378 }
1379 
/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
1383  switch (CC) {
1384  default: llvm_unreachable("Unknown fp condition code!");
1385  case ISD::SETEQ:
1386  case ISD::SETOEQ: return SPCC::FCC_E;
1387  case ISD::SETNE:
1388  case ISD::SETUNE: return SPCC::FCC_NE;
1389  case ISD::SETLT:
1390  case ISD::SETOLT: return SPCC::FCC_L;
1391  case ISD::SETGT:
1392  case ISD::SETOGT: return SPCC::FCC_G;
1393  case ISD::SETLE:
1394  case ISD::SETOLE: return SPCC::FCC_LE;
1395  case ISD::SETGE:
1396  case ISD::SETOGE: return SPCC::FCC_GE;
1397  case ISD::SETULT: return SPCC::FCC_UL;
1398  case ISD::SETULE: return SPCC::FCC_ULE;
1399  case ISD::SETUGT: return SPCC::FCC_UG;
1400  case ISD::SETUGE: return SPCC::FCC_UGE;
1401  case ISD::SETUO: return SPCC::FCC_U;
1402  case ISD::SETO: return SPCC::FCC_O;
1403  case ISD::SETONE: return SPCC::FCC_LG;
1404  case ISD::SETUEQ: return SPCC::FCC_UE;
1405  }
1406 }
1407 
                                         const SparcSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Pointer width follows the target machine's data layout: 32-bit for V8,
  // 64-bit for V9 (8 bits per byte).
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));

  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  // FP register classes are only available when hardware FP is in use.
  if (!Subtarget->useSoftFloat()) {
    addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
    addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
    addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
  }
  if (Subtarget->is64Bit()) {
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
  } else {
    // On 32bit sparc, we define a double-register 32bit register
    // class, as well. This is modeled in LLVM as a 2-vector of i32.
    addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);

    // ...but almost all operations must be expanded, so set that as
    // the default.
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
    }
    // Truncating/extending stores/loads are also not supported.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
    }
    // However, load and store *are* legal.

    // And we need to promote i64 loads/stores into vector load/store

    // Sadly, this doesn't work:
    //    AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
    //    AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
  }

  // Turn FP extload into load/fpextend
  for (MVT VT : MVT::fp_valuetypes()) {
  }

  // Sparc doesn't have i1 sign extending load
  for (MVT VT : MVT::integer_valuetypes())

  // Turn FP truncstore into trunc + store.

  // Custom legalize GlobalAddress nodes into LO/HI parts.

  // Sparc doesn't have sext_inreg, replace them with shl/sra

  // Sparc has no REM or DIVREM operations.

  // ... nor does SparcV9.
  if (Subtarget->is64Bit()) {
  }

  // Custom expand fp<->sint

  // Custom Expand fp<->uint

  // Sparc has no select or setcc: expand to SELECT_CC.

  // Sparc doesn't have BRCOND either, it has BR_CC.

  if (Subtarget->is64Bit()) {
                     Subtarget->usePopc() ? Legal : Expand);
  }

  // ATOMICs.
  // Atomics are supported on SparcV9. 32-bit atomics are also
  // supported by some Leon SparcV8 variants. Otherwise, atomics
  // are unsupported.
  if (Subtarget->isV9())
  else if (Subtarget->hasLeonCasa())
  else

  // Custom Lower Atomic LOAD/STORE

  if (Subtarget->is64Bit()) {
  }

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  if (!Subtarget->isV9()) {
    // SparcV8 does not have FNEGD and FABSD.
  }

  // Expands to [SU]MUL_LOHI.

  if (Subtarget->useSoftMulDiv()) {
    // .umul works for both signed and unsigned
    setLibcallName(RTLIB::MUL_I32, ".umul");

    setLibcallName(RTLIB::SDIV_I32, ".div");

    setLibcallName(RTLIB::UDIV_I32, ".udiv");

    setLibcallName(RTLIB::SREM_I32, ".rem");
    setLibcallName(RTLIB::UREM_I32, ".urem");
  }

  if (Subtarget->is64Bit()) {
  }

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  // VAARG needs to be lowered to not do unaligned accesses for doubles.

  // Use the default implementation.

                     Subtarget->usePopc() ? Legal : Expand);

  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
  } else {
  }

  if (Subtarget->hasHardQuad()) {
    if (Subtarget->isV9()) {
    } else {
    }

    if (!Subtarget->is64Bit()) {
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
    }

  } else {
    // Custom legalize f128 operations.

    // Setup Runtime library names.
    if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
      setLibcallName(RTLIB::ADD_F128, "_Qp_add");
      setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
      setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
      setLibcallName(RTLIB::DIV_F128, "_Qp_div");
      setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
      setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
      setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
      setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
      setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
      setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
      setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
    } else if (!Subtarget->useSoftFloat()) {
      setLibcallName(RTLIB::ADD_F128, "_Q_add");
      setLibcallName(RTLIB::SUB_F128, "_Q_sub");
      setLibcallName(RTLIB::MUL_F128, "_Q_mul");
      setLibcallName(RTLIB::DIV_F128, "_Q_div");
      setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
      setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
      setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
      setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
      setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
      setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
      setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
    }
  }

  if (Subtarget->fixAllFDIVSQRT()) {
    // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
    // the former instructions generate errata on LEON processors.
  }

  if (Subtarget->hasNoFMULS()) {
  }

  // Custom combine bitcast between f64 and v2i32
  if (!Subtarget->is64Bit())

  if (Subtarget->hasLeonCycleCounter())

}
1815 
  // Report whether this subtarget is configured for software floating point.
  return Subtarget->useSoftFloat();
}
1819 
1820 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1821  switch ((SPISD::NodeType)Opcode) {
1822  case SPISD::FIRST_NUMBER: break;
1823  case SPISD::CMPICC: return "SPISD::CMPICC";
1824  case SPISD::CMPFCC: return "SPISD::CMPFCC";
1825  case SPISD::BRICC: return "SPISD::BRICC";
1826  case SPISD::BRXCC: return "SPISD::BRXCC";
1827  case SPISD::BRFCC: return "SPISD::BRFCC";
1828  case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1829  case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1830  case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1831  case SPISD::EH_SJLJ_SETJMP: return "SPISD::EH_SJLJ_SETJMP";
1832  case SPISD::EH_SJLJ_LONGJMP: return "SPISD::EH_SJLJ_LONGJMP";
1833  case SPISD::Hi: return "SPISD::Hi";
1834  case SPISD::Lo: return "SPISD::Lo";
1835  case SPISD::FTOI: return "SPISD::FTOI";
1836  case SPISD::ITOF: return "SPISD::ITOF";
1837  case SPISD::FTOX: return "SPISD::FTOX";
1838  case SPISD::XTOF: return "SPISD::XTOF";
1839  case SPISD::CALL: return "SPISD::CALL";
1840  case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
1841  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1842  case SPISD::FLUSHW: return "SPISD::FLUSHW";
1843  case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1844  case SPISD::TLS_LD: return "SPISD::TLS_LD";
1845  case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
1846  }
1847  return nullptr;
1848 }
1849 
1851  EVT VT) const {
1852  if (!VT.isVector())
1853  return MVT::i32;
1855 }
1856 
/// computeKnownBitsForTargetNode - Determine which bits of Op are known to
/// be zero or one. Op is expected to be a target specific node. Used by DAG
/// combiner.
1861  (const SDValue Op,
1862  KnownBits &Known,
1863  const APInt &DemandedElts,
1864  const SelectionDAG &DAG,
1865  unsigned Depth) const {
1866  KnownBits Known2;
1867  Known.resetAll();
1868 
1869  switch (Op.getOpcode()) {
1870  default: break;
1871  case SPISD::SELECT_ICC:
1872  case SPISD::SELECT_XCC:
1873  case SPISD::SELECT_FCC:
1874  DAG.computeKnownBits(Op.getOperand(1), Known, Depth+1);
1875  DAG.computeKnownBits(Op.getOperand(0), Known2, Depth+1);
1876 
1877  // Only known if known in both the LHS and RHS.
1878  Known.One &= Known2.One;
1879  Known.Zero &= Known2.Zero;
1880  break;
1881  }
1882 }
1883 
1884 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
1885 // set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
1886 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
1887  ISD::CondCode CC, unsigned &SPCC) {
1888  if (isNullConstant(RHS) &&
1889  CC == ISD::SETNE &&
1890  (((LHS.getOpcode() == SPISD::SELECT_ICC ||
1891  LHS.getOpcode() == SPISD::SELECT_XCC) &&
1892  LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
1893  (LHS.getOpcode() == SPISD::SELECT_FCC &&
1894  LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
1895  isOneConstant(LHS.getOperand(0)) &&
1896  isNullConstant(LHS.getOperand(1))) {
1897  SDValue CMPCC = LHS.getOperand(3);
1898  SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
1899  LHS = CMPCC.getOperand(0);
1900  RHS = CMPCC.getOperand(1);
1901  }
1902 }
1903 
1904 // Convert to a target node and set target flags.
1906  SelectionDAG &DAG) const {
1907  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1908  return DAG.getTargetGlobalAddress(GA->getGlobal(),
1909  SDLoc(GA),
1910  GA->getValueType(0),
1911  GA->getOffset(), TF);
1912 
1913  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
1914  return DAG.getTargetConstantPool(CP->getConstVal(),
1915  CP->getValueType(0),
1916  CP->getAlignment(),
1917  CP->getOffset(), TF);
1918 
1919  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
1920  return DAG.getTargetBlockAddress(BA->getBlockAddress(),
1921  Op.getValueType(),
1922  0,
1923  TF);
1924 
1925  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
1926  return DAG.getTargetExternalSymbol(ES->getSymbol(),
1927  ES->getValueType(0), TF);
1928 
1929  llvm_unreachable("Unhandled address SDNode");
1930 }
1931 
1932 // Split Op into high and low parts according to HiTF and LoTF.
1933 // Return an ADD node combining the parts.
1935  unsigned HiTF, unsigned LoTF,
1936  SelectionDAG &DAG) const {
1937  SDLoc DL(Op);
1938  EVT VT = Op.getValueType();
1939  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
1940  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
1941  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
1942 }
1943 
1944 // Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
1945 // or ExternalSymbol SDNode.
  SDLoc DL(Op);
  EVT VT = getPointerTy(DAG.getDataLayout());

  // Handle PIC mode first. SPARC needs a got load for every variable!
  if (isPositionIndependent()) {
    const Module *M = DAG.getMachineFunction().getFunction().getParent();
    PICLevel::Level picLevel = M->getPICLevel();
    SDValue Idx;

    if (picLevel == PICLevel::SmallPIC) {
      // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
      Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
    } else {
      // This is the pic32 code model, the GOT is known to be smaller than 4GB.
    }

    SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MFI.setHasCalls(true);
    // In PIC mode the final address is loaded from the GOT slot.
    return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
  }

  // This is one of the absolute code models.
  switch(getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
    // abs32.
  case CodeModel::Medium: {
    // abs44.
    H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
    L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
    return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
  }
  case CodeModel::Large: {
    // abs64.
    Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
    return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
  }
  }
}
2004 
// Thin wrapper delegating to makeAddress(). NOTE(review): the signature line
// (2005) was dropped by the doxygen extraction — one of the address lowering
// hooks (LowerGlobalAddress / LowerConstantPool / LowerBlockAddress); cannot
// tell which from here.
2006  SelectionDAG &DAG) const {
2007  return makeAddress(Op, DAG);
2008 }
2009 
// Thin wrapper delegating to makeAddress(). NOTE(review): signature line
// (2010) missing from the extraction; this is another of the address
// lowering hooks — confirm the exact name against the LLVM tree.
2011  SelectionDAG &DAG) const {
2012  return makeAddress(Op, DAG);
2013 }
2014 
// Thin wrapper delegating to makeAddress(). NOTE(review): signature line
// (2015) missing from the extraction; this is another of the address
// lowering hooks — confirm the exact name against the LLVM tree.
2016  SelectionDAG &DAG) const {
2017  return makeAddress(Op, DAG);
2018 }
2019 
// Lowers a TLS global address. Four paths are visible below: emulated TLS,
// general/local-dynamic (call to __tls_get_addr), initial-exec (GOT load +
// add to %g7), and local-exec (hi/lo pair added to %g7).
// NOTE(review): doxygen extraction dropped the signature line (2020) and
// many hyperlinked operand lines (2031, 2035-2036, 2038-2039, 2041-2042,
// 2044-2045, 2063, 2082, 2084, 2087, 2092, 2098, 2102-2103, 2105, 2111,
// 2116, 2118) — mostly SparcMCExpr::VK_Sparc_TLS_* flag operands and the
// TLSModel query; the code is incomplete as shown.
2021  SelectionDAG &DAG) const {
2022 
2023  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2024  if (DAG.getTarget().useEmulatedTLS())
2025  return LowerToTLSEmulatedModel(GA, DAG);
2026 
2027  SDLoc DL(GA);
2028  const GlobalValue *GV = GA->getGlobal();
2029  EVT PtrVT = getPointerTy(DAG.getDataLayout());
2030 
2032 
// Dynamic TLS models: materialize the argument, then emit a call to
// __tls_get_addr wrapped in a CALLSEQ_START/CALLSEQ_END pair; the result
// comes back in %o0.
2033  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2034  unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2037  unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2040  unsigned addTF = ((model == TLSModel::GeneralDynamic)
2043  unsigned callTF = ((model == TLSModel::GeneralDynamic)
2046 
2047  SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2048  SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2049  SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2050  withTargetFlags(Op, addTF, DAG));
2051 
2052  SDValue Chain = DAG.getEntryNode();
2053  SDValue InFlag;
2054 
2055  Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
2056  Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
2057  InFlag = Chain.getValue(1);
2058  SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2059  SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2060 
2061  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2062  const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2064  assert(Mask && "Missing call preserved mask for calling convention");
2065  SDValue Ops[] = {Chain,
2066  Callee,
2067  Symbol,
2068  DAG.getRegister(SP::O0, PtrVT),
2069  DAG.getRegisterMask(Mask),
2070  InFlag};
2071  Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2072  InFlag = Chain.getValue(1);
2073  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
2074  DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
2075  InFlag = Chain.getValue(1);
2076  SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
2077 
2078  if (model != TLSModel::LocalDynamic)
2079  return Ret;
2080 
// Local-dynamic: add the symbol's module-relative offset to the value
// returned by __tls_get_addr.
2081  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2083  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2085  HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2086  return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2088  }
2089 
2090  if (model == TLSModel::InitialExec) {
2091  unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2093 
2094  SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2095 
2096  // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2097  // function has calls.
2099  MFI.setHasCalls(true);
2100 
2101  SDValue TGA = makeHiLoPair(Op,
2104  SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2106  DL, PtrVT, Ptr,
2107  withTargetFlags(Op, ldTF, DAG));
2108  return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2109  DAG.getRegister(SP::G7, PtrVT), Offset,
2110  withTargetFlags(Op,
2112  }
2113 
// Local-exec: offset from %g7 is known at link time; no call needed.
2114  assert(model == TLSModel::LocalExec);
2115  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2117  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2119  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2120 
2121  return DAG.getNode(ISD::ADD, DL, PtrVT,
2122  DAG.getRegister(SP::G7, PtrVT), Offset);
2123 }
2124 
// Appends one call argument to Args for an f128 libcall. Non-f128 values are
// passed directly; an f128 value is spilled to a 16-byte stack slot and the
// slot's address is passed instead (the soft-float ABI takes f128 by pointer).
// NOTE(review): the doxygen extraction dropped the signature line (2126) and
// the MachineFrameInfo declaration line (2129); presumably this is
// SparcTargetLowering::LowerF128_LibCallArg — see the call sites in
// LowerF128Op / LowerF128Compare below.
2127  const SDLoc &DL,
2128  SelectionDAG &DAG) const {
2130  EVT ArgVT = Arg.getValueType();
2131  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2132 
2133  ArgListEntry Entry;
2134  Entry.Node = Arg;
2135  Entry.Ty = ArgTy;
2136 
2137  if (ArgTy->isFP128Ty()) {
2138  // Create a stack object and pass the pointer to the library function.
2139  int FI = MFI.CreateStackObject(16, 8, false);
2140  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2141  Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
2142  /* Alignment = */ 8);
2143 
2144  Entry.Node = FIPtr;
2145  Entry.Ty = PointerType::getUnqual(ArgTy);
2146  }
2147  Args.push_back(Entry);
2148  return Chain;
2150 
2151 SDValue
// Emits a call to an f128 soft-float library routine with numArgs operands
// taken from Op. If the result type is f128, a 16-byte stack slot receives
// the value (passed as an sret pointer on 32-bit) and is loaded back after
// the call; otherwise the call's direct result is returned.
// NOTE(review): doxygen extraction dropped the first signature line (2152),
// the MachineFrameInfo declaration (2158), and the CallLoweringInfo
// declaration (2185) — CLI below is the dropped declaration's variable.
2153  const char *LibFuncName,
2154  unsigned numArgs) const {
2155 
2156  ArgListTy Args;
2157 
2159  auto PtrVT = getPointerTy(DAG.getDataLayout());
2160 
2161  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2162  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2163  Type *RetTyABI = RetTy;
2164  SDValue Chain = DAG.getEntryNode();
2165  SDValue RetPtr;
2166 
2167  if (RetTy->isFP128Ty()) {
2168  // Create a Stack Object to receive the return value of type f128.
2169  ArgListEntry Entry;
2170  int RetFI = MFI.CreateStackObject(16, 8, false);
2171  RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2172  Entry.Node = RetPtr;
2173  Entry.Ty = PointerType::getUnqual(RetTy);
2174  if (!Subtarget->is64Bit())
2175  Entry.IsSRet = true;
2176  Entry.IsReturned = false;
2177  Args.push_back(Entry);
2178  RetTyABI = Type::getVoidTy(*DAG.getContext());
2179  }
2180 
2181  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2182  for (unsigned i = 0, e = numArgs; i != e; ++i) {
2183  Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2184  }
2186  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2187  .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2188 
2189  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2190 
2191  // chain is in second result.
2192  if (RetTyABI == RetTy)
2193  return CallInfo.first;
2194 
2195  assert (RetTy->isFP128Ty() && "Unexpected return type!");
2196 
2197  Chain = CallInfo.second;
2198 
2199  // Load RetPtr to get the return value.
2200  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2201  MachinePointerInfo(), /* Alignment = */ 8);
2202 }
2203 
// Lowers an f128 comparison to a call into the soft-float compare library
// (_Q_* on 32-bit, _Qp_* on 64-bit), then rewrites SPCC to an integer
// condition code and returns a CMPICC node testing the libcall's result.
// For the unordered/ordered predicates the single _Q_cmp/_Qp_cmp result is
// masked/compared against the encodings 0..3.
// NOTE(review): doxygen extraction dropped the signature line (2204) and the
// CallLoweringInfo declaration (2236); CLI below refers to that dropped
// declaration.
2205  unsigned &SPCC, const SDLoc &DL,
2206  SelectionDAG &DAG) const {
2207 
2208  const char *LibCall = nullptr;
2209  bool is64Bit = Subtarget->is64Bit();
2210  switch(SPCC) {
2211  default: llvm_unreachable("Unhandled conditional code!");
2212  case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2213  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2214  case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2215  case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2216  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2217  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2218  case SPCC::FCC_UL :
2219  case SPCC::FCC_ULE:
2220  case SPCC::FCC_UG :
2221  case SPCC::FCC_UGE:
2222  case SPCC::FCC_U :
2223  case SPCC::FCC_O :
2224  case SPCC::FCC_LG :
2225  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2226  }
2227 
2228  auto PtrVT = getPointerTy(DAG.getDataLayout());
2229  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2230  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2231  ArgListTy Args;
2232  SDValue Chain = DAG.getEntryNode();
2233  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2234  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2235 
2237  CLI.setDebugLoc(DL).setChain(Chain)
2238  .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2239 
2240  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2241 
2242  // result is in first, and chain is in second result.
2243  SDValue Result = CallInfo.first;
2244 
// Map the libcall result onto an integer compare. The simple predicates
// return a boolean (test != 0); the _Q_cmp-based ones decode the 0..3
// ordering encoding with AND masks and equality tests.
2245  switch(SPCC) {
2246  default: {
2247  SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2248  SPCC = SPCC::ICC_NE;
2249  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2250  }
2251  case SPCC::FCC_UL : {
2252  SDValue Mask = DAG.getTargetConstant(1, DL, Result.getValueType());
2253  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2254  SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2255  SPCC = SPCC::ICC_NE;
2256  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2257  }
2258  case SPCC::FCC_ULE: {
2259  SDValue RHS = DAG.getTargetConstant(2, DL, Result.getValueType());
2260  SPCC = SPCC::ICC_NE;
2261  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2262  }
2263  case SPCC::FCC_UG : {
2264  SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
2265  SPCC = SPCC::ICC_G;
2266  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2267  }
2268  case SPCC::FCC_UGE: {
2269  SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
2270  SPCC = SPCC::ICC_NE;
2271  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2272  }
2273 
2274  case SPCC::FCC_U : {
2275  SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
2276  SPCC = SPCC::ICC_E;
2277  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2278  }
2279  case SPCC::FCC_O : {
2280  SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
2281  SPCC = SPCC::ICC_NE;
2282  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2283  }
2284  case SPCC::FCC_LG : {
2285  SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
2286  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2287  SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2288  SPCC = SPCC::ICC_NE;
2289  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2290  }
2291  case SPCC::FCC_UE : {
2292  SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
2293  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2294  SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2295  SPCC = SPCC::ICC_E;
2296  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2297  }
2298  }
2299 }
2300 
2301 static SDValue
// Lowers FP_EXTEND to f128 as a one-argument soft-float libcall
// (f64->f128 or f32->f128). NOTE(review): the doxygen extraction dropped
// the rest of the signature (line 2302).
2303  const SparcTargetLowering &TLI) {
2304 
2305  if (Op.getOperand(0).getValueType() == MVT::f64)
2306  return TLI.LowerF128Op(Op, DAG,
2307  TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2308 
2309  if (Op.getOperand(0).getValueType() == MVT::f32)
2310  return TLI.LowerF128Op(Op, DAG,
2311  TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2312 
2313  llvm_unreachable("fpextend with non-float operand!");
2314  return SDValue();
2315 }
2316 
2317 static SDValue
// Lowers FP_ROUND from f128 as a one-argument soft-float libcall
// (f128->f64 or f128->f32); rounds between f64/f32 are already legal and
// are returned unchanged. NOTE(review): the doxygen extraction dropped the
// rest of the signature (line 2318).
2319  const SparcTargetLowering &TLI) {
2320  // FP_ROUND on f64 and f32 are legal.
2321  if (Op.getOperand(0).getValueType() != MVT::f128)
2322  return Op;
2323 
2324  if (Op.getValueType() == MVT::f64)
2325  return TLI.LowerF128Op(Op, DAG,
2326  TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2327  if (Op.getValueType() == MVT::f32)
2328  return TLI.LowerF128Op(Op, DAG,
2329  TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2330 
2331  llvm_unreachable("fpround to non-float!");
2332  return SDValue();
2333 }
2334 
// Lowers FP_TO_SINT: f128 sources (when no usable hard-quad support) become
// libcalls; illegal result types are left for generic expansion; otherwise
// the conversion is done in an FP register via SPISD::FTOI/FTOX and
// bitcast to the integer type. NOTE(review): the doxygen extraction dropped
// the signature line (2335), presumably LowerFP_TO_SINT.
2336  const SparcTargetLowering &TLI,
2337  bool hasHardQuad) {
2338  SDLoc dl(Op);
2339  EVT VT = Op.getValueType();
2340  assert(VT == MVT::i32 || VT == MVT::i64);
2341 
2342  // Expand f128 operations to fp128 abi calls.
2343  if (Op.getOperand(0).getValueType() == MVT::f128
2344  && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2345  const char *libName = TLI.getLibcallName(VT == MVT::i32
2346  ? RTLIB::FPTOSINT_F128_I32
2347  : RTLIB::FPTOSINT_F128_I64);
2348  return TLI.LowerF128Op(Op, DAG, libName, 1);
2349  }
2350 
2351  // Expand if the resulting type is illegal.
2352  if (!TLI.isTypeLegal(VT))
2353  return SDValue();
2354 
2355  // Otherwise, Convert the fp value to integer in an FP register.
2356  if (VT == MVT::i32)
2357  Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2358  else
2359  Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2360 
2361  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2362 }
2363 
// Lowers SINT_TO_FP: the mirror of LowerFP_TO_SINT above. f128 results
// (without usable hard-quad) become libcalls; illegal operand types expand
// generically; otherwise the integer is bitcast into an FP register and
// converted with SPISD::ITOF/XTOF. NOTE(review): the doxygen extraction
// dropped the signature line (2364).
2365  const SparcTargetLowering &TLI,
2366  bool hasHardQuad) {
2367  SDLoc dl(Op);
2368  EVT OpVT = Op.getOperand(0).getValueType();
2369  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2370 
2371  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2372 
2373  // Expand f128 operations to fp128 ABI calls.
2374  if (Op.getValueType() == MVT::f128
2375  && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2376  const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2377  ? RTLIB::SINTTOFP_I32_F128
2378  : RTLIB::SINTTOFP_I64_F128);
2379  return TLI.LowerF128Op(Op, DAG, libName, 1);
2380  }
2381 
2382  // Expand if the operand type is illegal.
2383  if (!TLI.isTypeLegal(OpVT))
2384  return SDValue();
2385 
2386  // Otherwise, Convert the int value to FP in an FP register.
2387  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2388  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2389  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2390 }
2391 
// Lowers FP_TO_UINT from f128 as a libcall; everything else (non-f128, or
// hard-quad with a legal result type) is returned as SDValue() so the
// generic legalizer expands it. NOTE(review): the doxygen extraction
// dropped the signature line (2392).
2393  const SparcTargetLowering &TLI,
2394  bool hasHardQuad) {
2395  SDLoc dl(Op);
2396  EVT VT = Op.getValueType();
2397 
2398  // Expand if it does not involve f128 or the target has support for
2399  // quad floating point instructions and the resulting type is legal.
2400  if (Op.getOperand(0).getValueType() != MVT::f128 ||
2401  (hasHardQuad && TLI.isTypeLegal(VT)))
2402  return SDValue();
2403 
2404  assert(VT == MVT::i32 || VT == MVT::i64);
2405 
2406  return TLI.LowerF128Op(Op, DAG,
2407  TLI.getLibcallName(VT == MVT::i32
2408  ? RTLIB::FPTOUINT_F128_I32
2409  : RTLIB::FPTOUINT_F128_I64),
2410  1);
2411 }
2412 
// Lowers UINT_TO_FP to f128 as a libcall; other cases are returned as
// SDValue() for generic expansion (mirror of LowerFP_TO_UINT above).
// NOTE(review): the doxygen extraction dropped the signature line (2413).
2414  const SparcTargetLowering &TLI,
2415  bool hasHardQuad) {
2416  SDLoc dl(Op);
2417  EVT OpVT = Op.getOperand(0).getValueType();
2418  assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2419 
2420  // Expand if it does not involve f128 or the target has support for
2421  // quad floating point instructions and the operand type is legal.
2422  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2423  return SDValue();
2424 
2425  return TLI.LowerF128Op(Op, DAG,
2426  TLI.getLibcallName(OpVT == MVT::i32
2427  ? RTLIB::UINTTOFP_I32_F128
2428  : RTLIB::UINTTOFP_I64_F128),
2429  1);
2430 }
2431 
// Lowers BR_CC into a compare node producing glue plus a BRICC/BRXCC/BRFCC
// branch carrying the Sparc condition code as a constant operand. f128
// compares without hard-quad go through the libcall path in
// LowerF128Compare, which rewrites SPCC to an integer condition.
// NOTE(review): the doxygen extraction dropped the signature line (2432).
2433  const SparcTargetLowering &TLI,
2434  bool hasHardQuad) {
2435  SDValue Chain = Op.getOperand(0);
2436  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2437  SDValue LHS = Op.getOperand(2);
2438  SDValue RHS = Op.getOperand(3);
2439  SDValue Dest = Op.getOperand(4);
2440  SDLoc dl(Op);
2441  unsigned Opc, SPCC = ~0U;
2442 
2443  // If this is a br_cc of a "setcc", and if the setcc got lowered into
2444  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2445  LookThroughSetCC(LHS, RHS, CC, SPCC);
2446 
2447  // Get the condition flag.
2448  SDValue CompareFlag;
2449  if (LHS.getValueType().isInteger()) {
2450  CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2451  if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2452  // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2453  Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
2454  } else {
2455  if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2456  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2457  CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2458  Opc = SPISD::BRICC;
2459  } else {
2460  CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2461  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2462  Opc = SPISD::BRFCC;
2463  }
2464  }
2465  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2466  DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2467 }
2468 
// Lowers SELECT_CC into a compare producing glue plus a SELECT_ICC/FCC node
// carrying the Sparc condition code (same structure as LowerBR_CC above).
// NOTE(review): the doxygen extraction dropped the signature line (2469)
// and line 2488 (the true-arm of the Opc ternary on the integer path).
2470  const SparcTargetLowering &TLI,
2471  bool hasHardQuad) {
2472  SDValue LHS = Op.getOperand(0);
2473  SDValue RHS = Op.getOperand(1);
2474  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2475  SDValue TrueVal = Op.getOperand(2);
2476  SDValue FalseVal = Op.getOperand(3);
2477  SDLoc dl(Op);
2478  unsigned Opc, SPCC = ~0U;
2479 
2480  // If this is a select_cc of a "setcc", and if the setcc got lowered into
2481  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2482  LookThroughSetCC(LHS, RHS, CC, SPCC);
2483 
2484  SDValue CompareFlag;
2485  if (LHS.getValueType().isInteger()) {
2486  CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2487  Opc = LHS.getValueType() == MVT::i32 ?
2489  if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2490  } else {
2491  if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2492  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2493  CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2494  Opc = SPISD::SELECT_ICC;
2495  } else {
2496  CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2497  Opc = SPISD::SELECT_FCC;
2498  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2499  }
2500  }
2501  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2502  DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2503 }
2504 
// Lowers the eh.sjlj.setjmp intrinsic to a target SPISD::EH_SJLJ_SETJMP node
// producing (i32 result, chain). NOTE(review): the doxygen extraction
// dropped the signature line (2505).
2506  const SparcTargetLowering &TLI) const {
2507  SDLoc DL(Op);
2508  return DAG.getNode(SPISD::EH_SJLJ_SETJMP, DL,
2509  DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), Op.getOperand(1));
2510 
2511 }
2512 
// Lowers the eh.sjlj.longjmp intrinsic to a chain-only
// SPISD::EH_SJLJ_LONGJMP node. NOTE(review): the doxygen extraction dropped
// the signature line (2513).
2514  const SparcTargetLowering &TLI) const {
2515  SDLoc DL(Op);
2516  return DAG.getNode(SPISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0), Op.getOperand(1));
2517 }
2518 
// Lowers VASTART: stores the address of the vararg spill area (frame
// pointer %i6 plus the function's vararg frame offset) into the va_list
// slot given by operand 1. NOTE(review): the doxygen extraction dropped the
// signature line (2519), line 2522 (presumably the SparcMachineFunctionInfo
// lookup providing FuncInfo) and line 2526.
2520  const SparcTargetLowering &TLI) {
2521  MachineFunction &MF = DAG.getMachineFunction();
2523  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2524 
2525  // Need frame address to find the address of VarArgsFrameIndex.
2527 
2528  // vastart just stores the address of the VarArgsFrameIndex slot into the
2529  // memory location argument.
2530  SDLoc DL(Op);
2531  SDValue Offset =
2532  DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2533  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2534  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2535  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2536  MachinePointerInfo(SV));
2537 }
2538 
// Lowers VAARG: loads the current va_list pointer, stores it back advanced
// by the argument size, then loads the argument itself — clamping the load
// alignment to the word size. NOTE(review): the doxygen extraction dropped
// the signature line (2539), presumably static SDValue LowerVAARG(...).
2540  SDNode *Node = Op.getNode();
2541  EVT VT = Node->getValueType(0);
2542  SDValue InChain = Node->getOperand(0);
2543  SDValue VAListPtr = Node->getOperand(1);
2544  EVT PtrVT = VAListPtr.getValueType();
2545  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2546  SDLoc DL(Node);
2547  SDValue VAList =
2548  DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2549  // Increment the pointer, VAList, to the next vaarg.
2550  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2551  DAG.getIntPtrConstant(VT.getSizeInBits()/8,
2552  DL));
2553  // Store the incremented VAList to the legalized pointer.
2554  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2555  MachinePointerInfo(SV));
2556  // Load the actual argument out of the pointer VAList.
2557  // We can't count on greater alignment than the word size.
2558  return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
2559  std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8);
2560 }
2561 
// Lowers DYNAMIC_STACKALLOC: bumps %o6 down by the (possibly padded) size
// and returns a pointer above the register spill area at the bottom of the
// frame; over-aligned allocas are rejected with a fatal error.
// NOTE(review): the doxygen extraction dropped the signature line (2562).
2563  const SparcSubtarget *Subtarget) {
2564  SDValue Chain = Op.getOperand(0); // Legalize the chain.
2565  SDValue Size = Op.getOperand(1); // Legalize the size.
2566  unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2567  unsigned StackAlign = Subtarget->getFrameLowering()->getStackAlignment();
2568  EVT VT = Size->getValueType(0);
2569  SDLoc dl(Op);
2570 
2571  // TODO: implement over-aligned alloca. (Note: also implies
2572  // supporting support for overaligned function frames + dynamic
2573  // allocations, at all, which currently isn't supported)
2574  if (Align > StackAlign) {
2575  const MachineFunction &MF = DAG.getMachineFunction();
2576  report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
2577  "over-aligned dynamic alloca not supported.");
2578  }
2579 
2580  // The resultant pointer needs to be above the register spill area
2581  // at the bottom of the stack.
2582  unsigned regSpillArea;
2583  if (Subtarget->is64Bit()) {
2584  regSpillArea = 128;
2585  } else {
2586  // On Sparc32, the size of the spill area is 92. Unfortunately,
2587  // that's only 4-byte aligned, not 8-byte aligned (the stack
2588  // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2589  // aligned dynamic allocation, we actually need to add 96 to the
2590  // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2591 
2592  // That also means adding 4 to the size of the allocation --
2593  // before applying the 8-byte rounding. Unfortunately, we the
2594  // value we get here has already had rounding applied. So, we need
2595  // to add 8, instead, wasting a bit more memory.
2596 
2597  // Further, this only actually needs to be done if the required
2598  // alignment is > 4, but, we've lost that info by this point, too,
2599  // so we always apply it.
2600 
2601  // (An alternative approach would be to always reserve 96 bytes
2602  // instead of the required 92, but then we'd waste 4 extra bytes
2603  // in every frame, not just those with dynamic stack allocations)
2604 
2605  // TODO: modify code in SelectionDAGBuilder to make this less sad.
2606 
2607  Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2608  DAG.getConstant(8, dl, VT));
2609  regSpillArea = 96;
2610  }
2611 
2612  unsigned SPReg = SP::O6;
2613  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2614  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2615  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2616 
// On 64-bit the stack pointer carries a bias that must be added back
// before computing the user-visible pointer.
2617  regSpillArea += Subtarget->getStackPointerBias();
2618 
2619  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2620  DAG.getConstant(regSpillArea, dl, VT));
2621  SDValue Ops[2] = { NewVal, Chain };
2622  return DAG.getMergeValues(Ops, dl);
2623 }
2624 
2625 
// Emits a chain-only SPISD::FLUSHW node (flush register windows to the
// stack). NOTE(review): the doxygen extraction dropped the signature line
// (2626); the getFLUSHW(Op, DAG) call site in getFRAMEADDR below shows the
// expected parameters.
2627  SDLoc dl(Op);
2628  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2629  dl, MVT::Other, DAG.getEntryNode());
2630  return Chain;
2631 }
2632 
2633 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2634  const SparcSubtarget *Subtarget,
2635  bool AlwaysFlush = false) {
// Walks `depth` frames up by loading the saved frame pointer out of each
// frame (offset 56 on Sparc32, biased 112 on Sparc64), flushing register
// windows first when any walking (or AlwaysFlush) is needed.
// NOTE(review): line 2636 (presumably the MachineFrameInfo lookup that MFI
// below refers to) was dropped by the doxygen extraction.
2637  MFI.setFrameAddressIsTaken(true);
2638 
2639  EVT VT = Op.getValueType();
2640  SDLoc dl(Op);
2641  unsigned FrameReg = SP::I6;
2642  unsigned stackBias = Subtarget->getStackPointerBias();
2643 
2644  SDValue FrameAddr;
2645  SDValue Chain;
2646 
2647  // flush first to make sure the windowed registers' values are in stack
2648  Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2649 
2650  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2651 
2652  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2653 
2654  while (depth--) {
2655  SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2656  DAG.getIntPtrConstant(Offset, dl));
2657  FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2658  }
2659  if (Subtarget->is64Bit())
2660  FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2661  DAG.getIntPtrConstant(stackBias, dl));
2662  return FrameAddr;
2663 }
2664 
2665 
// Lowers FRAMEADDR by delegating to getFRAMEADDR with the requested depth.
// NOTE(review): the doxygen extraction dropped the signature line (2666).
2667  const SparcSubtarget *Subtarget) {
2668 
2669  uint64_t depth = Op.getConstantOperandVal(0);
2670 
2671  return getFRAMEADDR(depth, Op, DAG, Subtarget);
2672 
2673 }
2674 
// Lowers RETURNADDR: depth 0 reads %i7 directly as a live-in; deeper frames
// walk up via getFRAMEADDR (forcing a window flush) and load the saved
// return address at offset 60 (Sparc32) / 120 (Sparc64).
// NOTE(review): the doxygen extraction dropped the signature line (2675).
2676  const SparcTargetLowering &TLI,
2677  const SparcSubtarget *Subtarget) {
2678  MachineFunction &MF = DAG.getMachineFunction();
2679  MachineFrameInfo &MFI = MF.getFrameInfo();
2680  MFI.setReturnAddressIsTaken(true);
2681 
2682  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
2683  return SDValue();
2684 
2685  EVT VT = Op.getValueType();
2686  SDLoc dl(Op);
2687  uint64_t depth = Op.getConstantOperandVal(0);
2688 
2689  SDValue RetAddr;
2690  if (depth == 0) {
2691  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2692  unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2693  RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2694  return RetAddr;
2695  }
2696 
2697  // Need frame address to find return address of the caller.
2698  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2699 
2700  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2701  SDValue Ptr = DAG.getNode(ISD::ADD,
2702  dl, VT,
2703  FrameAddr,
2704  DAG.getIntPtrConstant(Offset, dl));
2705  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2706 
2707  return RetAddr;
2708 }
2709 
2710 static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2711  unsigned opcode) {
2712  assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2713  assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2714 
2715  // Lower fneg/fabs on f64 to fneg/fabs on f32.
2716  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2717  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2718 
2719  // Note: in little-endian, the floating-point value is stored in the
2720  // registers are in the opposite order, so the subreg with the sign
2721  // bit is the highest-numbered (odd), rather than the
2722  // lowest-numbered (even).
2723 
2724  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2725  SrcReg64);
2726  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2727  SrcReg64);
2728 
2729  if (DAG.getDataLayout().isLittleEndian())
2730  Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2731  else
2732  Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2733 
2734  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2735  dl, MVT::f64), 0);
2736  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2737  DstReg64, Hi32);
2738  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2739  DstReg64, Lo32);
2740  return DstReg64;
2741 }
2742 
2743 // Lower a f128 load into two f64 loads.
// The two f64 halves are loaded at the base pointer and base+8 (alignment
// clamped to 8), then inserted into an f128 register pair via
// INSERT_SUBREG; the result is merged with a TokenFactor of both load
// chains. NOTE(review): the doxygen extraction dropped the signature line
// (2744), presumably static SDValue LowerF128Load(SDValue Op,
// SelectionDAG &DAG) given the LowerF128Load(Op, DAG) call site below.
2745 {
2746  SDLoc dl(Op);
2747  LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
2748  assert(LdNode && LdNode->getOffset().isUndef()
2749  && "Unexpected node type");
2750 
2751  unsigned alignment = LdNode->getAlignment();
2752  if (alignment > 8)
2753  alignment = 8;
2754 
2755  SDValue Hi64 =
2756  DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2757  LdNode->getPointerInfo(), alignment);
2758  EVT addrVT = LdNode->getBasePtr().getValueType();
2759  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2760  LdNode->getBasePtr(),
2761  DAG.getConstant(8, dl, addrVT));
2762  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2763  LdNode->getPointerInfo(), alignment);
2764 
2765  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2766  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2767 
2768  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2769  dl, MVT::f128);
2770  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2771  MVT::f128,
2772  SDValue(InFP128, 0),
2773  Hi64,
2774  SubRegEven);
2775  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2776  MVT::f128,
2777  SDValue(InFP128, 0),
2778  Lo64,
2779  SubRegOdd);
2780  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2781  SDValue(Lo64.getNode(), 1) };
2782  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2783  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2784  return DAG.getMergeValues(Ops, dl);
2785 }
2786 
// Custom LOAD lowering: only f128 memory loads need splitting (delegated to
// LowerF128Load); everything else passes through unchanged.
// NOTE(review): the doxygen extraction dropped the signature line (2787).
2788 {
2789  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2790 
2791  EVT MemVT = LdNode->getMemoryVT();
2792  if (MemVT == MVT::f128)
2793  return LowerF128Load(Op, DAG);
2794 
2795  return Op;
2796 }
2797 
2798 // Lower a f128 store into two f64 stores.
// The even/odd f64 subregisters of the stored value are extracted with
// EXTRACT_SUBREG and written at the base pointer and base+8 (alignment
// clamped to 8); the two store chains are joined with a TokenFactor.
// NOTE(review): the doxygen extraction dropped the signature line (2799),
// presumably static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG)
// given the LowerF128Store(Op, DAG) call site below.
2800  SDLoc dl(Op);
2801  StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
2802  assert(StNode && StNode->getOffset().isUndef()
2803  && "Unexpected node type");
2804  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2805  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2806 
2807  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2808  dl,
2809  MVT::f64,
2810  StNode->getValue(),
2811  SubRegEven);
2812  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2813  dl,
2814  MVT::f64,
2815  StNode->getValue(),
2816  SubRegOdd);
2817 
2818  unsigned alignment = StNode->getAlignment();
2819  if (alignment > 8)
2820  alignment = 8;
2821 
2822  SDValue OutChains[2];
2823  OutChains[0] =
2824  DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2825  StNode->getBasePtr(), MachinePointerInfo(), alignment);
2826  EVT addrVT = StNode->getBasePtr().getValueType();
2827  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2828  StNode->getBasePtr(),
2829  DAG.getConstant(8, dl, addrVT));
2830  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
2831  MachinePointerInfo(), alignment);
2832  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2833 }
2834 
// Custom STORE lowering: f128 stores are split (LowerF128Store); i64 stores
// are rewritten as a bitcast to v2i32 plus a v2i32 store; anything else
// returns SDValue() so the generic path handles it.
// NOTE(review): the doxygen extraction dropped the signature line (2835).
2836 {
2837  SDLoc dl(Op);
2838  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
2839 
2840  EVT MemVT = St->getMemoryVT();
2841  if (MemVT == MVT::f128)
2842  return LowerF128Store(Op, DAG);
2843 
2844  if (MemVT == MVT::i64) {
2845  // Custom handling for i64 stores: turn it into a bitcast and a
2846  // v2i32 store.
2847  SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
2848  SDValue Chain = DAG.getStore(
2849  St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
2850  St->getAlignment(), St->getMemOperand()->getFlags(), St->getAAInfo());
2851  return Chain;
2852  }
2853 
2854  return SDValue();
2855 }
2856 
2857 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
2858  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
2859  && "invalid opcode");
2860 
2861  SDLoc dl(Op);
2862 
2863  if (Op.getValueType() == MVT::f64)
2864  return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
2865  if (Op.getValueType() != MVT::f128)
2866  return Op;
2867 
2868  // Lower fabs/fneg on f128 to fabs/fneg on f64
2869  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
2870  // (As with LowerF64Op, on little-endian, we need to negate the odd
2871  // subreg)
2872 
2873  SDValue SrcReg128 = Op.getOperand(0);
2874  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
2875  SrcReg128);
2876  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
2877  SrcReg128);
2878 
2879  if (DAG.getDataLayout().isLittleEndian()) {
2880  if (isV9)
2881  Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
2882  else
2883  Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
2884  } else {
2885  if (isV9)
2886  Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
2887  else
2888  Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
2889  }
2890 
2891  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2892  dl, MVT::f128), 0);
2893  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
2894  DstReg128, Hi64);
2895  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
2896  DstReg128, Lo64);
2897  return DstReg128;
2898 }
2899 
2901 
2902  if (Op.getValueType() != MVT::i64)
2903  return Op;
2904 
2905  SDLoc dl(Op);
2906  SDValue Src1 = Op.getOperand(0);
2907  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
2908  SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
2909  DAG.getConstant(32, dl, MVT::i64));
2910  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
2911 
2912  SDValue Src2 = Op.getOperand(1);
2913  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
2914  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
2915  DAG.getConstant(32, dl, MVT::i64));
2916  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
2917 
2918 
2919  bool hasChain = false;
2920  unsigned hiOpc = Op.getOpcode();
2921  switch (Op.getOpcode()) {
2922  default: llvm_unreachable("Invalid opcode");
2923  case ISD::ADDC: hiOpc = ISD::ADDE; break;
2924  case ISD::ADDE: hasChain = true; break;
2925  case ISD::SUBC: hiOpc = ISD::SUBE; break;
2926  case ISD::SUBE: hasChain = true; break;
2927  }
2928  SDValue Lo;
2929  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
2930  if (hasChain) {
2931  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
2932  Op.getOperand(2));
2933  } else {
2934  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
2935  }
2936  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
2937  SDValue Carry = Hi.getValue(1);
2938 
2939  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
2940  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
2941  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
2942  DAG.getConstant(32, dl, MVT::i64));
2943 
2944  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
2945  SDValue Ops[2] = { Dst, Carry };
2946  return DAG.getMergeValues(Ops, dl);
2947 }
2948 
2949 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
2950 // in LegalizeDAG.cpp except the order of arguments to the library function.
2952  const SparcTargetLowering &TLI)
2953 {
2954  unsigned opcode = Op.getOpcode();
2955  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
2956 
2957  bool isSigned = (opcode == ISD::SMULO);
2958  EVT VT = MVT::i64;
2959  EVT WideVT = MVT::i128;
2960  SDLoc dl(Op);
2961  SDValue LHS = Op.getOperand(0);
2962 
2963  if (LHS.getValueType() != VT)
2964  return Op;
2965 
2966  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
2967 
2968  SDValue RHS = Op.getOperand(1);
2969  SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
2970  SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
2971  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
2972 
2973  SDValue MulResult = TLI.makeLibCall(DAG,
2974  RTLIB::MUL_I128, WideVT,
2975  Args, isSigned, dl).first;
2976  SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
2977  MulResult, DAG.getIntPtrConstant(0, dl));
2978  SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
2979  MulResult, DAG.getIntPtrConstant(1, dl));
2980  if (isSigned) {
2981  SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
2982  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
2983  } else {
2984  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
2985  ISD::SETNE);
2986  }
2987  // MulResult is a node with an illegal type. Because such things are not
2988  // generally permitted during this phase of legalization, ensure that
2989  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
2990  // been folded.
2991  assert(MulResult->use_empty() && "Illegally typed node still in use!");
2992 
2993  SDValue Ops[2] = { BottomHalf, TopHalf } ;
2994  return DAG.getMergeValues(Ops, dl);
2995 }
2996 
2998  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
2999  // Expand with a fence.
3000  return SDValue();
3001 
3002  // Monotonic load/stores are legal.
3003  return Op;
3004 }
3005 
3007  SelectionDAG &DAG) const {
3008  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3009  SDLoc dl(Op);
3010  switch (IntNo) {
3011  default: return SDValue(); // Don't custom lower most intrinsics.
3012  case Intrinsic::thread_pointer: {
3013  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3014  return DAG.getRegister(SP::G7, PtrVT);
3015  }
3016  }
3017 }
3018 
3021 
3022  bool hasHardQuad = Subtarget->hasHardQuad();
3023  bool isV9 = Subtarget->isV9();
3024 
3025  switch (Op.getOpcode()) {
3026  default: llvm_unreachable("Should not custom lower this!");
3027 
3028  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3029  Subtarget);
3030  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3031  Subtarget);
3032  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3033  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3034  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3035  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3036  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3037  hasHardQuad);
3038  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3039  hasHardQuad);
3040  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3041  hasHardQuad);
3042  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3043  hasHardQuad);
3044  case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this,
3045  hasHardQuad);
3046  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
3047  hasHardQuad);
3048  case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG, *this);
3049  case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG, *this);
3050  case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3051  case ISD::VAARG: return LowerVAARG(Op, DAG);
3052  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
3053  Subtarget);
3054 
3055  case ISD::LOAD: return LowerLOAD(Op, DAG);
3056  case ISD::STORE: return LowerSTORE(Op, DAG);
3057  case ISD::FADD: return LowerF128Op(Op, DAG,
3058  getLibcallName(RTLIB::ADD_F128), 2);
3059  case ISD::FSUB: return LowerF128Op(Op, DAG,
3060  getLibcallName(RTLIB::SUB_F128), 2);
3061  case ISD::FMUL: return LowerF128Op(Op, DAG,
3062  getLibcallName(RTLIB::MUL_F128), 2);
3063  case ISD::FDIV: return LowerF128Op(Op, DAG,
3064  getLibcallName(RTLIB::DIV_F128), 2);
3065  case ISD::FSQRT: return LowerF128Op(Op, DAG,
3066  getLibcallName(RTLIB::SQRT_F128),1);
3067  case ISD::FABS:
3068  case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3069  case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3070  case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3071  case ISD::ADDC:
3072  case ISD::ADDE:
3073  case ISD::SUBC:
3074  case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
3075  case ISD::UMULO:
3076  case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
3077  case ISD::ATOMIC_LOAD:
3078  case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3079  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3080  }
3081 }
3082 
3084  const SDLoc &DL,
3085  SelectionDAG &DAG) const {
3086  APInt V = C->getValueAPF().bitcastToAPInt();
3087  SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3088  SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3089  if (DAG.getDataLayout().isLittleEndian())
3090  std::swap(Lo, Hi);
3091  return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3092 }
3093 
3095  DAGCombinerInfo &DCI) const {
3096  SDLoc dl(N);
3097  SDValue Src = N->getOperand(0);
3098 
3099  if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3100  Src.getSimpleValueType() == MVT::f64)
3101  return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3102 
3103  return SDValue();
3104 }
3105 
3107  DAGCombinerInfo &DCI) const {
3108  switch (N->getOpcode()) {
3109  default:
3110  break;
3111  case ISD::BITCAST:
3112  return PerformBITCASTCombine(N, DCI);
3113  }
3114  return SDValue();
3115 }
3116 
3119  MachineBasicBlock *BB) const {
3120  switch (MI.getOpcode()) {
3121  default: llvm_unreachable("Unknown SELECT_CC!");
3122  case SP::SELECT_CC_Int_ICC:
3123  case SP::SELECT_CC_FP_ICC:
3124  case SP::SELECT_CC_DFP_ICC:
3125  case SP::SELECT_CC_QFP_ICC:
3126  return expandSelectCC(MI, BB, SP::BCOND);
3127  case SP::SELECT_CC_Int_FCC:
3128  case SP::SELECT_CC_FP_FCC:
3129  case SP::SELECT_CC_DFP_FCC:
3130  case SP::SELECT_CC_QFP_FCC:
3131  return expandSelectCC(MI, BB, SP::FBCOND);
3132  case SP::EH_SJLJ_SETJMP32ri:
3133  case SP::EH_SJLJ_SETJMP32rr:
3134  return emitEHSjLjSetJmp(MI, BB);
3135  case SP::EH_SJLJ_LONGJMP32rr:
3136  case SP::EH_SJLJ_LONGJMP32ri:
3137  return emitEHSjLjLongJmp(MI, BB);
3138 
3139  }
3140 }
3141 
3144  unsigned BROpcode) const {
3145  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3146  DebugLoc dl = MI.getDebugLoc();
3147  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3148 
3149  // To "insert" a SELECT_CC instruction, we actually have to insert the
3150  // triangle control-flow pattern. The incoming instruction knows the
3151  // destination vreg to set, the condition code register to branch on, the
3152  // true/false values to select between, and the condition code for the branch.
3153  //
3154  // We produce the following control flow:
3155  // ThisMBB
3156  // | \
3157  // | IfFalseMBB
3158  // | /
3159  // SinkMBB
3160  const BasicBlock *LLVM_BB = BB->getBasicBlock();
3162 
3163  MachineBasicBlock *ThisMBB = BB;
3164  MachineFunction *F = BB->getParent();
3165  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3166  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3167  F->insert(It, IfFalseMBB);
3168  F->insert(It, SinkMBB);
3169 
3170  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3171  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3172  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3173  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3174 
3175  // Set the new successors for ThisMBB.
3176  ThisMBB->addSuccessor(IfFalseMBB);
3177  ThisMBB->addSuccessor(SinkMBB);
3178 
3179  BuildMI(ThisMBB, dl, TII.get(BROpcode))
3180  .addMBB(SinkMBB)
3181  .addImm(CC);
3182 
3183  // IfFalseMBB just falls through to SinkMBB.
3184  IfFalseMBB->addSuccessor(SinkMBB);
3185 
3186  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3187  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3188  MI.getOperand(0).getReg())
3189  .addReg(MI.getOperand(1).getReg())
3190  .addMBB(ThisMBB)
3191  .addReg(MI.getOperand(2).getReg())
3192  .addMBB(IfFalseMBB);
3193 
3194  MI.eraseFromParent(); // The pseudo instruction is gone now.
3195  return SinkMBB;
3196 }
3197 
3200  MachineBasicBlock *MBB) const {
3201  DebugLoc DL = MI.getDebugLoc();
3202  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
3203 
3204  MachineFunction *MF = MBB->getParent();
3206  MachineInstrBuilder MIB;
3207 
3208  MVT PVT = getPointerTy(MF->getDataLayout());
3209  unsigned RegSize = PVT.getStoreSize();
3210  assert(PVT == MVT::i32 && "Invalid Pointer Size!");
3211 
3212  unsigned Buf = MI.getOperand(0).getReg();
3213  unsigned JmpLoc = MRI.createVirtualRegister(&SP::IntRegsRegClass);
3214 
3215  // TO DO: If we do 64-bit handling, this perhaps should be FLUSHW, not TA 3
3216  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::TRAPri), SP::G0).addImm(3).addImm(SPCC::ICC_A);
3217 
3218  // Instruction to restore FP
3219  const unsigned FP = SP::I6;
3220  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3221  .addReg(FP)
3222  .addReg(Buf)
3223  .addImm(0);
3224 
3225  // Instruction to load jmp location
3226  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3227  .addReg(JmpLoc, RegState::Define)
3228  .addReg(Buf)
3229  .addImm(RegSize);
3230 
3231  // Instruction to restore SP
3232  const unsigned SP = SP::O6;
3233  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3234  .addReg(SP)
3235  .addReg(Buf)
3236  .addImm(2 * RegSize);
3237 
3238  // Instruction to restore I7
3239  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3240  .addReg(SP::I7)
3241  .addReg(Buf, RegState::Kill)
3242  .addImm(3 * RegSize);
3243 
3244  // Jump to JmpLoc
3245  BuildMI(*MBB, MI, DL, TII->get(SP::JMPLrr)).addReg(SP::G0).addReg(JmpLoc, RegState::Kill).addReg(SP::G0);
3246 
3247  MI.eraseFromParent();
3248  return MBB;
3249 }
3250 
3253  MachineBasicBlock *MBB) const {
3254  DebugLoc DL = MI.getDebugLoc();
3255  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
3256  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3257 
3258  MachineFunction *MF = MBB->getParent();
3260  MachineInstrBuilder MIB;
3261 
3262  MVT PVT = getPointerTy(MF->getDataLayout());
3263  unsigned RegSize = PVT.getStoreSize();
3264  assert(PVT == MVT::i32 && "Invalid Pointer Size!");
3265 
3266  unsigned DstReg = MI.getOperand(0).getReg();
3267  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
3268  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
3269  (void)TRI;
3270  unsigned mainDstReg = MRI.createVirtualRegister(RC);
3271  unsigned restoreDstReg = MRI.createVirtualRegister(RC);
3272 
3273  // For v = setjmp(buf), we generate
3274  //
3275  // thisMBB:
3276  // buf[0] = FP
3277  // buf[RegSize] = restoreMBB <-- takes address of restoreMBB
3278  // buf[RegSize * 2] = O6
3279  // buf[RegSize * 3] = I7
3280  // Ensure restoreMBB remains in the relocations list (done using a bn instruction)
3281  // b mainMBB
3282  //
3283  // mainMBB:
3284  // v_main = 0
3285  // b sinkMBB
3286  //
3287  // restoreMBB:
3288  // v_restore = 1
3289  // --fall through--
3290  //
3291  // sinkMBB:
3292  // v = phi(main, restore)
3293 
3294  const BasicBlock *BB = MBB->getBasicBlock();
3295  MachineFunction::iterator It = ++MBB->getIterator();
3296  MachineBasicBlock *thisMBB = MBB;
3297  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
3298  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
3299  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
3300 
3301  MF->insert(It, mainMBB);
3302  MF->insert(It, restoreMBB);
3303  MF->insert(It, sinkMBB);
3304  restoreMBB->setHasAddressTaken();
3305 
3306  // Transfer the remainder of BB and its successor edges to sinkMBB.
3307  sinkMBB->splice(sinkMBB->begin(), MBB,
3308  std::next(MachineBasicBlock::iterator(MI)),
3309  MBB->end());
3310  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
3311 
3312  unsigned LabelReg = MRI.createVirtualRegister(&SP::IntRegsRegClass);
3313  unsigned LabelReg2 = MRI.createVirtualRegister(&SP::IntRegsRegClass);
3314  unsigned BufReg = MI.getOperand(1).getReg();
3315 
3316  // Instruction to store FP
3317  const unsigned FP = SP::I6;
3318  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3319  .addReg(BufReg)
3320  .addImm(0)
3321  .addReg(FP);
3322 
3323  // Instructions to store jmp location
3324  MIB = BuildMI(thisMBB, DL, TII->get(SP::SETHIi))
3325  .addReg(LabelReg, RegState::Define)
3326  .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_HI);
3327 
3328  MIB = BuildMI(thisMBB, DL, TII->get(SP::ORri))
3329  .addReg(LabelReg2, RegState::Define)
3330  .addReg(LabelReg, RegState::Kill)
3331  .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_LO);
3332 
3333  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3334  .addReg(BufReg)
3335  .addImm(RegSize)
3336  .addReg(LabelReg2, RegState::Kill);
3337 
3338  // Instruction to store SP
3339  const unsigned SP = SP::O6;
3340  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3341  .addReg(BufReg)
3342  .addImm(2 * RegSize)
3343  .addReg(SP);
3344 
3345  // Instruction to store I7
3346  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3347  .addReg(BufReg)
3348  .addImm(3 * RegSize)
3349  .addReg(SP::I7);
3350 
3351 
3352  // FIX ME: This next instruction ensures that the restoreMBB block address remains
3353  // valid through optimization passes and serves no other purpose. The ICC_N ensures
3354  // that the branch is never taken. This commented-out code here was an alternative
3355  // attempt to achieve this which brought myriad problems.
3356  //MIB = BuildMI(thisMBB, DL, TII->get(SP::EH_SjLj_Setup)).addMBB(restoreMBB, SparcMCExpr::VK_Sparc_None);
3357  MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
3358  .addMBB(restoreMBB)
3359  .addImm(SPCC::ICC_N);
3360 
3361  MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
3362  .addMBB(mainMBB)
3363  .addImm(SPCC::ICC_A);
3364 
3365  thisMBB->addSuccessor(mainMBB);
3366  thisMBB->addSuccessor(restoreMBB);
3367 
3368 
3369  // mainMBB:
3370  MIB = BuildMI(mainMBB, DL, TII->get(SP::ORrr))
3371  .addReg(mainDstReg, RegState::Define)
3372  .addReg(SP::G0)
3373  .addReg(SP::G0);
3374  MIB = BuildMI(mainMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
3375 
3376  mainMBB->addSuccessor(sinkMBB);
3377 
3378 
3379  // restoreMBB:
3380  MIB = BuildMI(restoreMBB, DL, TII->get(SP::ORri))
3381  .addReg(restoreDstReg, RegState::Define)
3382  .addReg(SP::G0)
3383  .addImm(1);
3384  //MIB = BuildMI(restoreMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
3385  restoreMBB->addSuccessor(sinkMBB);
3386 
3387  // sinkMBB:
3388  MIB = BuildMI(*sinkMBB, sinkMBB->begin(), DL,
3389  TII->get(SP::PHI), DstReg)
3390  .addReg(mainDstReg).addMBB(mainMBB)
3391  .addReg(restoreDstReg).addMBB(restoreMBB);
3392 
3393  MI.eraseFromParent();
3394  return sinkMBB;
3395 }
3396 
3397 //===----------------------------------------------------------------------===//
3398 // Sparc Inline Assembly Support
3399 //===----------------------------------------------------------------------===//
3400 
3401 /// getConstraintType - Given a constraint letter, return the type of
3402 /// constraint it is for this target.
3405  if (Constraint.size() == 1) {
3406  switch (Constraint[0]) {
3407  default: break;
3408  case 'r':
3409  case 'f':
3410  case 'e':
3411  return C_RegisterClass;
3412  case 'I': // SIMM13
3413  return C_Other;
3414  }
3415  }
3416 
3417  return TargetLowering::getConstraintType(Constraint);
3418 }
3419 
3422  const char *constraint) const {
3423  ConstraintWeight weight = CW_Invalid;
3424  Value *CallOperandVal = info.CallOperandVal;
3425  // If we don't have a value, we can't do a match,
3426  // but allow it at the lowest weight.
3427  if (!CallOperandVal)
3428  return CW_Default;
3429 
3430  // Look at the constraint type.
3431  switch (*constraint) {
3432  default:
3433  weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3434  break;
3435  case 'I': // SIMM13
3436  if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3437  if (isInt<13>(C->getSExtValue()))
3438  weight = CW_Constant;
3439  }
3440  break;
3441  }
3442  return weight;
3443 }
3444 
3445 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3446 /// vector. If it is invalid, don't add anything to Ops.
3449  std::string &Constraint,
3450  std::vector<SDValue> &Ops,
3451  SelectionDAG &DAG) const {
3452  SDValue Result(nullptr, 0);
3453 
3454  // Only support length 1 constraints for now.
3455  if (Constraint.length() > 1)
3456  return;
3457 
3458  char ConstraintLetter = Constraint[0];
3459  switch (ConstraintLetter) {
3460  default: break;
3461  case 'I':
3462  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3463  if (isInt<13>(C->getSExtValue())) {
3464  Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
3465  Op.getValueType());
3466  break;
3467  }
3468  return;
3469  }
3470  }
3471 
3472  if (Result.getNode()) {
3473  Ops.push_back(Result);
3474  return;
3475  }
3476  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3477 }
3478 
3479 std::pair<unsigned, const TargetRegisterClass *>
3481  StringRef Constraint,
3482  MVT VT) const {
3483  if (Constraint.size() == 1) {
3484  switch (Constraint[0]) {
3485  case 'r':
3486  if (VT == MVT::v2i32)
3487  return std::make_pair(0U, &SP::IntPairRegClass);
3488  else
3489  return std::make_pair(0U, &SP::IntRegsRegClass);
3490  case 'f':
3491  if (VT == MVT::f32)
3492  return std::make_pair(0U, &SP::FPRegsRegClass);
3493  else if (VT == MVT::f64)
3494  return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3495  else if (VT == MVT::f128)
3496  return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3497  llvm_unreachable("Unknown ValueType for f-register-type!");
3498  break;
3499  case 'e':
3500  if (VT == MVT::f32)
3501  return std::make_pair(0U, &SP::FPRegsRegClass);
3502  else if (VT == MVT::f64)
3503  return std::make_pair(0U, &SP::DFPRegsRegClass);
3504  else if (VT == MVT::f128)
3505  return std::make_pair(0U, &SP::QFPRegsRegClass);
3506  llvm_unreachable("Unknown ValueType for e-register-type!");
3507  break;
3508  }
3509  } else if (!Constraint.empty() && Constraint.size() <= 5
3510  && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
3511  // constraint = '{r<d>}'
3512  // Remove the braces from around the name.
3513  StringRef name(Constraint.data()+1, Constraint.size()-2);
3514  // Handle register aliases:
3515  // r0-r7 -> g0-g7
3516  // r8-r15 -> o0-o7
3517  // r16-r23 -> l0-l7
3518  // r24-r31 -> i0-i7
3519  uint64_t intVal = 0;
3520  if (name.substr(0, 1).equals("r")
3521  && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
3522  const char regTypes[] = { 'g', 'o', 'l', 'i' };
3523  char regType = regTypes[intVal/8];
3524  char regIdx = '0' + (intVal % 8);
3525  char tmp[] = { '{', regType, regIdx, '}', 0 };
3526  std::string newConstraint = std::string(tmp);
3527  return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
3528  VT);
3529  }
3530  if (name.substr(0, 1).equals("f") &&
3531  !name.substr(1).getAsInteger(10, intVal) && intVal <= 63) {
3532  std::string newConstraint;
3533 
3534  if (VT == MVT::f32 || VT == MVT::Other) {
3535  newConstraint = "{f" + utostr(intVal) + "}";
3536  } else if (VT == MVT::f64 && (intVal % 2 == 0)) {
3537  newConstraint = "{d" + utostr(intVal / 2) + "}";
3538  } else if (VT == MVT::f128 && (intVal % 4 == 0)) {
3539  newConstraint = "{q" + utostr(intVal / 4) + "}";
3540  } else {
3541  return std::make_pair(0U, nullptr);
3542  }
3543  return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
3544  VT);
3545  }
3546  }
3547 
3548  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3549 }
3550 
3551 bool
3553  // The Sparc target isn't yet aware of offsets.
3554  return false;
3555 }
3556 
3559  SelectionDAG &DAG) const {
3560 
3561  SDLoc dl(N);
3562 
3563  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3564 
3565  switch (N->getOpcode()) {
3566  default:
3567  llvm_unreachable("Do not know how to custom type legalize this operation!");
3568 
3569  case ISD::FP_TO_SINT:
3570  case ISD::FP_TO_UINT:
3571  // Custom lower only if it involves f128 or i64.
3572  if (N->getOperand(0).getValueType() != MVT::f128
3573  || N->getValueType(0) != MVT::i64)
3574  return;
3575  libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3576  ? RTLIB::FPTOSINT_F128_I64
3577  : RTLIB::FPTOUINT_F128_I64);
3578 
3579  Results.push_back(LowerF128Op(SDValue(N, 0),
3580  DAG,
3581  getLibcallName(libCall),
3582  1));
3583  return;
3584  case ISD::READCYCLECOUNTER: {
3585  assert(Subtarget->hasLeonCycleCounter());
3586  SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3587  SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3588  SDValue Ops[] = { Lo, Hi };
3589  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3590  Results.push_back(Pair);
3591  Results.push_back(N->getOperand(0));
3592  return;
3593  }
3594  case ISD::SINT_TO_FP:
3595  case ISD::UINT_TO_FP:
3596  // Custom lower only if it involves f128 or i64.
3597  if (N->getValueType(0) != MVT::f128
3598  || N->getOperand(0).getValueType() != MVT::i64)
3599  return;
3600 
3601  libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3602  ? RTLIB::SINTTOFP_I64_F128
3603  : RTLIB::UINTTOFP_I64_F128);
3604 
3605  Results.push_back(LowerF128Op(SDValue(N, 0),
3606  DAG,
3607  getLibcallName(libCall),
3608  1));
3609  return;
3610  case ISD::LOAD: {
3611  LoadSDNode *Ld = cast<LoadSDNode>(N);
3612  // Custom handling only for i64: turn i64 load into a v2i32 load,
3613  // and a bitcast.
3614  if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3615  return;
3616 
3617  SDLoc dl(N);
3618  SDValue LoadRes = DAG.getExtLoad(
3619  Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3620  Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getAlignment(),
3621  Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
3622 
3623  SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3624  Results.push_back(Res);
3625  Results.push_back(LoadRes.getValue(1));
3626  return;
3627  }
3628  }
3629 }
3630 
3631 // Override to enable LOAD_STACK_GUARD lowering on Linux.
3633  if (!Subtarget->isTargetLinux())
3635  return true;
3636 }
3637 
3638 // Override to disable global variable loading on Linux.
3640  if (!Subtarget->isTargetLinux())
3642 }
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
void setFrameAddressIsTaken(bool T)
uint64_t CallInst * C
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:541
X = FP_ROUND(Y, TRUNC) - Rounding &#39;Y&#39; from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:508
static MVT getIntegerVT(unsigned BitWidth)
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:837
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
EVT getValueType() const
Return the ValueType of the referenced return value.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
bool usePopc() const
const SDValue & getOffset() const
bool isUndef() const
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
const GlobalValue * getGlobal() const
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
Definition: ISDOpcodes.h:184
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
DiagnosticInfoOptimizationBase::Argument NV
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC&#39;s if you just have an ISD::CondCode instead of an SD...
Definition: SelectionDAG.h:951
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it&#39;s not CSE&#39;d)...
Definition: SelectionDAG.h:849
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC)
IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC condition.
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:610
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:139
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:64
SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
const SDValue & getBasePtr() const
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:223
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:383
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
bool isFP128Ty() const
Return true if this is &#39;fp128&#39;.
Definition: Type.h:156
const SDValue & getValue() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:659
unsigned Reg
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
const SDValue & getChain() const
Function Alias Analysis Results
unsigned getValNo() const
unsigned getAlignment() const
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:321
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
unsigned second
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
const uint32_t * getRTCallPreservedMask(CallingConv::ID CC) const
bool hasHardQuad() const
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:34
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:141
F(f)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
Definition: MathExtras.h:685
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:876
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:681
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
const SparcInstrInfo * getInstrInfo() const override
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:741
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:407
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:128
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
bool useSoftFloat() const override
SDValue getExternalSymbol(const char *Sym, EVT VT)
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
bool isMemLoc() const
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget, bool AlwaysFlush=false)
bool needsCustom() const
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic...
Definition: ISDOpcodes.h:114
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:579
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:210
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:428
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
bool isTargetLinux() const
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9)
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
const HexagonInstrInfo * TII
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
Shift and rotation operations.
Definition: ISDOpcodes.h:382
MachineBasicBlock * expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB, unsigned BROpcode) const
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:202
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
BinOp getOperation() const
Definition: Instructions.h:734
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:191
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void addLoc(const CCValAssign &V)
uint64_t getConstantOperandVal(unsigned i) const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
Definition: ISDOpcodes.h:770
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:409
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:457
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:398
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:73
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(T Value)
Definition: StringSwitch.h:203
unsigned getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
LocInfo getLocInfo() const
bool useSoftFloat() const
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool hasLeonCycleCounter() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SmallVector< ISD::InputArg, 32 > Ins
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:655
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
unsigned getSizeInBits() const
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
bool fixAllFDIVSQRT() const
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:395
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:450
SDValue getRegisterMask(const uint32_t *RegMask)
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:399
SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID CC) const override
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:201
SDValue LowerF128Op(SDValue Op, SelectionDAG &DAG, const char *LibFuncName, unsigned numArgs) const
bool useSoftMulDiv() const
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:133
int64_t getStackPointerBias() const
The 64-bit ABI uses biased stack and frame pointers, so the stack frame of the current function is th...
SmallVector< ISD::OutputArg, 32 > Outs
SDValue LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
LowerFormalArguments32 - V8 uses a very simple ABI, where all values are passed in either one or two ...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:221
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
static mvt_range integer_vector_valuetypes()
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:919
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
Definition: ISDOpcodes.h:707
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:571
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
bool isStrongerThanMonotonic(AtomicOrdering ao)
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:151
amdgpu Simplify well known AMD library false Value * Callee
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Class to represent pointers.
Definition: DerivedTypes.h:467
unsigned getByValSize() const
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
UNDEF - An undefined node.
Definition: ISDOpcodes.h:178
This class is used to represent ISD::STORE nodes.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:494
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
Flag
These should be considered private to the implementation of the MCI