LLVM  7.0.0svn
SparcISelLowering.cpp
Go to the documentation of this file.
1 //===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the interfaces that Sparc uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "SparcISelLowering.h"
18 #include "SparcRegisterInfo.h"
19 #include "SparcTargetMachine.h"
20 #include "SparcTargetObjectFile.h"
21 #include "llvm/ADT/StringSwitch.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/Module.h"
33 #include "llvm/Support/KnownBits.h"
34 using namespace llvm;
35 
36 
37 //===----------------------------------------------------------------------===//
38 // Calling Convention Implementation
39 //===----------------------------------------------------------------------===//
40 
41 static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
42  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
43  ISD::ArgFlagsTy &ArgFlags, CCState &State)
44 {
45  assert (ArgFlags.isSRet());
46 
47  // Assign SRet argument.
48  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
49  0,
50  LocVT, LocInfo));
51  return true;
52 }
53 
54 static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
55  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
56  ISD::ArgFlagsTy &ArgFlags, CCState &State)
57 {
58  static const MCPhysReg RegList[] = {
59  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
60  };
61  // Try to get first reg.
62  if (unsigned Reg = State.AllocateReg(RegList)) {
63  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
64  } else {
65  // Assign whole thing in stack.
66  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
67  State.AllocateStack(8,4),
68  LocVT, LocInfo));
69  return true;
70  }
71 
72  // Try to get second reg.
73  if (unsigned Reg = State.AllocateReg(RegList))
74  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
75  else
76  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
77  State.AllocateStack(4,4),
78  LocVT, LocInfo));
79  return true;
80 }
81 
82 static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
83  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
84  ISD::ArgFlagsTy &ArgFlags, CCState &State)
85 {
86  static const MCPhysReg RegList[] = {
87  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
88  };
89 
90  // Try to get first reg.
91  if (unsigned Reg = State.AllocateReg(RegList))
92  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
93  else
94  return false;
95 
96  // Try to get second reg.
97  if (unsigned Reg = State.AllocateReg(RegList))
98  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
99  else
100  return false;
101 
102  return true;
103 }
104 
105 // Allocate a full-sized argument for the 64-bit ABI.
106 static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
107  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
108  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
109  assert((LocVT == MVT::f32 || LocVT == MVT::f128
110  || LocVT.getSizeInBits() == 64) &&
111  "Can't handle non-64 bits locations");
112 
113  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
114  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
115  unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
116  unsigned Offset = State.AllocateStack(size, alignment);
117  unsigned Reg = 0;
118 
119  if (LocVT == MVT::i64 && Offset < 6*8)
120  // Promote integers to %i0-%i5.
121  Reg = SP::I0 + Offset/8;
122  else if (LocVT == MVT::f64 && Offset < 16*8)
123  // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
124  Reg = SP::D0 + Offset/8;
125  else if (LocVT == MVT::f32 && Offset < 16*8)
126  // Promote floats to %f1, %f3, ...
127  Reg = SP::F1 + Offset/4;
128  else if (LocVT == MVT::f128 && Offset < 16*8)
129  // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
130  Reg = SP::Q0 + Offset/16;
131 
132  // Promote to register when possible, otherwise use the stack slot.
133  if (Reg) {
134  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
135  return true;
136  }
137 
138  // This argument goes on the stack in an 8-byte slot.
139  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
140  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
141  if (LocVT == MVT::f32)
142  Offset += 4;
143 
144  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
145  return true;
146 }
147 
148 // Allocate a half-sized argument for the 64-bit ABI.
149 //
150 // This is used when passing { float, int } structs by value in registers.
151 static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
152  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
153  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
154  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
155  unsigned Offset = State.AllocateStack(4, 4);
156 
157  if (LocVT == MVT::f32 && Offset < 16*8) {
158  // Promote floats to %f0-%f31.
159  State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
160  LocVT, LocInfo));
161  return true;
162  }
163 
164  if (LocVT == MVT::i32 && Offset < 6*8) {
165  // Promote integers to %i0-%i5, using half the register.
166  unsigned Reg = SP::I0 + Offset/8;
167  LocVT = MVT::i64;
168  LocInfo = CCValAssign::AExt;
169 
170  // Set the Custom bit if this i32 goes in the high bits of a register.
171  if (Offset % 8 == 0)
172  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
173  LocVT, LocInfo));
174  else
175  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
176  return true;
177  }
178 
179  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
180  return true;
181 }
182 
183 #include "SparcGenCallingConv.inc"
184 
185 // The calling conventions in SparcCallingConv.td are described in terms of the
186 // callee's register window. This function translates registers to the
187 // corresponding caller window %o register.
188 static unsigned toCallerWindow(unsigned Reg) {
189  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
190  "Unexpected enum");
191  if (Reg >= SP::I0 && Reg <= SP::I7)
192  return Reg - SP::I0 + SP::O0;
193  return Reg;
194 }
195 
196 SDValue
198  bool IsVarArg,
200  const SmallVectorImpl<SDValue> &OutVals,
201  const SDLoc &DL, SelectionDAG &DAG) const {
202  if (Subtarget->is64Bit())
203  return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
204  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
205 }
206 
207 SDValue
209  bool IsVarArg,
211  const SmallVectorImpl<SDValue> &OutVals,
212  const SDLoc &DL, SelectionDAG &DAG) const {
214 
215  // CCValAssign - represent the assignment of the return value to locations.
217 
218  // CCState - Info about the registers and stack slot.
219  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
220  *DAG.getContext());
221 
222  // Analyze return values.
223  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
224 
225  SDValue Flag;
226  SmallVector<SDValue, 4> RetOps(1, Chain);
227  // Make room for the return address offset.
228  RetOps.push_back(SDValue());
229 
230  // Copy the result values into the output registers.
231  for (unsigned i = 0, realRVLocIdx = 0;
232  i != RVLocs.size();
233  ++i, ++realRVLocIdx) {
234  CCValAssign &VA = RVLocs[i];
235  assert(VA.isRegLoc() && "Can only return in registers!");
236 
237  SDValue Arg = OutVals[realRVLocIdx];
238 
239  if (VA.needsCustom()) {
240  assert(VA.getLocVT() == MVT::v2i32);
241  // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
242  // happen by default if this wasn't a legal type)
243 
245  Arg,
246  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
248  Arg,
249  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
250 
251  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
252  Flag = Chain.getValue(1);
253  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
254  VA = RVLocs[++i]; // skip ahead to next loc
255  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
256  Flag);
257  } else
258  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
259 
260  // Guarantee that all emitted copies are stuck together with flags.
261  Flag = Chain.getValue(1);
262  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
263  }
264 
265  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
266  // If the function returns a struct, copy the SRetReturnReg to I0
267  if (MF.getFunction().hasStructRetAttr()) {
269  unsigned Reg = SFI->getSRetReturnReg();
270  if (!Reg)
271  llvm_unreachable("sret virtual register not created in the entry block");
272  auto PtrVT = getPointerTy(DAG.getDataLayout());
273  SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
274  Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
275  Flag = Chain.getValue(1);
276  RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
277  RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
278  }
279 
280  RetOps[0] = Chain; // Update chain.
281  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
282 
283  // Add the flag if we have it.
284  if (Flag.getNode())
285  RetOps.push_back(Flag);
286 
287  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
288 }
289 
290 // Lower return values for the 64-bit ABI.
291 // Return values are passed the exactly the same way as function arguments.
292 SDValue
294  bool IsVarArg,
296  const SmallVectorImpl<SDValue> &OutVals,
297  const SDLoc &DL, SelectionDAG &DAG) const {
298  // CCValAssign - represent the assignment of the return value to locations.
300 
301  // CCState - Info about the registers and stack slot.
302  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
303  *DAG.getContext());
304 
305  // Analyze return values.
306  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
307 
308  SDValue Flag;
309  SmallVector<SDValue, 4> RetOps(1, Chain);
310 
311  // The second operand on the return instruction is the return address offset.
312  // The return address is always %i7+8 with the 64-bit ABI.
313  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
314 
315  // Copy the result values into the output registers.
316  for (unsigned i = 0; i != RVLocs.size(); ++i) {
317  CCValAssign &VA = RVLocs[i];
318  assert(VA.isRegLoc() && "Can only return in registers!");
319  SDValue OutVal = OutVals[i];
320 
321  // Integer return values must be sign or zero extended by the callee.
322  switch (VA.getLocInfo()) {
323  case CCValAssign::Full: break;
324  case CCValAssign::SExt:
325  OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
326  break;
327  case CCValAssign::ZExt:
328  OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
329  break;
330  case CCValAssign::AExt:
331  OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
332  break;
333  default:
334  llvm_unreachable("Unknown loc info!");
335  }
336 
337  // The custom bit on an i32 return value indicates that it should be passed
338  // in the high bits of the register.
339  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
340  OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
341  DAG.getConstant(32, DL, MVT::i32));
342 
343  // The next value may go in the low bits of the same register.
344  // Handle both at once.
345  if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
346  SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
347  OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
348  // Skip the next value, it's already done.
349  ++i;
350  }
351  }
352 
353  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
354 
355  // Guarantee that all emitted copies are stuck together with flags.
356  Flag = Chain.getValue(1);
357  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
358  }
359 
360  RetOps[0] = Chain; // Update chain.
361 
362  // Add the flag if we have it.
363  if (Flag.getNode())
364  RetOps.push_back(Flag);
365 
366  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
367 }
368 
370  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
371  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
372  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
373  if (Subtarget->is64Bit())
374  return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
375  DL, DAG, InVals);
376  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
377  DL, DAG, InVals);
378 }
379 
380 /// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
381 /// passed in either one or two GPRs, including FP values. TODO: we should
382 /// pass FP values in FP registers for fastcc functions.
384  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
385  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
386  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
388  MachineRegisterInfo &RegInfo = MF.getRegInfo();
390 
391  // Assign locations to all of the incoming arguments.
393  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
394  *DAG.getContext());
395  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
396 
397  const unsigned StackOffset = 92;
398  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
399 
400  unsigned InIdx = 0;
401  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
402  CCValAssign &VA = ArgLocs[i];
403 
404  if (Ins[InIdx].Flags.isSRet()) {
405  if (InIdx != 0)
406  report_fatal_error("sparc only supports sret on the first parameter");
407  // Get SRet from [%fp+64].
408  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
409  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
410  SDValue Arg =
411  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
412  InVals.push_back(Arg);
413  continue;
414  }
415 
416  if (VA.isRegLoc()) {
417  if (VA.needsCustom()) {
418  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
419 
420  unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
421  MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
422  SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
423 
424  assert(i+1 < e);
425  CCValAssign &NextVA = ArgLocs[++i];
426 
427  SDValue LoVal;
428  if (NextVA.isMemLoc()) {
429  int FrameIdx = MF.getFrameInfo().
430  CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
431  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
432  LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
433  } else {
434  unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
435  &SP::IntRegsRegClass);
436  LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
437  }
438 
439  if (IsLittleEndian)
440  std::swap(LoVal, HiVal);
441 
442  SDValue WholeValue =
443  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
444  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
445  InVals.push_back(WholeValue);
446  continue;
447  }
448  unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
449  MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
450  SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
451  if (VA.getLocVT() == MVT::f32)
452  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
453  else if (VA.getLocVT() != MVT::i32) {
454  Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
455  DAG.getValueType(VA.getLocVT()));
456  Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
457  }
458  InVals.push_back(Arg);
459  continue;
460  }
461 
462  assert(VA.isMemLoc());
463 
464  unsigned Offset = VA.getLocMemOffset()+StackOffset;
465  auto PtrVT = getPointerTy(DAG.getDataLayout());
466 
467  if (VA.needsCustom()) {
468  assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
469  // If it is double-word aligned, just load.
470  if (Offset % 8 == 0) {
471  int FI = MF.getFrameInfo().CreateFixedObject(8,
472  Offset,
473  true);
474  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
475  SDValue Load =
476  DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
477  InVals.push_back(Load);
478  continue;
479  }
480 
481  int FI = MF.getFrameInfo().CreateFixedObject(4,
482  Offset,
483  true);
484  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
485  SDValue HiVal =
486  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
487  int FI2 = MF.getFrameInfo().CreateFixedObject(4,
488  Offset+4,
489  true);
490  SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
491 
492  SDValue LoVal =
493  DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
494 
495  if (IsLittleEndian)
496  std::swap(LoVal, HiVal);
497 
498  SDValue WholeValue =
499  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
500  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
501  InVals.push_back(WholeValue);
502  continue;
503  }
504 
505  int FI = MF.getFrameInfo().CreateFixedObject(4,
506  Offset,
507  true);
508  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
509  SDValue Load ;
510  if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
511  Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
512  } else if (VA.getValVT() == MVT::f128) {
513  report_fatal_error("SPARCv8 does not handle f128 in calls; "
514  "pass indirectly");
515  } else {
516  // We shouldn't see any other value types here.
517  llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
518  }
519  InVals.push_back(Load);
520  }
521 
522  if (MF.getFunction().hasStructRetAttr()) {
523  // Copy the SRet Argument to SRetReturnReg.
525  unsigned Reg = SFI->getSRetReturnReg();
526  if (!Reg) {
527  Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
528  SFI->setSRetReturnReg(Reg);
529  }
530  SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
531  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
532  }
533 
534  // Store remaining ArgRegs to the stack if this is a varargs function.
535  if (isVarArg) {
536  static const MCPhysReg ArgRegs[] = {
537  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
538  };
539  unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
540  const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
541  unsigned ArgOffset = CCInfo.getNextStackOffset();
542  if (NumAllocated == 6)
543  ArgOffset += StackOffset;
544  else {
545  assert(!ArgOffset);
546  ArgOffset = 68+4*NumAllocated;
547  }
548 
549  // Remember the vararg offset for the va_start implementation.
550  FuncInfo->setVarArgsFrameOffset(ArgOffset);
551 
552  std::vector<SDValue> OutChains;
553 
554  for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
555  unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
556  MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
557  SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
558 
559  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
560  true);
561  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
562 
563  OutChains.push_back(
564  DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
565  ArgOffset += 4;
566  }
567 
568  if (!OutChains.empty()) {
569  OutChains.push_back(Chain);
570  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
571  }
572  }
573 
574  return Chain;
575 }
576 
577 // Lower formal arguments for the 64 bit ABI.
579  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
580  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
581  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
583 
584  // Analyze arguments according to CC_Sparc64.
586  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
587  *DAG.getContext());
588  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
589 
590  // The argument array begins at %fp+BIAS+128, after the register save area.
591  const unsigned ArgArea = 128;
592 
593  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
594  CCValAssign &VA = ArgLocs[i];
595  if (VA.isRegLoc()) {
596  // This argument is passed in a register.
597  // All integer register arguments are promoted by the caller to i64.
598 
599  // Create a virtual register for the promoted live-in value.
600  unsigned VReg = MF.addLiveIn(VA.getLocReg(),
601  getRegClassFor(VA.getLocVT()));
602  SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
603 
604  // Get the high bits for i32 struct elements.
605  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
606  Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
607  DAG.getConstant(32, DL, MVT::i32));
608 
609  // The caller promoted the argument, so insert an Assert?ext SDNode so we
610  // won't promote the value again in this function.
611  switch (VA.getLocInfo()) {
612  case CCValAssign::SExt:
613  Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
614  DAG.getValueType(VA.getValVT()));
615  break;
616  case CCValAssign::ZExt:
617  Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
618  DAG.getValueType(VA.getValVT()));
619  break;
620  default:
621  break;
622  }
623 
624  // Truncate the register down to the argument type.
625  if (VA.isExtInLoc())
626  Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
627 
628  InVals.push_back(Arg);
629  continue;
630  }
631 
632  // The registers are exhausted. This argument was passed on the stack.
633  assert(VA.isMemLoc());
634  // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
635  // beginning of the arguments area at %fp+BIAS+128.
636  unsigned Offset = VA.getLocMemOffset() + ArgArea;
637  unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
638  // Adjust offset for extended arguments, SPARC is big-endian.
639  // The caller will have written the full slot with extended bytes, but we
640  // prefer our own extending loads.
641  if (VA.isExtInLoc())
642  Offset += 8 - ValSize;
643  int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
644  InVals.push_back(
645  DAG.getLoad(VA.getValVT(), DL, Chain,
648  }
649 
650  if (!IsVarArg)
651  return Chain;
652 
653  // This function takes variable arguments, some of which may have been passed
654  // in registers %i0-%i5. Variable floating point arguments are never passed
655  // in floating point registers. They go on %i0-%i5 or on the stack like
656  // integer arguments.
657  //
658  // The va_start intrinsic needs to know the offset to the first variable
659  // argument.
660  unsigned ArgOffset = CCInfo.getNextStackOffset();
662  // Skip the 128 bytes of register save area.
663  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
664  Subtarget->getStackPointerBias());
665 
666  // Save the variable arguments that were passed in registers.
667  // The caller is required to reserve stack space for 6 arguments regardless
668  // of how many arguments were actually passed.
669  SmallVector<SDValue, 8> OutChains;
670  for (; ArgOffset < 6*8; ArgOffset += 8) {
671  unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
672  SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
673  int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
674  auto PtrVT = getPointerTy(MF.getDataLayout());
675  OutChains.push_back(
676  DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
678  }
679 
680  if (!OutChains.empty())
681  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
682 
683  return Chain;
684 }
685 
686 SDValue
688  SmallVectorImpl<SDValue> &InVals) const {
689  if (Subtarget->is64Bit())
690  return LowerCall_64(CLI, InVals);
691  return LowerCall_32(CLI, InVals);
692 }
693 
695  ImmutableCallSite CS) {
696  if (CS)
697  return CS.hasFnAttr(Attribute::ReturnsTwice);
698 
699  const Function *CalleeFn = nullptr;
700  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
701  CalleeFn = dyn_cast<Function>(G->getGlobal());
702  } else if (ExternalSymbolSDNode *E =
703  dyn_cast<ExternalSymbolSDNode>(Callee)) {
704  const Function &Fn = DAG.getMachineFunction().getFunction();
705  const Module *M = Fn.getParent();
706  const char *CalleeName = E->getSymbol();
707  CalleeFn = M->getFunction(CalleeName);
708  }
709 
710  if (!CalleeFn)
711  return false;
712  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
713 }
714 
715 // Lower a call for the 32-bit ABI.
716 SDValue
718  SmallVectorImpl<SDValue> &InVals) const {
719  SelectionDAG &DAG = CLI.DAG;
720  SDLoc &dl = CLI.DL;
722  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
724  SDValue Chain = CLI.Chain;
725  SDValue Callee = CLI.Callee;
726  bool &isTailCall = CLI.IsTailCall;
727  CallingConv::ID CallConv = CLI.CallConv;
728  bool isVarArg = CLI.IsVarArg;
729 
730  // Sparc target does not yet support tail call optimization.
731  isTailCall = false;
732 
733  // Analyze operands of the call, assigning locations to each operand.
735  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
736  *DAG.getContext());
737  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
738 
739  // Get the size of the outgoing arguments stack space requirement.
740  unsigned ArgsSize = CCInfo.getNextStackOffset();
741 
742  // Keep stack frames 8-byte aligned.
743  ArgsSize = (ArgsSize+7) & ~7;
744 
746 
747  // Create local copies for byval args.
748  SmallVector<SDValue, 8> ByValArgs;
749  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
750  ISD::ArgFlagsTy Flags = Outs[i].Flags;
751  if (!Flags.isByVal())
752  continue;
753 
754  SDValue Arg = OutVals[i];
755  unsigned Size = Flags.getByValSize();
756  unsigned Align = Flags.getByValAlign();
757 
758  if (Size > 0U) {
759  int FI = MFI.CreateStackObject(Size, Align, false);
760  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
761  SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
762 
763  Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
764  false, // isVolatile,
765  (Size <= 32), // AlwaysInline if size <= 32,
766  false, // isTailCall
768  ByValArgs.push_back(FIPtr);
769  }
770  else {
771  SDValue nullVal;
772  ByValArgs.push_back(nullVal);
773  }
774  }
775 
776  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
777 
779  SmallVector<SDValue, 8> MemOpChains;
780 
781  const unsigned StackOffset = 92;
782  bool hasStructRetAttr = false;
783  // Walk the register/memloc assignments, inserting copies/loads.
784  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
785  i != e;
786  ++i, ++realArgIdx) {
787  CCValAssign &VA = ArgLocs[i];
788  SDValue Arg = OutVals[realArgIdx];
789 
790  ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
791 
792  // Use local copy if it is a byval arg.
793  if (Flags.isByVal()) {
794  Arg = ByValArgs[byvalArgIdx++];
795  if (!Arg) {
796  continue;
797  }
798  }
799 
800  // Promote the value if needed.
801  switch (VA.getLocInfo()) {
802  default: llvm_unreachable("Unknown loc info!");
803  case CCValAssign::Full: break;
804  case CCValAssign::SExt:
805  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
806  break;
807  case CCValAssign::ZExt:
808  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
809  break;
810  case CCValAssign::AExt:
811  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
812  break;
813  case CCValAssign::BCvt:
814  Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
815  break;
816  }
817 
818  if (Flags.isSRet()) {
819  assert(VA.needsCustom());
820  // store SRet argument in %sp+64
821  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
822  SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
823  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
824  MemOpChains.push_back(
825  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
826  hasStructRetAttr = true;
827  continue;
828  }
829 
830  if (VA.needsCustom()) {
831  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
832 
833  if (VA.isMemLoc()) {
834  unsigned Offset = VA.getLocMemOffset() + StackOffset;
835  // if it is double-word aligned, just store.
836  if (Offset % 8 == 0) {
837  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
838  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
839  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
840  MemOpChains.push_back(
841  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
842  continue;
843  }
844  }
845 
846  if (VA.getLocVT() == MVT::f64) {
847  // Move from the float value from float registers into the
848  // integer registers.
849 
850  // TODO: The f64 -> v2i32 conversion is super-inefficient for
851  // constants: it sticks them in the constant pool, then loads
852  // to a fp register, then stores to temp memory, then loads to
853  // integer registers.
854  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
855  }
856 
858  Arg,
859  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
861  Arg,
862  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
863 
864  if (VA.isRegLoc()) {
865  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
866  assert(i+1 != e);
867  CCValAssign &NextVA = ArgLocs[++i];
868  if (NextVA.isRegLoc()) {
869  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
870  } else {
871  // Store the second part in stack.
872  unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
873  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
874  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
875  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
876  MemOpChains.push_back(
877  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
878  }
879  } else {
880  unsigned Offset = VA.getLocMemOffset() + StackOffset;
881  // Store the first part.
882  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
883  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
884  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
885  MemOpChains.push_back(
886  DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
887  // Store the second part.
888  PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
889  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
890  MemOpChains.push_back(
891  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
892  }
893  continue;
894  }
895 
896  // Arguments that can be passed on register must be kept at
897  // RegsToPass vector
898  if (VA.isRegLoc()) {
899  if (VA.getLocVT() != MVT::f32) {
900  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
901  continue;
902  }
903  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
904  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
905  continue;
906  }
907 
908  assert(VA.isMemLoc());
909 
910  // Create a store off the stack pointer for this argument.
911  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
912  SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
913  dl);
914  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
915  MemOpChains.push_back(
916  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
917  }
918 
919 
920  // Emit all stores, make sure the occur before any copies into physregs.
921  if (!MemOpChains.empty())
922  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
923 
924  // Build a sequence of copy-to-reg nodes chained together with token
925  // chain and flag operands which copy the outgoing args into registers.
926  // The InFlag in necessary since all emitted instructions must be
927  // stuck together.
928  SDValue InFlag;
929  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
930  unsigned Reg = toCallerWindow(RegsToPass[i].first);
931  Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
932  InFlag = Chain.getValue(1);
933  }
934 
935  unsigned SRetArgSize = (hasStructRetAttr)? getSRetArgSize(DAG, Callee):0;
936  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
937 
938  // If the callee is a GlobalAddress node (quite common, every direct call is)
939  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
940  // Likewise ExternalSymbol -> TargetExternalSymbol.
942  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
943  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
944  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
945  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
946 
947  // Returns a chain & a flag for retval copy to use
948  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
950  Ops.push_back(Chain);
951  Ops.push_back(Callee);
952  if (hasStructRetAttr)
953  Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
954  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
955  Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
956  RegsToPass[i].second.getValueType()));
957 
958  // Add a register mask operand representing the call-preserved registers.
959  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
960  const uint32_t *Mask =
961  ((hasReturnsTwice)
962  ? TRI->getRTCallPreservedMask(CallConv)
963  : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
964  assert(Mask && "Missing call preserved mask for calling convention");
965  Ops.push_back(DAG.getRegisterMask(Mask));
966 
967  if (InFlag.getNode())
968  Ops.push_back(InFlag);
969 
970  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
971  InFlag = Chain.getValue(1);
972 
973  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
974  DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
975  InFlag = Chain.getValue(1);
976 
977  // Assign locations to each value returned by this call.
979  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
980  *DAG.getContext());
981 
982  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
983 
984  // Copy all of the result registers out of their specified physreg.
985  for (unsigned i = 0; i != RVLocs.size(); ++i) {
986  if (RVLocs[i].getLocVT() == MVT::v2i32) {
987  SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
988  SDValue Lo = DAG.getCopyFromReg(
989  Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
990  Chain = Lo.getValue(1);
991  InFlag = Lo.getValue(2);
992  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
993  DAG.getConstant(0, dl, MVT::i32));
994  SDValue Hi = DAG.getCopyFromReg(
995  Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
996  Chain = Hi.getValue(1);
997  InFlag = Hi.getValue(2);
998  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
999  DAG.getConstant(1, dl, MVT::i32));
1000  InVals.push_back(Vec);
1001  } else {
1002  Chain =
1003  DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1004  RVLocs[i].getValVT(), InFlag)
1005  .getValue(1);
1006  InFlag = Chain.getValue(2);
1007  InVals.push_back(Chain.getValue(0));
1008  }
1009  }
1010 
1011  return Chain;
1012 }
1013 
1014 // FIXME? Maybe this could be a TableGen attribute on some registers and
1015 // this table could be generated automatically from RegInfo.
1016 unsigned SparcTargetLowering::getRegisterByName(const char* RegName, EVT VT,
1017  SelectionDAG &DAG) const {
1018  unsigned Reg = StringSwitch<unsigned>(RegName)
1019  .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1020  .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1021  .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1022  .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1023  .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1024  .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1025  .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1026  .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1027  .Default(0);
1028 
1029  if (Reg)
1030  return Reg;
1031 
1032  report_fatal_error("Invalid register name global variable");
1033 }
1034 
/// Returns true if \p CalleeName is one of the software floating point ABI
/// helper functions that return a long double (fp128).
static bool isFP128ABICall(const char *CalleeName)
{
  static const char *const ABICalls[] = {
      "_Q_add",   "_Q_sub",  "_Q_mul",  "_Q_div",
      "_Q_sqrt",  "_Q_neg",
      "_Q_itoq",  "_Q_stoq", "_Q_dtoq", "_Q_utoq",
      "_Q_lltoq", "_Q_ulltoq"};
  // Plain linear scan over the (small, fixed) helper-name table.
  for (const char *Name : ABICalls)
    if (strcmp(CalleeName, Name) == 0)
      return true;
  return false;
}
1051 
// Returns the allocation size (in bytes) of the struct returned through the
// hidden sret pointer of the callee, or 0 when it cannot be determined.
unsigned
{
  const Function *CalleeFn = nullptr;
  // Direct call: the callee Function hangs off the GlobalAddress node.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    // External symbol: try to resolve it to a declaration in this module.
    const Function &F = DAG.getMachineFunction().getFunction();
    const Module *M = F.getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
    // The _Q_* soft-fp128 ABI helpers are known to return a long double
    // even when no declaration is visible in the module.
    if (!CalleeFn && isFP128ABICall(CalleeName))
      return 16; // Return sizeof(fp128)
  }

  if (!CalleeFn)
    return 0;

  // It would be nice to check for the sret attribute on CalleeFn here,
  // but since it is not part of the function type, any check will misfire.

  // The sret pointer is assumed to be the first argument; its pointee type
  // gives the size of the returned struct.
  PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
  Type *ElementTy = Ty->getElementType();
  return DAG.getDataLayout().getTypeAllocSize(ElementTy);
}
1078 
1079 
// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
    CCValAssign NewVA;

    // Determine the offset into the argument array.
    // Each f64 slot is 8 bytes and each f128 slot is 16; the FP register
    // number relative to D0/Q0 gives the slot index.
    unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                    IReg, MVT::i64, CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
                                          IReg, MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                                  Offset, VA.getLocVT(), VA.getLocInfo());
    }
    ArgLocs[i] = NewVA;
  }
}
1132 
// Lower a call for the 64-bit ABI.
//
// Assigns argument locations with CC_Sparc64, copies register arguments into
// the caller's outgoing window registers (splitting an f128 into two i64
// halves through a stack slot), stores stack arguments at %sp+BIAS+128,
// emits the SPISD::CALL node, and finally copies the results back out of
// their physical registers.
SDValue
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Sparc target does not yet support tail call optimization.
  CLI.IsTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store =
            DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
        // Load into Reg and Reg+1
        SDValue Hi64 =
            DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
        SDValue Lo64 =
            DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
                                            Hi64));
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
                                            Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }
      // NOTE(review): toCallerWindow presumably translates the callee-side
      // register number to the caller's register window — confirm in SOURCE.
      RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL,
                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);

  // Build the operands for the call instruction itself.
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  // returns_twice callees (e.g. setjmp-like) get a more conservative mask.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                                                     CLI.CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CS)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // CopyFromReg previous node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, DL, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}
1391 
1392 //===----------------------------------------------------------------------===//
1393 // TargetLowering Implementation
1394 //===----------------------------------------------------------------------===//
1395 
  // A 32-bit atomic exchange needs no IR-level expansion; it maps to the
  // native exchange instruction directly.
  if (AI->getOperation() == AtomicRMWInst::Xchg &&
      AI->getType()->getPrimitiveSizeInBits() == 32)
    return AtomicExpansionKind::None; // Uses xchg instruction

}
1403 
/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition. Unknown condition codes are a fatal (unreachable) error.
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  // Signed comparisons.
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  // Unsigned comparisons.
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}
1421 
/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition. Ordered and "don't care" forms share the same FCC code.
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  // Unordered comparisons.
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}
1449 
                                         const SparcSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Configure legal register classes, operation actions and runtime library
  // names for the selected SPARC subtarget.
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));

  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  if (!Subtarget->useSoftFloat()) {
    addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
    addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
    addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
  }
  if (Subtarget->is64Bit()) {
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
  } else {
    // On 32bit sparc, we define a double-register 32bit register
    // class, as well. This is modeled in LLVM as a 2-vector of i32.
    addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);

    // ...but almost all operations must be expanded, so set that as
    // the default.
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
    }
    // Truncating/extending stores/loads are also not supported.
    for (MVT VT : MVT::integer_vector_valuetypes()) {


    }
    // However, load and store *are* legal.

    // And we need to promote i64 loads/stores into vector load/store

    // Sadly, this doesn't work:
    // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
    // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
  }

  // Turn FP extload into load/fpextend
  for (MVT VT : MVT::fp_valuetypes()) {
  }

  // Sparc doesn't have i1 sign extending load
  for (MVT VT : MVT::integer_valuetypes())

  // Turn FP truncstore into trunc + store.

  // Custom legalize GlobalAddress nodes into LO/HI parts.

  // Sparc doesn't have sext_inreg, replace them with shl/sra

  // Sparc has no REM or DIVREM operations.

  // ... nor does SparcV9.
  if (Subtarget->is64Bit()) {
  }

  // Custom expand fp<->sint

  // Custom expand fp<->uint


  // Sparc has no select or setcc: expand to SELECT_CC.


  // Sparc doesn't have BRCOND either, it has BR_CC.




  if (Subtarget->is64Bit()) {

                       Subtarget->usePopc() ? Legal : Expand);
  }

  // ATOMICs.
  // Atomics are supported on SparcV9. 32-bit atomics are also
  // supported by some Leon SparcV8 variants. Otherwise, atomics
  // are unsupported.
  if (Subtarget->isV9())
  else if (Subtarget->hasLeonCasa())
  else




  // Custom Lower Atomic LOAD/STORE

  if (Subtarget->is64Bit()) {
  }

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  if (!Subtarget->isV9()) {
    // SparcV8 does not have FNEGD and FABSD.
  }



  // Expands to [SU]MUL_LOHI.

  if (Subtarget->useSoftMulDiv()) {
    // .umul works for both signed and unsigned
    setLibcallName(RTLIB::MUL_I32, ".umul");

    setLibcallName(RTLIB::SDIV_I32, ".div");

    setLibcallName(RTLIB::UDIV_I32, ".udiv");
  }

  if (Subtarget->is64Bit()) {


  }

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.

  // VAARG needs to be lowered to not do unaligned accesses for doubles.


  // Use the default implementation.


                     Subtarget->usePopc() ? Legal : Expand);

  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
  } else {
  }

  if (Subtarget->hasHardQuad()) {
    if (Subtarget->isV9()) {
    } else {
    }

    // Hardware quad, 32-bit: fp128<->i64 conversions still go through the
    // _Q_* software helpers.
    if (!Subtarget->is64Bit()) {
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
    }

  } else {
    // Custom legalize f128 operations.



    // Setup Runtime library names.
    if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
      // 64-bit hard-float: use the _Qp_* quad-precision helpers.
      setLibcallName(RTLIB::ADD_F128, "_Qp_add");
      setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
      setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
      setLibcallName(RTLIB::DIV_F128, "_Qp_div");
      setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
      setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
      setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
      setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
      setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
      setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
      setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
    } else if (!Subtarget->useSoftFloat()) {
      // 32-bit hard-float: use the _Q_* quad-precision helpers.
      setLibcallName(RTLIB::ADD_F128, "_Q_add");
      setLibcallName(RTLIB::SUB_F128, "_Q_sub");
      setLibcallName(RTLIB::MUL_F128, "_Q_mul");
      setLibcallName(RTLIB::DIV_F128, "_Q_div");
      setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
      setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
      setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
      setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
      setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
      setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
      setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
      setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
      setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
      setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
    }
  }

  if (Subtarget->fixAllFDIVSQRT()) {
    // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
    // the former instructions generate errata on LEON processors.
  }

  if (Subtarget->hasNoFMULS()) {
  }



}
1846 
  // Defer entirely to the subtarget's soft-float setting.
  return Subtarget->useSoftFloat();
}
1850 
1851 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1852  switch ((SPISD::NodeType)Opcode) {
1853  case SPISD::FIRST_NUMBER: break;
1854  case SPISD::CMPICC: return "SPISD::CMPICC";
1855  case SPISD::CMPFCC: return "SPISD::CMPFCC";
1856  case SPISD::BRICC: return "SPISD::BRICC";
1857  case SPISD::BRXCC: return "SPISD::BRXCC";
1858  case SPISD::BRFCC: return "SPISD::BRFCC";
1859  case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1860  case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1861  case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1862  case SPISD::EH_SJLJ_SETJMP: return "SPISD::EH_SJLJ_SETJMP";
1863  case SPISD::EH_SJLJ_LONGJMP: return "SPISD::EH_SJLJ_LONGJMP";
1864  case SPISD::Hi: return "SPISD::Hi";
1865  case SPISD::Lo: return "SPISD::Lo";
1866  case SPISD::FTOI: return "SPISD::FTOI";
1867  case SPISD::ITOF: return "SPISD::ITOF";
1868  case SPISD::FTOX: return "SPISD::FTOX";
1869  case SPISD::XTOF: return "SPISD::XTOF";
1870  case SPISD::CALL: return "SPISD::CALL";
1871  case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
1872  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1873  case SPISD::FLUSHW: return "SPISD::FLUSHW";
1874  case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1875  case SPISD::TLS_LD: return "SPISD::TLS_LD";
1876  case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
1877  }
1878  return nullptr;
1879 }
1880 
                                            EVT VT) const {
  // Scalar setcc results are produced as i32 on SPARC.
  if (!VT.isVector())
    return MVT::i32;
}
1887 
/// computeKnownBitsForTargetNode - Determine which bits of Op are known to be
/// zero or one for a target-specific node. Used by the DAG combiner. For the
/// SELECT_* nodes, a bit is only known if it is known in both select operands.
                                         (const SDValue Op,
                                          KnownBits &Known,
                                          const APInt &DemandedElts,
                                          const SelectionDAG &DAG,
                                          unsigned Depth) const {
  KnownBits Known2;
  // Start from "nothing known"; only the cases below refine this.
  Known.resetAll();

  switch (Op.getOpcode()) {
  default: break;
  case SPISD::SELECT_ICC:
  case SPISD::SELECT_XCC:
  case SPISD::SELECT_FCC:
    // Operands 0 and 1 are the two selectable values.
    DAG.computeKnownBits(Op.getOperand(1), Known, Depth+1);
    DAG.computeKnownBits(Op.getOperand(0), Known2, Depth+1);

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  }
}
1914 
1915 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
1916 // set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
1917 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
1918  ISD::CondCode CC, unsigned &SPCC) {
1919  if (isNullConstant(RHS) &&
1920  CC == ISD::SETNE &&
1921  (((LHS.getOpcode() == SPISD::SELECT_ICC ||
1922  LHS.getOpcode() == SPISD::SELECT_XCC) &&
1923  LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
1924  (LHS.getOpcode() == SPISD::SELECT_FCC &&
1925  LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
1926  isOneConstant(LHS.getOperand(0)) &&
1927  isNullConstant(LHS.getOperand(1))) {
1928  SDValue CMPCC = LHS.getOperand(3);
1929  SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
1930  LHS = CMPCC.getOperand(0);
1931  RHS = CMPCC.getOperand(1);
1932  }
1933 }
1934 
// Convert to a target node and set target flags.
// Handles GlobalAddress, ConstantPool, BlockAddress and ExternalSymbol nodes;
// TF is the target-specific flag attached to the resulting target node.
                                             SelectionDAG &DAG) const {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(),
                                      SDLoc(GA),
                                      GA->getValueType(0),
                                      GA->getOffset(), TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(),
                                     CP->getValueType(0),
                                     CP->getAlignment(),
                                     CP->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                     Op.getValueType(),
                                     0,
                                     TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(),
                                       ES->getValueType(0), TF);

  llvm_unreachable("Unhandled address SDNode");
}
1962 
// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
// HiTF and LoTF are the target flags applied (via withTargetFlags) to the
// SPISD::Hi and SPISD::Lo halves respectively.
                                          unsigned HiTF, unsigned LoTF,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}
1974 
// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
// In PIC mode the address is loaded from the GOT; otherwise it is
// materialized with the immediate-splitting pattern matching the current
// absolute code model (abs32 / abs44 / abs64).
  SDLoc DL(Op);
  EVT VT = getPointerTy(DAG.getDataLayout());

  // Handle PIC mode first. SPARC needs a got load for every variable!
  if (isPositionIndependent()) {
    const Module *M = DAG.getMachineFunction().getFunction().getParent();
    PICLevel::Level picLevel = M->getPICLevel();
    SDValue Idx;

    if (picLevel == PICLevel::SmallPIC) {
      // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
      Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
    } else {
      // This is the pic32 code model, the GOT is known to be smaller than 4GB.
    }

    SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MFI.setHasCalls(true);
    // The variable's address lives in the GOT slot; load it.
    return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
  }

  // This is one of the absolute code models.
  switch(getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
    // abs32.
  case CodeModel::Medium: {
    // abs44: shift the high 22-bit piece left by 12 and add the low piece.
    H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
    L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
    return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
  }
  case CodeModel::Large: {
    // abs64: combine upper and lower 32-bit halves of the full address.
    Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
    return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
  }
  }
}
2035 
// Custom-lowering entry point; address materialization is shared in
// makeAddress().
                                                SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}
2040 
// Custom-lowering entry point; address materialization is shared in
// makeAddress().
                                                SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}
2045 
// Custom-lowering entry point; address materialization is shared in
// makeAddress().
                                                SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}
2050 
// Lower a thread-local GlobalAddress, selecting the code sequence for the
// TLS model chosen for the variable (General/Local Dynamic, Initial Exec,
// or Local Exec). Emulated TLS is delegated to the generic lowering.
                                                     SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc DL(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // General/Local Dynamic: materialize the GOT offset of the variable and
  // call __tls_get_addr to resolve it at run time.
  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
    // Pick GD or LD relocation flavors for each piece of the sequence.
    unsigned HiTF = ((model == TLSModel::GeneralDynamic)
    unsigned LoTF = ((model == TLSModel::GeneralDynamic)
    unsigned addTF = ((model == TLSModel::GeneralDynamic)
    unsigned callTF = ((model == TLSModel::GeneralDynamic)

    SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
    SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
                                   withTargetFlags(Op, addTF, DAG));

    // Build the call to __tls_get_addr with the argument in %o0.
    SDValue Chain = DAG.getEntryNode();
    SDValue InFlag;

    Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
    Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
    InFlag = Chain.getValue(1);
    SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
    SDValue Symbol = withTargetFlags(Op, callTF, DAG);

    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
    assert(Mask && "Missing call preserved mask for calling convention");
    SDValue Ops[] = {Chain,
                     Callee,
                     Symbol,
                     DAG.getRegister(SP::O0, PtrVT),
                     DAG.getRegisterMask(Mask),
                     InFlag};
    Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
    InFlag = Chain.getValue(1);
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
                               DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
    InFlag = Chain.getValue(1);
    // The resolved address comes back in %o0.
    SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);

    if (model != TLSModel::LocalDynamic)
      return Ret;

    // Local Dynamic additionally adds the variable's module-relative
    // offset to the value returned by __tls_get_addr.
    SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
    SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
    HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
  }

  // Initial Exec: load the variable's TP-relative offset from the GOT and
  // add it to the thread pointer (%g7).
  if (model == TLSModel::InitialExec) {
    unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX

    SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);

    // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
    // function has calls.
    MFI.setHasCalls(true);

    SDValue TGA = makeHiLoPair(Op,
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
                                 DL, PtrVT, Ptr,
                                 withTargetFlags(Op, ldTF, DAG));
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
                       DAG.getRegister(SP::G7, PtrVT), Offset,
                       withTargetFlags(Op,
  }

  // Local Exec: the offset is a link-time constant; add it to %g7 directly.
  assert(model == TLSModel::LocalExec);
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);

  return DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getRegister(SP::G7, PtrVT), Offset);
}
2155 
2158  const SDLoc &DL,
2159  SelectionDAG &DAG) const {
2161  EVT ArgVT = Arg.getValueType();
2162  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2163 
2164  ArgListEntry Entry;
2165  Entry.Node = Arg;
2166  Entry.Ty = ArgTy;
2167 
2168  if (ArgTy->isFP128Ty()) {
2169  // Create a stack object and pass the pointer to the library function.
2170  int FI = MFI.CreateStackObject(16, 8, false);
2171  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2172  Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
2173  /* Alignment = */ 8);
2174 
2175  Entry.Node = FIPtr;
2176  Entry.Ty = PointerType::getUnqual(ArgTy);
2177  }
2178  Args.push_back(Entry);
2179  return Chain;
2180 }
2181 
// Lower an f128 operation to a call into the soft-float support library.
// LibFuncName is the routine to call; numArgs operands are taken from Op.
// An f128 result is returned indirectly through a stack slot (flagged sret
// on 32-bit targets); other result types are returned directly.
SDValue
                                     const char *LibFuncName,
                                     unsigned numArgs) const {

  ArgListTy Args;

  auto PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
  Type *RetTyABI = RetTy;
  SDValue Chain = DAG.getEntryNode();
  SDValue RetPtr;

  if (RetTy->isFP128Ty()) {
    // Create a Stack Object to receive the return value of type f128.
    ArgListEntry Entry;
    int RetFI = MFI.CreateStackObject(16, 8, false);
    RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
    Entry.Node = RetPtr;
    Entry.Ty = PointerType::getUnqual(RetTy);
    if (!Subtarget->is64Bit())
      Entry.IsSRet = true;
    Entry.IsReturned = false;
    Args.push_back(Entry);
    // The call itself then returns void; the value is read from RetPtr.
    RetTyABI = Type::getVoidTy(*DAG.getContext());
  }

  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
  for (unsigned i = 0, e = numArgs; i != e; ++i) {
    Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
  }
  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
    .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // chain is in second result.
  if (RetTyABI == RetTy)
    return CallInfo.first;

  assert (RetTy->isFP128Ty() && "Unexpected return type!");

  Chain = CallInfo.second;

  // Load RetPtr to get the return value.
  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
                     MachinePointerInfo(), /* Alignment = */ 8);
}
2234 
// Lower an f128 comparison to a soft-float library call and translate the
// requested FP condition (SPCC, updated in place) into an integer condition
// on the call's result, returning the CMPICC glue node for it.
                                              unsigned &SPCC, const SDLoc &DL,
                                              SelectionDAG &DAG) const {

  const char *LibCall = nullptr;
  bool is64Bit = Subtarget->is64Bit();
  // Simple conditions map to dedicated boolean-returning routines; the
  // remaining (mostly unordered) conditions share the generic _Q*_cmp,
  // whose numeric result is decoded in the switch below.
  switch(SPCC) {
  default: llvm_unreachable("Unhandled conditional code!");
  case SPCC::FCC_E  : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
  case SPCC::FCC_L  : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
  case SPCC::FCC_G  : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
  case SPCC::FCC_UL :
  case SPCC::FCC_ULE:
  case SPCC::FCC_UG :
  case SPCC::FCC_UGE:
  case SPCC::FCC_U  :
  case SPCC::FCC_O  :
  case SPCC::FCC_LG :
  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  }

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
  ArgListTy Args;
  SDValue Chain = DAG.getEntryNode();
  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);

  CLI.setDebugLoc(DL).setChain(Chain)
    .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  // result is in first, and chain is in second result.
  SDValue Result =  CallInfo.first;

  // NOTE(review): the masks/constants below assume the _Q*_cmp result
  // encodes equal/less/greater/unordered as 0/1/2/3 — confirm against the
  // SPARC soft-float library ABI.
  switch(SPCC) {
  default: {
    // Boolean-returning routines: condition holds iff result != 0.
    SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UL : {
    SDValue Mask = DAG.getTargetConstant(1, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_ULE: {
    SDValue RHS = DAG.getTargetConstant(2, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UG :  {
    SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_G;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UGE: {
    SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }

  case SPCC::FCC_U  :  {
    SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_O  :  {
    SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_LG :  {
    SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UE : {
    SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  }
}
2331 
// Lower an FP_EXTEND whose result is f128 to the matching library call;
// the callee depends on whether the source is f64 or f32.
static SDValue
                    const SparcTargetLowering &TLI) {

  if (Op.getOperand(0).getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);

  if (Op.getOperand(0).getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);

  llvm_unreachable("fpextend with non-float operand!");
  return SDValue();
}
2347 
// Lower an FP_ROUND from f128 to the matching library call; FP_ROUND
// between f64/f32 is already legal and is returned unchanged.
static SDValue
                   const SparcTargetLowering &TLI) {
  // FP_ROUND on f64 and f32 are legal.
  if (Op.getOperand(0).getValueType() != MVT::f128)
    return Op;

  if (Op.getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
  if (Op.getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);

  llvm_unreachable("fpround to non-float!");
  return SDValue();
}
2365 
// Lower FP_TO_SINT: f128 sources (without usable hard-quad support) become
// library calls; otherwise the conversion is done in an FP register and
// bitcast to the integer result type.
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  assert(VT == MVT::i32 || VT == MVT::i64);

  // Expand f128 operations to fp128 abi calls.
  if (Op.getOperand(0).getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
    const char *libName = TLI.getLibcallName(VT == MVT::i32
                                             ? RTLIB::FPTOSINT_F128_I32
                                             : RTLIB::FPTOSINT_F128_I64);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the resulting type is illegal.
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Otherwise, Convert the fp value to integer in an FP register.
  // The result lives in an FP register, so bitcast it to the integer type.
  if (VT == MVT::i32)
    Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
  else
    Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));

  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
2394 
// Lower SINT_TO_FP: f128 results (without usable hard-quad support) become
// library calls; otherwise the integer is bitcast into an FP register and
// converted there.
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));

  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;

  // Expand f128 operations to fp128 ABI calls.
  if (Op.getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
    const char *libName = TLI.getLibcallName(OpVT == MVT::i32
                                             ? RTLIB::SINTTOFP_I32_F128
                                             : RTLIB::SINTTOFP_I64_F128);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the operand type is illegal.
  if (!TLI.isTypeLegal(OpVT))
    return SDValue();

  // Otherwise, Convert the int value to FP in an FP register.
  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
}
2422 
// Lower FP_TO_UINT: only the f128-source case needs custom handling (a
// library call); everything else is left to the default expansion.
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the resulting type is legal.
  if (Op.getOperand(0).getValueType() != MVT::f128 ||
      (hasHardQuad && TLI.isTypeLegal(VT)))
    return SDValue();

  assert(VT == MVT::i32 || VT == MVT::i64);

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(VT == MVT::i32
                                            ? RTLIB::FPTOUINT_F128_I32
                                            : RTLIB::FPTOUINT_F128_I64),
                         1);
}
2443 
// Lower UINT_TO_FP: only the f128-result case needs custom handling (a
// library call); everything else is left to the default expansion.
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || OpVT == MVT::i64);

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the operand type is legal.
  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
    return SDValue();

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(OpVT == MVT::i32
                                            ? RTLIB::UINTTOFP_I32_F128
                                            : RTLIB::UINTTOFP_I64_F128),
                         1);
}
2462 
// Lower BR_CC to a Sparc compare + conditional-branch pair, choosing the
// branch opcode from the operand type (icc/xcc for integers, fcc for FP)
// and routing f128 compares through the soft-float helper when needed.
                          const SparcTargetLowering &TLI,
                          bool hasHardQuad) {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a br_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  // Get the condition flag.
  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
    // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
    Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      // f128 compare is done via libcall; its result is an integer flag.
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::BRICC;
    } else {
      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      Opc = SPISD::BRFCC;
    }
  }
  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
2499 
// Lower SELECT_CC to a Sparc compare + conditional-move pair, mirroring
// the opcode selection logic of LowerBR_CC for the SELECT_* node family.
                              const SparcTargetLowering &TLI,
                              bool hasHardQuad) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a select_cc of a "setcc", and if the setcc got lowered into
  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    // 32-bit compares select on icc, 64-bit on xcc.
    Opc = LHS.getValueType() == MVT::i32 ?
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      // f128 compare is done via libcall; its result is an integer flag.
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::SELECT_ICC;
    } else {
      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
      Opc = SPISD::SELECT_FCC;
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
    }
  }
  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
2535 
// Lower the EH setjmp intrinsic to the target EH_SJLJ_SETJMP node
// (operands: chain, jump buffer; results: i32 value, chain).
                                       const SparcTargetLowering &TLI) const {
  SDLoc DL(Op);
  return DAG.getNode(SPISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), Op.getOperand(1));

}
2543 
// Lower the EH longjmp intrinsic to the target EH_SJLJ_LONGJMP node
// (operands: chain, jump buffer; no value result).
                                        const SparcTargetLowering &TLI) const {
  SDLoc DL(Op);
  return DAG.getNode(SPISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0), Op.getOperand(1));
}
2549 
// Lower VASTART by storing the address of the first variadic stack slot
// (frame pointer %i6 plus the saved varargs frame offset) into the
// va_list object.
                            const SparcTargetLowering &TLI) {
  MachineFunction &MF = DAG.getMachineFunction();
  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());

  // Need frame address to find the address of VarArgsFrameIndex.

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc DL(Op);
  SDValue Offset =
      DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
                  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
2569 
  // VAARG lowering: load the current va_list pointer, bump it past this
  // argument, store it back, then load the argument itself.
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vaarg.
  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits()/8,
                                                      DL));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList.
  // We can't count on greater alignment than the word size.
  return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
                     std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8);
}
2592 
// Lower DYNAMIC_STACKALLOC by adjusting %o6 (%sp) downward and returning a
// pointer positioned above the register spill area. Over-aligned
// allocations are not supported and abort compilation.
                                       const SparcSubtarget *Subtarget) {
  SDValue Chain = Op.getOperand(0);  // Legalize the chain.
  SDValue Size  = Op.getOperand(1);  // Legalize the size.
  unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  unsigned StackAlign = Subtarget->getFrameLowering()->getStackAlignment();
  EVT VT = Size->getValueType(0);
  SDLoc dl(Op);

  // TODO: implement over-aligned alloca. (Note: also implies
  // supporting overaligned function frames + dynamic
  // allocations, at all, which currently isn't supported)
  if (Align > StackAlign) {
    const MachineFunction &MF = DAG.getMachineFunction();
    report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
                       "over-aligned dynamic alloca not supported.");
  }

  // The resultant pointer needs to be above the register spill area
  // at the bottom of the stack.
  unsigned regSpillArea;
  if (Subtarget->is64Bit()) {
    regSpillArea = 128;
  } else {
    // On Sparc32, the size of the spill area is 92. Unfortunately,
    // that's only 4-byte aligned, not 8-byte aligned (the stack
    // pointer is 8-byte aligned). So, if the user asked for an 8-byte
    // aligned dynamic allocation, we actually need to add 96 to the
    // bottom of the stack, instead of 92, to ensure 8-byte alignment.

    // That also means adding 4 to the size of the allocation --
    // before applying the 8-byte rounding. Unfortunately, the
    // value we get here has already had rounding applied. So, we need
    // to add 8, instead, wasting a bit more memory.

    // Further, this only actually needs to be done if the required
    // alignment is > 4, but, we've lost that info by this point, too,
    // so we always apply it.

    // (An alternative approach would be to always reserve 96 bytes
    // instead of the required 92, but then we'd waste 4 extra bytes
    // in every frame, not just those with dynamic stack allocations)

    // TODO: modify code in SelectionDAGBuilder to make this less sad.

    Size = DAG.getNode(ISD::ADD, dl, VT, Size,
                       DAG.getConstant(8, dl, VT));
    regSpillArea = 96;
  }

  unsigned SPReg = SP::O6;
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);    // Output chain

  // On 64-bit, %sp carries the stack-pointer bias; account for it.
  regSpillArea += Subtarget->getStackPointerBias();

  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
                               DAG.getConstant(regSpillArea, dl, VT));
  SDValue Ops[2] = { NewVal, Chain };
  return DAG.getMergeValues(Ops, dl);
}
2655 
2656 
  // Emit a FLUSHW node, forcing all register windows to be written to the
  // stack so windowed registers can be read from memory.
  SDLoc dl(Op);
  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
                              dl, MVT::Other, DAG.getEntryNode());
  return Chain;
}
2663 
// Compute the frame address `depth` frames up the call stack. Depth 0 is
// read straight from the frame register; deeper frames are reached by
// flushing the register windows and walking saved frame pointers in memory.
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
                            const SparcSubtarget *Subtarget) {
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned FrameReg = SP::I6;
  unsigned stackBias = Subtarget->getStackPointerBias();

  SDValue FrameAddr;

  if (depth == 0) {
    FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
    // 64-bit frame pointers are biased; undo the bias for the user.
    if (Subtarget->is64Bit())
      FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(stackBias, dl));
    return FrameAddr;
  }

  // flush first to make sure the windowed registers' values are in stack
  SDValue Chain = getFLUSHW(Op, DAG);
  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);

  // Offset of the saved frame pointer (%i6) within a frame.
  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;

  while (depth--) {
    SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, dl));
    FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
  }
  if (Subtarget->is64Bit())
    FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                            DAG.getIntPtrConstant(stackBias, dl));
  return FrameAddr;
}
2700 
2701 
// Lower the frameaddress intrinsic; the depth operand is a constant.
                              const SparcSubtarget *Subtarget) {

  uint64_t depth = Op.getConstantOperandVal(0);

  return getFRAMEADDR(depth, Op, DAG, Subtarget);

}
2710 
// Lower the returnaddress intrinsic. Depth 0 reads %i7 directly; deeper
// frames load the saved return address from the caller's stack frame
// located via getFRAMEADDR.
                               const SparcTargetLowering &TLI,
                               const SparcSubtarget *Subtarget) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue RetAddr;
  if (depth == 0) {
    auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
    return RetAddr;
  }

  // Need frame address to find return address of the caller.
  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget);

  // Offset of the saved return address (%i7) within a frame.
  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
  SDValue Ptr = DAG.getNode(ISD::ADD,
                            dl, VT,
                            FrameAddr,
                            DAG.getIntPtrConstant(Offset, dl));
  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());

  return RetAddr;
}
2745 
2746 static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2747  unsigned opcode) {
2748  assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2749  assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2750 
2751  // Lower fneg/fabs on f64 to fneg/fabs on f32.
2752  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2753  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2754 
2755  // Note: in little-endian, the floating-point value is stored in the
2756  // registers are in the opposite order, so the subreg with the sign
2757  // bit is the highest-numbered (odd), rather than the
2758  // lowest-numbered (even).
2759 
2760  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2761  SrcReg64);
2762  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2763  SrcReg64);
2764 
2765  if (DAG.getDataLayout().isLittleEndian())
2766  Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2767  else
2768  Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2769 
2770  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2771  dl, MVT::f64), 0);
2772  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2773  DstReg64, Hi32);
2774  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2775  DstReg64, Lo32);
2776  return DstReg64;
2777 }
2778 
// Lower a f128 load into two f64 loads.
// The two halves are loaded at base and base+8 (alignment capped at 8) and
// reassembled into an f128 register quad; the returned chain is the token
// factor of both component loads.
{
  SDLoc dl(Op);
  LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
  assert(LdNode && LdNode->getOffset().isUndef()
         && "Unexpected node type");

  // A 16-byte alignment promise can only be kept as 8 bytes per half.
  unsigned alignment = LdNode->getAlignment();
  if (alignment > 8)
    alignment = 8;

  SDValue Hi64 =
      DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
                  LdNode->getPointerInfo(), alignment);
  EVT addrVT = LdNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              LdNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
                             LdNode->getPointerInfo(), alignment);

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                       dl, MVT::f128);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Hi64,
                               SubRegEven);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Lo64,
                               SubRegOdd);
  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
                           SDValue(Lo64.getNode(), 1) };
  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
  return DAG.getMergeValues(Ops, dl);
}
2822 
// LOAD custom lowering: only f128 loads need splitting; everything else
// is already legal and returned unchanged.
{
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());

  EVT MemVT = LdNode->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Load(Op, DAG);

  return Op;
}
2833 
// Lower a f128 store into two f64 stores.
// The even/odd f64 halves of the f128 register are extracted and stored at
// base and base+8 (alignment capped at 8); the result is the token factor
// of both component stores.
  SDLoc dl(Op);
  StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
  assert(StNode && StNode->getOffset().isUndef()
         && "Unexpected node type");
  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegEven);
  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegOdd);

  // A 16-byte alignment promise can only be kept as 8 bytes per half.
  unsigned alignment = StNode->getAlignment();
  if (alignment > 8)
    alignment = 8;

  SDValue OutChains[2];
  OutChains[0] =
      DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
                   StNode->getBasePtr(), MachinePointerInfo(), alignment);
  EVT addrVT = StNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              StNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
                              MachinePointerInfo(), alignment);
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
2870 
// STORE custom lowering: f128 stores are split into two f64 stores, and
// i64 stores are rewritten as v2i32 stores; other stores are left to the
// default handling (returning an empty SDValue).
{
  SDLoc dl(Op);
  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());

  EVT MemVT = St->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Store(Op, DAG);

  if (MemVT == MVT::i64) {
    // Custom handling for i64 stores: turn it into a bitcast and a
    // v2i32 store.
    SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
    SDValue Chain = DAG.getStore(
        St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
        St->getAlignment(), St->getMemOperand()->getFlags(), St->getAAInfo());
    return Chain;
  }

  return SDValue();
}
2892 
2893 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
2894  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
2895  && "invalid opcode");
2896 
2897  SDLoc dl(Op);
2898 
2899  if (Op.getValueType() == MVT::f64)
2900  return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
2901  if (Op.getValueType() != MVT::f128)
2902  return Op;
2903 
2904  // Lower fabs/fneg on f128 to fabs/fneg on f64
2905  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
2906  // (As with LowerF64Op, on little-endian, we need to negate the odd
2907  // subreg)
2908 
2909  SDValue SrcReg128 = Op.getOperand(0);
2910  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
2911  SrcReg128);
2912  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
2913  SrcReg128);
2914 
2915  if (DAG.getDataLayout().isLittleEndian()) {
2916  if (isV9)
2917  Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
2918  else
2919  Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
2920  } else {
2921  if (isV9)
2922  Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
2923  else
2924  Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
2925  }
2926 
2927  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2928  dl, MVT::f128), 0);
2929  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
2930  DstReg128, Hi64);
2931  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
2932  DstReg128, Lo64);
2933  return DstReg128;
2934 }
2935 
// Expand 64-bit ADDC/ADDE/SUBC/SUBE into two 32-bit halves, threading the
// carry from the low half to the high half via a glue value.
// NOTE(review): the defining signature line (presumably
// "static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {")
// was lost in extraction -- confirm against the original source.
2937 
2938  if (Op.getValueType() != MVT::i64)
2939  return Op;
2940 
2941  SDLoc dl(Op);
// Split the first operand into low and high 32-bit halves.
2942  SDValue Src1 = Op.getOperand(0);
2943  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
2944  SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
2945  DAG.getConstant(32, dl, MVT::i64));
2946  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
2947 
// Split the second operand likewise.
2948  SDValue Src2 = Op.getOperand(1);
2949  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
2950  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
2951  DAG.getConstant(32, dl, MVT::i64));
2952  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
2953 
2954 
// The high half always uses the carry-consuming opcode (ADDE/SUBE).
// Only ADDE/SUBE themselves have an incoming carry operand (hasChain).
2955  bool hasChain = false;
2956  unsigned hiOpc = Op.getOpcode();
2957  switch (Op.getOpcode()) {
2958  default: llvm_unreachable("Invalid opcode");
2959  case ISD::ADDC: hiOpc = ISD::ADDE; break;
2960  case ISD::ADDE: hasChain = true; break;
2961  case ISD::SUBC: hiOpc = ISD::SUBE; break;
2962  case ISD::SUBE: hasChain = true; break;
2963  }
2964  SDValue Lo;
2965  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
2966  if (hasChain) {
2967  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
2968  Op.getOperand(2));
2969  } else {
2970  Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
2971  }
// The glue result (value #1) of the low half feeds the carry input of the
// high half.
2972  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
2973  SDValue Carry = Hi.getValue(1);
2974 
// Reassemble the 64-bit result: (zext(Hi) << 32) | zext(Lo).
2975  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
2976  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
2977  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
2978  DAG.getConstant(32, dl, MVT::i64));
2979 
2980  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
2981  SDValue Ops[2] = { Dst, Carry };
2982  return DAG.getMergeValues(Ops, dl);
2983 }
2984 
2985 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
2986 // in LegalizeDAG.cpp except the order of arguments to the library function.
// NOTE(review): the first signature line (presumably
// "static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,") was lost
// in extraction -- confirm against the original source.
2988  const SparcTargetLowering &TLI)
2989 {
2990  unsigned opcode = Op.getOpcode();
2991  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
2992 
2993  bool isSigned = (opcode == ISD::SMULO);
2994  EVT VT = MVT::i64;
2995  EVT WideVT = MVT::i128;
2996  SDLoc dl(Op);
2997  SDValue LHS = Op.getOperand(0);
2998 
// Only i64 multiplications are custom lowered here.
2999  if (LHS.getValueType() != VT)
3000  return Op;
3001 
3002  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
3003 
// Sign-extend each operand to 128 bits (high word = sign bits via SRA by 63)
// and call the i128 multiply libcall on the widened values.
3004  SDValue RHS = Op.getOperand(1);
3005  SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
3006  SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
3007  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
3008 
3009  SDValue MulResult = TLI.makeLibCall(DAG,
3010  RTLIB::MUL_I128, WideVT,
3011  Args, isSigned, dl).first;
3012  SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
3013  MulResult, DAG.getIntPtrConstant(0, dl));
3014  SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
3015  MulResult, DAG.getIntPtrConstant(1, dl));
// Overflow check: signed overflow iff the top half differs from the sign
// extension of the bottom half; unsigned overflow iff the top half != 0.
3016  if (isSigned) {
3017  SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
3018  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
3019  } else {
3020  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
3021  ISD::SETNE);
3022  }
3023  // MulResult is a node with an illegal type. Because such things are not
3024  // generally permitted during this phase of legalization, ensure that
3025  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
3026  // been folded.
3027  assert(MulResult->use_empty() && "Illegally typed node still in use!");
3028 
3029  SDValue Ops[2] = { BottomHalf, TopHalf } ;
3030  return DAG.getMergeValues(Ops, dl);
3031 }
3032 
// Lower ATOMIC_LOAD/ATOMIC_STORE: orderings stronger than monotonic are
// expanded (with a fence); monotonic operations are legal as-is.
// NOTE(review): the defining signature line (presumably
// "static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {")
// was lost in extraction -- confirm against the original source.
3034  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
3035  // Expand with a fence.
3036  return SDValue();
3037 
3038  // Monotonic load/stores are legal.
3039  return Op;
3040 }
3041 
// Custom lowering for INTRINSIC_WO_CHAIN. Only Intrinsic::thread_pointer is
// handled (lowered to a read of register %g7); all other intrinsics return
// SDValue() for default handling.
// NOTE(review): the first signature line (presumably
// "SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,") was
// lost in extraction -- confirm against the original source.
3043  SelectionDAG &DAG) const {
3044  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3045  SDLoc dl(Op);
3046  switch (IntNo) {
3047  default: return SDValue(); // Don't custom lower most intrinsics.
3048  case Intrinsic::thread_pointer: {
3049  EVT PtrVT = getPointerTy(DAG.getDataLayout());
// The thread pointer is kept in %g7 on SPARC.
3050  return DAG.getRegister(SP::G7, PtrVT);
3051  }
3052  }
3053 }
3054 
// Central dispatch for all custom-lowered operations: routes each opcode to
// its dedicated Lower* helper.
// NOTE(review): the defining signature lines (presumably
// "SDValue SparcTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
// const {") were lost in extraction -- confirm against the original source.
3057 
3058  bool hasHardQuad = Subtarget->hasHardQuad();
3059  bool isV9 = Subtarget->isV9();
3060 
3061  switch (Op.getOpcode()) {
3062  default: llvm_unreachable("Should not custom lower this!");
3063 
3064  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3065  Subtarget);
3066  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3067  Subtarget);
3068  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3069  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3070  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3071  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
// FP<->int conversions need custom handling when no hardware quad support.
3072  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3073  hasHardQuad);
3074  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3075  hasHardQuad);
3076  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3077  hasHardQuad);
3078  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3079  hasHardQuad);
3080  case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this,
3081  hasHardQuad);
3082  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
3083  hasHardQuad);
3084  case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG, *this);
3085  case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG, *this);
3086  case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3087  case ISD::VAARG: return LowerVAARG(Op, DAG);
3088  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
3089  Subtarget);
3090 
3091  case ISD::LOAD: return LowerLOAD(Op, DAG);
3092  case ISD::STORE: return LowerSTORE(Op, DAG);
// f128 arithmetic is lowered to runtime library calls.
3093  case ISD::FADD: return LowerF128Op(Op, DAG,
3094  getLibcallName(RTLIB::ADD_F128), 2);
3095  case ISD::FSUB: return LowerF128Op(Op, DAG,
3096  getLibcallName(RTLIB::SUB_F128), 2);
3097  case ISD::FMUL: return LowerF128Op(Op, DAG,
3098  getLibcallName(RTLIB::MUL_F128), 2);
3099  case ISD::FDIV: return LowerF128Op(Op, DAG,
3100  getLibcallName(RTLIB::DIV_F128), 2);
3101  case ISD::FSQRT: return LowerF128Op(Op, DAG,
3102  getLibcallName(RTLIB::SQRT_F128),1);
3103  case ISD::FABS:
3104  case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3105  case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3106  case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3107  case ISD::ADDC:
3108  case ISD::ADDE:
3109  case ISD::SUBC:
3110  case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
3111  case ISD::UMULO:
3112  case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
3113  case ISD::ATOMIC_LOAD:
3114  case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3115  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3116  }
3117 }
3118 
// Expand pseudo-instructions that need custom MachineBasicBlock insertion:
// the SELECT_CC family (via expandSelectCC, branching on integer or FP
// condition codes) and the EH SjLj setjmp/longjmp pseudos.
// NOTE(review): the first signature line (presumably
// "MachineBasicBlock *SparcTargetLowering::EmitInstrWithCustomInserter(
// MachineInstr &MI,") was lost in extraction -- confirm against the original.
3121  MachineBasicBlock *BB) const {
3122  switch (MI.getOpcode()) {
3123  default: llvm_unreachable("Unknown SELECT_CC!");
// Integer condition-code selects use the BCOND branch opcode.
3124  case SP::SELECT_CC_Int_ICC:
3125  case SP::SELECT_CC_FP_ICC:
3126  case SP::SELECT_CC_DFP_ICC:
3127  case SP::SELECT_CC_QFP_ICC:
3128  return expandSelectCC(MI, BB, SP::BCOND);
// Floating-point condition-code selects use the FBCOND branch opcode.
3129  case SP::SELECT_CC_Int_FCC:
3130  case SP::SELECT_CC_FP_FCC:
3131  case SP::SELECT_CC_DFP_FCC:
3132  case SP::SELECT_CC_QFP_FCC:
3133  return expandSelectCC(MI, BB, SP::FBCOND);
3134  case SP::EH_SJLJ_SETJMP32ri:
3135  case SP::EH_SJLJ_SETJMP32rr:
3136  return emitEHSjLjSetJmp(MI, BB);
3137  case SP::EH_SJLJ_LONGJMP32rr:
3138  case SP::EH_SJLJ_LONGJMP32ri:
3139  return emitEHSjLjLongJmp(MI, BB);
3140 
3141  }
3142 }
3143 
// Expand a SELECT_CC pseudo into a conditional-branch triangle plus a PHI.
// NOTE(review): the first signature line (presumably
// "MachineBasicBlock *SparcTargetLowering::expandSelectCC(MachineInstr &MI,
// MachineBasicBlock *BB,") was lost in extraction -- confirm against the
// original source.
3146  unsigned BROpcode) const {
3147  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3148  DebugLoc dl = MI.getDebugLoc();
// Operand 3 of the pseudo carries the SPARC condition code as an immediate.
3149  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3150 
3151  // To "insert" a SELECT_CC instruction, we actually have to insert the
3152  // triangle control-flow pattern. The incoming instruction knows the
3153  // destination vreg to set, the condition code register to branch on, the
3154  // true/false values to select between, and the condition code for the branch.
3155  //
3156  // We produce the following control flow:
3157  // ThisMBB
3158  // | \
3159  // | IfFalseMBB
3160  // | /
3161  // SinkMBB
3162  const BasicBlock *LLVM_BB = BB->getBasicBlock();
// NOTE(review): a line defining the insertion iterator "It" (presumably
// "MachineFunction::iterator It = ++BB->getIterator();") was lost in
// extraction here -- confirm against the original source.
3164 
3165  MachineBasicBlock *ThisMBB = BB;
3166  MachineFunction *F = BB->getParent();
3167  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3168  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3169  F->insert(It, IfFalseMBB);
3170  F->insert(It, SinkMBB);
3171 
3172  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3173  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3174  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3175  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3176 
3177  // Set the new successors for ThisMBB.
3178  ThisMBB->addSuccessor(IfFalseMBB);
3179  ThisMBB->addSuccessor(SinkMBB);
3180 
// Conditional branch to SinkMBB; fall-through path goes to IfFalseMBB.
3181  BuildMI(ThisMBB, dl, TII.get(BROpcode))
3182  .addMBB(SinkMBB)
3183  .addImm(CC);
3184 
3185  // IfFalseMBB just falls through to SinkMBB.
3186  IfFalseMBB->addSuccessor(SinkMBB);
3187 
3188  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3189  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3190  MI.getOperand(0).getReg())
3191  .addReg(MI.getOperand(1).getReg())
3192  .addMBB(ThisMBB)
3193  .addReg(MI.getOperand(2).getReg())
3194  .addMBB(IfFalseMBB);
3195 
3196  MI.eraseFromParent(); // The pseudo instruction is gone now.
3197  return SinkMBB;
3198 }
3199 
// Expand the EH_SJLJ_LONGJMP pseudo: flush register windows (TA 3), then
// reload FP, the jump target, SP, and I7 from the setjmp buffer, and jump.
// NOTE(review): the first signature line (presumably
// "MachineBasicBlock *SparcTargetLowering::emitEHSjLjLongJmp(MachineInstr
// &MI,") was lost in extraction -- confirm against the original source.
3202  MachineBasicBlock *MBB) const {
3203  DebugLoc DL = MI.getDebugLoc();
3204  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
3205 
3206  MachineFunction *MF = MBB->getParent();
// NOTE(review): a line declaring MRI (presumably
// "MachineRegisterInfo &MRI = MF->getRegInfo();") was lost in extraction
// here -- confirm against the original source.
3208  MachineInstrBuilder MIB;
3209 
3210  MVT PVT = getPointerTy(MF->getDataLayout());
3211  unsigned RegSize = PVT.getStoreSize();
3212  assert(PVT == MVT::i32 && "Invalid Pointer Size!");
3213 
// Operand 0 holds the setjmp buffer pointer register.
3214  unsigned Buf = MI.getOperand(0).getReg();
3215  unsigned JmpLoc = MRI.createVirtualRegister(&SP::IntRegsRegClass);
3216 
3217  // TO DO: If we do 64-bit handling, this perhaps should be FLUSHW, not TA 3
3218  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::TRAPri), SP::G0).addImm(3).addImm(SPCC::ICC_A);
3219 
3220  // Instruction to restore FP
3221  const unsigned FP = SP::I6;
3222  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3223  .addReg(FP)
3224  .addReg(Buf)
3225  .addImm(0);
3226 
3227  // Instruction to load jmp location
3228  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3229  .addReg(JmpLoc, RegState::Define)
3230  .addReg(Buf)
3231  .addImm(RegSize);
3232 
3233  // Instruction to restore SP
3234  const unsigned SP = SP::O6;
3235  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3236  .addReg(SP)
3237  .addReg(Buf)
3238  .addImm(2 * RegSize);
3239 
3240  // Instruction to restore I7
3241  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
3242  .addReg(SP::I7)
3243  .addReg(Buf, RegState::Kill)
3244  .addImm(3 * RegSize);
3245 
3246  // Jump to JmpLoc
3247  BuildMI(*MBB, MI, DL, TII->get(SP::JMPLrr)).addReg(SP::G0).addReg(JmpLoc, RegState::Kill).addReg(SP::G0);
3248 
3249  MI.eraseFromParent();
3250  return MBB;
3251 }
3252 
// Expand the EH_SJLJ_SETJMP pseudo into the CFG and instruction sequence
// sketched in the block comment below: save FP/return-label/SP/I7 to the
// buffer, then produce 0 on the direct path and 1 on the longjmp-restore
// path, merged by a PHI.
// NOTE(review): the first signature line (presumably
// "MachineBasicBlock *SparcTargetLowering::emitEHSjLjSetJmp(MachineInstr
// &MI,") was lost in extraction -- confirm against the original source.
3255  MachineBasicBlock *MBB) const {
3256  DebugLoc DL = MI.getDebugLoc();
3257  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
3258  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3259 
3260  MachineFunction *MF = MBB->getParent();
// NOTE(review): a line declaring MRI (presumably
// "MachineRegisterInfo &MRI = MF->getRegInfo();") was lost in extraction
// here -- confirm against the original source.
3262  MachineInstrBuilder MIB;
3263 
3264  MVT PVT = getPointerTy(MF->getDataLayout());
3265  unsigned RegSize = PVT.getStoreSize();
3266  assert(PVT == MVT::i32 && "Invalid Pointer Size!");
3267 
3268  unsigned DstReg = MI.getOperand(0).getReg();
3269  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
3270  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
3271  (void)TRI;
3272  unsigned mainDstReg = MRI.createVirtualRegister(RC);
3273  unsigned restoreDstReg = MRI.createVirtualRegister(RC);
3274 
3275  // For v = setjmp(buf), we generate
3276  //
3277  // thisMBB:
3278  // buf[0] = FP
3279  // buf[RegSize] = restoreMBB <-- takes address of restoreMBB
3280  // buf[RegSize * 2] = O6
3281  // buf[RegSize * 3] = I7
3282  // Ensure restoreMBB remains in the relocations list (done using a bn instruction)
3283  // b mainMBB
3284  //
3285  // mainMBB:
3286  // v_main = 0
3287  // b sinkMBB
3288  //
3289  // restoreMBB:
3290  // v_restore = 1
3291  // --fall through--
3292  //
3293  // sinkMBB:
3294  // v = phi(main, restore)
3295 
3296  const BasicBlock *BB = MBB->getBasicBlock();
3297  MachineFunction::iterator It = ++MBB->getIterator();
3298  MachineBasicBlock *thisMBB = MBB;
3299  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
3300  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
3301  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
3302 
3303  MF->insert(It, mainMBB);
3304  MF->insert(It, restoreMBB);
3305  MF->insert(It, sinkMBB);
// restoreMBB's address is stored in the buffer, so mark it address-taken.
3306  restoreMBB->setHasAddressTaken();
3307 
3308  // Transfer the remainder of BB and its successor edges to sinkMBB.
3309  sinkMBB->splice(sinkMBB->begin(), MBB,
3310  std::next(MachineBasicBlock::iterator(MI)),
3311  MBB->end());
3312  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
3313 
3314  unsigned LabelReg = MRI.createVirtualRegister(&SP::IntRegsRegClass);
3315  unsigned LabelReg2 = MRI.createVirtualRegister(&SP::IntRegsRegClass);
// Operand 1 holds the setjmp buffer pointer register.
3316  unsigned BufReg = MI.getOperand(1).getReg();
3317 
3318  // Instruction to store FP
3319  const unsigned FP = SP::I6;
3320  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3321  .addReg(BufReg)
3322  .addImm(0)
3323  .addReg(FP);
3324 
3325  // Instructions to store jmp location
// Materialize restoreMBB's address with the usual SETHI %hi / OR %lo pair.
3326  MIB = BuildMI(thisMBB, DL, TII->get(SP::SETHIi))
3327  .addReg(LabelReg, RegState::Define)
3328  .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_HI);
3329 
3330  MIB = BuildMI(thisMBB, DL, TII->get(SP::ORri))
3331  .addReg(LabelReg2, RegState::Define)
3332  .addReg(LabelReg, RegState::Kill)
3333  .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_LO);
3334 
3335  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3336  .addReg(BufReg)
3337  .addImm(RegSize)
3338  .addReg(LabelReg2, RegState::Kill);
3339 
3340  // Instruction to store SP
3341  const unsigned SP = SP::O6;
3342  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3343  .addReg(BufReg)
3344  .addImm(2 * RegSize)
3345  .addReg(SP);
3346 
3347  // Instruction to store I7
3348  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
3349  .addReg(BufReg)
3350  .addImm(3 * RegSize)
3351  .addReg(SP::I7);
3352 
3353 
3354  // FIX ME: This next instruction ensures that the restoreMBB block address remains
3355  // valid through optimization passes and serves no other purpose. The ICC_N ensures
3356  // that the branch is never taken. This commented-out code here was an alternative
3357  // attempt to achieve this which brought myriad problems.
3358  //MIB = BuildMI(thisMBB, DL, TII->get(SP::EH_SjLj_Setup)).addMBB(restoreMBB, SparcMCExpr::VK_Sparc_None);
3359  MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
3360  .addMBB(restoreMBB)
3361  .addImm(SPCC::ICC_N);
3362 
// Unconditional (always-taken) branch to mainMBB.
3363  MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
3364  .addMBB(mainMBB)
3365  .addImm(SPCC::ICC_A);
3366 
3367  thisMBB->addSuccessor(mainMBB);
3368  thisMBB->addSuccessor(restoreMBB);
3369 
3370 
3371  // mainMBB:
// v_main = 0, via OR %g0, %g0.
3372  MIB = BuildMI(mainMBB, DL, TII->get(SP::ORrr))
3373  .addReg(mainDstReg, RegState::Define)
3374  .addReg(SP::G0)
3375  .addReg(SP::G0);
3376  MIB = BuildMI(mainMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
3377 
3378  mainMBB->addSuccessor(sinkMBB);
3379 
3380 
3381  // restoreMBB:
// v_restore = 1, via OR %g0, 1; falls through to sinkMBB.
3382  MIB = BuildMI(restoreMBB, DL, TII->get(SP::ORri))
3383  .addReg(restoreDstReg, RegState::Define)
3384  .addReg(SP::G0)
3385  .addImm(1);
3386  //MIB = BuildMI(restoreMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
3387  restoreMBB->addSuccessor(sinkMBB);
3388 
3389  // sinkMBB:
3390  MIB = BuildMI(*sinkMBB, sinkMBB->begin(), DL,
3391  TII->get(SP::PHI), DstReg)
3392  .addReg(mainDstReg).addMBB(mainMBB)
3393  .addReg(restoreDstReg).addMBB(restoreMBB);
3394 
3395  MI.eraseFromParent();
3396  return sinkMBB;
3397 }
3398 
3399 //===----------------------------------------------------------------------===//
3400 // Sparc Inline Assembly Support
3401 //===----------------------------------------------------------------------===//
3402 
3403 /// getConstraintType - Given a constraint letter, return the type of
3404 /// constraint it is for this target.
// 'r'/'f'/'e' select register classes; 'I' is a SIMM13 immediate.
// NOTE(review): the defining signature lines (presumably
// "SparcTargetLowering::ConstraintType
// SparcTargetLowering::getConstraintType(StringRef Constraint) const {")
// were lost in extraction -- confirm against the original source.
3407  if (Constraint.size() == 1) {
3408  switch (Constraint[0]) {
3409  default: break;
3410  case 'r':
3411  case 'f':
3412  case 'e':
3413  return C_RegisterClass;
3414  case 'I': // SIMM13
3415  return C_Other;
3416  }
3417  }
3418 
// Fall back to the target-independent classification.
3419  return TargetLowering::getConstraintType(Constraint);
3420 }
3421 
// Weigh how well an operand matches a single-letter inline-asm constraint.
// The 'I' constraint scores CW_Constant for constants fitting in 13 signed
// bits; everything else defers to the base class.
// NOTE(review): the first signature lines (presumably
// "TargetLowering::ConstraintWeight
// SparcTargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &info,")
// were lost in extraction -- confirm against the original source.
3424  const char *constraint) const {
3425  ConstraintWeight weight = CW_Invalid;
3426  Value *CallOperandVal = info.CallOperandVal;
3427  // If we don't have a value, we can't do a match,
3428  // but allow it at the lowest weight.
3429  if (!CallOperandVal)
3430  return CW_Default;
3431 
3432  // Look at the constraint type.
3433  switch (*constraint) {
3434  default:
3435  weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3436  break;
3437  case 'I': // SIMM13
3438  if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3439  if (isInt<13>(C->getSExtValue()))
3440  weight = CW_Constant;
3441  }
3442  break;
3443  }
3444  return weight;
3445 }
3446 
3447 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3448 /// vector. If it is invalid, don't add anything to Ops.
// Handles only the 'I' (SIMM13) constraint: a constant fitting in 13 signed
// bits becomes a target constant; other constraints defer to the base class.
// NOTE(review): the first signature lines (presumably
// "void SparcTargetLowering::LowerAsmOperandForConstraint(SDValue Op,") were
// lost in extraction -- confirm against the original source.
3451  std::string &Constraint,
3452  std::vector<SDValue> &Ops,
3453  SelectionDAG &DAG) const {
3454  SDValue Result(nullptr, 0);
3455 
3456  // Only support length 1 constraints for now.
3457  if (Constraint.length() > 1)
3458  return;
3459 
3460  char ConstraintLetter = Constraint[0];
3461  switch (ConstraintLetter) {
3462  default: break;
3463  case 'I':
3464  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3465  if (isInt<13>(C->getSExtValue())) {
3466  Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
3467  Op.getValueType());
3468  break;
3469  }
// Constant does not fit in SIMM13: leave Ops untouched (invalid operand).
3470  return;
3471  }
3472  }
3473 
3474  if (Result.getNode()) {
3475  Ops.push_back(Result);
3476  return;
3477  }
3478  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3479 }
3480 
3481 std::pair<unsigned, const TargetRegisterClass *>
// Map inline-asm register constraints to SPARC register classes:
// 'r' -> integer (or IntPair for v2i32), 'f'/'e' -> FP classes by width,
// plus "{rN}" and "{fN}" explicit-register aliases.
// NOTE(review): a signature line (presumably
// "SparcTargetLowering::getRegForInlineAsmConstraint(const
// TargetRegisterInfo *TRI,") was lost in extraction -- confirm against the
// original source.
3483  StringRef Constraint,
3484  MVT VT) const {
3485  if (Constraint.size() == 1) {
3486  switch (Constraint[0]) {
3487  case 'r':
3488  if (VT == MVT::v2i32)
3489  return std::make_pair(0U, &SP::IntPairRegClass);
3490  else
3491  return std::make_pair(0U, &SP::IntRegsRegClass);
// 'f': low FP register ranges only.
3492  case 'f':
3493  if (VT == MVT::f32)
3494  return std::make_pair(0U, &SP::FPRegsRegClass);
3495  else if (VT == MVT::f64)
3496  return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3497  else if (VT == MVT::f128)
3498  return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3499  llvm_unreachable("Unknown ValueType for f-register-type!");
3500  break;
// 'e': full (extended) FP register ranges.
3501  case 'e':
3502  if (VT == MVT::f32)
3503  return std::make_pair(0U, &SP::FPRegsRegClass);
3504  else if (VT == MVT::f64)
3505  return std::make_pair(0U, &SP::DFPRegsRegClass);
3506  else if (VT == MVT::f128)
3507  return std::make_pair(0U, &SP::QFPRegsRegClass);
3508  llvm_unreachable("Unknown ValueType for e-register-type!");
3509  break;
3510  }
3511  } else if (!Constraint.empty() && Constraint.size() <= 5
3512  && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
3513  // constraint = '{r<d>}'
3514  // Remove the braces from around the name.
3515  StringRef name(Constraint.data()+1, Constraint.size()-2);
3516  // Handle register aliases:
3517  // r0-r7 -> g0-g7
3518  // r8-r15 -> o0-o7
3519  // r16-r23 -> l0-l7
3520  // r24-r31 -> i0-i7
3521  uint64_t intVal = 0;
3522  if (name.substr(0, 1).equals("r")
3523  && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
3524  const char regTypes[] = { 'g', 'o', 'l', 'i' };
3525  char regType = regTypes[intVal/8];
3526  char regIdx = '0' + (intVal % 8);
3527  char tmp[] = { '{', regType, regIdx, '}', 0 };
3528  std::string newConstraint = std::string(tmp);
3529  return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
3530  VT);
3531  }
// "{fN}" aliases: f64 maps to even-numbered d-regs, f128 to q-regs whose
// index is a multiple of four.
3532  if (name.substr(0, 1).equals("f") &&
3533  !name.substr(1).getAsInteger(10, intVal) && intVal <= 63) {
3534  std::string newConstraint;
3535 
3536  if (VT == MVT::f32 || VT == MVT::Other) {
3537  newConstraint = "{f" + utostr(intVal) + "}";
3538  } else if (VT == MVT::f64 && (intVal % 2 == 0)) {
3539  newConstraint = "{d" + utostr(intVal / 2) + "}";
3540  } else if (VT == MVT::f128 && (intVal % 4 == 0)) {
3541  newConstraint = "{q" + utostr(intVal / 4) + "}";
3542  } else {
3543  return std::make_pair(0U, nullptr);
3544  }
3545  return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
3546  VT);
3547  }
3548  }
3549 
3550  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3551 }
3552 
3553 bool
// NOTE(review): the signature line (presumably
// "SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA)
// const {") was lost in extraction -- confirm against the original source.
3555  // The Sparc target isn't yet aware of offsets.
3556  return false;
3557 }
3558 
// Custom type legalization: replace illegal-result nodes. Handles f128<->i64
// conversions via libcalls and i64 loads via a v2i32 load plus bitcast.
// NOTE(review): the first signature lines (presumably
// "void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
// SmallVectorImpl<SDValue> &Results,") were lost in extraction -- confirm
// against the original source.
3561  SelectionDAG &DAG) const {
3562 
3563  SDLoc dl(N);
3564 
3565  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3566 
3567  switch (N->getOpcode()) {
3568  default:
3569  llvm_unreachable("Do not know how to custom type legalize this operation!");
3570 
3571  case ISD::FP_TO_SINT:
3572  case ISD::FP_TO_UINT:
3573  // Custom lower only if it involves f128 or i64.
3574  if (N->getOperand(0).getValueType() != MVT::f128
3575  || N->getValueType(0) != MVT::i64)
3576  return;
3577  libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3578  ? RTLIB::FPTOSINT_F128_I64
3579  : RTLIB::FPTOUINT_F128_I64);
3580 
3581  Results.push_back(LowerF128Op(SDValue(N, 0),
3582  DAG,
3583  getLibcallName(libCall),
3584  1));
3585  return;
3586 
3587  case ISD::SINT_TO_FP:
3588  case ISD::UINT_TO_FP:
3589  // Custom lower only if it involves f128 or i64.
3590  if (N->getValueType(0) != MVT::f128
3591  || N->getOperand(0).getValueType() != MVT::i64)
3592  return;
3593 
3594  libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3595  ? RTLIB::SINTTOFP_I64_F128
3596  : RTLIB::UINTTOFP_I64_F128);
3597 
3598  Results.push_back(LowerF128Op(SDValue(N, 0),
3599  DAG,
3600  getLibcallName(libCall),
3601  1));
3602  return;
3603  case ISD::LOAD: {
3604  LoadSDNode *Ld = cast<LoadSDNode>(N);
3605  // Custom handling only for i64: turn i64 load into a v2i32 load,
3606  // and a bitcast.
3607  if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3608  return;
3609 
3610  SDLoc dl(N);
3611  SDValue LoadRes = DAG.getExtLoad(
3612  Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3613  Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getAlignment(),
3614  Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
3615 
// Push both the bitcast value result and the load's chain result.
3616  SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3617  Results.push_back(Res);
3618  Results.push_back(LoadRes.getValue(1));
3619  return;
3620  }
3621  }
3622 }
3623 
3624 // Override to enable LOAD_STACK_GUARD lowering on Linux.
// Returns true (use LOAD_STACK_GUARD) on Linux targets.
// NOTE(review): the signature line and the non-Linux return statement were
// lost in extraction (presumably "bool
// SparcTargetLowering::useLoadStackGuardNode() const {" and a fallback
// return for the !isTargetLinux() case) -- confirm against the original.
3626  if (!Subtarget->isTargetLinux())
3628  return true;
3629 }
3630 
3631 // Override to disable global variable loading on Linux.
// NOTE(review): the signature line and the body's statement were lost in
// extraction; only the guard condition and closing brace remain. Presumably
// this is SparcTargetLowering::insertSSPDeclarations(Module &M), deferring
// to TargetLowering::insertSSPDeclarations for non-Linux targets -- confirm
// against the original source.
3633  if (!Subtarget->isTargetLinux())
3635 }
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
unsigned getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
void setFrameAddressIsTaken(bool T)
uint64_t CallInst * C
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:539
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:506
static MVT getIntegerVT(unsigned BitWidth)
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:829
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
EVT getValueType() const
Return the ValueType of the referenced return value.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
bool usePopc() const
const SDValue & getOffset() const
bool isUndef() const
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
const GlobalValue * getGlobal() const
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
Definition: ISDOpcodes.h:184
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:30
DiagnosticInfoOptimizationBase::Argument NV
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
Definition: SelectionDAG.h:951
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
Definition: SelectionDAG.h:849
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC)
IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC condition.
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:611
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:115
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:63
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:137
SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
const SDValue & getBasePtr() const
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:223
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:285
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
unsigned getReg() const
getReg - Returns the register number.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:138
bool isFP128Ty() const
Return true if this is 'fp128'.
Definition: Type.h:156
const SDValue & getValue() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:660
unsigned Reg
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
const SDValue & getChain() const
Function Alias Analysis Results
unsigned getValNo() const
unsigned getAlignment() const
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:307
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
unsigned second
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
const uint32_t * getRTCallPreservedMask(CallingConv::ID CC) const
bool hasHardQuad() const
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:34
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:141
F(f)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
Definition: MathExtras.h:677
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:677
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
const SparcInstrInfo * getInstrInfo() const override
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:742
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:405
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:128
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
bool useSoftFloat() const override
SDValue getExternalSymbol(const char *Sym, EVT VT)
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
bool isMemLoc() const
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool needsCustom() const
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic...
Definition: ISDOpcodes.h:114
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:565
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:210
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:426
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
bool isTargetLinux() const
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9)
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
const HexagonInstrInfo * TII
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
Shift and rotation operations.
Definition: ISDOpcodes.h:380
MachineBasicBlock * expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB, unsigned BROpcode) const
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:202
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
BinOp getOperation() const
Definition: Instructions.h:730
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:191
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void addLoc(const CCValAssign &V)
uint64_t getConstantOperandVal(unsigned i) const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
Definition: ISDOpcodes.h:771
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:311
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:457
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:398
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:73
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(T Value)
Definition: StringSwitch.h:203
unsigned getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
LocInfo getLocInfo() const
bool useSoftFloat() const
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SmallVector< ISD::InputArg, 32 > Ins
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:656
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
unsigned getSizeInBits() const
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
bool fixAllFDIVSQRT() const
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:395
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:448
SDValue getRegisterMask(const uint32_t *RegMask)
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:399
SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID CC) const override
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:201
SDValue LowerF128Op(SDValue Op, SelectionDAG &DAG, const char *LibFuncName, unsigned numArgs) const
bool useSoftMulDiv() const
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:133
int64_t getStackPointerBias() const
The 64-bit ABI uses biased stack and frame pointers, so the stack frame of the current function is th...
SmallVector< ISD::OutputArg, 32 > Outs
SDValue LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
LowerFormalArguments32 - V8 uses a very simple ABI, where all values are passed in either one or two ...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:221
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
static mvt_range integer_vector_valuetypes()
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:911
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:571
C - The default llvm calling convention, compatible with C.
Definition: CallingConv.h:35
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
bool isStrongerThanMonotonic(AtomicOrdering ao)
bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const
Return true if the given TargetRegisterClass has the ValueType T.
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:151
amdgpu Simplify well known AMD library false Value * Callee
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Class to represent pointers.
Definition: DerivedTypes.h:467
unsigned getByValSize() const
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
UNDEF - An undefined node.
Definition: ISDOpcodes.h:178
This class is used to represent ISD::STORE nodes.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:492
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:118
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Definition: ISDOpcodes.h:303
TargetInstrInfo - Interface to description of machine instruction set.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool hasNoFMULS() const
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
const SDValue & getBasePtr() const
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:43