LLVM  9.0.0svn
SparcISelLowering.cpp
Go to the documentation of this file.
1 //===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the interfaces that Sparc uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "SparcISelLowering.h"
17 #include "SparcRegisterInfo.h"
18 #include "SparcTargetMachine.h"
19 #include "SparcTargetObjectFile.h"
20 #include "llvm/ADT/StringSwitch.h"
28 #include "llvm/IR/DerivedTypes.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/Module.h"
32 #include "llvm/Support/KnownBits.h"
33 using namespace llvm;
34 
35 
36 //===----------------------------------------------------------------------===//
37 // Calling Convention Implementation
38 //===----------------------------------------------------------------------===//
39 
40 static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
41  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
42  ISD::ArgFlagsTy &ArgFlags, CCState &State)
43 {
44  assert (ArgFlags.isSRet());
45 
46  // Assign SRet argument.
47  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
48  0,
49  LocVT, LocInfo));
50  return true;
51 }
52 
53 static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
54  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
55  ISD::ArgFlagsTy &ArgFlags, CCState &State)
56 {
57  static const MCPhysReg RegList[] = {
58  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
59  };
60  // Try to get first reg.
61  if (unsigned Reg = State.AllocateReg(RegList)) {
62  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
63  } else {
64  // Assign whole thing in stack.
65  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
66  State.AllocateStack(8,4),
67  LocVT, LocInfo));
68  return true;
69  }
70 
71  // Try to get second reg.
72  if (unsigned Reg = State.AllocateReg(RegList))
73  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
74  else
75  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
76  State.AllocateStack(4,4),
77  LocVT, LocInfo));
78  return true;
79 }
80 
81 static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
82  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
83  ISD::ArgFlagsTy &ArgFlags, CCState &State)
84 {
85  static const MCPhysReg RegList[] = {
86  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
87  };
88 
89  // Try to get first reg.
90  if (unsigned Reg = State.AllocateReg(RegList))
91  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
92  else
93  return false;
94 
95  // Try to get second reg.
96  if (unsigned Reg = State.AllocateReg(RegList))
97  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
98  else
99  return false;
100 
101  return true;
102 }
103 
104 // Allocate a full-sized argument for the 64-bit ABI.
105 static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
106  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
107  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
108  assert((LocVT == MVT::f32 || LocVT == MVT::f128
109  || LocVT.getSizeInBits() == 64) &&
110  "Can't handle non-64 bits locations");
111 
112  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
113  unsigned size = (LocVT == MVT::f128) ? 16 : 8;
114  unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
115  unsigned Offset = State.AllocateStack(size, alignment);
116  unsigned Reg = 0;
117 
118  if (LocVT == MVT::i64 && Offset < 6*8)
119  // Promote integers to %i0-%i5.
120  Reg = SP::I0 + Offset/8;
121  else if (LocVT == MVT::f64 && Offset < 16*8)
122  // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
123  Reg = SP::D0 + Offset/8;
124  else if (LocVT == MVT::f32 && Offset < 16*8)
125  // Promote floats to %f1, %f3, ...
126  Reg = SP::F1 + Offset/4;
127  else if (LocVT == MVT::f128 && Offset < 16*8)
128  // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
129  Reg = SP::Q0 + Offset/16;
130 
131  // Promote to register when possible, otherwise use the stack slot.
132  if (Reg) {
133  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
134  return true;
135  }
136 
137  // This argument goes on the stack in an 8-byte slot.
138  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
139  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
140  if (LocVT == MVT::f32)
141  Offset += 4;
142 
143  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
144  return true;
145 }
146 
147 // Allocate a half-sized argument for the 64-bit ABI.
148 //
149 // This is used when passing { float, int } structs by value in registers.
150 static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
151  MVT &LocVT, CCValAssign::LocInfo &LocInfo,
152  ISD::ArgFlagsTy &ArgFlags, CCState &State) {
153  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
154  unsigned Offset = State.AllocateStack(4, 4);
155 
156  if (LocVT == MVT::f32 && Offset < 16*8) {
157  // Promote floats to %f0-%f31.
158  State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
159  LocVT, LocInfo));
160  return true;
161  }
162 
163  if (LocVT == MVT::i32 && Offset < 6*8) {
164  // Promote integers to %i0-%i5, using half the register.
165  unsigned Reg = SP::I0 + Offset/8;
166  LocVT = MVT::i64;
167  LocInfo = CCValAssign::AExt;
168 
169  // Set the Custom bit if this i32 goes in the high bits of a register.
170  if (Offset % 8 == 0)
171  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
172  LocVT, LocInfo));
173  else
174  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
175  return true;
176  }
177 
178  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
179  return true;
180 }
181 
182 #include "SparcGenCallingConv.inc"
183 
184 // The calling conventions in SparcCallingConv.td are described in terms of the
185 // callee's register window. This function translates registers to the
186 // corresponding caller window %o register.
187 static unsigned toCallerWindow(unsigned Reg) {
188  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
189  "Unexpected enum");
190  if (Reg >= SP::I0 && Reg <= SP::I7)
191  return Reg - SP::I0 + SP::O0;
192  return Reg;
193 }
194 
195 SDValue
197  bool IsVarArg,
199  const SmallVectorImpl<SDValue> &OutVals,
200  const SDLoc &DL, SelectionDAG &DAG) const {
201  if (Subtarget->is64Bit())
202  return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
203  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
204 }
205 
206 SDValue
208  bool IsVarArg,
210  const SmallVectorImpl<SDValue> &OutVals,
211  const SDLoc &DL, SelectionDAG &DAG) const {
213 
214  // CCValAssign - represent the assignment of the return value to locations.
216 
217  // CCState - Info about the registers and stack slot.
218  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
219  *DAG.getContext());
220 
221  // Analyze return values.
222  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
223 
224  SDValue Flag;
225  SmallVector<SDValue, 4> RetOps(1, Chain);
226  // Make room for the return address offset.
227  RetOps.push_back(SDValue());
228 
229  // Copy the result values into the output registers.
230  for (unsigned i = 0, realRVLocIdx = 0;
231  i != RVLocs.size();
232  ++i, ++realRVLocIdx) {
233  CCValAssign &VA = RVLocs[i];
234  assert(VA.isRegLoc() && "Can only return in registers!");
235 
236  SDValue Arg = OutVals[realRVLocIdx];
237 
238  if (VA.needsCustom()) {
239  assert(VA.getLocVT() == MVT::v2i32);
240  // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
241  // happen by default if this wasn't a legal type)
242 
244  Arg,
245  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
247  Arg,
248  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
249 
250  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
251  Flag = Chain.getValue(1);
252  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
253  VA = RVLocs[++i]; // skip ahead to next loc
254  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
255  Flag);
256  } else
257  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
258 
259  // Guarantee that all emitted copies are stuck together with flags.
260  Flag = Chain.getValue(1);
261  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
262  }
263 
264  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
265  // If the function returns a struct, copy the SRetReturnReg to I0
266  if (MF.getFunction().hasStructRetAttr()) {
268  unsigned Reg = SFI->getSRetReturnReg();
269  if (!Reg)
270  llvm_unreachable("sret virtual register not created in the entry block");
271  auto PtrVT = getPointerTy(DAG.getDataLayout());
272  SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
273  Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
274  Flag = Chain.getValue(1);
275  RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
276  RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
277  }
278 
279  RetOps[0] = Chain; // Update chain.
280  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
281 
282  // Add the flag if we have it.
283  if (Flag.getNode())
284  RetOps.push_back(Flag);
285 
286  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
287 }
288 
289 // Lower return values for the 64-bit ABI.
290 // Return values are passed the exactly the same way as function arguments.
291 SDValue
293  bool IsVarArg,
295  const SmallVectorImpl<SDValue> &OutVals,
296  const SDLoc &DL, SelectionDAG &DAG) const {
297  // CCValAssign - represent the assignment of the return value to locations.
299 
300  // CCState - Info about the registers and stack slot.
301  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
302  *DAG.getContext());
303 
304  // Analyze return values.
305  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
306 
307  SDValue Flag;
308  SmallVector<SDValue, 4> RetOps(1, Chain);
309 
310  // The second operand on the return instruction is the return address offset.
311  // The return address is always %i7+8 with the 64-bit ABI.
312  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
313 
314  // Copy the result values into the output registers.
315  for (unsigned i = 0; i != RVLocs.size(); ++i) {
316  CCValAssign &VA = RVLocs[i];
317  assert(VA.isRegLoc() && "Can only return in registers!");
318  SDValue OutVal = OutVals[i];
319 
320  // Integer return values must be sign or zero extended by the callee.
321  switch (VA.getLocInfo()) {
322  case CCValAssign::Full: break;
323  case CCValAssign::SExt:
324  OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
325  break;
326  case CCValAssign::ZExt:
327  OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
328  break;
329  case CCValAssign::AExt:
330  OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
331  break;
332  default:
333  llvm_unreachable("Unknown loc info!");
334  }
335 
336  // The custom bit on an i32 return value indicates that it should be passed
337  // in the high bits of the register.
338  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
339  OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
340  DAG.getConstant(32, DL, MVT::i32));
341 
342  // The next value may go in the low bits of the same register.
343  // Handle both at once.
344  if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
345  SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
346  OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
347  // Skip the next value, it's already done.
348  ++i;
349  }
350  }
351 
352  Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
353 
354  // Guarantee that all emitted copies are stuck together with flags.
355  Flag = Chain.getValue(1);
356  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
357  }
358 
359  RetOps[0] = Chain; // Update chain.
360 
361  // Add the flag if we have it.
362  if (Flag.getNode())
363  RetOps.push_back(Flag);
364 
365  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
366 }
367 
369  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
370  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
371  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
372  if (Subtarget->is64Bit())
373  return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
374  DL, DAG, InVals);
375  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
376  DL, DAG, InVals);
377 }
378 
379 /// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
380 /// passed in either one or two GPRs, including FP values. TODO: we should
381 /// pass FP values in FP registers for fastcc functions.
383  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
384  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
385  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
387  MachineRegisterInfo &RegInfo = MF.getRegInfo();
389 
390  // Assign locations to all of the incoming arguments.
392  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
393  *DAG.getContext());
394  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
395 
396  const unsigned StackOffset = 92;
397  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
398 
399  unsigned InIdx = 0;
400  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
401  CCValAssign &VA = ArgLocs[i];
402 
403  if (Ins[InIdx].Flags.isSRet()) {
404  if (InIdx != 0)
405  report_fatal_error("sparc only supports sret on the first parameter");
406  // Get SRet from [%fp+64].
407  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
408  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
409  SDValue Arg =
410  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
411  InVals.push_back(Arg);
412  continue;
413  }
414 
415  if (VA.isRegLoc()) {
416  if (VA.needsCustom()) {
417  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
418 
419  unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
420  MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
421  SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
422 
423  assert(i+1 < e);
424  CCValAssign &NextVA = ArgLocs[++i];
425 
426  SDValue LoVal;
427  if (NextVA.isMemLoc()) {
428  int FrameIdx = MF.getFrameInfo().
429  CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
430  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
431  LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
432  } else {
433  unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
434  &SP::IntRegsRegClass);
435  LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
436  }
437 
438  if (IsLittleEndian)
439  std::swap(LoVal, HiVal);
440 
441  SDValue WholeValue =
442  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
443  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
444  InVals.push_back(WholeValue);
445  continue;
446  }
447  unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
448  MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
449  SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
450  if (VA.getLocVT() == MVT::f32)
451  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
452  else if (VA.getLocVT() != MVT::i32) {
453  Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
454  DAG.getValueType(VA.getLocVT()));
455  Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
456  }
457  InVals.push_back(Arg);
458  continue;
459  }
460 
461  assert(VA.isMemLoc());
462 
463  unsigned Offset = VA.getLocMemOffset()+StackOffset;
464  auto PtrVT = getPointerTy(DAG.getDataLayout());
465 
466  if (VA.needsCustom()) {
467  assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
468  // If it is double-word aligned, just load.
469  if (Offset % 8 == 0) {
470  int FI = MF.getFrameInfo().CreateFixedObject(8,
471  Offset,
472  true);
473  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
474  SDValue Load =
475  DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
476  InVals.push_back(Load);
477  continue;
478  }
479 
480  int FI = MF.getFrameInfo().CreateFixedObject(4,
481  Offset,
482  true);
483  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
484  SDValue HiVal =
485  DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
486  int FI2 = MF.getFrameInfo().CreateFixedObject(4,
487  Offset+4,
488  true);
489  SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
490 
491  SDValue LoVal =
492  DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
493 
494  if (IsLittleEndian)
495  std::swap(LoVal, HiVal);
496 
497  SDValue WholeValue =
498  DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
499  WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
500  InVals.push_back(WholeValue);
501  continue;
502  }
503 
504  int FI = MF.getFrameInfo().CreateFixedObject(4,
505  Offset,
506  true);
507  SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
508  SDValue Load ;
509  if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
510  Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
511  } else if (VA.getValVT() == MVT::f128) {
512  report_fatal_error("SPARCv8 does not handle f128 in calls; "
513  "pass indirectly");
514  } else {
515  // We shouldn't see any other value types here.
516  llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
517  }
518  InVals.push_back(Load);
519  }
520 
521  if (MF.getFunction().hasStructRetAttr()) {
522  // Copy the SRet Argument to SRetReturnReg.
524  unsigned Reg = SFI->getSRetReturnReg();
525  if (!Reg) {
526  Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
527  SFI->setSRetReturnReg(Reg);
528  }
529  SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
530  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
531  }
532 
533  // Store remaining ArgRegs to the stack if this is a varargs function.
534  if (isVarArg) {
535  static const MCPhysReg ArgRegs[] = {
536  SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
537  };
538  unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
539  const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
540  unsigned ArgOffset = CCInfo.getNextStackOffset();
541  if (NumAllocated == 6)
542  ArgOffset += StackOffset;
543  else {
544  assert(!ArgOffset);
545  ArgOffset = 68+4*NumAllocated;
546  }
547 
548  // Remember the vararg offset for the va_start implementation.
549  FuncInfo->setVarArgsFrameOffset(ArgOffset);
550 
551  std::vector<SDValue> OutChains;
552 
553  for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
554  unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
555  MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
556  SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
557 
558  int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
559  true);
560  SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
561 
562  OutChains.push_back(
563  DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
564  ArgOffset += 4;
565  }
566 
567  if (!OutChains.empty()) {
568  OutChains.push_back(Chain);
569  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
570  }
571  }
572 
573  return Chain;
574 }
575 
576 // Lower formal arguments for the 64 bit ABI.
578  SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
579  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
580  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
582 
583  // Analyze arguments according to CC_Sparc64.
585  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
586  *DAG.getContext());
587  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
588 
589  // The argument array begins at %fp+BIAS+128, after the register save area.
590  const unsigned ArgArea = 128;
591 
592  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
593  CCValAssign &VA = ArgLocs[i];
594  if (VA.isRegLoc()) {
595  // This argument is passed in a register.
596  // All integer register arguments are promoted by the caller to i64.
597 
598  // Create a virtual register for the promoted live-in value.
599  unsigned VReg = MF.addLiveIn(VA.getLocReg(),
600  getRegClassFor(VA.getLocVT()));
601  SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
602 
603  // Get the high bits for i32 struct elements.
604  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
605  Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
606  DAG.getConstant(32, DL, MVT::i32));
607 
608  // The caller promoted the argument, so insert an Assert?ext SDNode so we
609  // won't promote the value again in this function.
610  switch (VA.getLocInfo()) {
611  case CCValAssign::SExt:
612  Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
613  DAG.getValueType(VA.getValVT()));
614  break;
615  case CCValAssign::ZExt:
616  Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
617  DAG.getValueType(VA.getValVT()));
618  break;
619  default:
620  break;
621  }
622 
623  // Truncate the register down to the argument type.
624  if (VA.isExtInLoc())
625  Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
626 
627  InVals.push_back(Arg);
628  continue;
629  }
630 
631  // The registers are exhausted. This argument was passed on the stack.
632  assert(VA.isMemLoc());
633  // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
634  // beginning of the arguments area at %fp+BIAS+128.
635  unsigned Offset = VA.getLocMemOffset() + ArgArea;
636  unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
637  // Adjust offset for extended arguments, SPARC is big-endian.
638  // The caller will have written the full slot with extended bytes, but we
639  // prefer our own extending loads.
640  if (VA.isExtInLoc())
641  Offset += 8 - ValSize;
642  int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
643  InVals.push_back(
644  DAG.getLoad(VA.getValVT(), DL, Chain,
647  }
648 
649  if (!IsVarArg)
650  return Chain;
651 
652  // This function takes variable arguments, some of which may have been passed
653  // in registers %i0-%i5. Variable floating point arguments are never passed
654  // in floating point registers. They go on %i0-%i5 or on the stack like
655  // integer arguments.
656  //
657  // The va_start intrinsic needs to know the offset to the first variable
658  // argument.
659  unsigned ArgOffset = CCInfo.getNextStackOffset();
661  // Skip the 128 bytes of register save area.
662  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
663  Subtarget->getStackPointerBias());
664 
665  // Save the variable arguments that were passed in registers.
666  // The caller is required to reserve stack space for 6 arguments regardless
667  // of how many arguments were actually passed.
668  SmallVector<SDValue, 8> OutChains;
669  for (; ArgOffset < 6*8; ArgOffset += 8) {
670  unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
671  SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
672  int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
673  auto PtrVT = getPointerTy(MF.getDataLayout());
674  OutChains.push_back(
675  DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
677  }
678 
679  if (!OutChains.empty())
680  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
681 
682  return Chain;
683 }
684 
685 SDValue
687  SmallVectorImpl<SDValue> &InVals) const {
688  if (Subtarget->is64Bit())
689  return LowerCall_64(CLI, InVals);
690  return LowerCall_32(CLI, InVals);
691 }
692 
693 static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
694  ImmutableCallSite CS) {
695  if (CS)
696  return CS.hasFnAttr(Attribute::ReturnsTwice);
697 
698  const Function *CalleeFn = nullptr;
699  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
700  CalleeFn = dyn_cast<Function>(G->getGlobal());
701  } else if (ExternalSymbolSDNode *E =
702  dyn_cast<ExternalSymbolSDNode>(Callee)) {
703  const Function &Fn = DAG.getMachineFunction().getFunction();
704  const Module *M = Fn.getParent();
705  const char *CalleeName = E->getSymbol();
706  CalleeFn = M->getFunction(CalleeName);
707  }
708 
709  if (!CalleeFn)
710  return false;
711  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
712 }
713 
714 // Lower a call for the 32-bit ABI.
715 SDValue
717  SmallVectorImpl<SDValue> &InVals) const {
718  SelectionDAG &DAG = CLI.DAG;
719  SDLoc &dl = CLI.DL;
721  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
723  SDValue Chain = CLI.Chain;
724  SDValue Callee = CLI.Callee;
725  bool &isTailCall = CLI.IsTailCall;
726  CallingConv::ID CallConv = CLI.CallConv;
727  bool isVarArg = CLI.IsVarArg;
728 
729  // Sparc target does not yet support tail call optimization.
730  isTailCall = false;
731 
732  // Analyze operands of the call, assigning locations to each operand.
734  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
735  *DAG.getContext());
736  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
737 
738  // Get the size of the outgoing arguments stack space requirement.
739  unsigned ArgsSize = CCInfo.getNextStackOffset();
740 
741  // Keep stack frames 8-byte aligned.
742  ArgsSize = (ArgsSize+7) & ~7;
743 
745 
746  // Create local copies for byval args.
747  SmallVector<SDValue, 8> ByValArgs;
748  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
749  ISD::ArgFlagsTy Flags = Outs[i].Flags;
750  if (!Flags.isByVal())
751  continue;
752 
753  SDValue Arg = OutVals[i];
754  unsigned Size = Flags.getByValSize();
755  unsigned Align = Flags.getByValAlign();
756 
757  if (Size > 0U) {
758  int FI = MFI.CreateStackObject(Size, Align, false);
759  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
760  SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
761 
762  Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
763  false, // isVolatile,
764  (Size <= 32), // AlwaysInline if size <= 32,
765  false, // isTailCall
767  ByValArgs.push_back(FIPtr);
768  }
769  else {
770  SDValue nullVal;
771  ByValArgs.push_back(nullVal);
772  }
773  }
774 
775  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
776 
778  SmallVector<SDValue, 8> MemOpChains;
779 
780  const unsigned StackOffset = 92;
781  bool hasStructRetAttr = false;
782  unsigned SRetArgSize = 0;
783  // Walk the register/memloc assignments, inserting copies/loads.
784  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
785  i != e;
786  ++i, ++realArgIdx) {
787  CCValAssign &VA = ArgLocs[i];
788  SDValue Arg = OutVals[realArgIdx];
789 
790  ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
791 
792  // Use local copy if it is a byval arg.
793  if (Flags.isByVal()) {
794  Arg = ByValArgs[byvalArgIdx++];
795  if (!Arg) {
796  continue;
797  }
798  }
799 
800  // Promote the value if needed.
801  switch (VA.getLocInfo()) {
802  default: llvm_unreachable("Unknown loc info!");
803  case CCValAssign::Full: break;
804  case CCValAssign::SExt:
805  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
806  break;
807  case CCValAssign::ZExt:
808  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
809  break;
810  case CCValAssign::AExt:
811  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
812  break;
813  case CCValAssign::BCvt:
814  Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
815  break;
816  }
817 
818  if (Flags.isSRet()) {
819  assert(VA.needsCustom());
820  // store SRet argument in %sp+64
821  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
822  SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
823  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
824  MemOpChains.push_back(
825  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
826  hasStructRetAttr = true;
827  // sret only allowed on first argument
828  assert(Outs[realArgIdx].OrigArgIndex == 0);
829  PointerType *Ty = cast<PointerType>(CLI.getArgs()[0].Ty);
830  Type *ElementTy = Ty->getElementType();
831  SRetArgSize = DAG.getDataLayout().getTypeAllocSize(ElementTy);
832  continue;
833  }
834 
835  if (VA.needsCustom()) {
836  assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
837 
838  if (VA.isMemLoc()) {
839  unsigned Offset = VA.getLocMemOffset() + StackOffset;
840  // if it is double-word aligned, just store.
841  if (Offset % 8 == 0) {
842  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
843  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
844  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
845  MemOpChains.push_back(
846  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
847  continue;
848  }
849  }
850 
851  if (VA.getLocVT() == MVT::f64) {
852  // Move from the float value from float registers into the
853  // integer registers.
854  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
855  Arg = bitcastConstantFPToInt(C, dl, DAG);
856  else
857  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
858  }
859 
861  Arg,
862  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
864  Arg,
865  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
866 
867  if (VA.isRegLoc()) {
868  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
869  assert(i+1 != e);
870  CCValAssign &NextVA = ArgLocs[++i];
871  if (NextVA.isRegLoc()) {
872  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
873  } else {
874  // Store the second part in stack.
875  unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
876  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
877  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
878  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
879  MemOpChains.push_back(
880  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
881  }
882  } else {
883  unsigned Offset = VA.getLocMemOffset() + StackOffset;
884  // Store the first part.
885  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
886  SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
887  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
888  MemOpChains.push_back(
889  DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
890  // Store the second part.
891  PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
892  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
893  MemOpChains.push_back(
894  DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
895  }
896  continue;
897  }
898 
899  // Arguments that can be passed on register must be kept at
900  // RegsToPass vector
901  if (VA.isRegLoc()) {
902  if (VA.getLocVT() != MVT::f32) {
903  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
904  continue;
905  }
906  Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
907  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
908  continue;
909  }
910 
911  assert(VA.isMemLoc());
912 
913  // Create a store off the stack pointer for this argument.
914  SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
915  SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
916  dl);
917  PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
918  MemOpChains.push_back(
919  DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
920  }
921 
922 
923  // Emit all stores, make sure the occur before any copies into physregs.
924  if (!MemOpChains.empty())
925  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
926 
927  // Build a sequence of copy-to-reg nodes chained together with token
928  // chain and flag operands which copy the outgoing args into registers.
929  // The InFlag in necessary since all emitted instructions must be
930  // stuck together.
931  SDValue InFlag;
932  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
933  unsigned Reg = toCallerWindow(RegsToPass[i].first);
934  Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
935  InFlag = Chain.getValue(1);
936  }
937 
938  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
939 
940  // If the callee is a GlobalAddress node (quite common, every direct call is)
941  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
942  // Likewise ExternalSymbol -> TargetExternalSymbol.
944  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
945  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
946  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
947  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
948 
949  // Returns a chain & a flag for retval copy to use
950  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
952  Ops.push_back(Chain);
953  Ops.push_back(Callee);
954  if (hasStructRetAttr)
955  Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
956  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
957  Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
958  RegsToPass[i].second.getValueType()));
959 
960  // Add a register mask operand representing the call-preserved registers.
961  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
962  const uint32_t *Mask =
963  ((hasReturnsTwice)
964  ? TRI->getRTCallPreservedMask(CallConv)
965  : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
966  assert(Mask && "Missing call preserved mask for calling convention");
967  Ops.push_back(DAG.getRegisterMask(Mask));
968 
969  if (InFlag.getNode())
970  Ops.push_back(InFlag);
971 
972  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
973  InFlag = Chain.getValue(1);
974 
975  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
976  DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
977  InFlag = Chain.getValue(1);
978 
979  // Assign locations to each value returned by this call.
981  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
982  *DAG.getContext());
983 
984  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
985 
986  // Copy all of the result registers out of their specified physreg.
987  for (unsigned i = 0; i != RVLocs.size(); ++i) {
988  if (RVLocs[i].getLocVT() == MVT::v2i32) {
989  SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
990  SDValue Lo = DAG.getCopyFromReg(
991  Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
992  Chain = Lo.getValue(1);
993  InFlag = Lo.getValue(2);
994  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
995  DAG.getConstant(0, dl, MVT::i32));
996  SDValue Hi = DAG.getCopyFromReg(
997  Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
998  Chain = Hi.getValue(1);
999  InFlag = Hi.getValue(2);
1000  Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1001  DAG.getConstant(1, dl, MVT::i32));
1002  InVals.push_back(Vec);
1003  } else {
1004  Chain =
1005  DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1006  RVLocs[i].getValVT(), InFlag)
1007  .getValue(1);
1008  InFlag = Chain.getValue(2);
1009  InVals.push_back(Chain.getValue(0));
1010  }
1011  }
1012 
1013  return Chain;
1014 }
1015 
1016 // FIXME? Maybe this could be a TableGen attribute on some registers and
1017 // this table could be generated automatically from RegInfo.
1018 unsigned SparcTargetLowering::getRegisterByName(const char* RegName, EVT VT,
1019  SelectionDAG &DAG) const {
1020  unsigned Reg = StringSwitch<unsigned>(RegName)
1021  .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1022  .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1023  .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1024  .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1025  .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1026  .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1027  .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1028  .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1029  .Default(0);
1030 
1031  if (Reg)
1032  return Reg;
1033 
1034  report_fatal_error("Invalid register name global variable");
1035 }
1036 
1037 // Fixup floating point arguments in the ... part of a varargs call.
1038 //
1039 // The SPARC v9 ABI requires that floating point arguments are treated the same
1040 // as integers when calling a varargs function. This does not apply to the
1041 // fixed arguments that are part of the function's prototype.
1042 //
1043 // This function post-processes a CCValAssign array created by
1044 // AnalyzeCallOperands().
1046  ArrayRef<ISD::OutputArg> Outs) {
1047  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1048  const CCValAssign &VA = ArgLocs[i];
1049  MVT ValTy = VA.getLocVT();
1050  // FIXME: What about f32 arguments? C promotes them to f64 when calling
1051  // varargs functions.
1052  if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1053  continue;
1054  // The fixed arguments to a varargs function still go in FP registers.
1055  if (Outs[VA.getValNo()].IsFixed)
1056  continue;
1057 
1058  // This floating point argument should be reassigned.
1059  CCValAssign NewVA;
1060 
1061  // Determine the offset into the argument array.
1062  unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1063  unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1064  unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1065  assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1066 
1067  if (Offset < 6*8) {
1068  // This argument should go in %i0-%i5.
1069  unsigned IReg = SP::I0 + Offset/8;
1070  if (ValTy == MVT::f64)
1071  // Full register, just bitconvert into i64.
1072  NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
1073  IReg, MVT::i64, CCValAssign::BCvt);
1074  else {
1075  assert(ValTy == MVT::f128 && "Unexpected type!");
1076  // Full register, just bitconvert into i128 -- We will lower this into
1077  // two i64s in LowerCall_64.
1078  NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
1079  IReg, MVT::i128, CCValAssign::BCvt);
1080  }
1081  } else {
1082  // This needs to go to memory, we're out of integer registers.
1083  NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
1084  Offset, VA.getLocVT(), VA.getLocInfo());
1085  }
1086  ArgLocs[i] = NewVA;
1087  }
1088 }
1089 
1090 // Lower a call for the 64-bit ABI.
1091 SDValue
1093  SmallVectorImpl<SDValue> &InVals) const {
1094  SelectionDAG &DAG = CLI.DAG;
1095  SDLoc DL = CLI.DL;
1096  SDValue Chain = CLI.Chain;
1097  auto PtrVT = getPointerTy(DAG.getDataLayout());
1098 
1099  // Sparc target does not yet support tail call optimization.
1100  CLI.IsTailCall = false;
1101 
1102  // Analyze operands of the call, assigning locations to each operand.
1104  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1105  *DAG.getContext());
1106  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1107 
1108  // Get the size of the outgoing arguments stack space requirement.
1109  // The stack offset computed by CC_Sparc64 includes all arguments.
1110  // Called functions expect 6 argument words to exist in the stack frame, used
1111  // or not.
1112  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
1113 
1114  // Keep stack frames 16-byte aligned.
1115  ArgsSize = alignTo(ArgsSize, 16);
1116 
1117  // Varargs calls require special treatment.
1118  if (CLI.IsVarArg)
1119  fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1120 
1121  // Adjust the stack pointer to make room for the arguments.
1122  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1123  // with more than 6 arguments.
1124  Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1125 
1126  // Collect the set of registers to pass to the function and their values.
1127  // This will be emitted as a sequence of CopyToReg nodes glued to the call
1128  // instruction.
1130 
1131  // Collect chains from all the memory opeations that copy arguments to the
1132  // stack. They must follow the stack pointer adjustment above and precede the
1133  // call instruction itself.
1134  SmallVector<SDValue, 8> MemOpChains;
1135 
1136  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1137  const CCValAssign &VA = ArgLocs[i];
1138  SDValue Arg = CLI.OutVals[i];
1139 
1140  // Promote the value if needed.
1141  switch (VA.getLocInfo()) {
1142  default:
1143  llvm_unreachable("Unknown location info!");
1144  case CCValAssign::Full:
1145  break;
1146  case CCValAssign::SExt:
1147  Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1148  break;
1149  case CCValAssign::ZExt:
1150  Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1151  break;
1152  case CCValAssign::AExt:
1153  Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1154  break;
1155  case CCValAssign::BCvt:
1156  // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1157  // SPARC does not support i128 natively. Lower it into two i64, see below.
1158  if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1159  || VA.getLocVT() != MVT::i128)
1160  Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1161  break;
1162  }
1163 
1164  if (VA.isRegLoc()) {
1165  if (VA.needsCustom() && VA.getValVT() == MVT::f128
1166  && VA.getLocVT() == MVT::i128) {
1167  // Store and reload into the integer register reg and reg+1.
1168  unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1169  unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1170  SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1171  SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1172  HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1173  SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1174  LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1175 
1176  // Store to %sp+BIAS+128+Offset
1177  SDValue Store =
1178  DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1179  // Load into Reg and Reg+1
1180  SDValue Hi64 =
1181  DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1182  SDValue Lo64 =
1183  DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1184  RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
1185  Hi64));
1186  RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
1187  Lo64));
1188  continue;
1189  }
1190 
1191  // The custom bit on an i32 return value indicates that it should be
1192  // passed in the high bits of the register.
1193  if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1194  Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1195  DAG.getConstant(32, DL, MVT::i32));
1196 
1197  // The next value may go in the low bits of the same register.
1198  // Handle both at once.
1199  if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1200  ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1202  CLI.OutVals[i+1]);
1203  Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1204  // Skip the next value, it's already done.
1205  ++i;
1206  }
1207  }
1208  RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
1209  continue;
1210  }
1211 
1212  assert(VA.isMemLoc());
1213 
1214  // Create a store off the stack pointer for this argument.
1215  SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1216  // The argument area starts at %fp+BIAS+128 in the callee frame,
1217  // %sp+BIAS+128 in ours.
1218  SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1219  Subtarget->getStackPointerBias() +
1220  128, DL);
1221  PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1222  MemOpChains.push_back(
1223  DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1224  }
1225 
1226  // Emit all stores, make sure they occur before the call.
1227  if (!MemOpChains.empty())
1228  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1229 
1230  // Build a sequence of CopyToReg nodes glued together with token chain and
1231  // glue operands which copy the outgoing args into registers. The InGlue is
1232  // necessary since all emitted instructions must be stuck together in order
1233  // to pass the live physical registers.
1234  SDValue InGlue;
1235  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1236  Chain = DAG.getCopyToReg(Chain, DL,
1237  RegsToPass[i].first, RegsToPass[i].second, InGlue);
1238  InGlue = Chain.getValue(1);
1239  }
1240 
1241  // If the callee is a GlobalAddress node (quite common, every direct call is)
1242  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1243  // Likewise ExternalSymbol -> TargetExternalSymbol.
1244  SDValue Callee = CLI.Callee;
1245  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
1247  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1248  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
1249  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1250  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
1251 
1252  // Build the operands for the call instruction itself.
1254  Ops.push_back(Chain);
1255  Ops.push_back(Callee);
1256  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1257  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1258  RegsToPass[i].second.getValueType()));
1259 
1260  // Add a register mask operand representing the call-preserved registers.
1261  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1262  const uint32_t *Mask =
1263  ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1265  CLI.CallConv));
1266  assert(Mask && "Missing call preserved mask for calling convention");
1267  Ops.push_back(DAG.getRegisterMask(Mask));
1268 
1269  // Make sure the CopyToReg nodes are glued to the call instruction which
1270  // consumes the registers.
1271  if (InGlue.getNode())
1272  Ops.push_back(InGlue);
1273 
1274  // Now the call itself.
1275  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1276  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1277  InGlue = Chain.getValue(1);
1278 
1279  // Revert the stack pointer immediately after the call.
1280  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
1281  DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
1282  InGlue = Chain.getValue(1);
1283 
1284  // Now extract the return values. This is more or less the same as
1285  // LowerFormalArguments_64.
1286 
1287  // Assign locations to each value returned by this call.
1289  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1290  *DAG.getContext());
1291 
1292  // Set inreg flag manually for codegen generated library calls that
1293  // return float.
1294  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CS)
1295  CLI.Ins[0].Flags.setInReg();
1296 
1297  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1298 
1299  // Copy all of the result registers out of their specified physreg.
1300  for (unsigned i = 0; i != RVLocs.size(); ++i) {
1301  CCValAssign &VA = RVLocs[i];
1302  unsigned Reg = toCallerWindow(VA.getLocReg());
1303 
1304  // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1305  // reside in the same register in the high and low bits. Reuse the
1306  // CopyFromReg previous node to avoid duplicate copies.
1307  SDValue RV;
1308  if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1309  if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1310  RV = Chain.getValue(0);
1311 
1312  // But usually we'll create a new CopyFromReg for a different register.
1313  if (!RV.getNode()) {
1314  RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1315  Chain = RV.getValue(1);
1316  InGlue = Chain.getValue(2);
1317  }
1318 
1319  // Get the high bits for i32 struct elements.
1320  if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1321  RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1322  DAG.getConstant(32, DL, MVT::i32));
1323 
1324  // The callee promoted the return value, so insert an Assert?ext SDNode so
1325  // we won't promote the value again in this function.
1326  switch (VA.getLocInfo()) {
1327  case CCValAssign::SExt:
1328  RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1329  DAG.getValueType(VA.getValVT()));
1330  break;
1331  case CCValAssign::ZExt:
1332  RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1333  DAG.getValueType(VA.getValVT()));
1334  break;
1335  default:
1336  break;
1337  }
1338 
1339  // Truncate the register down to the return value type.
1340  if (VA.isExtInLoc())
1341  RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1342 
1343  InVals.push_back(RV);
1344  }
1345 
1346  return Chain;
1347 }
1348 
1349 //===----------------------------------------------------------------------===//
1350 // TargetLowering Implementation
1351 //===----------------------------------------------------------------------===//
1352 
1354  if (AI->getOperation() == AtomicRMWInst::Xchg &&
1355  AI->getType()->getPrimitiveSizeInBits() == 32)
1356  return AtomicExpansionKind::None; // Uses xchg instruction
1357 
1359 }
1360 
1361 /// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1362 /// condition.
1364  switch (CC) {
1365  default: llvm_unreachable("Unknown integer condition code!");
1366  case ISD::SETEQ: return SPCC::ICC_E;
1367  case ISD::SETNE: return SPCC::ICC_NE;
1368  case ISD::SETLT: return SPCC::ICC_L;
1369  case ISD::SETGT: return SPCC::ICC_G;
1370  case ISD::SETLE: return SPCC::ICC_LE;
1371  case ISD::SETGE: return SPCC::ICC_GE;
1372  case ISD::SETULT: return SPCC::ICC_CS;
1373  case ISD::SETULE: return SPCC::ICC_LEU;
1374  case ISD::SETUGT: return SPCC::ICC_GU;
1375  case ISD::SETUGE: return SPCC::ICC_CC;
1376  }
1377 }
1378 
1379 /// FPCondCCodeToFCC - Convert a DAG floatingp oint condition code to a SPARC
1380 /// FCC condition.
1382  switch (CC) {
1383  default: llvm_unreachable("Unknown fp condition code!");
1384  case ISD::SETEQ:
1385  case ISD::SETOEQ: return SPCC::FCC_E;
1386  case ISD::SETNE:
1387  case ISD::SETUNE: return SPCC::FCC_NE;
1388  case ISD::SETLT:
1389  case ISD::SETOLT: return SPCC::FCC_L;
1390  case ISD::SETGT:
1391  case ISD::SETOGT: return SPCC::FCC_G;
1392  case ISD::SETLE:
1393  case ISD::SETOLE: return SPCC::FCC_LE;
1394  case ISD::SETGE:
1395  case ISD::SETOGE: return SPCC::FCC_GE;
1396  case ISD::SETULT: return SPCC::FCC_UL;
1397  case ISD::SETULE: return SPCC::FCC_ULE;
1398  case ISD::SETUGT: return SPCC::FCC_UG;
1399  case ISD::SETUGE: return SPCC::FCC_UGE;
1400  case ISD::SETUO: return SPCC::FCC_U;
1401  case ISD::SETO: return SPCC::FCC_O;
1402  case ISD::SETONE: return SPCC::FCC_LG;
1403  case ISD::SETUEQ: return SPCC::FCC_UE;
1404  }
1405 }
1406 
1408  const SparcSubtarget &STI)
1409  : TargetLowering(TM), Subtarget(&STI) {
1410  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));
1411 
1412  // Instructions which use registers as conditionals examine all the
1413  // bits (as does the pseudo SELECT_CC expansion). I don't think it
1414  // matters much whether it's ZeroOrOneBooleanContent, or
1415  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1416  // former.
1419 
1420  // Set up the register classes.
1421  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1422  if (!Subtarget->useSoftFloat()) {
1423  addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1424  addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1425  addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1426  }
1427  if (Subtarget->is64Bit()) {
1428  addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1429  } else {
1430  // On 32bit sparc, we define a double-register 32bit register
1431  // class, as well. This is modeled in LLVM as a 2-vector of i32.
1432  addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1433 
1434  // ...but almost all operations must be expanded, so set that as
1435  // the default.
1436  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1438  }
1439  // Truncating/extending stores/loads are also not supported.
1440  for (MVT VT : MVT::integer_vector_valuetypes()) {
1444 
1448 
1451  }
1452  // However, load and store *are* legal.
1457 
1458  // And we need to promote i64 loads/stores into vector load/store
1461 
1462  // Sadly, this doesn't work:
1463  // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1464  // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1465  }
1466 
1467  // Turn FP extload into load/fpextend
1468  for (MVT VT : MVT::fp_valuetypes()) {
1471  }
1472 
1473  // Sparc doesn't have i1 sign extending load
1474  for (MVT VT : MVT::integer_valuetypes())
1476 
1477  // Turn FP truncstore into trunc + store.
1481 
1482  // Custom legalize GlobalAddress nodes into LO/HI parts.
1487 
1488  // Sparc doesn't have sext_inreg, replace them with shl/sra
1492 
1493  // Sparc has no REM or DIVREM operations.
1498 
1499  // ... nor does SparcV9.
1500  if (Subtarget->is64Bit()) {
1505  }
1506 
1507  // Custom expand fp<->sint
1512 
1513  // Custom Expand fp<->uint
1518 
1521 
1522  // Sparc has no select or setcc: expand to SELECT_CC.
1527 
1532 
1533  // Sparc doesn't have BRCOND either, it has BR_CC.
1541 
1546 
1551 
1552  if (Subtarget->is64Bit()) {
1563 
1565  Subtarget->usePopc() ? Legal : Expand);
1572  }
1573 
1574  // ATOMICs.
1575  // Atomics are supported on SparcV9. 32-bit atomics are also
1576  // supported by some Leon SparcV8 variants. Otherwise, atomics
1577  // are unsupported.
1578  if (Subtarget->isV9())
1580  else if (Subtarget->hasLeonCasa())
1582  else
1584 
1586 
1588 
1590 
1591  // Custom Lower Atomic LOAD/STORE
1594 
1595  if (Subtarget->is64Bit()) {
1600  }
1601 
1602  if (!Subtarget->is64Bit()) {
1603  // These libcalls are not available in 32-bit.
1604  setLibcallName(RTLIB::SHL_I128, nullptr);
1605  setLibcallName(RTLIB::SRL_I128, nullptr);
1606  setLibcallName(RTLIB::SRA_I128, nullptr);
1607  }
1608 
1609  if (!Subtarget->isV9()) {
1610  // SparcV8 does not have FNEGD and FABSD.
1613  }
1614 
1641 
1645 
1646  // Expands to [SU]MUL_LOHI.
1650 
1651  if (Subtarget->useSoftMulDiv()) {
1652  // .umul works for both signed and unsigned
1655  setLibcallName(RTLIB::MUL_I32, ".umul");
1656 
1658  setLibcallName(RTLIB::SDIV_I32, ".div");
1659 
1661  setLibcallName(RTLIB::UDIV_I32, ".udiv");
1662 
1663  setLibcallName(RTLIB::SREM_I32, ".rem");
1664  setLibcallName(RTLIB::UREM_I32, ".urem");
1665  }
1666 
1667  if (Subtarget->is64Bit()) {
1672 
1675 
1679  }
1680 
1681  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1683  // VAARG needs to be lowered to not do unaligned accesses for doubles.
1685 
1688 
1689  // Use the default implementation.
1695 
1697 
1699  Subtarget->usePopc() ? Legal : Expand);
1700 
1701  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1704  } else {
1707  }
1708 
1709  if (Subtarget->hasHardQuad()) {
1717  if (Subtarget->isV9()) {
1720  } else {
1723  }
1724 
1725  if (!Subtarget->is64Bit()) {
1726  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1727  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1728  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1729  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1730  }
1731 
1732  } else {
1733  // Custom legalize f128 operations.
1734 
1742 
1746 
1747  // Setup Runtime library names.
1748  if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1749  setLibcallName(RTLIB::ADD_F128, "_Qp_add");
1750  setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
1751  setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
1752  setLibcallName(RTLIB::DIV_F128, "_Qp_div");
1753  setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1754  setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1755  setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
1756  setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1757  setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
1758  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
1759  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
1760  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
1761  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
1762  setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1763  setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1764  setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1765  setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1766  } else if (!Subtarget->useSoftFloat()) {
1767  setLibcallName(RTLIB::ADD_F128, "_Q_add");
1768  setLibcallName(RTLIB::SUB_F128, "_Q_sub");
1769  setLibcallName(RTLIB::MUL_F128, "_Q_mul");
1770  setLibcallName(RTLIB::DIV_F128, "_Q_div");
1771  setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1772  setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1773  setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
1774  setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1775  setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
1776  setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1777  setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1778  setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1779  setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1780  setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1781  setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1782  setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
1783  setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
1784  }
1785  }
1786 
1787  if (Subtarget->fixAllFDIVSQRT()) {
1788  // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as
1789  // the former instructions generate errata on LEON processors.
1792  }
1793 
1794  if (Subtarget->hasNoFMULS()) {
1796  }
1797 
1798  // Custom combine bitcast between f64 and v2i32
1799  if (!Subtarget->is64Bit())
1801 
1802  if (Subtarget->hasLeonCycleCounter())
1804 
1806 
1808 
1810 }
1811 
1813  return Subtarget->useSoftFloat();
1814 }
1815 
1816 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1817  switch ((SPISD::NodeType)Opcode) {
1818  case SPISD::FIRST_NUMBER: break;
1819  case SPISD::CMPICC: return "SPISD::CMPICC";
1820  case SPISD::CMPFCC: return "SPISD::CMPFCC";
1821  case SPISD::BRICC: return "SPISD::BRICC";
1822  case SPISD::BRXCC: return "SPISD::BRXCC";
1823  case SPISD::BRFCC: return "SPISD::BRFCC";
1824  case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1825  case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1826  case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1827  case SPISD::Hi: return "SPISD::Hi";
1828  case SPISD::Lo: return "SPISD::Lo";
1829  case SPISD::FTOI: return "SPISD::FTOI";
1830  case SPISD::ITOF: return "SPISD::ITOF";
1831  case SPISD::FTOX: return "SPISD::FTOX";
1832  case SPISD::XTOF: return "SPISD::XTOF";
1833  case SPISD::CALL: return "SPISD::CALL";
1834  case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
1835  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1836  case SPISD::FLUSHW: return "SPISD::FLUSHW";
1837  case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1838  case SPISD::TLS_LD: return "SPISD::TLS_LD";
1839  case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
1840  }
1841  return nullptr;
1842 }
1843 
1845  EVT VT) const {
1846  if (!VT.isVector())
1847  return MVT::i32;
1849 }
1850 
1851 /// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
1852 /// be zero. Op is expected to be a target specific node. Used by DAG
1853 /// combiner.
1855  (const SDValue Op,
1856  KnownBits &Known,
1857  const APInt &DemandedElts,
1858  const SelectionDAG &DAG,
1859  unsigned Depth) const {
1860  KnownBits Known2;
1861  Known.resetAll();
1862 
1863  switch (Op.getOpcode()) {
1864  default: break;
1865  case SPISD::SELECT_ICC:
1866  case SPISD::SELECT_XCC:
1867  case SPISD::SELECT_FCC:
1868  Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
1869  Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
1870 
1871  // Only known if known in both the LHS and RHS.
1872  Known.One &= Known2.One;
1873  Known.Zero &= Known2.Zero;
1874  break;
1875  }
1876 }
1877 
1878 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
1879 // set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
1880 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
1881  ISD::CondCode CC, unsigned &SPCC) {
1882  if (isNullConstant(RHS) &&
1883  CC == ISD::SETNE &&
1884  (((LHS.getOpcode() == SPISD::SELECT_ICC ||
1885  LHS.getOpcode() == SPISD::SELECT_XCC) &&
1886  LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
1887  (LHS.getOpcode() == SPISD::SELECT_FCC &&
1888  LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
1889  isOneConstant(LHS.getOperand(0)) &&
1890  isNullConstant(LHS.getOperand(1))) {
1891  SDValue CMPCC = LHS.getOperand(3);
1892  SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
1893  LHS = CMPCC.getOperand(0);
1894  RHS = CMPCC.getOperand(1);
1895  }
1896 }
1897 
1898 // Convert to a target node and set target flags.
1900  SelectionDAG &DAG) const {
1901  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1902  return DAG.getTargetGlobalAddress(GA->getGlobal(),
1903  SDLoc(GA),
1904  GA->getValueType(0),
1905  GA->getOffset(), TF);
1906 
1907  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
1908  return DAG.getTargetConstantPool(CP->getConstVal(),
1909  CP->getValueType(0),
1910  CP->getAlignment(),
1911  CP->getOffset(), TF);
1912 
1913  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
1914  return DAG.getTargetBlockAddress(BA->getBlockAddress(),
1915  Op.getValueType(),
1916  0,
1917  TF);
1918 
1919  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
1920  return DAG.getTargetExternalSymbol(ES->getSymbol(),
1921  ES->getValueType(0), TF);
1922 
1923  llvm_unreachable("Unhandled address SDNode");
1924 }
1925 
1926 // Split Op into high and low parts according to HiTF and LoTF.
1927 // Return an ADD node combining the parts.
1929  unsigned HiTF, unsigned LoTF,
1930  SelectionDAG &DAG) const {
1931  SDLoc DL(Op);
1932  EVT VT = Op.getValueType();
1933  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
1934  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
1935  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
1936 }
1937 
1938 // Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
1939 // or ExternalSymbol SDNode.
1941  SDLoc DL(Op);
1942  EVT VT = getPointerTy(DAG.getDataLayout());
1943 
1944  // Handle PIC mode first. SPARC needs a got load for every variable!
1945  if (isPositionIndependent()) {
1946  const Module *M = DAG.getMachineFunction().getFunction().getParent();
1947  PICLevel::Level picLevel = M->getPICLevel();
1948  SDValue Idx;
1949 
1950  if (picLevel == PICLevel::SmallPIC) {
1951  // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
1952  Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
1954  } else {
1955  // This is the pic32 code model, the GOT is known to be smaller than 4GB.
1958  }
1959 
1960  SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
1961  SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
1962  // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
1963  // function has calls.
1965  MFI.setHasCalls(true);
1966  return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
1968  }
1969 
1970  // This is one of the absolute code models.
1971  switch(getTargetMachine().getCodeModel()) {
1972  default:
1973  llvm_unreachable("Unsupported absolute code model");
1974  case CodeModel::Small:
1975  // abs32.
1978  case CodeModel::Medium: {
1979  // abs44.
1982  H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
1984  L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
1985  return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
1986  }
1987  case CodeModel::Large: {
1988  // abs64.
1991  Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
1994  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
1995  }
1996  }
1997 }
1998 
2000  SelectionDAG &DAG) const {
2001  return makeAddress(Op, DAG);
2002 }
2003 
2005  SelectionDAG &DAG) const {
2006  return makeAddress(Op, DAG);
2007 }
2008 
2010  SelectionDAG &DAG) const {
2011  return makeAddress(Op, DAG);
2012 }
2013 
2015  SelectionDAG &DAG) const {
2016 
2017  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2018  if (DAG.getTarget().useEmulatedTLS())
2019  return LowerToTLSEmulatedModel(GA, DAG);
2020 
2021  SDLoc DL(GA);
2022  const GlobalValue *GV = GA->getGlobal();
2023  EVT PtrVT = getPointerTy(DAG.getDataLayout());
2024 
2026 
2027  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2028  unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2031  unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2034  unsigned addTF = ((model == TLSModel::GeneralDynamic)
2037  unsigned callTF = ((model == TLSModel::GeneralDynamic)
2040 
2041  SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2042  SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2043  SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2044  withTargetFlags(Op, addTF, DAG));
2045 
2046  SDValue Chain = DAG.getEntryNode();
2047  SDValue InFlag;
2048 
2049  Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
2050  Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
2051  InFlag = Chain.getValue(1);
2052  SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2053  SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2054 
2055  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2056  const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2058  assert(Mask && "Missing call preserved mask for calling convention");
2059  SDValue Ops[] = {Chain,
2060  Callee,
2061  Symbol,
2062  DAG.getRegister(SP::O0, PtrVT),
2063  DAG.getRegisterMask(Mask),
2064  InFlag};
2065  Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2066  InFlag = Chain.getValue(1);
2067  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
2068  DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
2069  InFlag = Chain.getValue(1);
2070  SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
2071 
2072  if (model != TLSModel::LocalDynamic)
2073  return Ret;
2074 
2075  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2077  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2079  HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2080  return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2082  }
2083 
2084  if (model == TLSModel::InitialExec) {
2085  unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2087 
2088  SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2089 
2090  // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2091  // function has calls.
2093  MFI.setHasCalls(true);
2094 
2095  SDValue TGA = makeHiLoPair(Op,
2098  SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2100  DL, PtrVT, Ptr,
2101  withTargetFlags(Op, ldTF, DAG));
2102  return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2103  DAG.getRegister(SP::G7, PtrVT), Offset,
2104  withTargetFlags(Op,
2106  }
2107 
2108  assert(model == TLSModel::LocalExec);
2109  SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2111  SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2113  SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2114 
2115  return DAG.getNode(ISD::ADD, DL, PtrVT,
2116  DAG.getRegister(SP::G7, PtrVT), Offset);
2117 }
2118 
2121  const SDLoc &DL,
2122  SelectionDAG &DAG) const {
2124  EVT ArgVT = Arg.getValueType();
2125  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2126 
2127  ArgListEntry Entry;
2128  Entry.Node = Arg;
2129  Entry.Ty = ArgTy;
2130 
2131  if (ArgTy->isFP128Ty()) {
2132  // Create a stack object and pass the pointer to the library function.
2133  int FI = MFI.CreateStackObject(16, 8, false);
2134  SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2135  Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
2136  /* Alignment = */ 8);
2137 
2138  Entry.Node = FIPtr;
2139  Entry.Ty = PointerType::getUnqual(ArgTy);
2140  }
2141  Args.push_back(Entry);
2142  return Chain;
2143 }
2144 
2145 SDValue
2147  const char *LibFuncName,
2148  unsigned numArgs) const {
2149 
2150  ArgListTy Args;
2151 
2153  auto PtrVT = getPointerTy(DAG.getDataLayout());
2154 
2155  SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2156  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2157  Type *RetTyABI = RetTy;
2158  SDValue Chain = DAG.getEntryNode();
2159  SDValue RetPtr;
2160 
2161  if (RetTy->isFP128Ty()) {
2162  // Create a Stack Object to receive the return value of type f128.
2163  ArgListEntry Entry;
2164  int RetFI = MFI.CreateStackObject(16, 8, false);
2165  RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2166  Entry.Node = RetPtr;
2167  Entry.Ty = PointerType::getUnqual(RetTy);
2168  if (!Subtarget->is64Bit())
2169  Entry.IsSRet = true;
2170  Entry.IsReturned = false;
2171  Args.push_back(Entry);
2172  RetTyABI = Type::getVoidTy(*DAG.getContext());
2173  }
2174 
2175  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2176  for (unsigned i = 0, e = numArgs; i != e; ++i) {
2177  Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2178  }
2180  CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2181  .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2182 
2183  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2184 
2185  // chain is in second result.
2186  if (RetTyABI == RetTy)
2187  return CallInfo.first;
2188 
2189  assert (RetTy->isFP128Ty() && "Unexpected return type!");
2190 
2191  Chain = CallInfo.second;
2192 
2193  // Load RetPtr to get the return value.
2194  return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2195  MachinePointerInfo(), /* Alignment = */ 8);
2196 }
2197 
2199  unsigned &SPCC, const SDLoc &DL,
2200  SelectionDAG &DAG) const {
2201 
2202  const char *LibCall = nullptr;
2203  bool is64Bit = Subtarget->is64Bit();
2204  switch(SPCC) {
2205  default: llvm_unreachable("Unhandled conditional code!");
2206  case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2207  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2208  case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2209  case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2210  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2211  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2212  case SPCC::FCC_UL :
2213  case SPCC::FCC_ULE:
2214  case SPCC::FCC_UG :
2215  case SPCC::FCC_UGE:
2216  case SPCC::FCC_U :
2217  case SPCC::FCC_O :
2218  case SPCC::FCC_LG :
2219  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2220  }
2221 
2222  auto PtrVT = getPointerTy(DAG.getDataLayout());
2223  SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2224  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2225  ArgListTy Args;
2226  SDValue Chain = DAG.getEntryNode();
2227  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2228  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2229 
2231  CLI.setDebugLoc(DL).setChain(Chain)
2232  .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2233 
2234  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2235 
2236  // result is in first, and chain is in second result.
2237  SDValue Result = CallInfo.first;
2238 
2239  switch(SPCC) {
2240  default: {
2241  SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2242  SPCC = SPCC::ICC_NE;
2243  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2244  }
2245  case SPCC::FCC_UL : {
2246  SDValue Mask = DAG.getTargetConstant(1, DL, Result.getValueType());
2247  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2248  SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2249  SPCC = SPCC::ICC_NE;
2250  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2251  }
2252  case SPCC::FCC_ULE: {
2253  SDValue RHS = DAG.getTargetConstant(2, DL, Result.getValueType());
2254  SPCC = SPCC::ICC_NE;
2255  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2256  }
2257  case SPCC::FCC_UG : {
2258  SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
2259  SPCC = SPCC::ICC_G;
2260  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2261  }
2262  case SPCC::FCC_UGE: {
2263  SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
2264  SPCC = SPCC::ICC_NE;
2265  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2266  }
2267 
2268  case SPCC::FCC_U : {
2269  SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
2270  SPCC = SPCC::ICC_E;
2271  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2272  }
2273  case SPCC::FCC_O : {
2274  SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
2275  SPCC = SPCC::ICC_NE;
2276  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2277  }
2278  case SPCC::FCC_LG : {
2279  SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
2280  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2281  SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2282  SPCC = SPCC::ICC_NE;
2283  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2284  }
2285  case SPCC::FCC_UE : {
2286  SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
2287  Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2288  SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2289  SPCC = SPCC::ICC_E;
2290  return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2291  }
2292  }
2293 }
2294 
2295 static SDValue
2297  const SparcTargetLowering &TLI) {
2298 
2299  if (Op.getOperand(0).getValueType() == MVT::f64)
2300  return TLI.LowerF128Op(Op, DAG,
2301  TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2302 
2303  if (Op.getOperand(0).getValueType() == MVT::f32)
2304  return TLI.LowerF128Op(Op, DAG,
2305  TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2306 
2307  llvm_unreachable("fpextend with non-float operand!");
2308  return SDValue();
2309 }
2310 
2311 static SDValue
2313  const SparcTargetLowering &TLI) {
2314  // FP_ROUND on f64 and f32 are legal.
2315  if (Op.getOperand(0).getValueType() != MVT::f128)
2316  return Op;
2317 
2318  if (Op.getValueType() == MVT::f64)
2319  return TLI.LowerF128Op(Op, DAG,
2320  TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2321  if (Op.getValueType() == MVT::f32)
2322  return TLI.LowerF128Op(Op, DAG,
2323  TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2324 
2325  llvm_unreachable("fpround to non-float!");
2326  return SDValue();
2327 }
2328 
2330  const SparcTargetLowering &TLI,
2331  bool hasHardQuad) {
2332  SDLoc dl(Op);
2333  EVT VT = Op.getValueType();
2334  assert(VT == MVT::i32 || VT == MVT::i64);
2335 
2336  // Expand f128 operations to fp128 abi calls.
2337  if (Op.getOperand(0).getValueType() == MVT::f128
2338  && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2339  const char *libName = TLI.getLibcallName(VT == MVT::i32
2340  ? RTLIB::FPTOSINT_F128_I32
2341  : RTLIB::FPTOSINT_F128_I64);
2342  return TLI.LowerF128Op(Op, DAG, libName, 1);
2343  }
2344 
2345  // Expand if the resulting type is illegal.
2346  if (!TLI.isTypeLegal(VT))
2347  return SDValue();
2348 
2349  // Otherwise, Convert the fp value to integer in an FP register.
2350  if (VT == MVT::i32)
2351  Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2352  else
2353  Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2354 
2355  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2356 }
2357 
2359  const SparcTargetLowering &TLI,
2360  bool hasHardQuad) {
2361  SDLoc dl(Op);
2362  EVT OpVT = Op.getOperand(0).getValueType();
2363  assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2364 
2365  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2366 
2367  // Expand f128 operations to fp128 ABI calls.
2368  if (Op.getValueType() == MVT::f128
2369  && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2370  const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2371  ? RTLIB::SINTTOFP_I32_F128
2372  : RTLIB::SINTTOFP_I64_F128);
2373  return TLI.LowerF128Op(Op, DAG, libName, 1);
2374  }
2375 
2376  // Expand if the operand type is illegal.
2377  if (!TLI.isTypeLegal(OpVT))
2378  return SDValue();
2379 
2380  // Otherwise, Convert the int value to FP in an FP register.
2381  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2382  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2383  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2384 }
2385 
2387  const SparcTargetLowering &TLI,
2388  bool hasHardQuad) {
2389  SDLoc dl(Op);
2390  EVT VT = Op.getValueType();
2391 
2392  // Expand if it does not involve f128 or the target has support for
2393  // quad floating point instructions and the resulting type is legal.
2394  if (Op.getOperand(0).getValueType() != MVT::f128 ||
2395  (hasHardQuad && TLI.isTypeLegal(VT)))
2396  return SDValue();
2397 
2398  assert(VT == MVT::i32 || VT == MVT::i64);
2399 
2400  return TLI.LowerF128Op(Op, DAG,
2401  TLI.getLibcallName(VT == MVT::i32
2402  ? RTLIB::FPTOUINT_F128_I32
2403  : RTLIB::FPTOUINT_F128_I64),
2404  1);
2405 }
2406 
2408  const SparcTargetLowering &TLI,
2409  bool hasHardQuad) {
2410  SDLoc dl(Op);
2411  EVT OpVT = Op.getOperand(0).getValueType();
2412  assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2413 
2414  // Expand if it does not involve f128 or the target has support for
2415  // quad floating point instructions and the operand type is legal.
2416  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2417  return SDValue();
2418 
2419  return TLI.LowerF128Op(Op, DAG,
2420  TLI.getLibcallName(OpVT == MVT::i32
2421  ? RTLIB::UINTTOFP_I32_F128
2422  : RTLIB::UINTTOFP_I64_F128),
2423  1);
2424 }
2425 
2427  const SparcTargetLowering &TLI,
2428  bool hasHardQuad) {
2429  SDValue Chain = Op.getOperand(0);
2430  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2431  SDValue LHS = Op.getOperand(2);
2432  SDValue RHS = Op.getOperand(3);
2433  SDValue Dest = Op.getOperand(4);
2434  SDLoc dl(Op);
2435  unsigned Opc, SPCC = ~0U;
2436 
2437  // If this is a br_cc of a "setcc", and if the setcc got lowered into
2438  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2439  LookThroughSetCC(LHS, RHS, CC, SPCC);
2440 
2441  // Get the condition flag.
2442  SDValue CompareFlag;
2443  if (LHS.getValueType().isInteger()) {
2444  CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2445  if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2446  // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2447  Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
2448  } else {
2449  if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2450  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2451  CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2452  Opc = SPISD::BRICC;
2453  } else {
2454  CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2455  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2456  Opc = SPISD::BRFCC;
2457  }
2458  }
2459  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2460  DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2461 }
2462 
2464  const SparcTargetLowering &TLI,
2465  bool hasHardQuad) {
2466  SDValue LHS = Op.getOperand(0);
2467  SDValue RHS = Op.getOperand(1);
2468  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2469  SDValue TrueVal = Op.getOperand(2);
2470  SDValue FalseVal = Op.getOperand(3);
2471  SDLoc dl(Op);
2472  unsigned Opc, SPCC = ~0U;
2473 
2474  // If this is a select_cc of a "setcc", and if the setcc got lowered into
2475  // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2476  LookThroughSetCC(LHS, RHS, CC, SPCC);
2477 
2478  SDValue CompareFlag;
2479  if (LHS.getValueType().isInteger()) {
2480  CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2481  Opc = LHS.getValueType() == MVT::i32 ?
2483  if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2484  } else {
2485  if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2486  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2487  CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2488  Opc = SPISD::SELECT_ICC;
2489  } else {
2490  CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2491  Opc = SPISD::SELECT_FCC;
2492  if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2493  }
2494  }
2495  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2496  DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2497 }
2498 
2500  const SparcTargetLowering &TLI) {
2501  MachineFunction &MF = DAG.getMachineFunction();
2503  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2504 
2505  // Need frame address to find the address of VarArgsFrameIndex.
2507 
2508  // vastart just stores the address of the VarArgsFrameIndex slot into the
2509  // memory location argument.
2510  SDLoc DL(Op);
2511  SDValue Offset =
2512  DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2513  DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2514  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2515  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2516  MachinePointerInfo(SV));
2517 }
2518 
2520  SDNode *Node = Op.getNode();
2521  EVT VT = Node->getValueType(0);
2522  SDValue InChain = Node->getOperand(0);
2523  SDValue VAListPtr = Node->getOperand(1);
2524  EVT PtrVT = VAListPtr.getValueType();
2525  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2526  SDLoc DL(Node);
2527  SDValue VAList =
2528  DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2529  // Increment the pointer, VAList, to the next vaarg.
2530  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2531  DAG.getIntPtrConstant(VT.getSizeInBits()/8,
2532  DL));
2533  // Store the incremented VAList to the legalized pointer.
2534  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2535  MachinePointerInfo(SV));
2536  // Load the actual argument out of the pointer VAList.
2537  // We can't count on greater alignment than the word size.
2538  return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
2539  std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8);
2540 }
2541 
2543  const SparcSubtarget *Subtarget) {
2544  SDValue Chain = Op.getOperand(0); // Legalize the chain.
2545  SDValue Size = Op.getOperand(1); // Legalize the size.
2546  unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2547  unsigned StackAlign = Subtarget->getFrameLowering()->getStackAlignment();
2548  EVT VT = Size->getValueType(0);
2549  SDLoc dl(Op);
2550 
2551  // TODO: implement over-aligned alloca. (Note: also implies
2552  // supporting support for overaligned function frames + dynamic
2553  // allocations, at all, which currently isn't supported)
2554  if (Align > StackAlign) {
2555  const MachineFunction &MF = DAG.getMachineFunction();
2556  report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
2557  "over-aligned dynamic alloca not supported.");
2558  }
2559 
2560  // The resultant pointer needs to be above the register spill area
2561  // at the bottom of the stack.
2562  unsigned regSpillArea;
2563  if (Subtarget->is64Bit()) {
2564  regSpillArea = 128;
2565  } else {
2566  // On Sparc32, the size of the spill area is 92. Unfortunately,
2567  // that's only 4-byte aligned, not 8-byte aligned (the stack
2568  // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2569  // aligned dynamic allocation, we actually need to add 96 to the
2570  // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2571 
2572  // That also means adding 4 to the size of the allocation --
2573  // before applying the 8-byte rounding. Unfortunately, we the
2574  // value we get here has already had rounding applied. So, we need
2575  // to add 8, instead, wasting a bit more memory.
2576 
2577  // Further, this only actually needs to be done if the required
2578  // alignment is > 4, but, we've lost that info by this point, too,
2579  // so we always apply it.
2580 
2581  // (An alternative approach would be to always reserve 96 bytes
2582  // instead of the required 92, but then we'd waste 4 extra bytes
2583  // in every frame, not just those with dynamic stack allocations)
2584 
2585  // TODO: modify code in SelectionDAGBuilder to make this less sad.
2586 
2587  Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2588  DAG.getConstant(8, dl, VT));
2589  regSpillArea = 96;
2590  }
2591 
2592  unsigned SPReg = SP::O6;
2593  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2594  SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2595  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2596 
2597  regSpillArea += Subtarget->getStackPointerBias();
2598 
2599  SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2600  DAG.getConstant(regSpillArea, dl, VT));
2601  SDValue Ops[2] = { NewVal, Chain };
2602  return DAG.getMergeValues(Ops, dl);
2603 }
2604 
2605 
2607  SDLoc dl(Op);
2608  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2609  dl, MVT::Other, DAG.getEntryNode());
2610  return Chain;
2611 }
2612 
2613 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2614  const SparcSubtarget *Subtarget,
2615  bool AlwaysFlush = false) {
2617  MFI.setFrameAddressIsTaken(true);
2618 
2619  EVT VT = Op.getValueType();
2620  SDLoc dl(Op);
2621  unsigned FrameReg = SP::I6;
2622  unsigned stackBias = Subtarget->getStackPointerBias();
2623 
2624  SDValue FrameAddr;
2625  SDValue Chain;
2626 
2627  // flush first to make sure the windowed registers' values are in stack
2628  Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2629 
2630  FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2631 
2632  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2633 
2634  while (depth--) {
2635  SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2636  DAG.getIntPtrConstant(Offset, dl));
2637  FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2638  }
2639  if (Subtarget->is64Bit())
2640  FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2641  DAG.getIntPtrConstant(stackBias, dl));
2642  return FrameAddr;
2643 }
2644 
2645 
2647  const SparcSubtarget *Subtarget) {
2648 
2649  uint64_t depth = Op.getConstantOperandVal(0);
2650 
2651  return getFRAMEADDR(depth, Op, DAG, Subtarget);
2652 
2653 }
2654 
2656  const SparcTargetLowering &TLI,
2657  const SparcSubtarget *Subtarget) {
2658  MachineFunction &MF = DAG.getMachineFunction();
2659  MachineFrameInfo &MFI = MF.getFrameInfo();
2660  MFI.setReturnAddressIsTaken(true);
2661 
2662  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
2663  return SDValue();
2664 
2665  EVT VT = Op.getValueType();
2666  SDLoc dl(Op);
2667  uint64_t depth = Op.getConstantOperandVal(0);
2668 
2669  SDValue RetAddr;
2670  if (depth == 0) {
2671  auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2672  unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2673  RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2674  return RetAddr;
2675  }
2676 
2677  // Need frame address to find return address of the caller.
2678  SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2679 
2680  unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2681  SDValue Ptr = DAG.getNode(ISD::ADD,
2682  dl, VT,
2683  FrameAddr,
2684  DAG.getIntPtrConstant(Offset, dl));
2685  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2686 
2687  return RetAddr;
2688 }
2689 
2690 static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2691  unsigned opcode) {
2692  assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2693  assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2694 
2695  // Lower fneg/fabs on f64 to fneg/fabs on f32.
2696  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2697  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2698 
2699  // Note: in little-endian, the floating-point value is stored in the
2700  // registers are in the opposite order, so the subreg with the sign
2701  // bit is the highest-numbered (odd), rather than the
2702  // lowest-numbered (even).
2703 
2704  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2705  SrcReg64);
2706  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2707  SrcReg64);
2708 
2709  if (DAG.getDataLayout().isLittleEndian())
2710  Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2711  else
2712  Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2713 
2714  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2715  dl, MVT::f64), 0);
2716  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2717  DstReg64, Hi32);
2718  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2719  DstReg64, Lo32);
2720  return DstReg64;
2721 }
2722 
2723 // Lower a f128 load into two f64 loads.
2725 {
2726  SDLoc dl(Op);
2727  LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
2728  assert(LdNode && LdNode->getOffset().isUndef()
2729  && "Unexpected node type");
2730 
2731  unsigned alignment = LdNode->getAlignment();
2732  if (alignment > 8)
2733  alignment = 8;
2734 
2735  SDValue Hi64 =
2736  DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2737  LdNode->getPointerInfo(), alignment);
2738  EVT addrVT = LdNode->getBasePtr().getValueType();
2739  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2740  LdNode->getBasePtr(),
2741  DAG.getConstant(8, dl, addrVT));
2742  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2743  LdNode->getPointerInfo(), alignment);
2744 
2745  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2746  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2747 
2748  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2749  dl, MVT::f128);
2750  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2751  MVT::f128,
2752  SDValue(InFP128, 0),
2753  Hi64,
2754  SubRegEven);
2755  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2756  MVT::f128,
2757  SDValue(InFP128, 0),
2758  Lo64,
2759  SubRegOdd);
2760  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2761  SDValue(Lo64.getNode(), 1) };
2762  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2763  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2764  return DAG.getMergeValues(Ops, dl);
2765 }
2766 
2768 {
2769  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2770 
2771  EVT MemVT = LdNode->getMemoryVT();
2772  if (MemVT == MVT::f128)
2773  return LowerF128Load(Op, DAG);
2774 
2775  return Op;
2776 }
2777 
2778 // Lower a f128 store into two f64 stores.
2780  SDLoc dl(Op);
2781  StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
2782  assert(StNode && StNode->getOffset().isUndef()
2783  && "Unexpected node type");
2784  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2785  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2786 
2787  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2788  dl,
2789  MVT::f64,
2790  StNode->getValue(),
2791  SubRegEven);
2792  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2793  dl,
2794  MVT::f64,
2795  StNode->getValue(),
2796  SubRegOdd);
2797 
2798  unsigned alignment = StNode->getAlignment();
2799  if (alignment > 8)
2800  alignment = 8;
2801 
2802  SDValue OutChains[2];
2803  OutChains[0] =
2804  DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2805  StNode->getBasePtr(), MachinePointerInfo(), alignment);
2806  EVT addrVT = StNode->getBasePtr().getValueType();
2807  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2808  StNode->getBasePtr(),
2809  DAG.getConstant(8, dl, addrVT));
2810  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
2811  MachinePointerInfo(), alignment);
2812  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2813 }
2814 
2816 {
2817  SDLoc dl(Op);
2818  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
2819 
2820  EVT MemVT = St->getMemoryVT();
2821  if (MemVT == MVT::f128)
2822  return LowerF128Store(Op, DAG);
2823 
2824  if (MemVT == MVT::i64) {
2825  // Custom handling for i64 stores: turn it into a bitcast and a
2826  // v2i32 store.
2827  SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
2828  SDValue Chain = DAG.getStore(
2829  St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
2830  St->getAlignment(), St->getMemOperand()->getFlags(), St->getAAInfo());
2831  return Chain;
2832  }
2833 
2834  return SDValue();
2835 }
2836 
2837 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
2838  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
2839  && "invalid opcode");
2840 
2841  SDLoc dl(Op);
2842 
2843  if (Op.getValueType() == MVT::f64)
2844  return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
2845  if (Op.getValueType() != MVT::f128)
2846  return Op;
2847 
2848  // Lower fabs/fneg on f128 to fabs/fneg on f64
2849  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
2850  // (As with LowerF64Op, on little-endian, we need to negate the odd
2851  // subreg)
2852 
2853  SDValue SrcReg128 = Op.getOperand(0);
2854  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
2855  SrcReg128);
2856  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
2857  SrcReg128);
2858 
2859  if (DAG.getDataLayout().isLittleEndian()) {
2860  if (isV9)
2861  Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
2862  else
2863  Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
2864  } else {
2865  if (isV9)
2866  Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
2867  else
2868  Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
2869  }
2870 
2871  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2872  dl, MVT::f128), 0);
2873  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
2874  DstReg128, Hi64);
2875  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
2876  DstReg128, Lo64);
2877  return DstReg128;
2878 }
2879 
2881 
  // Only 64-bit carry-setting/carry-using add/sub need expansion here.
  if (Op.getValueType() != MVT::i64)
    return Op;

  SDLoc dl(Op);
  // Split the first operand into 32-bit low and high halves.
  SDValue Src1 = Op.getOperand(0);
  SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
  SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
                               DAG.getConstant(32, dl, MVT::i64));
  Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);

  // Likewise for the second operand.
  SDValue Src2 = Op.getOperand(1);
  SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
  SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
                               DAG.getConstant(32, dl, MVT::i64));
  Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);

  // The high halves must use the carry-consuming variant of the opcode
  // (ADDC -> ADDE, SUBC -> SUBE). ADDE/SUBE themselves also take an
  // incoming carry operand, which is forwarded to the low-half op.
  bool hasChain = false;
  unsigned hiOpc = Op.getOpcode();
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid opcode");
  case ISD::ADDC: hiOpc = ISD::ADDE; break;
  case ISD::ADDE: hasChain = true; break;
  case ISD::SUBC: hiOpc = ISD::SUBE; break;
  case ISD::SUBE: hasChain = true; break;
  }
  SDValue Lo;
  // Each 32-bit op produces (result, glue); the glue carries the carry bit.
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
  if (hasChain) {
    // Forward the incoming carry (operand 2) into the low-half op.
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
                     Op.getOperand(2));
  } else {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
  }
  // The high half consumes the carry produced by the low half.
  SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
  SDValue Carry = Hi.getValue(1);

  // Reassemble the i64 result as (Hi << 32) | Lo.
  Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
  Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
  Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
                   DAG.getConstant(32, dl, MVT::i64));

  SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
  // Return both the 64-bit result and the outgoing carry flag.
  SDValue Ops[2] = { Dst, Carry };
  return DAG.getMergeValues(Ops, dl);
}
2928 
2929 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
2930 // in LegalizeDAG.cpp except the order of arguments to the library function.
2932  const SparcTargetLowering &TLI)
2933 {
2934  unsigned opcode = Op.getOpcode();
2935  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
2936 
2937  bool isSigned = (opcode == ISD::SMULO);
2938  EVT VT = MVT::i64;
2939  EVT WideVT = MVT::i128;
2940  SDLoc dl(Op);
2941  SDValue LHS = Op.getOperand(0);
2942 
2943  if (LHS.getValueType() != VT)
2944  return Op;
2945 
2946  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
2947 
2948  SDValue RHS = Op.getOperand(1);
2949  SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
2950  SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
2951  SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
2952 
2953  SDValue MulResult = TLI.makeLibCall(DAG,
2954  RTLIB::MUL_I128, WideVT,
2955  Args, isSigned, dl).first;
2956  SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
2957  MulResult, DAG.getIntPtrConstant(0, dl));
2958  SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
2959  MulResult, DAG.getIntPtrConstant(1, dl));
2960  if (isSigned) {
2961  SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
2962  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
2963  } else {
2964  TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
2965  ISD::SETNE);
2966  }
2967  // MulResult is a node with an illegal type. Because such things are not
2968  // generally permitted during this phase of legalization, ensure that
2969  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
2970  // been folded.
2971  assert(MulResult->use_empty() && "Illegally typed node still in use!");
2972 
2973  SDValue Ops[2] = { BottomHalf, TopHalf } ;
2974  return DAG.getMergeValues(Ops, dl);
2975 }
2976 
  // Orderings stronger than monotonic need an explicit fence; returning an
  // empty SDValue tells the legalizer to expand the operation.
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
    // Expand with a fence.
    return SDValue();

  // Monotonic load/stores are legal.
  return Op;
}
2985 
2987  SelectionDAG &DAG) const {
  // Operand 0 of an INTRINSIC_WO_CHAIN node is the intrinsic ID.
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    // The thread pointer is read directly from register %g7.
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(SP::G7, PtrVT);
  }
  }
}
2998 
3001 
  // Subtarget features select between native and libcall-based lowerings
  // in several of the helpers below.
  bool hasHardQuad = Subtarget->hasHardQuad();
  bool isV9 = Subtarget->isV9();

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");

  // Addresses and frame introspection.
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG, *this,
                                                       Subtarget);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG,
                                                      Subtarget);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);

  // FP <-> int conversions and FP compares/selects; their lowering depends
  // on whether the subtarget has hardware quad-float support.
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::FP_TO_UINT:         return LowerFP_TO_UINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::BR_CC:              return LowerBR_CC(Op, DAG, *this,
                                                  hasHardQuad);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG, *this,
                                                      hasHardQuad);

  // Varargs and dynamic stack allocation.
  case ISD::VASTART:            return LowerVASTART(Op, DAG, *this);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
                                                               Subtarget);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);

  // f128 arithmetic goes through runtime library calls.
  case ISD::FADD:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::ADD_F128), 2);
  case ISD::FSUB:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::SUB_F128), 2);
  case ISD::FMUL:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::MUL_F128), 2);
  case ISD::FDIV:               return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::DIV_F128), 2);
  case ISD::FSQRT:              return LowerF128Op(Op, DAG,
                                       getLibcallName(RTLIB::SQRT_F128), 1);
  case ISD::FABS:
  case ISD::FNEG:               return LowerFNEGorFABS(Op, DAG, isV9);
  case ISD::FP_EXTEND:          return LowerF128_FPEXTEND(Op, DAG, *this);
  case ISD::FP_ROUND:           return LowerF128_FPROUND(Op, DAG, *this);

  // 64-bit integer arithmetic with carry / overflow.
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:               return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::UMULO:
  case ISD::SMULO:              return LowerUMULO_SMULO(Op, DAG, *this);

  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:       return LowerATOMIC_LOAD_STORE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}
3060 
3062  const SDLoc &DL,
3063  SelectionDAG &DAG) const {
  // Split the f64 constant's 64-bit pattern into two i32 halves and emit
  // them as a v2i32 build_vector, ordered to match the data layout's
  // endianness.
  APInt V = C->getValueAPF().bitcastToAPInt();
  SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
  SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
  if (DAG.getDataLayout().isLittleEndian())
    std::swap(Lo, Hi);
  return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
}
3071 
3073  DAGCombinerInfo &DCI) const {
  SDLoc dl(N);
  SDValue Src = N->getOperand(0);

  // Fold (v2i32 (bitcast (f64 constant))) into a build_vector of the
  // constant's bit pattern; all other bitcasts are left alone.
  if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
      Src.getSimpleValueType() == MVT::f64)
    return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);

  return SDValue();
}
3083 
3085  DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    break;
  // BITCAST is the only opcode with a target-specific combine here.
  case ISD::BITCAST:
    return PerformBITCASTCombine(N, DCI);
  }
  return SDValue();
}
3094 
3097  MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unknown SELECT_CC!");
  // Selects predicated on the integer condition codes branch with BCOND.
  case SP::SELECT_CC_Int_ICC:
  case SP::SELECT_CC_FP_ICC:
  case SP::SELECT_CC_DFP_ICC:
  case SP::SELECT_CC_QFP_ICC:
    return expandSelectCC(MI, BB, SP::BCOND);
  // Selects predicated on the FP condition codes branch with FBCOND.
  case SP::SELECT_CC_Int_FCC:
  case SP::SELECT_CC_FP_FCC:
  case SP::SELECT_CC_DFP_FCC:
  case SP::SELECT_CC_QFP_FCC:
    return expandSelectCC(MI, BB, SP::FBCOND);
  }
}
3112 
3115  unsigned BROpcode) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  // Operand 3 carries the SPARC condition code to branch on.
  unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // triangle control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and the condition code for the branch.
  //
  // We produce the following control flow:
  //     ThisMBB
  //     |  \
  //     |  IfFalseMBB
  //     |  /
  //    SinkMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  // NOTE(review): the declaration of the insertion-point iterator `It`
  // (used by F->insert below) is not visible in this excerpt — confirm it
  // points just past BB in the function's block list.

  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, IfFalseMBB);
  F->insert(It, SinkMBB);

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Set the new successors for ThisMBB.
  ThisMBB->addSuccessor(IfFalseMBB);
  ThisMBB->addSuccessor(SinkMBB);

  // Conditional branch to SinkMBB selects the "true" value.
  BuildMI(ThisMBB, dl, TII.get(BROpcode))
    .addMBB(SinkMBB)
    .addImm(CC);

  // IfFalseMBB just falls through to SinkMBB.
  IfFalseMBB->addSuccessor(SinkMBB);

  // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
          MI.getOperand(0).getReg())
    .addReg(MI.getOperand(1).getReg())
    .addMBB(ThisMBB)
    .addReg(MI.getOperand(2).getReg())
    .addMBB(IfFalseMBB);

  MI.eraseFromParent();   // The pseudo instruction is gone now.
  return SinkMBB;
}
3168 
3169 //===----------------------------------------------------------------------===//
3170 // Sparc Inline Assembly Support
3171 //===----------------------------------------------------------------------===//
3172 
3173 /// getConstraintType - Given a constraint letter, return the type of
3174 /// constraint it is for this target.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'r':   // Integer registers.
    case 'f':   // Floating-point registers.
    case 'e':   // FP registers including the upper (non-Low) classes.
      return C_RegisterClass;
    case 'I':   // SIMM13
      return C_Other;
    }
  }

  // Everything else is handled by the target-independent logic.
  return TargetLowering::getConstraintType(Constraint);
}
3191 
3194  const char *constraint) const {
3195  ConstraintWeight weight = CW_Invalid;
3196  Value *CallOperandVal = info.CallOperandVal;
3197  // If we don't have a value, we can't do a match,
3198  // but allow it at the lowest weight.
3199  if (!CallOperandVal)
3200  return CW_Default;
3201 
3202  // Look at the constraint type.
3203  switch (*constraint) {
3204  default:
3205  weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3206  break;
3207  case 'I': // SIMM13
3208  if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3209  if (isInt<13>(C->getSExtValue()))
3210  weight = CW_Constant;
3211  }
3212  break;
3213  }
3214  return weight;
3215 }
3216 
3217 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3218 /// vector. If it is invalid, don't add anything to Ops.
3221  std::string &Constraint,
3222  std::vector<SDValue> &Ops,
3223  SelectionDAG &DAG) const {
  SDValue Result(nullptr, 0);

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':   // SIMM13: signed 13-bit immediate.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
      // Constant does not fit SIMM13: invalid, so add nothing to Ops.
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  // Fall back to the target-independent handling.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
3250 
3251 std::pair<unsigned, const TargetRegisterClass *>
3253  StringRef Constraint,
3254  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      // Integer registers; v2i32 values take an even/odd register pair.
      if (VT == MVT::v2i32)
        return std::make_pair(0U, &SP::IntPairRegClass);
      else
        return std::make_pair(0U, &SP::IntRegsRegClass);
    case 'f':
      // FP registers restricted to the "Low" double/quad classes.
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &SP::LowDFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::LowQFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    case 'e':
      // Like 'f' but allowing the full double/quad register classes.
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &SP::FPRegsRegClass);
      else if (VT == MVT::f64 || VT == MVT::i64 )
        return std::make_pair(0U, &SP::DFPRegsRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SP::QFPRegsRegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    }
  } else if (!Constraint.empty() && Constraint.size() <= 5
              && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
    // constraint = '{r<d>}'
    // Remove the braces from around the name.
    StringRef name(Constraint.data()+1, Constraint.size()-2);
    // Handle register aliases:
    //       r0-r7   -> g0-g7
    //       r8-r15  -> o0-o7
    //       r16-r23 -> l0-l7
    //       r24-r31 -> i0-i7
    uint64_t intVal = 0;
    if (name.substr(0, 1).equals("r")
        && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
      const char regTypes[] = { 'g', 'o', 'l', 'i' };
      char regType = regTypes[intVal/8];
      char regIdx = '0' + (intVal % 8);
      char tmp[] = { '{', regType, regIdx, '}', 0 };
      std::string newConstraint = std::string(tmp);
      // Retry with the canonical register name.
      return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
                                                          VT);
    }
    // Map a raw single-precision FP register number {fN} onto the
    // correctly-sized alias for the requested type: fN for f32,
    // d(N/2) for f64 (even N only), q(N/4) for f128 (N divisible by 4).
    if (name.substr(0, 1).equals("f") &&
        !name.substr(1).getAsInteger(10, intVal) && intVal <= 63) {
      std::string newConstraint;

      if (VT == MVT::f32 || VT == MVT::Other) {
        newConstraint = "{f" + utostr(intVal) + "}";
      } else if (VT == MVT::f64 && (intVal % 2 == 0)) {
        newConstraint = "{d" + utostr(intVal / 2) + "}";
      } else if (VT == MVT::f128 && (intVal % 4 == 0)) {
        newConstraint = "{q" + utostr(intVal / 4) + "}";
      } else {
        // Misaligned or unsupported type: report no matching register.
        return std::make_pair(0U, nullptr);
      }
      return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
                                                          VT);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
3322 
3323 bool
3325  // The Sparc target isn't yet aware of offsets.
3326  return false;
3327 }
3328 
3331  SelectionDAG &DAG) const {
3332 
  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves f128 or i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    // f128 -> i64 conversions go through a runtime library call.
    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::READCYCLECOUNTER: {
    assert(Subtarget->hasLeonCycleCounter());
    // Read the LEON cycle counter from %asr23, pairing it with %g0, and
    // assemble the two i32 halves into the i64 result.
    SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
    SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
    SDValue Ops[] = { Lo, Hi };
    SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
    Results.push_back(Pair);
    Results.push_back(N->getOperand(0));
    return;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves f128 or i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    // i64 -> f128 conversions also go through a runtime library call.
    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::LOAD: {
    LoadSDNode *Ld = cast<LoadSDNode>(N);
    // Custom handling only for i64: turn i64 load into a v2i32 load,
    // and a bitcast.
    if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
      return;

    SDLoc dl(N);
    SDValue LoadRes = DAG.getExtLoad(
        Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
        Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getAlignment(),
        Ld->getMemOperand()->getFlags(), Ld->getAAInfo());

    // Push the bitcast result and the load's output chain.
    SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
    Results.push_back(Res);
    Results.push_back(LoadRes.getValue(1));
    return;
  }
  }
}
3402 
3403 // Override to enable LOAD_STACK_GUARD lowering on Linux.
3405  if (!Subtarget->isTargetLinux())
3407  return true;
3408 }
3409 
3410 // Override to disable global variable loading on Linux.
3412  if (!Subtarget->isTargetLinux())
3414 }
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
void setFrameAddressIsTaken(bool T)
uint64_t CallInst * C
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:570
X = FP_ROUND(Y, TRUNC) - Rounding &#39;Y&#39; from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:537
static MVT getIntegerVT(unsigned BitWidth)
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:883
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
EVT getValueType() const
Return the ValueType of the referenced return value.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
bool usePopc() const
const SDValue & getOffset() const
bool isUndef() const
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
const GlobalValue * getGlobal() const
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
Definition: ISDOpcodes.h:183
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
DiagnosticInfoOptimizationBase::Argument NV
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC&#39;s if you just have an ISD::CondCode instead of an SD...
Definition: SelectionDAG.h:936
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it&#39;s not CSE&#39;d)...
Definition: SelectionDAG.h:835
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC)
IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC condition.
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:651
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:139
This class represents lattice values for constants.
Definition: AllocatorList.h:23
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG)
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:64
SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
const SDValue & getBasePtr() const
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:222
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:382
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
unsigned getReg() const
getReg - Returns the register number.
bool isFP128Ty() const
Return true if this is &#39;fp128&#39;.
Definition: Type.h:155
const SDValue & getValue() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:703
unsigned Reg
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
const SDValue & getChain() const
Function Alias Analysis Results
unsigned getValNo() const
unsigned getAlignment() const
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:320
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
unsigned second
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
const uint32_t * getRTCallPreservedMask(CallingConv::ID CC) const
bool hasHardQuad() const
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:33
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:140
F(f)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
Definition: MathExtras.h:684
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:875
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:691
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
const SparcInstrInfo * getInstrInfo() const override
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:785
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:434
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:67
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
bool useSoftFloat() const override
SDValue getExternalSymbol(const char *Sym, EVT VT)
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
bool isMemLoc() const
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget, bool AlwaysFlush=false)
bool needsCustom() const
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:578
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:209
C - The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:455
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
bool isTargetLinux() const
static SDValue LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9)
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:80
static SDValue LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
const HexagonInstrInfo * TII
static void fixupVariableFloatArgs(SmallVectorImpl< CCValAssign > &ArgLocs, ArrayRef< ISD::OutputArg > Outs)
Shift and rotation operations.
Definition: ISDOpcodes.h:409
MachineBasicBlock * expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB, unsigned BROpcode) const
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:201
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
BinOp getOperation() const
Definition: Instructions.h:750
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:190
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
void addLoc(const CCValAssign &V)
uint64_t getConstantOperandVal(unsigned i) const
LLVM_NODISCARD R Default(T Value)
Definition: StringSwitch.h:181
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
Definition: ISDOpcodes.h:814
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:408
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:459
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:400
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:72
LocInfo getLocInfo() const
LLVM_NODISCARD bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:126
bool useSoftFloat() const
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool hasLeonCycleCounter() const
This represents a list of ValueType&#39;s that has been intern&#39;d by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SmallVector< ISD::InputArg, 32 > Ins
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:699
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
unsigned getSizeInBits() const
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC)
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
bool fixAllFDIVSQRT() const
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:291
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:244
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:397
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:477
SDValue getRegisterMask(const uint32_t *RegMask)
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:401
SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID CC) const override
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:200
SDValue LowerF128Op(SDValue Op, SelectionDAG &DAG, const char *LibFuncName, unsigned numArgs) const
bool useSoftMulDiv() const
int64_t getStackPointerBias() const
The 64-bit ABI uses biased stack and frame pointers, so the stack frame of the current function is th...
SmallVector< ISD::OutputArg, 32 > Outs
SDValue LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
LowerFormalArguments32 - V8 uses a very simple ABI, where all values are passed in either one or two ...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
LLVM_NODISCARD size_t size() const
size - Get the string size.
Definition: StringRef.h:130
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:220
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
static mvt_range integer_vector_valuetypes()
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:965
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
Definition: ISDOpcodes.h:751
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:575
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
bool isStrongerThanMonotonic(AtomicOrdering ao)
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, bool isSigned, const SDLoc &dl, bool doesNotReturn=false, bool isReturnValueUsed=true, bool isPostTypeLegalization=false) const
Returns a pair of (return value, chain).
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:150
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Class to represent pointers.
Definition: DerivedTypes.h:498
unsigned getByValSize() const
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
UNDEF - An undefined node.
Definition: ISDOpcodes.h:177
This class is used to represent ISD::STORE nodes.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:523
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:117
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Definition: ISDOpcodes.h:326
TargetInstrInfo - Interface to description of machine instruction set.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool hasNoFMULS() const
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
const SDValue & getBasePtr() const
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:42
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the &#39;usesCustomInserter&#39; fla...
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
Machine Value Type.
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
Simple binary floating point operators.
Definition: ISDOpcodes.h:282
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
void resetAll()
Resets the known state of all bits.
Definition: KnownBits.h:65
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
Definition: ISDOpcodes.h:732
const SDValue & getOperand(unsigned Num) const
static bool is64Bit(const char *name)
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
Definition: ISDOpcodes.h:331
SDValue LowerFormalArguments_64(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:231
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
Definition: SelectionDAG.h:823
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const
const SDValue & getOffset() const
static mvt_range fp_valuetypes()
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG)
static Type * getVoidTy(LLVMContext &C)
Definition: Type.cpp:160
SDValue LowerReturn_64(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
SDValue PerformBITCASTCombine(SDNode *N, DAGCombinerInfo &DCI) const
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:771
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:56
DEBUGTRAP - Trap intended to get the attention of a debugger.
Definition: ISDOpcodes.h:774
static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
self_iterator getIterator()
Definition: ilist_node.h:81
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:723
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
lazy value info
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo...
Definition: ISDOpcodes.h:800
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:555
std::vector< ArgListEntry > ArgListTy
Extended Value Type.
Definition: ValueTypes.h:33
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isPositionIndependent() const
This structure contains all information that is necessary for lowering calls.
size_t size() const
Definition: SmallVector.h:52
const TargetMachine & getTargetMachine() const
This class contains a discriminated union of information about pointers in memory operands...
unsigned getNumOperands() const
Return the number of values used by this operation.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array...
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:970
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
const SparcRegisterInfo * getRegisterInfo() const override
unsigned first
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this t