1 //===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the interfaces that Hexagon uses to lower LLVM code
11 // into a selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "HexagonISelLowering.h"
17 #include "HexagonSubtarget.h"
18 #include "HexagonTargetMachine.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/GlobalAlias.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/IR/InlineAsm.h"
34 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/Support/Debug.h"
39 
40 using namespace llvm;
41 
42 #define DEBUG_TYPE "hexagon-lowering"
43 
44 static cl::opt<bool>
45 EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden,
46  cl::desc("Control jump table emission on Hexagon target"));
47 
48 static cl::opt<bool> EnableHexSDNodeSched("enable-hexagon-sdnode-sched",
50  cl::desc("Enable Hexagon SDNode scheduling"));
51 
52 static cl::opt<bool> EnableFastMath("ffast-math",
54  cl::desc("Enable Fast Math processing"));
55 
56 static cl::opt<int> MinimumJumpTables("minimum-jump-tables",
58  cl::desc("Set minimum jump tables"));
59 
60 static cl::opt<int> MaxStoresPerMemcpyCL("max-store-memcpy",
62  cl::desc("Max #stores to inline memcpy"));
63 
64 static cl::opt<int> MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os",
66  cl::desc("Max #stores to inline memcpy"));
67 
68 static cl::opt<int> MaxStoresPerMemmoveCL("max-store-memmove",
70  cl::desc("Max #stores to inline memmove"));
71 
72 static cl::opt<int> MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os",
74  cl::desc("Max #stores to inline memmove"));
75 
76 static cl::opt<int> MaxStoresPerMemsetCL("max-store-memset",
78  cl::desc("Max #stores to inline memset"));
79 
80 static cl::opt<int> MaxStoresPerMemsetOptSizeCL("max-store-memset-Os",
82  cl::desc("Max #stores to inline memset"));
83 
84 
85 namespace {
86 class HexagonCCState : public CCState {
87  unsigned NumNamedVarArgParams;
88 
89 public:
90  HexagonCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
92  int NumNamedVarArgParams)
93  : CCState(CC, isVarArg, MF, locs, C),
94  NumNamedVarArgParams(NumNamedVarArgParams) {}
95 
96  unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
97 };
98 }
99 
100 // Implement calling convention for Hexagon.
101 static bool
102 CC_Hexagon(unsigned ValNo, MVT ValVT,
103  MVT LocVT, CCValAssign::LocInfo LocInfo,
104  ISD::ArgFlagsTy ArgFlags, CCState &State);
105 
106 static bool
107 CC_Hexagon32(unsigned ValNo, MVT ValVT,
108  MVT LocVT, CCValAssign::LocInfo LocInfo,
109  ISD::ArgFlagsTy ArgFlags, CCState &State);
110 
111 static bool
112 CC_Hexagon64(unsigned ValNo, MVT ValVT,
113  MVT LocVT, CCValAssign::LocInfo LocInfo,
114  ISD::ArgFlagsTy ArgFlags, CCState &State);
115 
116 static bool
117 RetCC_Hexagon(unsigned ValNo, MVT ValVT,
118  MVT LocVT, CCValAssign::LocInfo LocInfo,
119  ISD::ArgFlagsTy ArgFlags, CCState &State);
120 
121 static bool
122 RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
123  MVT LocVT, CCValAssign::LocInfo LocInfo,
124  ISD::ArgFlagsTy ArgFlags, CCState &State);
125 
126 static bool
127 RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
128  MVT LocVT, CCValAssign::LocInfo LocInfo,
129  ISD::ArgFlagsTy ArgFlags, CCState &State);
130 
131 static bool
132 CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT,
133  MVT LocVT, CCValAssign::LocInfo LocInfo,
134  ISD::ArgFlagsTy ArgFlags, CCState &State) {
135  HexagonCCState &HState = static_cast<HexagonCCState &>(State);
136 
137  if (ValNo < HState.getNumNamedVarArgParams()) {
138  // Deal with named arguments.
139  return CC_Hexagon(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
140  }
141 
142  // Deal with unnamed arguments.
143  unsigned ofst;
144  if (ArgFlags.isByVal()) {
145  // If pass-by-value, the size allocated on stack is decided
146  // by ArgFlags.getByValSize(), not by the size of LocVT.
147  ofst = State.AllocateStack(ArgFlags.getByValSize(),
148  ArgFlags.getByValAlign());
149  State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
150  return false;
151  }
152  if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
153  LocVT = MVT::i32;
154  ValVT = MVT::i32;
155  if (ArgFlags.isSExt())
156  LocInfo = CCValAssign::SExt;
157  else if (ArgFlags.isZExt())
158  LocInfo = CCValAssign::ZExt;
159  else
160  LocInfo = CCValAssign::AExt;
161  }
162  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
163  ofst = State.AllocateStack(4, 4);
164  State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
165  return false;
166  }
167  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
168  ofst = State.AllocateStack(8, 8);
169  State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
170  return false;
171  }
172  llvm_unreachable(nullptr);
173 }
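// For example (illustrative of the logic above, not a statement of the full
// ABI): in a call to printf("%d\n", x) the callee has one named parameter,
// so NumNamedVarArgParams == 1. The format-string pointer (ValNo 0) goes
// through CC_Hexagon and is normally assigned a register, while x (ValNo 1)
// is variadic and always receives a 4-byte stack slot from AllocateStack,
// regardless of how many argument registers remain free.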
174 
175 
176 static bool
177 CC_Hexagon (unsigned ValNo, MVT ValVT,
178  MVT LocVT, CCValAssign::LocInfo LocInfo,
179  ISD::ArgFlagsTy ArgFlags, CCState &State) {
180 
181  if (ArgFlags.isByVal()) {
182  // Passed on stack.
183  unsigned Offset = State.AllocateStack(ArgFlags.getByValSize(),
184  ArgFlags.getByValAlign());
185  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
186  return false;
187  }
188 
189  if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
190  LocVT = MVT::i32;
191  ValVT = MVT::i32;
192  if (ArgFlags.isSExt())
193  LocInfo = CCValAssign::SExt;
194  else if (ArgFlags.isZExt())
195  LocInfo = CCValAssign::ZExt;
196  else
197  LocInfo = CCValAssign::AExt;
198  } else if (LocVT == MVT::v4i8 || LocVT == MVT::v2i16) {
199  LocVT = MVT::i32;
200  LocInfo = CCValAssign::BCvt;
201  } else if (LocVT == MVT::v8i8 || LocVT == MVT::v4i16 || LocVT == MVT::v2i32) {
202  LocVT = MVT::i64;
203  LocInfo = CCValAssign::BCvt;
204  }
205 
206  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
207  if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
208  return false;
209  }
210 
211  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
212  if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
213  return false;
214  }
215 
216  return true; // CC didn't match.
217 }
218 
219 
220 static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
221  MVT LocVT, CCValAssign::LocInfo LocInfo,
222  ISD::ArgFlagsTy ArgFlags, CCState &State) {
223 
224  static const MCPhysReg RegList[] = {
225  Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
226  Hexagon::R5
227  };
228  if (unsigned Reg = State.AllocateReg(RegList)) {
229  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
230  return false;
231  }
232 
233  unsigned Offset = State.AllocateStack(4, 4);
234  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
235  return false;
236 }
237 
238 static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
239  MVT LocVT, CCValAssign::LocInfo LocInfo,
240  ISD::ArgFlagsTy ArgFlags, CCState &State) {
241 
242  if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
243  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
244  return false;
245  }
246 
247  static const MCPhysReg RegList1[] = {
248  Hexagon::D1, Hexagon::D2
249  };
250  static const MCPhysReg RegList2[] = {
251  Hexagon::R1, Hexagon::R3
252  };
253  if (unsigned Reg = State.AllocateReg(RegList1, RegList2)) {
254  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
255  return false;
256  }
257 
258  unsigned Offset = State.AllocateStack(8, 8, Hexagon::D2);
259  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
260  return false;
261 }
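// Worked example of the allocation order above (illustrative): for a call
// f(int a, long long b), 'a' is assigned R0 by CC_Hexagon32. For 'b',
// AllocateReg(Hexagon::D0) fails because D0 aliases R0, so allocation falls
// through to RegList1 and 'b' lands in D1 (the R3:R2 pair), with R1 marked
// as used via the shadow list so that no later 32-bit argument is placed
// there.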
262 
263 static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
264  MVT LocVT, CCValAssign::LocInfo LocInfo,
265  ISD::ArgFlagsTy ArgFlags, CCState &State) {
266 
267 
268  if (LocVT == MVT::i1 ||
269  LocVT == MVT::i8 ||
270  LocVT == MVT::i16) {
271  LocVT = MVT::i32;
272  ValVT = MVT::i32;
273  if (ArgFlags.isSExt())
274  LocInfo = CCValAssign::SExt;
275  else if (ArgFlags.isZExt())
276  LocInfo = CCValAssign::ZExt;
277  else
278  LocInfo = CCValAssign::AExt;
279  } else if (LocVT == MVT::v4i8 || LocVT == MVT::v2i16) {
280  LocVT = MVT::i32;
281  LocInfo = CCValAssign::BCvt;
282  } else if (LocVT == MVT::v8i8 || LocVT == MVT::v4i16 || LocVT == MVT::v2i32) {
283  LocVT = MVT::i64;
284  LocInfo = CCValAssign::BCvt;
285  }
286 
287  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
288  if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
289  return false;
290  }
291 
292  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
293  if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
294  return false;
295  }
296 
297  return true; // CC didn't match.
298 }
299 
300 static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
301  MVT LocVT, CCValAssign::LocInfo LocInfo,
302  ISD::ArgFlagsTy ArgFlags, CCState &State) {
303 
304  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
305  if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
306  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
307  return false;
308  }
309  }
310 
311  unsigned Offset = State.AllocateStack(4, 4);
312  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
313  return false;
314 }
315 
316 static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
317  MVT LocVT, CCValAssign::LocInfo LocInfo,
318  ISD::ArgFlagsTy ArgFlags, CCState &State) {
319  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
320  if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
321  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
322  return false;
323  }
324  }
325 
326  unsigned Offset = State.AllocateStack(8, 8);
327  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
328  return false;
329 }
330 
331 SDValue
333 const {
334  return SDValue();
335 }
336 
337 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
338 /// by "Src" to address "Dst" of size "Size". Alignment information is
339 /// specified by the specific parameter attribute. The copy will be passed as
340 /// a byval function parameter. Sometimes what we are copying is the end of a
341 /// larger object, the part that does not fit in registers.
342 static SDValue
345  SDLoc dl) {
346 
347  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
348  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
349  /*isVolatile=*/false, /*AlwaysInline=*/false,
350  /*isTailCall=*/false,
352 }
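// For reference (illustrative), a byval argument in IR of this era looks
// roughly like
//   call void @take(%struct.S* byval align 8 %tmp)
// and the helper above lowers the outgoing copy to a single memcpy node of
// Flags.getByValSize() bytes from the caller's object to the argument area.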
353 
354 
355 // LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
356 // passed by value, the function prototype is modified to return void and
357 // the value is stored in memory pointed to by a pointer passed by the caller.
358 SDValue
360  CallingConv::ID CallConv, bool isVarArg,
362  const SmallVectorImpl<SDValue> &OutVals,
363  SDLoc dl, SelectionDAG &DAG) const {
364 
365  // CCValAssign - represent the assignment of the return value to locations.
367 
368  // CCState - Info about the registers and stack slot.
369  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
370  *DAG.getContext());
371 
372  // Analyze return values of ISD::RET
373  CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
374 
375  SDValue Flag;
376  SmallVector<SDValue, 4> RetOps(1, Chain);
377 
378  // Copy the result values into the output registers.
379  for (unsigned i = 0; i != RVLocs.size(); ++i) {
380  CCValAssign &VA = RVLocs[i];
381 
382  Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
383 
384  // Guarantee that all emitted copies are stuck together with flags.
385  Flag = Chain.getValue(1);
386  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
387  }
388 
389  RetOps[0] = Chain; // Update chain.
390 
391  // Add the flag if we have it.
392  if (Flag.getNode())
393  RetOps.push_back(Flag);
394 
395  return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
396 }
397 
399  // If the call is not a tail call, or tail calls are disabled entirely, don't.
400  auto Attr =
401  CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
402  if (!CI->isTailCall() || Attr.getValueAsString() == "true")
403  return false;
404 
405  return true;
406 }
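// For example (illustrative IR), a call site inside a function carrying
//   attributes #0 = { "disable-tail-calls"="true" }
// is rejected here even if the call instruction itself is marked 'tail'.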
407 
408 /// LowerCallResult - Lower the result values of an ISD::CALL into the
409 /// appropriate copies out of appropriate physical registers. This assumes that
410 /// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
411 /// being lowered. Returns a SDNode with the same number of values as the
412 /// ISD::CALL.
413 SDValue
415  CallingConv::ID CallConv, bool isVarArg,
416  const
418  SDLoc dl, SelectionDAG &DAG,
419  SmallVectorImpl<SDValue> &InVals,
420  const SmallVectorImpl<SDValue> &OutVals,
421  SDValue Callee) const {
422 
423  // Assign locations to each value returned by this call.
425 
426  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
427  *DAG.getContext());
428 
429  CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
430 
431  // Copy all of the result registers out of their specified physreg.
432  for (unsigned i = 0; i != RVLocs.size(); ++i) {
433  Chain = DAG.getCopyFromReg(Chain, dl,
434  RVLocs[i].getLocReg(),
435  RVLocs[i].getValVT(), InFlag).getValue(1);
436  InFlag = Chain.getValue(2);
437  InVals.push_back(Chain.getValue(0));
438  }
439 
440  return Chain;
441 }
442 
443 /// LowerCall - Functions arguments are copied from virtual regs to
444 /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
445 SDValue
447  SmallVectorImpl<SDValue> &InVals) const {
448  SelectionDAG &DAG = CLI.DAG;
449  SDLoc &dl = CLI.DL;
451  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
453  SDValue Chain = CLI.Chain;
454  SDValue Callee = CLI.Callee;
455  bool &isTailCall = CLI.IsTailCall;
456  CallingConv::ID CallConv = CLI.CallConv;
457  bool isVarArg = CLI.IsVarArg;
458  bool doesNotReturn = CLI.DoesNotReturn;
459 
460  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
462  auto PtrVT = getPointerTy(MF.getDataLayout());
463 
464  // Check for varargs.
465  int NumNamedVarArgParams = -1;
466  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee))
467  {
468  const Function* CalleeFn = nullptr;
469  Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, MVT::i32);
470  if ((CalleeFn = dyn_cast<Function>(GA->getGlobal())))
471  {
472  // A variadic function with zero named parameters is not valid C, so such a
473  // signature indicates an undeclared (unprototyped) callee. Do not assume
474  // varargs if the callee is undeclared.
475  if (CalleeFn->isVarArg() &&
476  CalleeFn->getFunctionType()->getNumParams() != 0) {
477  NumNamedVarArgParams = CalleeFn->getFunctionType()->getNumParams();
478  }
479  }
480  }
481 
482  // Analyze operands of the call, assigning locations to each operand.
484  HexagonCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
485  *DAG.getContext(), NumNamedVarArgParams);
486 
487  if (isVarArg)
488  CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
489  else
490  CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
491 
492  auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
493  if (Attr.getValueAsString() == "true")
494  isTailCall = false;
495 
496  if (isTailCall) {
497  bool StructAttrFlag = MF.getFunction()->hasStructRetAttr();
498  isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
499  isVarArg, IsStructRet,
500  StructAttrFlag,
501  Outs, OutVals, Ins, DAG);
502  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
503  CCValAssign &VA = ArgLocs[i];
504  if (VA.isMemLoc()) {
505  isTailCall = false;
506  break;
507  }
508  }
509  DEBUG(dbgs() << (isTailCall ? "Eligible for Tail Call\n"
510  : "Argument must be passed on stack. "
511  "Not eligible for Tail Call\n"));
512  }
513  // Get a count of how many bytes are to be pushed on the stack.
514  unsigned NumBytes = CCInfo.getNextStackOffset();
516  SmallVector<SDValue, 8> MemOpChains;
517 
518  auto &HRI = *Subtarget.getRegisterInfo();
519  SDValue StackPtr =
520  DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT);
521 
522  // Walk the register/memloc assignments, inserting copies/loads.
523  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
524  CCValAssign &VA = ArgLocs[i];
525  SDValue Arg = OutVals[i];
526  ISD::ArgFlagsTy Flags = Outs[i].Flags;
527 
528  // Promote the value if needed.
529  switch (VA.getLocInfo()) {
530  default:
531  // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
532  llvm_unreachable("Unknown loc info!");
533  case CCValAssign::BCvt:
534  case CCValAssign::Full:
535  break;
536  case CCValAssign::SExt:
537  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
538  break;
539  case CCValAssign::ZExt:
540  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
541  break;
542  case CCValAssign::AExt:
543  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
544  break;
545  }
546 
547  if (VA.isMemLoc()) {
548  unsigned LocMemOffset = VA.getLocMemOffset();
549  SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
550  StackPtr.getValueType());
551  MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
552  if (Flags.isByVal()) {
553  // The argument is a struct passed by value. According to LLVM, "Arg"
554  // is a pointer.
555  MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain,
556  Flags, DAG, dl));
557  } else {
558  MachinePointerInfo LocPI = MachinePointerInfo::getStack(LocMemOffset);
559  SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI, false,
560  false, 0);
561  MemOpChains.push_back(S);
562  }
563  continue;
564  }
565 
566  // Arguments that can be passed on register must be kept at RegsToPass
567  // vector.
568  if (VA.isRegLoc())
569  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
570  }
571 
572  // Transform all store nodes into one single node because all store
573  // nodes are independent of each other.
574  if (!MemOpChains.empty())
575  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
576 
577  if (!isTailCall) {
578  SDValue C = DAG.getConstant(NumBytes, dl, PtrVT, true);
579  Chain = DAG.getCALLSEQ_START(Chain, C, dl);
580  }
581 
582  // Build a sequence of copy-to-reg nodes chained together with token
583  // chain and flag operands which copy the outgoing args into registers.
584  // The InFlag is necessary since all emitted instructions must be
585  // stuck together.
586  SDValue InFlag;
587  if (!isTailCall) {
588  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
589  Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
590  RegsToPass[i].second, InFlag);
591  InFlag = Chain.getValue(1);
592  }
593  } else {
594  // For tail calls lower the arguments to the 'real' stack slot.
595  //
596  // Force all the incoming stack arguments to be loaded from the stack
597  // before any new outgoing arguments are stored to the stack, because the
598  // outgoing stack slots may alias the incoming argument stack slots, and
599  // the alias isn't otherwise explicit. This is slightly more conservative
600  // than necessary, because it means that each store effectively depends
601  // on every argument instead of just those arguments it would clobber.
602  //
603  // Do not flag preceding copytoreg stuff together with the following stuff.
604  InFlag = SDValue();
605  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
606  Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
607  RegsToPass[i].second, InFlag);
608  InFlag = Chain.getValue(1);
609  }
610  InFlag = SDValue();
611  }
612 
613  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
614  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
615  // node so that legalize doesn't hack it.
616  if (flag_aligned_memcpy) {
617  const char *MemcpyName =
618  "__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes";
619  Callee = DAG.getTargetExternalSymbol(MemcpyName, PtrVT);
620  flag_aligned_memcpy = false;
621  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
622  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT);
623  } else if (ExternalSymbolSDNode *S =
624  dyn_cast<ExternalSymbolSDNode>(Callee)) {
625  Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT);
626  }
627 
628  // Returns a chain & a flag for retval copy to use.
629  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
631  Ops.push_back(Chain);
632  Ops.push_back(Callee);
633 
634  // Add argument registers to the end of the list so that they are
635  // known live into the call.
636  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
637  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
638  RegsToPass[i].second.getValueType()));
639  }
640 
641  if (InFlag.getNode())
642  Ops.push_back(InFlag);
643 
644  if (isTailCall) {
646  return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
647  }
648 
649  int OpCode = doesNotReturn ? HexagonISD::CALLv3nr : HexagonISD::CALLv3;
650  Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
651  InFlag = Chain.getValue(1);
652 
653  // Create the CALLSEQ_END node.
654  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
655  DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
656  InFlag = Chain.getValue(1);
657 
658  // Handle result values, copying them out of physregs into vregs that we
659  // return.
660  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
661  InVals, OutVals, Callee);
662 }
663 
664 static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,
665  bool isSEXTLoad, SDValue &Base,
666  SDValue &Offset, bool &isInc,
667  SelectionDAG &DAG) {
668  if (Ptr->getOpcode() != ISD::ADD)
669  return false;
670 
671  if (VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
672  isInc = (Ptr->getOpcode() == ISD::ADD);
673  Base = Ptr->getOperand(0);
674  Offset = Ptr->getOperand(1);
675  // Ensure that Offset is a constant.
676  return (isa<ConstantSDNode>(Offset));
677  }
678 
679  return false;
680 }
681 
682 // TODO: Put this function along with the other isS* functions in
683 // HexagonISelDAGToDAG.cpp into a common file. Or better still, use the
684 // functions defined in HexagonOperands.td.
685 static bool Is_PostInc_S4_Offset(SDNode * S, int ShiftAmount) {
686  ConstantSDNode *N = cast<ConstantSDNode>(S);
687 
688  // immS4 predicate - True if the immediate fits in a 4-bit sign-extended
689  // field.
690  int64_t v = (int64_t)N->getSExtValue();
691  int64_t m = 0;
692  if (ShiftAmount > 0) {
693  m = v % ShiftAmount;
694  v = v >> ShiftAmount;
695  }
696  return (v <= 7) && (v >= -8) && (m == 0);
697 }
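// Example of the check above (plain arithmetic): for a word access the
// caller passes ShiftAmount = 2, so an offset of 8 gives m = 0 and v = 2
// (within [-8, 7]) and is accepted, an offset of 40 gives v = 10 and is
// rejected, and an odd offset such as 7 is rejected because m != 0.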
698 
699 /// getPostIndexedAddressParts - returns true by value, base pointer and
700 /// offset pointer and addressing mode by reference if this node can be
701 /// combined with a load / store to form a post-indexed load / store.
703  SDValue &Base,
704  SDValue &Offset,
706  SelectionDAG &DAG) const
707 {
708  EVT VT;
709  SDValue Ptr;
710  bool isSEXTLoad = false;
711 
712  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
713  VT = LD->getMemoryVT();
714  isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
715  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
716  VT = ST->getMemoryVT();
717  if (ST->getValue().getValueType() == MVT::i64 && ST->isTruncatingStore()) {
718  return false;
719  }
720  } else {
721  return false;
722  }
723 
724  bool isInc = false;
725  bool isLegal = getIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
726  isInc, DAG);
727  // ShiftAmount = number of left-shifted bits in the Hexagon instruction.
728  int ShiftAmount = VT.getSizeInBits() / 16;
729  if (isLegal && Is_PostInc_S4_Offset(Offset.getNode(), ShiftAmount)) {
730  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
731  return true;
732  }
733 
734  return false;
735 }
736 
738  SelectionDAG &DAG) const {
739  SDNode *Node = Op.getNode();
741  auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();
742  switch (Node->getOpcode()) {
743  case ISD::INLINEASM: {
744  unsigned NumOps = Node->getNumOperands();
745  if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
746  --NumOps; // Ignore the flag operand.
747 
748  for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
749  if (FuncInfo.hasClobberLR())
750  break;
751  unsigned Flags =
752  cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
753  unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
754  ++i; // Skip the ID value.
755 
756  switch (InlineAsm::getKind(Flags)) {
757  default: llvm_unreachable("Bad flags!");
760  case InlineAsm::Kind_Imm:
762  case InlineAsm::Kind_Mem: {
763  for (; NumVals; --NumVals, ++i) {}
764  break;
765  }
767  for (; NumVals; --NumVals, ++i) {
768  unsigned Reg =
769  cast<RegisterSDNode>(Node->getOperand(i))->getReg();
770 
771  // Check whether the register is LR (the return address register).
772  const HexagonRegisterInfo *QRI = Subtarget.getRegisterInfo();
773  if (Reg == QRI->getRARegister()) {
774  FuncInfo.setHasClobberLR(true);
775  break;
776  }
777  }
778  break;
779  }
780  }
781  }
782  }
783  } // Node->getOpcode
784  return Op;
785 }
786 
787 
788 //
789 // Taken from the XCore backend.
790 //
793 {
794  SDValue Chain = Op.getOperand(0);
795  SDValue Table = Op.getOperand(1);
796  SDValue Index = Op.getOperand(2);
797  SDLoc dl(Op);
798  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
799  unsigned JTI = JT->getIndex();
801  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
802  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
803 
804  // Mark all jump table targets as address taken.
805  const std::vector<MachineJumpTableEntry> &JTE = MJTI->getJumpTables();
806  const std::vector<MachineBasicBlock*> &JTBBs = JTE[JTI].MBBs;
807  for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
808  MachineBasicBlock *MBB = JTBBs[i];
809  MBB->setHasAddressTaken();
810  // This line is needed to set the hasAddressTaken flag on the BasicBlock
811  // object.
812  BlockAddress::get(const_cast<BasicBlock *>(MBB->getBasicBlock()));
813  }
814 
815  SDValue JumpTableBase = DAG.getNode(
816  HexagonISD::JT, dl, getPointerTy(DAG.getDataLayout()), TargetJT);
817  SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
818  DAG.getConstant(2, dl, MVT::i32));
819  SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase,
820  ShiftIndex);
821  SDValue LoadTarget = DAG.getLoad(MVT::i32, dl, Chain, JTAddress,
822  MachinePointerInfo(), false, false, false,
823  0);
824  return DAG.getNode(HexagonISD::BR_JT, dl, MVT::Other, Chain, LoadTarget);
825 }
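// Schematically (illustrative only; the exact instructions depend on later
// selection), a dense C switch such as
//   switch (x) { case 0: ...; case 1: ...; case 2: ...; case 3: ...; }
// becomes: materialize the jump-table base (HexagonISD::JT), load the target
// address from base + 4*index, and branch through it (HexagonISD::BR_JT),
// e.g.
//   r1 = ##.LJTI0_0
//   r1 = memw(r1+r0<<#2)
//   jumpr r1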
826 
827 
828 SDValue
830  SelectionDAG &DAG) const {
831  SDValue Chain = Op.getOperand(0);
832  SDValue Size = Op.getOperand(1);
833  SDValue Align = Op.getOperand(2);
834  SDLoc dl(Op);
835 
837  assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");
838 
839  unsigned A = AlignConst->getSExtValue();
840  auto &HFI = *Subtarget.getFrameLowering();
841  // "Zero" means natural stack alignment.
842  if (A == 0)
843  A = HFI.getStackAlignment();
844 
845  DEBUG({
846  dbgs () << LLVM_FUNCTION_NAME << " Align: " << A << " Size: ";
847  Size.getNode()->dump(&DAG);
848  dbgs() << "\n";
849  });
850 
851  SDValue AC = DAG.getConstant(A, dl, MVT::i32);
853  return DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);
854 }
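// For example (illustrative), a C99 VLA or a plain __builtin_alloca(n) call
// reaches this hook with Align == 0, which is replaced by the subtarget's
// natural stack alignment above; an over-aligned alloca instead supplies a
// non-zero Align that is passed through to the HexagonISD::ALLOCA node.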
855 
856 SDValue
858  CallingConv::ID CallConv,
859  bool isVarArg,
860  const
862  SDLoc dl, SelectionDAG &DAG,
863  SmallVectorImpl<SDValue> &InVals)
864 const {
865 
867  MachineFrameInfo *MFI = MF.getFrameInfo();
868  MachineRegisterInfo &RegInfo = MF.getRegInfo();
869  auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();
870 
871  // Assign locations to all of the incoming arguments.
873  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
874  *DAG.getContext());
875 
876  CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
877 
878  // For LLVM, in the case when returning a struct by value (> 8 bytes),
879  // the first argument is a pointer that points to the location on the caller's
880  // stack where the return value will be stored. For Hexagon, the location on
881  // the caller's stack is passed only when the struct size is smaller than or
882  // equal to 8 bytes. If not, no address is passed into the callee and the
883  // callee returns the result directly through R0/R1.
884 
886 
887  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
888  CCValAssign &VA = ArgLocs[i];
889  ISD::ArgFlagsTy Flags = Ins[i].Flags;
890  unsigned ObjSize;
891  unsigned StackLocation;
892  int FI;
893 
894  if ( (VA.isRegLoc() && !Flags.isByVal())
895  || (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() > 8)) {
896  // Arguments passed in registers
897  // 1. int, long long, and pointer args that get allocated in registers.
898  // 2. Large structs that get a register to hold their address.
899  EVT RegVT = VA.getLocVT();
900  if (RegVT == MVT::i8 || RegVT == MVT::i16 ||
901  RegVT == MVT::i32 || RegVT == MVT::f32) {
902  unsigned VReg =
903  RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass);
904  RegInfo.addLiveIn(VA.getLocReg(), VReg);
905  InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
906  } else if (RegVT == MVT::i64 || RegVT == MVT::f64) {
907  unsigned VReg =
908  RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
909  RegInfo.addLiveIn(VA.getLocReg(), VReg);
910  InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
911  } else {
912  assert (0);
913  }
914  } else if (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() <= 8) {
915  assert (0 && "ByValSize must be bigger than 8 bytes");
916  } else {
917  // Sanity check.
918  assert(VA.isMemLoc());
919 
920  if (Flags.isByVal()) {
921  // If it's a byval parameter, then we need to compute the
922  // "real" size, not the size of the pointer.
923  ObjSize = Flags.getByValSize();
924  } else {
925  ObjSize = VA.getLocVT().getStoreSizeInBits() >> 3;
926  }
927 
928  StackLocation = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
929  // Create the frame index object for this incoming parameter...
930  FI = MFI->CreateFixedObject(ObjSize, StackLocation, true);
931 
932  // Create the SelectionDAG nodes corresponding to a load
933  // from this parameter.
934  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
935 
936  if (Flags.isByVal()) {
937  // If it's a pass-by-value aggregate, then do not dereference the stack
938  // location. Instead, we should generate a reference to the stack
939  // location.
940  InVals.push_back(FIN);
941  } else {
942  InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
943  MachinePointerInfo(), false, false,
944  false, 0));
945  }
946  }
947  }
948 
949  if (!MemOps.empty())
950  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
951 
952  if (isVarArg) {
953  // This will point to the next argument passed via stack.
956  CCInfo.getNextStackOffset(),
957  true);
958  FuncInfo.setVarArgsFrameIndex(FrameIndex);
959  }
960 
961  return Chain;
962 }
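// Illustrative consequence of the byval handling above: for a parameter such
// as  void f(struct Big b)  that arrives on the stack, the function body is
// handed a frame index (a pointer to the caller-provided copy) in InVals
// rather than a loaded value, so member accesses become ordinary loads
// through that pointer.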
963 
964 SDValue
966  // VASTART stores the address of the VarArgsFrameIndex slot into the
967  // memory location argument.
971  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
972  return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr,
973  Op.getOperand(1), MachinePointerInfo(SV), false,
974  false, 0);
975 }
976 
977 // Creates a SPLAT instruction for a constant value VAL.
978 static SDValue createSplat(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue Val) {
979  if (VT.getSimpleVT() == MVT::v4i8)
980  return DAG.getNode(HexagonISD::VSPLATB, dl, VT, Val);
981 
982  if (VT.getSimpleVT() == MVT::v4i16)
983  return DAG.getNode(HexagonISD::VSPLATH, dl, VT, Val);
984 
985  return SDValue();
986 }
987 
988 static bool isSExtFree(SDValue N) {
989  // A sign-extend of a truncate of a sign-extend is free.
990  if (N.getOpcode() == ISD::TRUNCATE &&
992  return true;
993  // We have sign-extended loads.
994  if (N.getOpcode() == ISD::LOAD)
995  return true;
996  return false;
997 }
998 
1000  SDLoc dl(Op);
1001  SDValue InpVal = Op.getOperand(0);
1002  if (isa<ConstantSDNode>(InpVal)) {
1003  uint64_t V = cast<ConstantSDNode>(InpVal)->getZExtValue();
1004  return DAG.getTargetConstant(countPopulation(V), dl, MVT::i64);
1005  }
1006  SDValue PopOut = DAG.getNode(HexagonISD::POPCOUNT, dl, MVT::i32, InpVal);
1007  return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, PopOut);
1008 }
1009 
1011  SDLoc dl(Op);
1012 
1013  SDValue LHS = Op.getOperand(0);
1014  SDValue RHS = Op.getOperand(1);
1015  SDValue Cmp = Op.getOperand(2);
1016  ISD::CondCode CC = cast<CondCodeSDNode>(Cmp)->get();
1017 
1018  EVT VT = Op.getValueType();
1019  EVT LHSVT = LHS.getValueType();
1020  EVT RHSVT = RHS.getValueType();
1021 
1022  if (LHSVT == MVT::v2i16) {
1024  unsigned ExtOpc = ISD::isSignedIntSetCC(CC) ? ISD::SIGN_EXTEND
1025  : ISD::ZERO_EXTEND;
1026  SDValue LX = DAG.getNode(ExtOpc, dl, MVT::v2i32, LHS);
1027  SDValue RX = DAG.getNode(ExtOpc, dl, MVT::v2i32, RHS);
1028  SDValue SC = DAG.getNode(ISD::SETCC, dl, MVT::v2i1, LX, RX, Cmp);
1029  return SC;
1030  }
1031 
1032  // Treat all other vector types as legal.
1033  if (VT.isVector())
1034  return Op;
1035 
1036  // Equals and not equals should use sign-extend, not zero-extend, since
1037  // we can represent small negative values in the compare instructions.
1038  // The LLVM default is to use zero-extend arbitrarily in these cases.
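  // For instance (illustrative), with i8 operands an equality compare against
  // -1 would otherwise compare the zero-extended value against 255;
  // sign-extending both sides to i32 keeps the constant representable as a
  // small signed immediate in the compare instruction.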
1039  if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
1040  (RHSVT == MVT::i8 || RHSVT == MVT::i16) &&
1041  (LHSVT == MVT::i8 || LHSVT == MVT::i16)) {
1043  if (C && C->getAPIntValue().isNegative()) {
1044  LHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, LHS);
1045  RHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, RHS);
1046  return DAG.getNode(ISD::SETCC, dl, Op.getValueType(),
1047  LHS, RHS, Op.getOperand(2));
1048  }
1049  if (isSExtFree(LHS) || isSExtFree(RHS)) {
1050  LHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, LHS);
1051  RHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, RHS);
1052  return DAG.getNode(ISD::SETCC, dl, Op.getValueType(),
1053  LHS, RHS, Op.getOperand(2));
1054  }
1055  }
1056  return SDValue();
1057 }
1058 
1060  const {
1061  SDValue PredOp = Op.getOperand(0);
1062  SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
1063  EVT OpVT = Op1.getValueType();
1064  SDLoc DL(Op);
1065 
1066  if (OpVT == MVT::v2i16) {
1067  SDValue X1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i32, Op1);
1068  SDValue X2 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i32, Op2);
1069  SDValue SL = DAG.getNode(ISD::VSELECT, DL, MVT::v2i32, PredOp, X1, X2);
1070  SDValue TR = DAG.getNode(ISD::TRUNCATE, DL, MVT::v2i16, SL);
1071  return TR;
1072  }
1073 
1074  return SDValue();
1075 }
1076 
1077 // Handle only specific vector loads.
1079  EVT VT = Op.getValueType();
1080  SDLoc DL(Op);
1081  LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
1082  SDValue Chain = LoadNode->getChain();
1083  SDValue Ptr = Op.getOperand(1);
1084  SDValue LoweredLoad;
1085  SDValue Result;
1086  SDValue Base = LoadNode->getBasePtr();
1087  ISD::LoadExtType Ext = LoadNode->getExtensionType();
1088  unsigned Alignment = LoadNode->getAlignment();
1089  SDValue LoadChain;
1090 
1091  if(Ext == ISD::NON_EXTLOAD)
1092  Ext = ISD::ZEXTLOAD;
1093 
1094  if (VT == MVT::v4i16) {
1095  if (Alignment == 2) {
1096  SDValue Loads[4];
1097  // Base load.
1098  Loads[0] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Base,
1099  LoadNode->getPointerInfo(), MVT::i16,
1100  LoadNode->isVolatile(),
1101  LoadNode->isNonTemporal(),
1102  LoadNode->isInvariant(),
1103  Alignment);
1104  // Base+2 load.
1105  SDValue Increment = DAG.getConstant(2, DL, MVT::i32);
1106  Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
1107  Loads[1] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
1108  LoadNode->getPointerInfo(), MVT::i16,
1109  LoadNode->isVolatile(),
1110  LoadNode->isNonTemporal(),
1111  LoadNode->isInvariant(),
1112  Alignment);
1113  // SHL 16, then OR base and base+2.
1114  SDValue ShiftAmount = DAG.getConstant(16, DL, MVT::i32);
1115  SDValue Tmp1 = DAG.getNode(ISD::SHL, DL, MVT::i32, Loads[1], ShiftAmount);
1116  SDValue Tmp2 = DAG.getNode(ISD::OR, DL, MVT::i32, Tmp1, Loads[0]);
1117  // Base + 4.
1118  Increment = DAG.getConstant(4, DL, MVT::i32);
1119  Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
1120  Loads[2] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
1121  LoadNode->getPointerInfo(), MVT::i16,
1122  LoadNode->isVolatile(),
1123  LoadNode->isNonTemporal(),
1124  LoadNode->isInvariant(),
1125  Alignment);
1126  // Base + 6.
1127  Increment = DAG.getConstant(6, DL, MVT::i32);
1128  Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
1129  Loads[3] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
1130  LoadNode->getPointerInfo(), MVT::i16,
1131  LoadNode->isVolatile(),
1132  LoadNode->isNonTemporal(),
1133  LoadNode->isInvariant(),
1134  Alignment);
1135  // SHL 16, then OR base+4 and base+6.
1136  Tmp1 = DAG.getNode(ISD::SHL, DL, MVT::i32, Loads[3], ShiftAmount);
1137  SDValue Tmp4 = DAG.getNode(ISD::OR, DL, MVT::i32, Tmp1, Loads[2]);
1138  // Combine to i64. This could be optimised out later if we can
1139  // affect reg allocation of this code.
1140  Result = DAG.getNode(HexagonISD::COMBINE, DL, MVT::i64, Tmp4, Tmp2);
1141  LoadChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
1142  Loads[0].getValue(1), Loads[1].getValue(1),
1143  Loads[2].getValue(1), Loads[3].getValue(1));
1144  } else {
1145  // Perform default type expansion.
1146  Result = DAG.getLoad(MVT::i64, DL, Chain, Ptr, LoadNode->getPointerInfo(),
1147  LoadNode->isVolatile(), LoadNode->isNonTemporal(),
1148  LoadNode->isInvariant(), LoadNode->getAlignment());
1149  LoadChain = Result.getValue(1);
1150  }
1151  } else
1152  llvm_unreachable("Custom lowering unsupported load");
1153 
1154  Result = DAG.getNode(ISD::BITCAST, DL, VT, Result);
1155  // Since we pretend to lower a load, we need the original chain
1156  // info attached to the result.
1157  SDValue Ops[] = { Result, LoadChain };
1158 
1159  return DAG.getMergeValues(Ops, DL);
1160 }
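// Summary of the v4i16 path above with an example (illustrative): a v4i16
// load that is only 2-byte aligned is split into four i16 extending loads at
// offsets 0, 2, 4 and 6; each pair is merged with SHL/OR into an i32, the
// two halves are combined into an i64, and the result is bitcast back to
// v4i16 with a TokenFactor carrying all four load chains.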
1161 
1162 
1163 SDValue
1165  EVT ValTy = Op.getValueType();
1166  SDLoc dl(Op);
1167  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
1168  SDValue Res;
1169  if (CP->isMachineConstantPoolEntry())
1170  Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), ValTy,
1171  CP->getAlignment());
1172  else
1173  Res = DAG.getTargetConstantPool(CP->getConstVal(), ValTy,
1174  CP->getAlignment());
1175  return DAG.getNode(HexagonISD::CP, dl, ValTy, Res);
1176 }
1177 
1178 SDValue
1180  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1181  MachineFunction &MF = DAG.getMachineFunction();
1182  MachineFrameInfo &MFI = *MF.getFrameInfo();
1183  MFI.setReturnAddressIsTaken(true);
1184 
1186  return SDValue();
1187 
1188  EVT VT = Op.getValueType();
1189  SDLoc dl(Op);
1190  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1191  if (Depth) {
1192  SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
1193  SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
1194  return DAG.getLoad(VT, dl, DAG.getEntryNode(),
1195  DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
1196  MachinePointerInfo(), false, false, false, 0);
1197  }
1198 
1199  // Return LR, which contains the return address. Mark it an implicit live-in.
1200  unsigned Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
1201  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
1202 }
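// For example (illustrative), __builtin_return_address(0) becomes a copy
// from the return-address register made live-in above, while a non-zero
// depth is approximated by loading from the previous frame pointer plus 4,
// i.e. from where the prologue is expected to have saved LR.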
1203 
1204 SDValue
1206  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1208  MFI.setFrameAddressIsTaken(true);
1209 
1210  EVT VT = Op.getValueType();
1211  SDLoc dl(Op);
1212  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1213  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
1214  HRI.getFrameRegister(), VT);
1215  while (Depth--)
1216  FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
1218  false, false, false, 0);
1219  return FrameAddr;
1220 }
1221 
1223  SelectionDAG& DAG) const {
1224  SDLoc dl(Op);
1225  return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
1226 }
1227 
1228 
1230  SelectionDAG &DAG) const {
1231  SDValue Result;
1232  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1233  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
1234  SDLoc dl(Op);
1235  auto PtrVT = getPointerTy(DAG.getDataLayout());
1236  Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
1237 
1238  const HexagonTargetObjectFile *TLOF =
1239  static_cast<const HexagonTargetObjectFile *>(
1241  if (TLOF->IsGlobalInSmallSection(GV, getTargetMachine())) {
1242  return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, Result);
1243  }
1244 
1245  return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, Result);
1246 }
1247 
1248 // Specifies that for loads and stores VT can be promoted to PromotedLdStVT.
1249 void HexagonTargetLowering::promoteLdStType(EVT VT, EVT PromotedLdStVT) {
1250  if (VT != PromotedLdStVT) {
1253  PromotedLdStVT.getSimpleVT());
1254 
1257  PromotedLdStVT.getSimpleVT());
1258  }
1259 }
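// Example of the promotion set up above (illustrative): after
// promoteLdStType(MVT::v4i8, MVT::i32), a load or store of a v4i8 value is
// selected as an ordinary 32-bit memory access and the vector is simply
// reinterpreted in an integer register; no per-element memory operations are
// generated.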
1260 
1261 SDValue
1263  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1264  SDValue BA_SD = DAG.getTargetBlockAddress(BA, MVT::i32);
1265  SDLoc dl(Op);
1266  return DAG.getNode(HexagonISD::CONST32_GP, dl,
1267  getPointerTy(DAG.getDataLayout()), BA_SD);
1268 }
1269 
1270 //===----------------------------------------------------------------------===//
1271 // TargetLowering Implementation
1272 //===----------------------------------------------------------------------===//
1273 
1275  const HexagonSubtarget &STI)
1276  : TargetLowering(TM), HTM(static_cast<const HexagonTargetMachine&>(TM)),
1277  Subtarget(STI) {
1278  bool IsV4 = !Subtarget.hasV5TOps();
1279  auto &HRI = *Subtarget.getRegisterInfo();
1280 
1284  setInsertFencesForAtomic(false);
1285  setExceptionPointerRegister(Hexagon::R0);
1286  setExceptionSelectorRegister(Hexagon::R1);
1287  setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
1288 
1291  else
1293 
1294  // Limits for inline expansion of memcpy/memmove
1301 
1302  //
1303  // Set up register classes.
1304  //
1305 
1306  addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
1307  addRegisterClass(MVT::v2i1, &Hexagon::PredRegsRegClass); // bbbbaaaa
1308  addRegisterClass(MVT::v4i1, &Hexagon::PredRegsRegClass); // ddccbbaa
1309  addRegisterClass(MVT::v8i1, &Hexagon::PredRegsRegClass); // hgfedcba
1310  addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
1311  addRegisterClass(MVT::v4i8, &Hexagon::IntRegsRegClass);
1312  addRegisterClass(MVT::v2i16, &Hexagon::IntRegsRegClass);
1313  addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
1314  addRegisterClass(MVT::v8i8, &Hexagon::DoubleRegsRegClass);
1315  addRegisterClass(MVT::v4i16, &Hexagon::DoubleRegsRegClass);
1316  addRegisterClass(MVT::v2i32, &Hexagon::DoubleRegsRegClass);
1317 
1318  if (Subtarget.hasV5TOps()) {
1319  addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
1320  addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
1321  }
1322 
1323  //
1324  // Handling of scalar operations.
1325  //
1326  // All operations default to "legal", except:
1327  // - indexed loads and stores (pre-/post-incremented),
1328  // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS,
1329  // ConstantFP, DEBUGTRAP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN,
1330  // FLOG, FLOG2, FLOG10, FMAXNUM, FMINNUM, FNEARBYINT, FRINT, FROUND, TRAP,
1331  // FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG, ZERO_EXTEND_VECTOR_INREG,
1332  // which default to "expand" for at least one type.
1333 
1334  // Misc operations.
1335  setOperationAction(ISD::ConstantFP, MVT::f32, Legal); // Default: expand
1336  setOperationAction(ISD::ConstantFP, MVT::f64, Legal); // Default: expand
1337 
1344 
1345  // Custom legalize GlobalAddress nodes into CONST32.
1349 
1350  // Hexagon needs to optimize cases with negative constants.
1353 
1354  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1358 
1362 
1363  if (EmitJumpTables)
1365  else
1367  // Increase jump tables cutover to 5, was 4.
1369 
1370  // Hexagon has instructions for add/sub with carry. The problem with
1371  // modeling these instructions is that they produce 2 results: Rdd and Px.
1372  // To model the update of Px, we will have to use Defs[p0..p3] which will
1373  // cause any predicate live range to spill. So, we pretend we don't have
1374  // these instructions.
1391 
1392  // Only add and sub that detect overflow are the saturating ones.
1393  for (MVT VT : MVT::integer_valuetypes()) {
1398  }
1399 
1408 
1409  // In V5, popcount can count # of 1s in i64 but returns i32.
1410  // On V4 it will be expanded (set later).
1415 
1416  // We custom lower i64 to i64 mul, so that it is not considered as a legal
1417  // operation. There is a pattern that will match i64 mul and transform it
1418  // to a series of instructions.
1421 
1422  for (unsigned IntExpOp :
1426  setOperationAction(IntExpOp, MVT::i32, Expand);
1427  setOperationAction(IntExpOp, MVT::i64, Expand);
1428  }
1429 
1430  for (unsigned FPExpOp :
1433  setOperationAction(FPExpOp, MVT::f32, Expand);
1434  setOperationAction(FPExpOp, MVT::f64, Expand);
1435  }
1436 
1437  // No extending loads from i32.
1438  for (MVT VT : MVT::integer_valuetypes()) {
1442  }
1443  // Turn FP truncstore into trunc + store.
1445  // Turn FP extload into load/fextend.
1446  for (MVT VT : MVT::fp_valuetypes())
1448 
1449  // Expand BR_CC and SELECT_CC for all integer and fp types.
1450  for (MVT VT : MVT::integer_valuetypes()) {
1453  }
1454  for (MVT VT : MVT::fp_valuetypes()) {
1457  }
1459 
1460  //
1461  // Handling of vector operations.
1462  //
1463 
1464  // Custom lower v4i16 load only. Let v4i16 store to be
1465  // promoted for now.
1466  promoteLdStType(MVT::v4i8, MVT::i32);
1467  promoteLdStType(MVT::v2i16, MVT::i32);
1468  promoteLdStType(MVT::v8i8, MVT::i64);
1469  promoteLdStType(MVT::v2i32, MVT::i64);
1470 
1475 
1476  // Set the action for vector operations to "expand", then override it with
1477  // either "custom" or "legal" for specific cases.
1478  static unsigned VectExpOps[] = {
1479  // Integer arithmetic:
1484  // Logical/bit:
1488  // Floating point arithmetic/math functions:
1495  // Misc:
1497  // Vector:
1502  };
1503 
1504  for (MVT VT : MVT::vector_valuetypes()) {
1505  for (unsigned VectExpOp : VectExpOps)
1506  setOperationAction(VectExpOp, VT, Expand);
1507 
1508  // Expand all extended loads and truncating stores:
1509  for (MVT TargetVT : MVT::vector_valuetypes()) {
1510  setLoadExtAction(ISD::EXTLOAD, TargetVT, VT, Expand);
1511  setTruncStoreAction(VT, TargetVT, Expand);
1512  }
1513 
1517  }
1518 
1519  // Types natively supported:
1520  for (MVT NativeVT : {MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v32i1, MVT::v64i1,
1522  MVT::v2i32, MVT::v1i64}) {
1529 
1530  setOperationAction(ISD::ADD, NativeVT, Legal);
1531  setOperationAction(ISD::SUB, NativeVT, Legal);
1532  setOperationAction(ISD::MUL, NativeVT, Legal);
1533  setOperationAction(ISD::AND, NativeVT, Legal);
1534  setOperationAction(ISD::OR, NativeVT, Legal);
1535  setOperationAction(ISD::XOR, NativeVT, Legal);
1536  }
1537 
1542 
1543  // Subtarget-specific operation actions.
1544  //
1545  if (Subtarget.hasV5TOps()) {
1550 
1563 
1564  } else { // V4
1574 
1579 
1580  // Expand these operations for both f32 and f64:
1581  for (unsigned FPExpOpV4 :
1583  setOperationAction(FPExpOpV4, MVT::f32, Expand);
1584  setOperationAction(FPExpOpV4, MVT::f64, Expand);
1585  }
1586 
1587  for (ISD::CondCode FPExpCCV4 :
1589  ISD::SETUO, ISD::SETO}) {
1590  setCondCodeAction(FPExpCCV4, MVT::f32, Expand);
1591  setCondCodeAction(FPExpCCV4, MVT::f64, Expand);
1592  }
1593  }
1594 
1595  // Handling of indexed loads/stores: default is "expand".
1596  //
1597  for (MVT LSXTy : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
1600  }
1601 
1603 
1604  //
1605  // Library calls for unsupported operations
1606  //
1607  bool FastMath = EnableFastMath;
1608 
1609  setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
1610  setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
1611  setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
1612  setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
1613  setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1614  setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
1615  setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
1616  setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
1617 
1618  setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
1619  setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
1620  setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
1621  setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
1622  setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
1623  setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
1624 
1625  if (IsV4) {
1626  // Handle single-precision floating point operations on V4.
1627  if (FastMath) {
1628  setLibcallName(RTLIB::ADD_F32, "__hexagon_fast_addsf3");
1629  setLibcallName(RTLIB::SUB_F32, "__hexagon_fast_subsf3");
1630  setLibcallName(RTLIB::MUL_F32, "__hexagon_fast_mulsf3");
1631  setLibcallName(RTLIB::OGT_F32, "__hexagon_fast_gtsf2");
1632  setLibcallName(RTLIB::OLT_F32, "__hexagon_fast_ltsf2");
1633  // Double-precision compares.
1634  setLibcallName(RTLIB::OGT_F64, "__hexagon_fast_gtdf2");
1635  setLibcallName(RTLIB::OLT_F64, "__hexagon_fast_ltdf2");
1636  } else {
1637  setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
1638  setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
1639  setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
1640  setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
1641  setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
1642  // Double-precision compares.
1643  setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
1644  setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
1645  }
1646  }
1647 
1648  // This is the only fast library function for sqrtd.
1649  if (FastMath)
1650  setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2");
1651 
1652  // Prefix is: nothing for "slow-math",
1653  // "fast2_" for V4 fast-math and V5+ fast-math double-precision
1654  // (actually, keep fast-math and fast-math2 separate for now)
1655  if (FastMath) {
1656  setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3");
1657  setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3");
1658  setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3");
1659  setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3");
1660  // Calling __hexagon_fast2_divsf3 with fast-math on V5 (ok).
1661  setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3");
1662  } else {
1663  setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
1664  setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
1665  setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
1666  setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
1667  setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
1668  }
1669 
1670  if (Subtarget.hasV5TOps()) {
1671  if (FastMath)
1672  setLibcallName(RTLIB::SQRT_F32, "__hexagon_fast2_sqrtf");
1673  else
1674  setLibcallName(RTLIB::SQRT_F32, "__hexagon_sqrtf");
1675  } else {
1676  // V4
1677  setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
1678  setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
1679  setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
1680  setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
1681  setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
1682  setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
1683  setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
1684  setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
1685  setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
1686  setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
1687  setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
1688  setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
1689  setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
1690  setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
1691  setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
1692  setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
1693  setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
1694  setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
1695  setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
1696  setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
1697  setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
1698  setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
1699  setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
1700  setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
1701  setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
1702  setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
1703  setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
1704  setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
1705  setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
1706  setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
1707  }
1708 
1709  // These cause problems when the shift amount is non-constant.
1710  setLibcallName(RTLIB::SHL_I128, nullptr);
1711  setLibcallName(RTLIB::SRL_I128, nullptr);
1712  setLibcallName(RTLIB::SRA_I128, nullptr);
1713 }
1714 
1715 
1716 const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
1717  switch ((HexagonISD::NodeType)Opcode) {
1718  case HexagonISD::ALLOCA: return "HexagonISD::ALLOCA";
1719  case HexagonISD::ARGEXTEND: return "HexagonISD::ARGEXTEND";
1720  case HexagonISD::AT_GOT: return "HexagonISD::AT_GOT";
1721  case HexagonISD::AT_PCREL: return "HexagonISD::AT_PCREL";
1722  case HexagonISD::BARRIER: return "HexagonISD::BARRIER";
1723  case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
1724  case HexagonISD::CALLR: return "HexagonISD::CALLR";
1725  case HexagonISD::CALLv3nr: return "HexagonISD::CALLv3nr";
1726  case HexagonISD::CALLv3: return "HexagonISD::CALLv3";
1727  case HexagonISD::COMBINE: return "HexagonISD::COMBINE";
1728  case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
1729  case HexagonISD::CONST32: return "HexagonISD::CONST32";
1730  case HexagonISD::CP: return "HexagonISD::CP";
1731  case HexagonISD::DCFETCH: return "HexagonISD::DCFETCH";
1732  case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN";
1733  case HexagonISD::EXTRACTU: return "HexagonISD::EXTRACTU";
1734  case HexagonISD::EXTRACTURP: return "HexagonISD::EXTRACTURP";
1735  case HexagonISD::FCONST32: return "HexagonISD::FCONST32";
1736  case HexagonISD::INSERT: return "HexagonISD::INSERT";
1737  case HexagonISD::INSERTRP: return "HexagonISD::INSERTRP";
1738  case HexagonISD::JT: return "HexagonISD::JT";
1739  case HexagonISD::PACKHL: return "HexagonISD::PACKHL";
1740  case HexagonISD::PIC_ADD: return "HexagonISD::PIC_ADD";
1741  case HexagonISD::POPCOUNT: return "HexagonISD::POPCOUNT";
1742  case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
1743  case HexagonISD::SHUFFEB: return "HexagonISD::SHUFFEB";
1744  case HexagonISD::SHUFFEH: return "HexagonISD::SHUFFEH";
1745  case HexagonISD::SHUFFOB: return "HexagonISD::SHUFFOB";
1746  case HexagonISD::SHUFFOH: return "HexagonISD::SHUFFOH";
1747  case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
1748  case HexagonISD::VCMPBEQ: return "HexagonISD::VCMPBEQ";
1749  case HexagonISD::VCMPBGT: return "HexagonISD::VCMPBGT";
1750  case HexagonISD::VCMPBGTU: return "HexagonISD::VCMPBGTU";
1751  case HexagonISD::VCMPHEQ: return "HexagonISD::VCMPHEQ";
1752  case HexagonISD::VCMPHGT: return "HexagonISD::VCMPHGT";
1753  case HexagonISD::VCMPHGTU: return "HexagonISD::VCMPHGTU";
1754  case HexagonISD::VCMPWEQ: return "HexagonISD::VCMPWEQ";
1755  case HexagonISD::VCMPWGT: return "HexagonISD::VCMPWGT";
1756  case HexagonISD::VCMPWGTU: return "HexagonISD::VCMPWGTU";
1757  case HexagonISD::VSHLH: return "HexagonISD::VSHLH";
1758  case HexagonISD::VSHLW: return "HexagonISD::VSHLW";
1759  case HexagonISD::VSPLATB: return "HexagonISD::VSPLTB";
1760  case HexagonISD::VSPLATH: return "HexagonISD::VSPLATH";
1761  case HexagonISD::VSRAH: return "HexagonISD::VSRAH";
1762  case HexagonISD::VSRAW: return "HexagonISD::VSRAW";
1763  case HexagonISD::VSRLH: return "HexagonISD::VSRLH";
1764  case HexagonISD::VSRLW: return "HexagonISD::VSRLW";
1765  case HexagonISD::VSXTBH: return "HexagonISD::VSXTBH";
1766  case HexagonISD::VSXTBW: return "HexagonISD::VSXTBW";
1767  case HexagonISD::OP_END: break;
1768  }
1769  return nullptr;
1770 }
1771 
1772 bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
1773  EVT MTy1 = EVT::getEVT(Ty1);
1774  EVT MTy2 = EVT::getEVT(Ty2);
1775  if (!MTy1.isSimple() || !MTy2.isSimple())
1776  return false;
1777  return (MTy1.getSimpleVT() == MVT::i64) && (MTy2.getSimpleVT() == MVT::i32);
1778 }
1779 
1780 bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
1781  if (!VT1.isSimple() || !VT2.isSimple())
1782  return false;
1783  return (VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32);
1784 }
1785 
1786 // shouldExpandBuildVectorWithShuffles
1787 // Should we expand the build vector with shuffles?
1788 bool
1789 HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT,
1790  unsigned DefinedValues) const {
1791 
1792  // Hexagon vector shuffle operates on element sizes of bytes or halfwords
1793  EVT EltVT = VT.getVectorElementType();
1794  int EltBits = EltVT.getSizeInBits();
1795  if ((EltBits != 8) && (EltBits != 16))
1796  return false;
1797 
1798  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
1799 }
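
The hook above simply restricts shuffle-based BUILD_VECTOR expansion to the element widths Hexagon's shuffle instructions handle. A standalone restatement of that test (the helper name is illustrative, not part of this file):

static bool shuffleExpandEligible(unsigned EltBits) {
  // Hexagon vector shuffles work on byte or halfword elements only.
  return EltBits == 8 || EltBits == 16;
}
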
1800 
1801 // LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3). V1 and
1802 // V2 are the two vectors to select data from, V3 is the permutation.
1803 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
1804  const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
1805  SDValue V1 = Op.getOperand(0);
1806  SDValue V2 = Op.getOperand(1);
1807  SDLoc dl(Op);
1808  EVT VT = Op.getValueType();
1809 
1810  if (V2.getOpcode() == ISD::UNDEF)
1811  V2 = V1;
1812 
1813  if (SVN->isSplat()) {
1814  int Lane = SVN->getSplatIndex();
1815  if (Lane == -1) Lane = 0;
1816 
1817  // Test if V1 is a SCALAR_TO_VECTOR.
1818  if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR)
1819  return createSplat(DAG, dl, VT, V1.getOperand(0));
1820 
1821  // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
1822  // (and probably will turn into a SCALAR_TO_VECTOR once legalization
1823  // reaches it).
1824  if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
1825  !isa<ConstantSDNode>(V1.getOperand(0))) {
1826  bool IsScalarToVector = true;
1827  for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
1828  if (V1.getOperand(i).getOpcode() != ISD::UNDEF) {
1829  IsScalarToVector = false;
1830  break;
1831  }
1832  if (IsScalarToVector)
1833  return createSplat(DAG, dl, VT, V1.getOperand(0));
1834  }
1835  return createSplat(DAG, dl, VT, DAG.getConstant(Lane, dl, MVT::i32));
1836  }
1837 
1838  // FIXME: We need to support more general vector shuffles. See the
1839  // comment below, taken from the ARM backend, about handling the
1840  // general case. For now, let the default expansion handle these.
1841  return SDValue();
1842 
1843  // If the shuffle is not directly supported and it has 4 elements, use
1844  // the PerfectShuffle-generated table to synthesize it from other shuffles.
1845 }
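
LowerVECTOR_SHUFFLE above only handles splat masks, where every defined lane selects the same input element. A minimal standalone sketch of that splat test in plain C++ (the helper name and use of std::vector are illustrative, not the LLVM types used above):

#include <vector>

// Returns the splat lane if every defined mask entry selects the same input
// lane (-1 entries mean "don't care"), or -1 if the mask is not a splat.
static int getSplatLane(const std::vector<int> &Mask) {
  int Lane = -1;
  for (int M : Mask) {
    if (M < 0)
      continue;                 // undef lane, ignore it
    if (Lane < 0)
      Lane = M;                 // first defined lane fixes the candidate
    else if (M != Lane)
      return -1;                // two different lanes: not a splat
  }
  return Lane < 0 ? 0 : Lane;   // an all-undef mask is treated as lane 0
}
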
1846 
1847 // If BUILD_VECTOR has same base element repeated several times,
1848 // report true.
1849 static bool isCommonSplatElement(BuildVectorSDNode *BVN) {
1850  unsigned NElts = BVN->getNumOperands();
1851  SDValue V0 = BVN->getOperand(0);
1852 
1853  for (unsigned i = 1, e = NElts; i != e; ++i) {
1854  if (BVN->getOperand(i) != V0)
1855  return false;
1856  }
1857  return true;
1858 }
1859 
1860 // LowerVECTOR_SHIFT - Lower a vector shift. Try to convert
1861 // <VT> = SHL/SRA/SRL <VT> by <VT> to Hexagon specific
1862 // <VT> = SHL/SRA/SRL <VT> by <IT/i32>.
1863 static SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) {
1864  BuildVectorSDNode *BVN = nullptr;
1865  SDValue V1 = Op.getOperand(0);
1866  SDValue V2 = Op.getOperand(1);
1867  SDValue V3;
1868  SDLoc dl(Op);
1869  EVT VT = Op.getValueType();
1870 
1871  if ((BVN = dyn_cast<BuildVectorSDNode>(V1.getNode())) &&
1872  isCommonSplatElement(BVN))
1873  V3 = V2;
1874  else if ((BVN = dyn_cast<BuildVectorSDNode>(V2.getNode())) &&
1875  isCommonSplatElement(BVN))
1876  V3 = V1;
1877  else
1878  return SDValue();
1879 
1880  SDValue CommonSplat = BVN->getOperand(0);
1881  SDValue Result;
1882 
1883  if (VT.getSimpleVT() == MVT::v4i16) {
1884  switch (Op.getOpcode()) {
1885  case ISD::SRA:
1886  Result = DAG.getNode(HexagonISD::VSRAH, dl, VT, V3, CommonSplat);
1887  break;
1888  case ISD::SHL:
1889  Result = DAG.getNode(HexagonISD::VSHLH, dl, VT, V3, CommonSplat);
1890  break;
1891  case ISD::SRL:
1892  Result = DAG.getNode(HexagonISD::VSRLH, dl, VT, V3, CommonSplat);
1893  break;
1894  default:
1895  return SDValue();
1896  }
1897  } else if (VT.getSimpleVT() == MVT::v2i32) {
1898  switch (Op.getOpcode()) {
1899  case ISD::SRA:
1900  Result = DAG.getNode(HexagonISD::VSRAW, dl, VT, V3, CommonSplat);
1901  break;
1902  case ISD::SHL:
1903  Result = DAG.getNode(HexagonISD::VSHLW, dl, VT, V3, CommonSplat);
1904  break;
1905  case ISD::SRL:
1906  Result = DAG.getNode(HexagonISD::VSRLW, dl, VT, V3, CommonSplat);
1907  break;
1908  default:
1909  return SDValue();
1910  }
1911  } else {
1912  return SDValue();
1913  }
1914 
1915  return DAG.getNode(ISD::BITCAST, dl, VT, Result);
1916 }
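
The lowering above rewrites a vector shift whose shift-amount operand is a splat BUILD_VECTOR (for example, SRA of a v4i16 by <3, 3, 3, 3>) into a single Hexagon node that shifts every lane by the one common scalar. A rough scalar model of what such a halfword arithmetic shift computes, assuming little-endian lane packing in a 64-bit register and a shift amount below 16 (the helper name is illustrative only):

#include <cstdint>

static uint64_t arithShiftHalfwords(uint64_t Vec, unsigned Amt) {
  uint64_t Out = 0;
  for (int Lane = 0; Lane < 4; ++Lane) {
    // Pull out one 16-bit lane, shift it arithmetically, put it back.
    int16_t Elt = static_cast<int16_t>(Vec >> (16 * Lane));
    uint16_t Shifted = static_cast<uint16_t>(Elt >> Amt);
    Out |= static_cast<uint64_t>(Shifted) << (16 * Lane);
  }
  return Out;
}
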
1917 
1918 SDValue
1919 HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
1920  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
1921  SDLoc dl(Op);
1922  EVT VT = Op.getValueType();
1923 
1924  unsigned Size = VT.getSizeInBits();
1925 
1926  // A vector larger than 64 bits cannot be represented in Hexagon.
1927  // Expand will split the vector.
1928  if (Size > 64)
1929  return SDValue();
1930 
1931  APInt APSplatBits, APSplatUndef;
1932  unsigned SplatBitSize;
1933  bool HasAnyUndefs;
1934  unsigned NElts = BVN->getNumOperands();
1935 
1936  // Try to generate a SPLAT instruction.
1937  if ((VT.getSimpleVT() == MVT::v4i8 || VT.getSimpleVT() == MVT::v4i16) &&
1938  (BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
1939  HasAnyUndefs, 0, true) && SplatBitSize <= 16)) {
1940  unsigned SplatBits = APSplatBits.getZExtValue();
1941  int32_t SextVal = ((int32_t) (SplatBits << (32 - SplatBitSize)) >>
1942  (32 - SplatBitSize));
1943  return createSplat(DAG, dl, VT, DAG.getConstant(SextVal, dl, MVT::i32));
1944  }
1945 
1946  // Try to generate COMBINE to build v2i32 vectors.
1947  if (VT.getSimpleVT() == MVT::v2i32) {
1948  SDValue V0 = BVN->getOperand(0);
1949  SDValue V1 = BVN->getOperand(1);
1950 
1951  if (V0.getOpcode() == ISD::UNDEF)
1952  V0 = DAG.getConstant(0, dl, MVT::i32);
1953  if (V1.getOpcode() == ISD::UNDEF)
1954  V1 = DAG.getConstant(0, dl, MVT::i32);
1955 
1956  ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(V0);
1957  ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(V1);
1958  // If the element isn't a constant, it is in a register:
1959  // generate a COMBINE Register Register instruction.
1960  if (!C0 || !C1)
1961  return DAG.getNode(HexagonISD::COMBINE, dl, VT, V1, V0);
1962 
1963  // If one of the operands is an 8 bit integer constant, generate
1964  // a COMBINE Immediate Immediate instruction.
1965  if (isInt<8>(C0->getSExtValue()) ||
1966  isInt<8>(C1->getSExtValue()))
1967  return DAG.getNode(HexagonISD::COMBINE, dl, VT, V1, V0);
1968  }
1969 
1970  // Try to generate a S2_packhl to build v2i16 vectors.
1971  if (VT.getSimpleVT() == MVT::v2i16) {
1972  for (unsigned i = 0, e = NElts; i != e; ++i) {
1973  if (BVN->getOperand(i).getOpcode() == ISD::UNDEF)
1974  continue;
1975  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(BVN->getOperand(i));
1976  // If the element isn't a constant, it is in a register:
1977  // generate a S2_packhl instruction.
1978  if (!Cst) {
1979  SDValue pack = DAG.getNode(HexagonISD::PACKHL, dl, MVT::v4i16,
1980  BVN->getOperand(1), BVN->getOperand(0));
1981 
1982  return DAG.getTargetExtractSubreg(Hexagon::subreg_loreg, dl, MVT::v2i16,
1983  pack);
1984  }
1985  }
1986  }
1987 
1988  // In the general case, generate a CONST32 or a CONST64 for constant vectors,
1989  // and insert_vector_elt for all the other cases.
1990  uint64_t Res = 0;
1991  unsigned EltSize = Size / NElts;
1992  SDValue ConstVal;
1993  uint64_t Mask = ~uint64_t(0ULL) >> (64 - EltSize);
1994  bool HasNonConstantElements = false;
1995 
1996  for (unsigned i = 0, e = NElts; i != e; ++i) {
1997  // LLVM's BUILD_VECTOR operands are in Little Endian mode, whereas Hexagon's
1998  // combine, const64, etc. are Big Endian.
1999  unsigned OpIdx = NElts - i - 1;
2000  SDValue Operand = BVN->getOperand(OpIdx);
2001  if (Operand.getOpcode() == ISD::UNDEF)
2002  continue;
2003 
2004  int64_t Val = 0;
2005  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Operand))
2006  Val = Cst->getSExtValue();
2007  else
2008  HasNonConstantElements = true;
2009 
2010  Val &= Mask;
2011  Res = (Res << EltSize) | Val;
2012  }
2013 
2014  if (Size == 64)
2015  ConstVal = DAG.getConstant(Res, dl, MVT::i64);
2016  else
2017  ConstVal = DAG.getConstant(Res, dl, MVT::i32);
2018 
2019  // When there are non constant operands, add them with INSERT_VECTOR_ELT to
2020  // ConstVal, the constant part of the vector.
2021  if (HasNonConstantElements) {
2022  EVT EltVT = VT.getVectorElementType();
2023  SDValue Width = DAG.getConstant(EltVT.getSizeInBits(), dl, MVT::i64);
2024  SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
2025  DAG.getConstant(32, dl, MVT::i64));
2026 
2027  for (unsigned i = 0, e = NElts; i != e; ++i) {
2028  // LLVM's BUILD_VECTOR operands are in Little Endian mode, whereas Hexagon
2029  // is Big Endian.
2030  unsigned OpIdx = NElts - i - 1;
2031  SDValue Operand = BVN->getOperand(OpIdx);
2032  if (isa<ConstantSDNode>(Operand))
2033  // This operand is already in ConstVal.
2034  continue;
2035 
2036  if (VT.getSizeInBits() == 64 &&
2037  Operand.getValueType().getSizeInBits() == 32) {
2038  SDValue C = DAG.getConstant(0, dl, MVT::i32);
2039  Operand = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Operand);
2040  }
2041 
2042  SDValue Idx = DAG.getConstant(OpIdx, dl, MVT::i64);
2043  SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i64, Idx, Width);
2044  SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
2045  const SDValue Ops[] = {ConstVal, Operand, Combined};
2046 
2047  if (VT.getSizeInBits() == 32)
2048  ConstVal = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i32, Ops);
2049  else
2050  ConstVal = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i64, Ops);
2051  }
2052  }
2053 
2054  return DAG.getNode(ISD::BITCAST, dl, VT, ConstVal);
2055 }
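
The SPLAT path above sign-extends the splat constant by shifting it up so its sign bit reaches bit 31 and then shifting back down arithmetically. A small worked model of that step (the helper name is illustrative only):

#include <cassert>
#include <cstdint>

static int32_t signExtendSplat(uint32_t SplatBits, unsigned SplatBitSize) {
  assert(SplatBitSize >= 1 && SplatBitSize <= 32);
  // Same trick as above: left-shift to bit 31, arithmetic right-shift back.
  return static_cast<int32_t>(SplatBits << (32 - SplatBitSize)) >>
         (32 - SplatBitSize);
}

// Example: an 8-bit splat value of 0xFF sign-extends to -1, so the splat is
// materialized from the scalar -1: signExtendSplat(0xFF, 8) == -1.
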
2056 
2057 SDValue
2058 HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
2059  SelectionDAG &DAG) const {
2060  SDLoc dl(Op);
2061  EVT VT = Op.getValueType();
2062  unsigned NElts = Op.getNumOperands();
2063  SDValue Vec = Op.getOperand(0);
2064  EVT VecVT = Vec.getValueType();
2065  SDValue Width = DAG.getConstant(VecVT.getSizeInBits(), dl, MVT::i64);
2066  SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
2067  DAG.getConstant(32, dl, MVT::i64));
2068  SDValue ConstVal = DAG.getConstant(0, dl, MVT::i64);
2069 
2070  ConstantSDNode *W = dyn_cast<ConstantSDNode>(Width);
2071  ConstantSDNode *S = dyn_cast<ConstantSDNode>(Shifted);
2072 
2073  if ((VecVT.getSimpleVT() == MVT::v2i16) && (NElts == 2) && W && S) {
2074  if ((W->getZExtValue() == 32) && ((S->getZExtValue() >> 32) == 32)) {
2075  // We are trying to concat two v2i16 to a single v4i16.
2076  SDValue Vec0 = Op.getOperand(1);
2077  SDValue Combined = DAG.getNode(HexagonISD::COMBINE, dl, VT, Vec0, Vec);
2078  return DAG.getNode(ISD::BITCAST, dl, VT, Combined);
2079  }
2080  }
2081 
2082  if ((VecVT.getSimpleVT() == MVT::v4i8) && (NElts == 2) && W && S) {
2083  if ((W->getZExtValue() == 32) && ((S->getZExtValue() >> 32) == 32)) {
2084  // We are trying to concat two v4i8 to a single v8i8.
2085  SDValue Vec0 = Op.getOperand(1);
2086  SDValue Combined = DAG.getNode(HexagonISD::COMBINE, dl, VT, Vec0, Vec);
2087  return DAG.getNode(ISD::BITCAST, dl, VT, Combined);
2088  }
2089  }
2090 
2091  for (unsigned i = 0, e = NElts; i != e; ++i) {
2092  unsigned OpIdx = NElts - i - 1;
2093  SDValue Operand = Op.getOperand(OpIdx);
2094 
2095  if (VT.getSizeInBits() == 64 &&
2096  Operand.getValueType().getSizeInBits() == 32) {
2097  SDValue C = DAG.getConstant(0, dl, MVT::i32);
2098  Operand = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Operand);
2099  }
2100 
2101  SDValue Idx = DAG.getConstant(OpIdx, dl, MVT::i64);
2102  SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i64, Idx, Width);
2103  SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
2104  const SDValue Ops[] = {ConstVal, Operand, Combined};
2105 
2106  if (VT.getSizeInBits() == 32)
2107  ConstVal = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i32, Ops);
2108  else
2109  ConstVal = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i64, Ops);
2110  }
2111 
2112  return DAG.getNode(ISD::BITCAST, dl, VT, ConstVal);
2113 }
2114 
2115 SDValue
2116 HexagonTargetLowering::LowerEXTRACT_VECTOR(SDValue Op,
2117  SelectionDAG &DAG) const {
2118  EVT VT = Op.getValueType();
2119  int VTN = VT.isVector() ? VT.getVectorNumElements() : 1;
2120  SDLoc dl(Op);
2121  SDValue Idx = Op.getOperand(1);
2122  SDValue Vec = Op.getOperand(0);
2123  EVT VecVT = Vec.getValueType();
2124  EVT EltVT = VecVT.getVectorElementType();
2125  int EltSize = EltVT.getSizeInBits();
2126  SDValue Width = DAG.getConstant(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT ?
2127  EltSize : VTN * EltSize, dl, MVT::i64);
2128 
2129  // Constant element number.
2130  if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Idx)) {
2131  uint64_t X = CI->getZExtValue();
2132  SDValue Offset = DAG.getConstant(X * EltSize, dl, MVT::i32);
2133  const SDValue Ops[] = {Vec, Width, Offset};
2134 
2135  ConstantSDNode *CW = dyn_cast<ConstantSDNode>(Width);
2136  assert(CW && "Non constant width in LowerEXTRACT_VECTOR");
2137 
2138  SDValue N;
2139  MVT SVT = VecVT.getSimpleVT();
2140  uint64_t W = CW->getZExtValue();
2141 
2142  if (W == 32) {
2143  // Translate this node into EXTRACT_SUBREG.
2144  unsigned Subreg = (X == 0) ? Hexagon::subreg_loreg : 0;
2145 
2146  if (X == 0)
2147  Subreg = Hexagon::subreg_loreg;
2148  else if (SVT == MVT::v2i32 && X == 1)
2149  Subreg = Hexagon::subreg_hireg;
2150  else if (SVT == MVT::v4i16 && X == 2)
2151  Subreg = Hexagon::subreg_hireg;
2152  else if (SVT == MVT::v8i8 && X == 4)
2153  Subreg = Hexagon::subreg_hireg;
2154  else
2155  llvm_unreachable("Bad offset");
2156  N = DAG.getTargetExtractSubreg(Subreg, dl, MVT::i32, Vec);
2157 
2158  } else if (VecVT.getSizeInBits() == 32) {
2159  N = DAG.getNode(HexagonISD::EXTRACTU, dl, MVT::i32, Ops);
2160  } else {
2161  N = DAG.getNode(HexagonISD::EXTRACTU, dl, MVT::i64, Ops);
2162  if (VT.getSizeInBits() == 32)
2163  N = DAG.getTargetExtractSubreg(Hexagon::subreg_loreg, dl, MVT::i32, N);
2164  }
2165 
2166  return DAG.getNode(ISD::BITCAST, dl, VT, N);
2167  }
2168 
2169  // Variable element number.
2170  SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i32, Idx,
2171  DAG.getConstant(EltSize, dl, MVT::i32));
2172  SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
2173  DAG.getConstant(32, dl, MVT::i64));
2174  SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
2175 
2176  const SDValue Ops[] = {Vec, Combined};
2177 
2178  SDValue N;
2179  if (VecVT.getSizeInBits() == 32) {
2180  N = DAG.getNode(HexagonISD::EXTRACTURP, dl, MVT::i32, Ops);
2181  } else {
2182  N = DAG.getNode(HexagonISD::EXTRACTURP, dl, MVT::i64, Ops);
2183  if (VT.getSizeInBits() == 32)
2184  N = DAG.getTargetExtractSubreg(Hexagon::subreg_loreg, dl, MVT::i32, N);
2185  }
2186  return DAG.getNode(ISD::BITCAST, dl, VT, N);
2187 }
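
In the constant-index, 32-bit-wide case above, the extract degenerates into reading either the low or the high half of a 64-bit register pair. A plain C++ sketch of that choice (the enum and helper name are illustrative, not LLVM's subregister indices):

enum WhichHalf { LowHalf, HighHalf };

static WhichHalf halfForExtract(unsigned EltBits, unsigned ElemIndex) {
  unsigned BitOffset = EltBits * ElemIndex; // where the 32-bit chunk starts
  // The code above only accepts offsets of 0 (low subregister) or 32 (high
  // subregister); anything else hits llvm_unreachable.
  return BitOffset == 0 ? LowHalf : HighHalf;
}
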
2188 
2189 SDValue
2190 HexagonTargetLowering::LowerINSERT_VECTOR(SDValue Op,
2191  SelectionDAG &DAG) const {
2192  EVT VT = Op.getValueType();
2193  int VTN = VT.isVector() ? VT.getVectorNumElements() : 1;
2194  SDLoc dl(Op);
2195  SDValue Vec = Op.getOperand(0);
2196  SDValue Val = Op.getOperand(1);
2197  SDValue Idx = Op.getOperand(2);
2198  EVT VecVT = Vec.getValueType();
2199  EVT EltVT = VecVT.getVectorElementType();
2200  int EltSize = EltVT.getSizeInBits();
2201  SDValue Width = DAG.getConstant(Op.getOpcode() == ISD::INSERT_VECTOR_ELT ?
2202  EltSize : VTN * EltSize, dl, MVT::i64);
2203 
2204  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Idx)) {
2205  SDValue Offset = DAG.getConstant(C->getSExtValue() * EltSize, dl, MVT::i32);
2206  const SDValue Ops[] = {Vec, Val, Width, Offset};
2207 
2208  SDValue N;
2209  if (VT.getSizeInBits() == 32)
2210  N = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32, Ops);
2211  else
2212  N = DAG.getNode(HexagonISD::INSERT, dl, MVT::i64, Ops);
2213 
2214  return DAG.getNode(ISD::BITCAST, dl, VT, N);
2215  }
2216 
2217  // Variable element number.
2218  SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i32, Idx,
2219  DAG.getConstant(EltSize, dl, MVT::i32));
2220  SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
2221  DAG.getConstant(32, dl, MVT::i64));
2222  SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
2223 
2224  if (VT.getSizeInBits() == 64 &&
2225  Val.getValueType().getSizeInBits() == 32) {
2226  SDValue C = DAG.getConstant(0, dl, MVT::i32);
2227  Val = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Val);
2228  }
2229 
2230  const SDValue Ops[] = {Vec, Val, Combined};
2231 
2232  SDValue N;
2233  if (VT.getSizeInBits() == 32)
2234  N = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i32, Ops);
2235  else
2236  N = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i64, Ops);
2237 
2238  return DAG.getNode(ISD::BITCAST, dl, VT, N);
2239 }
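
Both the insert and extract lowerings above encode the field to operate on as a single i64 value: the bit width goes in the upper 32 bits (Width shifted left by 32) and the bit offset in the lower 32 bits (OR'ed in). A standalone helper showing that packing (the name is illustrative only):

#include <cstdint>

static uint64_t packWidthOffset(uint32_t WidthBits, uint32_t OffsetBits) {
  return (static_cast<uint64_t>(WidthBits) << 32) | OffsetBits;
}

// Example: inserting a 16-bit element at index 2 of a v4i16 packs a width of
// 16 and a bit offset of 32: packWidthOffset(16, 32) == 0x0000001000000020.
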
2240 
2241 bool
2242 HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
2243  // Assuming the caller does not have either a signext or zeroext modifier, and
2244  // only one value is accepted, any reasonable truncation is allowed.
2245  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
2246  return false;
2247 
2248  // FIXME: in principle up to 64-bit could be made safe, but it would be very
2249  // fragile at the moment: any support for multiple value returns would be
2250  // liable to disallow tail calls involving i64 -> iN truncation in many cases.
2251  return Ty1->getPrimitiveSizeInBits() <= 32;
2252 }
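
A standalone model of the rule above, assuming both types are plain integers (the helper name is illustrative only): any truncation is tolerated for tail-call purposes as long as the value being truncated is at most 32 bits wide.

static bool allowTruncateForTailCallModel(unsigned SrcBits, unsigned DstBits) {
  (void)DstBits;          // any narrower integer destination is acceptable
  return SrcBits <= 32;   // i64 (and wider) sources are rejected
}
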
2253 
2254 SDValue
2255 HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
2256  SDValue Chain = Op.getOperand(0);
2257  SDValue Offset = Op.getOperand(1);
2258  SDValue Handler = Op.getOperand(2);
2259  SDLoc dl(Op);
2260  auto PtrVT = getPointerTy(DAG.getDataLayout());
2261 
2262  // Mark function as containing a call to EH_RETURN.
2263  HexagonMachineFunctionInfo *FuncInfo =
2264  DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
2265  FuncInfo->setHasEHReturn();
2266 
2267  unsigned OffsetReg = Hexagon::R28;
2268 
2269  SDValue StoreAddr =
2270  DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT),
2271  DAG.getIntPtrConstant(4, dl));
2272  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
2273  false, false, 0);
2274  Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
2275 
2276  // Not needed; we already use it as an explicit input to EH_RETURN.
2277  // MF.getRegInfo().addLiveOut(OffsetReg);
2278 
2279  return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
2280 }
2281 
2282 SDValue
2283 HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
2284  unsigned Opc = Op.getOpcode();
2285  switch (Opc) {
2286  default:
2287 #ifndef NDEBUG
2288  Op.getNode()->dumpr(&DAG);
2289  if (Opc > HexagonISD::OP_BEGIN && Opc < HexagonISD::OP_END)
2290  errs() << "Check for a non-legal type in this operation\n";
2291 #endif
2292  llvm_unreachable("Should not custom lower this!");
2293  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
2294  case ISD::INSERT_SUBVECTOR: return LowerINSERT_VECTOR(Op, DAG);
2295  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR(Op, DAG);
2296  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_VECTOR(Op, DAG);
2297  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR(Op, DAG);
2298  case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
2299  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
2300  case ISD::SRA:
2301  case ISD::SHL:
2302  case ISD::SRL: return LowerVECTOR_SHIFT(Op, DAG);
2303  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
2304  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
2305  // Frame & Return address. Currently unimplemented.
2306  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
2307  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
2308  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
2309  case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
2310  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
2311  case ISD::VASTART: return LowerVASTART(Op, DAG);
2312  case ISD::BR_JT: return LowerBR_JT(Op, DAG);
2313  // Custom lower some vector loads.
2314  case ISD::LOAD: return LowerLOAD(Op, DAG);
2315  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
2316  case ISD::SETCC: return LowerSETCC(Op, DAG);
2317  case ISD::VSELECT: return LowerVSELECT(Op, DAG);
2318  case ISD::CTPOP: return LowerCTPOP(Op, DAG);
2319  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2320  case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
2321  }
2322 }
2323 
2324 MachineBasicBlock *
2325 HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
2326  MachineBasicBlock *BB)
2327  const {
2328  switch (MI->getOpcode()) {
2329  case Hexagon::ALLOCA: {
2330  MachineFunction *MF = BB->getParent();
2331  auto *FuncInfo = MF->getInfo<HexagonMachineFunctionInfo>();
2332  FuncInfo->addAllocaAdjustInst(MI);
2333  return BB;
2334  }
2335  default: llvm_unreachable("Unexpected instr type to insert");
2336  } // switch
2337 }
2338 
2339 //===----------------------------------------------------------------------===//
2340 // Inline Assembly Support
2341 //===----------------------------------------------------------------------===//
2342 
2343 std::pair<unsigned, const TargetRegisterClass *>
2344 HexagonTargetLowering::getRegForInlineAsmConstraint(
2345  const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
2346  if (Constraint.size() == 1) {
2347  switch (Constraint[0]) {
2348  case 'r': // R0-R31
2349  switch (VT.SimpleTy) {
2350  default:
2351  llvm_unreachable("getRegForInlineAsmConstraint Unhandled data type");
2352  case MVT::i32:
2353  case MVT::i16:
2354  case MVT::i8:
2355  case MVT::f32:
2356  return std::make_pair(0U, &Hexagon::IntRegsRegClass);
2357  case MVT::i64:
2358  case MVT::f64:
2359  return std::make_pair(0U, &Hexagon::DoubleRegsRegClass);
2360  }
2361  default:
2362  llvm_unreachable("Unknown asm register class");
2363  }
2364  }
2365 
2366  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
2367 }
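
The constraint handling above maps the single-letter "r" constraint to the 32-bit general register class for values up to 32 bits (including f32) and to the 64-bit register-pair class for i64/f64. A standalone sketch of that mapping using plain strings instead of LLVM's register-class objects (names are illustrative only):

#include <string>

static std::string regClassForConstraint(char Constraint, unsigned Bits) {
  if (Constraint != 'r')
    return "unhandled";
  return Bits <= 32 ? "IntRegs (R0-R31)" : "DoubleRegs (register pairs)";
}
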
2368 
2369 /// isFPImmLegal - Returns true if the target can instruction select the
2370 /// specified FP immediate natively. If false, the legalizer will
2371 /// materialize the FP immediate as a load from a constant pool.
2372 bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
2373  return Subtarget.hasV5TOps();
2374 }
2375 
2376 /// isLegalAddressingMode - Return true if the addressing mode represented by
2377 /// AM is legal for this target, for a load/store of the specified type.
2378 bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
2379  const AddrMode &AM, Type *Ty,
2380  unsigned AS) const {
2381  // Allow a sign-extended 11-bit immediate field.
2382  if (AM.BaseOffs <= -(1LL << 13) || AM.BaseOffs >= (1LL << 13)-1)
2383  return false;
2384 
2385  // No global is ever allowed as a base.
2386  if (AM.BaseGV)
2387  return false;
2388 
2389  int Scale = AM.Scale;
2390  if (Scale < 0) Scale = -Scale;
2391  switch (Scale) {
2392  case 0: // No scale reg, "r+i", "r", or just "i".
2393  break;
2394  default: // No scaled addressing mode.
2395  return false;
2396  }
2397  return true;
2398 }
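
A standalone model of the three checks above (the helper name is illustrative only): the base offset must lie inside the signed range the code accepts, a global value is never allowed as the base, and no scaled index register is supported.

#include <cstdint>

static bool legalAddrModeModel(bool HasGlobalBase, int Scale, int64_t Offs) {
  if (Offs <= -(1LL << 13) || Offs >= (1LL << 13) - 1)
    return false;                 // offset outside the accepted range
  if (HasGlobalBase)
    return false;                 // no global as a base
  return Scale == 0;              // only "r", "i" or "r+i" forms
}
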
2399 
2400 /// isLegalICmpImmediate - Return true if the specified immediate is legal
2401 /// icmp immediate, that is the target has icmp instructions which can compare
2402 /// a register against the immediate without having to materialize the
2403 /// immediate into a register.
2404 bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
2405  return Imm >= -512 && Imm <= 511;
2406 }
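
The bounds above correspond to a signed 10-bit immediate: -512 is -(1 << 9) and 511 is (1 << 9) - 1. A trivial standalone restatement (the helper name is illustrative only):

static bool fitsCmpImmediate(long long Imm) {
  return Imm >= -512 && Imm <= 511;   // signed 10-bit range
}
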
2407 
2408 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2409 /// for tail call optimization. Targets which want to do tail call
2410 /// optimization should implement this function.
2411 bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
2412  SDValue Callee,
2413  CallingConv::ID CalleeCC,
2414  bool isVarArg,
2415  bool isCalleeStructRet,
2416  bool isCallerStructRet,
2417  const SmallVectorImpl<ISD::OutputArg> &Outs,
2418  const SmallVectorImpl<SDValue> &OutVals,
2419  const SmallVectorImpl<ISD::InputArg> &Ins,
2420  SelectionDAG& DAG) const {
2421  const Function *CallerF = DAG.getMachineFunction().getFunction();
2422  CallingConv::ID CallerCC = CallerF->getCallingConv();
2423  bool CCMatch = CallerCC == CalleeCC;
2424 
2425  // ***************************************************************************
2426  // Look for obvious safe cases to perform tail call optimization that do not
2427  // require ABI changes.
2428  // ***************************************************************************
2429 
2430  // If this is a tail call via a function pointer, then don't do it!
2431  if (!(dyn_cast<GlobalAddressSDNode>(Callee))
2432  && !(dyn_cast<ExternalSymbolSDNode>(Callee))) {
2433  return false;
2434  }
2435 
2436  // Do not optimize if the calling conventions do not match.
2437  if (!CCMatch)
2438  return false;
2439 
2440  // Do not tail call optimize vararg calls.
2441  if (isVarArg)
2442  return false;
2443 
2444  // Also avoid tail call optimization if either caller or callee uses struct
2445  // return semantics.
2446  if (isCalleeStructRet || isCallerStructRet)
2447  return false;
2448 
2449  // In addition to the cases above, we also disable Tail Call Optimization if
2450  // the calling convention decides that at least one outgoing argument needs
2451  // to go on the stack. We cannot check that here because at this point that
2452  // information is not available.
2453  return true;
2454 }
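
A standalone summary of the decision above (the helper and parameter names are illustrative only): a call is considered for tail-call optimization only when it is a direct call, the calling conventions match, it is not variadic, and neither side uses struct-return semantics.

static bool mayTailCallModel(bool IsDirectCall, bool SameCC, bool IsVarArg,
                             bool CalleeSRet, bool CallerSRet) {
  return IsDirectCall && SameCC && !IsVarArg && !CalleeSRet && !CallerSRet;
}
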
2455 
2456 // Return true when the given node fits in a positive half word.
2457 bool llvm::isPositiveHalfWord(SDNode *N) {
2458  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
2459  if (CN && CN->getSExtValue() > 0 && isInt<16>(CN->getSExtValue()))
2460  return true;
2461 
2462  switch (N->getOpcode()) {
2463  default:
2464  return false;
2465  case ISD::SIGN_EXTEND_INREG:
2466  return true;
2467  }
2468 }
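
For the constant case above, a node "fits in a positive half word" when its value is positive and representable as a signed 16-bit integer, i.e. in (0, 32767]. A standalone restatement (the helper name is illustrative only):

#include <cstdint>

static bool fitsPositiveHalfWord(int64_t V) {
  return V > 0 && V <= 0x7FFF;   // same as V > 0 && isInt<16>(V)
}
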
2469 
2470 Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
2471  AtomicOrdering Ord) const {
2472  BasicBlock *BB = Builder.GetInsertBlock();
2473  Module *M = BB->getParent()->getParent();
2474  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
2475  unsigned SZ = Ty->getPrimitiveSizeInBits();
2476  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
2477  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
2478  : Intrinsic::hexagon_L4_loadd_locked;
2479  Value *Fn = Intrinsic::getDeclaration(M, IntID);
2480  return Builder.CreateCall(Fn, Addr, "larx");
2481 }
2482 
2483 /// Perform a store-conditional operation to Addr. Return the status of the
2484 /// store. This should be 0 if the store succeeded, non-zero otherwise.
2485 Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
2486  Value *Val, Value *Addr, AtomicOrdering Ord) const {
2487  BasicBlock *BB = Builder.GetInsertBlock();
2488  Module *M = BB->getParent()->getParent();
2489  Type *Ty = Val->getType();
2490  unsigned SZ = Ty->getPrimitiveSizeInBits();
2491  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
2492  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
2493  : Intrinsic::hexagon_S4_stored_locked;
2494  Value *Fn = Intrinsic::getDeclaration(M, IntID);
2495  Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
2496  Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
2497  Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
2498  return Ext;
2499 }
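
The two hooks above give the AtomicExpand pass a load-locked/store-conditional pair; the pass wraps them in a retry loop, and per the comment above emitStoreConditional the status it produces is 0 on success. As a semantic analogy only (this is not the code the backend emits), the resulting loop behaves like a weak compare-and-swap retry loop:

#include <atomic>
#include <cstdint>

static void atomicAddModel(std::atomic<uint32_t> &Word, uint32_t Add) {
  uint32_t Old = Word.load();
  // compare_exchange_weak refreshes Old on failure, mirroring the reload that
  // the load-locked intrinsic performs on each iteration of the LL/SC loop.
  while (!Word.compare_exchange_weak(Old, Old + Add)) {
  }
}
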
2500 
2501 bool HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
2502  // Do not expand loads and stores that don't exceed 64 bits.
2503  return LI->getType()->getPrimitiveSizeInBits() > 64;
2504 }
2505 
2506 bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
2507  // Do not expand loads and stores that don't exceed 64 bits.
2508  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
2509 }
2510 
static bool Is_PostInc_S4_Offset(SDNode *S, int ShiftAmount)
void setFrameAddressIsTaken(bool T)
bool IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM, SectionKind Kind) const
IsGlobalInSmallSection - Return true if this global address should be placed into small data/bss sect...
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:477
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:450
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively...
Value * getValueOperand()
Definition: Instructions.h:406
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
const MachineFunction * getParent() const
getParent - Return the MachineFunction containing this basic block.
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:104
SDValue getValue(unsigned R) const
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns true if the given (atomic) load should be expanded by the IR-level AtomicExpand pass into a l...
#define R4(n)
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
LLVMContext * getContext() const
Definition: SelectionDAG.h:289
SDValue getCopyToReg(SDValue Chain, SDLoc dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:522
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1327
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, SDLoc DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
Definition: SelectionDAG.h:646
unsigned getFrameRegister(const MachineFunction &MF) const override
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
void dump() const
Dump this node, for debugging.
size_t size() const
size - Get the string size.
Definition: StringRef.h:113
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
Definition: ISDOpcodes.h:292
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:554
LocInfo getLocInfo() const
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:301
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:114
unsigned getNumParams() const
getNumParams - Return the number of fixed parameters this function type requires. ...
Definition: DerivedTypes.h:136
const TargetMachine & getTargetMachine() const
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPostIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mo...
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
SDValue getMergeValues(ArrayRef< SDValue > Ops, SDLoc dl)
Create a MERGE_VALUES node from the given operands.
SDValue LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, SDLoc dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals, const SmallVectorImpl< SDValue > &OutVals, SDValue Callee) const
LowerCallResult - Lower the result values of an ISD::CALL into the appropriate copies out of appropri...
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:210
static cl::opt< bool > EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden, cl::desc("Control jump table emission on Hexagon target"))
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:585
CallInst - This class represents a function call, abstracting a target machine's calling convention...
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition: ISDOpcodes.h:228
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, bool isInvariant, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
unsigned getByValSize() const
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, SDLoc dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array...
static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
Hexagon target-specific information for each MachineFunction.
unsigned getNumOperands() const
Return the number of values used by this operation.
bool isPositiveHalfWord(SDNode *N)
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:111
unsigned getNumOperands() const
const SDValue & getOperand(unsigned Num) const
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.h:225
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
LoadInst - an instruction for reading from memory.
Definition: Instructions.h:177
const HexagonFrameLowering * getFrameLowering() const override
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1354
#define R2(n)
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue LowerEXTRACT_VECTOR(SDValue Op, SelectionDAG &DAG) const
Same for subtraction.
Definition: ISDOpcodes.h:231
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
static std::error_code getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
const SDValue & getBasePtr() const
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1 at the ...
Definition: ISDOpcodes.h:287
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:658
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:357
bool isUnsignedIntSetCC(CondCode Code)
isUnsignedIntSetCC - Return true if this is a setcc instruction that performs an unsigned comparison ...
Definition: ISDOpcodes.h:843
bool isRegLoc() const
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:172
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
SDValue getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
void dumpr() const
Dump (recursively) this node and its use-def subgraph.
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:200
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:319
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:371
bool isVector() const
isVector - Return true if this is a vector value type.
Definition: ValueTypes.h:115
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
const std::vector< MachineJumpTableEntry > & getJumpTables() const
BlockAddress - The address of a basic block.
Definition: Constants.h:802
SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
Shift and rotation operations.
Definition: ISDOpcodes.h:332
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:98
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:283
const HexagonRegisterInfo * getRegisterInfo() const override
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:181
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APInt.h:33
bool isInt< 8 >(int64_t x)
Definition: MathExtras.h:268
SDValue getTargetGlobalAddress(const GlobalValue *GV, SDLoc DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:467
virtual TargetLoweringObjectFile * getObjFileLowering() const
void addLoc(const CCValAssign &V)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:517
Reg
All possible values of the reg field in the ModR/M byte.
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
SimpleValueType SimpleTy
#define Hexagon_PointerSize
Definition: Hexagon.h:20
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:73
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
#define G(x, y, z)
Definition: MD5.cpp:52
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:571
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SmallVector< ISD::InputArg, 32 > Ins
AtomicOrdering
Definition: Instructions.h:38
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:581
EVT getVectorElementType() const
getVectorElementType - Given a vector type, return the type of each element.
Definition: ValueTypes.h:216
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector...
load Combine Adjacent Loads
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, SDLoc DL)
Return a new CALLSEQ_START node, which always must have a glue result (to ensure it's not CSE'd)...
Definition: SelectionDAG.h:637
unsigned getLocReg() const
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:393
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
Definition: SmallVector.h:57
bool hasStructRetAttr() const
Determine if the function returns a structure through first pointer argument.
Definition: Function.h:360
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition: ISDOpcodes.h:351
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:191
static bool CC_Hexagon32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
SmallVector< ISD::OutputArg, 32 > Outs
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:866
MachineConstantPoolValue * getMachineCPVal() const
StoreInst - an instruction for storing to memory.
Definition: Instructions.h:316
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:804
const APInt & getAPIntValue() const
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from Ty1 to Ty2 is permitted when deciding whether a call is in tail posi...
bool isSignedIntSetCC(CondCode Code)
isSignedIntSetCC - Return true if this is a setcc instruction that performs a signed comparison when ...
Definition: ISDOpcodes.h:837
static cl::opt< int > MinimumJumpTables("minimum-jump-tables", cl::Hidden, cl::ZeroOrMore, cl::init(5), cl::desc("Set minimum jump tables"))
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:142
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:284
SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const
const BasicBlock * getBasicBlock() const
getBasicBlock - Return the LLVM basic block that this instance corresponded to originally.
UNDEF - An undefined node.
Definition: ISDOpcodes.h:169
This class is used to represent ISD::STORE nodes.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:267
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:436
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:97
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Definition: ISDOpcodes.h:262
SDNode * getNode() const
get the SDNode which holds the desired result
A self-contained host- and target-independent arbitrary-precision floating-point software implementat...
Definition: APFloat.h:122
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:325
bool isMachineConstantPoolEntry() const
MVT - Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:65
const SDValue & getOperand(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:41
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
Simple binary floating point operators.
Definition: ISDOpcodes.h:237
bool isNonTemporal() const
static BlockAddress * get(Function *F, BasicBlock *BB)
get - Return a BlockAddress for the specified function and basic block.
Definition: Constants.cpp:1496
MVT getLocVT() const
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
Definition: ISDOpcodes.h:607
static bool getIndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
static bool CC_Hexagon(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:780
static cl::opt< int > MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden, cl::ZeroOrMore, cl::init(4), cl::desc("Max #stores to inline memmove"))
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
Definition: ISDOpcodes.h:267
const Constant * getConstVal() const
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:219
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type Ty1 to type Ty2.
bool mayBeEmittedAsTailCall(CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const
static mvt_range fp_valuetypes()
static unsigned getNumOperandRegisters(unsigned Flag)
getNumOperandRegisters - Extract the number of registers field from the inline asm operand flag...
Definition: InlineAsm.h:332
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
SDValue getCopyFromReg(SDValue Chain, SDLoc dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:547
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang","erlang-compatible garbage collector")
SDValue getTargetConstant(uint64_t Val, SDLoc DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:436
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
unsigned getOpcode() const
static unsigned getKind(unsigned Flags)
Definition: InlineAsm.h:311
void setPrefFunctionAlignment(unsigned Align)
Set the target's preferred function alignment.
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition: ISDOpcodes.h:57
static mvt_range vector_valuetypes()
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:598
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const
bool isVolatile() const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, bool isVolatile, bool isNonTemporal, bool isInvariant, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes())
Bit counting operators with an undefined result for zero inputs.
Definition: ISDOpcodes.h:338
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:468
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const
EVT - Extended Value Type.
Definition: ValueTypes.h:31
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
CallInst * CreateCall(Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="")
Definition: IRBuilder.h:1467
This structure contains all information that is necessary for lowering calls.
static bool isSExtFree(SDValue N)
MachinePointerInfo - This class contains a discriminated union of information about pointers in memor...
const MachinePointerInfo & getPointerInfo() const
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
unsigned getByValAlign() const
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:484
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:478
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:50
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
#define HEXAGON_LRFP_SIZE
Definition: Hexagon.h:27
CCState - This class holds information needed while lowering arguments and return values...
unsigned countPopulation(T Value)
Count the number of set bits in a value.
Definition: MathExtras.h:449
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1192
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:273
void setExceptionPointerRegister(unsigned R)
If set to a physical register, this sets the register that receives the exception address on entry to...
bool isInvariant() const
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:179
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:861
SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:222
CCValAssign - Represent assignment of one arg/retval to a location.
const SDValue & getChain() const
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:335
CHAIN = SC CHAIN, Imm128 - System call.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:266
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:79
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
MachineFrameInfo * getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Represents one node in the SelectionDAG.
static MachinePointerInfo getStack(int64_t Offset)
getStack - stack pointer relative access.
static cl::opt< AlignMode > Align(cl::desc("Load/store alignment support"), cl::Hidden, cl::init(NoStrictAlign), cl::values(clEnumValN(StrictAlign,"aarch64-strict-align","Disallow all unaligned memory accesses"), clEnumValN(NoStrictAlign,"aarch64-no-strict-align","Allow unaligned memory accesses"), clEnumValEnd))
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:123
static mvt_range integer_valuetypes()
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
void setMinimumJumpTableEntries(int Val)
Indicate the number of blocks to generate jump tables rather than if sequence.
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
Class for arbitrary precision integers.
Definition: APInt.h:73
void setExceptionSelectorRegister(unsigned R)
If set to a physical register, this sets the register that receives the exception typeid on entry to ...
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:342
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
int64_t getSExtValue() const
bool isIntegerTy() const
isIntegerTy - True if this is an instance of IntegerType.
Definition: Type.h:193
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
void setPrefLoopAlignment(unsigned Align)
Set the target's preferred loop alignment.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:383
void setHasAddressTaken()
setHasAddressTaken - Set this block to reflect that it potentially is the target of an indirect branc...
bool isMemLoc() const
LLVM_ATTRIBUTE_UNUSED_RESULT std::enable_if< !is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:285
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:386
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:250
static cl::opt< int > MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::ZeroOrMore, cl::init(8), cl::desc("Max #stores to inline memset"))
static SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG)
static SDValue createSplat(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue Val)
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:542
Representation of each machine instruction.
Definition: MachineInstr.h:51
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
Definition: SelectionDAG.h:516
SmallVector< SDValue, 32 > OutVals
static cl::opt< bool > EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::desc("Enable Hexagon SDNode scheduling"))
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:321
bool IsEligibleForTailCallOptimization(SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet, bool isCallerStructRet, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SmallVectorImpl< ISD::InputArg > &Ins, SelectionDAG &DAG) const
IsEligibleForTailCallOptimization - Check whether the call is eligible for tail call optimization...
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:239
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:196
unsigned getStoreSizeInBits() const
getStoreSizeInBits - Return the number of bits overwritten by a store of the specified value type...
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:401
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:518
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
Definition: ValueTypes.h:233
#define N
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values into this state.
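The usual pattern inside a LowerReturn implementation, shown as a sketch using the standard LowerReturn parameters (CallConv, isVarArg, Outs, DAG): build a CCState over the outgoing values and let the return calling-convention function assign locations.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);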
FunctionType * getFunctionType() const
Definition: Function.cpp:227
static cl::opt< int > MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::ZeroOrMore, cl::init(4), cl::desc("Max #stores to inline memcpy"))
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OptSize attribute.
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type...
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions with OptSize attribute.
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
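A one-line sketch of the call; R29 is the stack pointer in the Hexagon ABI, but treat the exact register here as an assumption of the example rather than a quote from this file:
  setStackPointerRegisterToSaveRestore(Hexagon::R29);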
bool isTailCall() const
static bool CC_Hexagon_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static bool CC_Hexagon64(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
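For example, a target can redirect the integer-division libcalls to its own runtime routines; the routine names below follow the Hexagon naming scheme but are illustrative, not a complete list of what this file installs:
  setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
  setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
  setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");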
static CCValAssign getMem(unsigned ValNo, MVT ValVT, unsigned Offset, MVT LocVT, LocInfo HTP)
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:512
bool isInt< 16 >(int64_t x)
Definition: MathExtras.h:272
unsigned MaxStoresPerMemcpy
Specify the maximum number of store instructions per memcpy call.
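These limits are plain data members of TargetLowering, so a target constructor can simply forward the cl::opt values declared above into them; a sketch of that wiring:
  MaxStoresPerMemcpy         = MaxStoresPerMemcpyCL;
  MaxStoresPerMemcpyOptSize  = MaxStoresPerMemcpyOptSizeCL;
  MaxStoresPerMemmove        = MaxStoresPerMemmoveCL;
  MaxStoresPerMemmoveOptSize = MaxStoresPerMemmoveOptSizeCL;
  MaxStoresPerMemset         = MaxStoresPerMemsetCL;
  MaxStoresPerMemsetOptSize  = MaxStoresPerMemsetOptSizeCL;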
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
getEVT - Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:277
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const
EVT getValueType() const
Return the ValueType of the referenced return value.
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin...
Definition: ISDOpcodes.h:97
SDValue getConstant(uint64_t Val, SDLoc DL, EVT VT, bool isTarget=false, bool isOpaque=false)
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition: ISDOpcodes.h:279
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void setReturnAddressIsTaken(bool s)
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:94
unsigned getAlignment() const
static cl::opt< int > MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::ZeroOrMore, cl::init(6), cl::desc("Max #stores to inline memcpy"))
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
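A sketch of choosing the preference from the EnableHexSDNodeSched flag declared above; the particular Sched:: values picked here are an assumption of the example:
  if (EnableHexSDNodeSched)
    setSchedulingPreference(Sched::VLIW);
  else
    setSchedulingPreference(Sched::Source);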
SDValue getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
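A sketch of extracting the low 32-bit half of a 64-bit value; Hexagon::subreg_loreg is the conventional Hexagon subregister index name, used here as an assumption, and DAG, dl, and DoubleVal are placeholders:
  SDValue Lo = DAG.getTargetExtractSubreg(Hexagon::subreg_loreg, dl,
                                          MVT::i32, DoubleVal);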
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
getPrimitiveSizeInBits - Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:121
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:365
LLVM Value Representation.
Definition: Value.h:69
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:240
SDValue getRegister(unsigned Reg, EVT VT)
void setInsertFencesForAtomic(bool fence)
Set if the DAG builder should automatically insert fences and reduce the order of atomic memory operations to Monotonic.
static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
#define DEBUG(X)
Definition: Debug.h:92
Primary interface to the complete machine description for the target machine.
static bool isCommonSplatElement(BuildVectorSDNode *BVN)
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:40
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
LowerCall - Function arguments are copied from virtual regs to (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:365
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of store operations that may be substituted for a call to memset, used for functions with OptSize attribute.
unsigned getLocMemOffset() const
Conversion operators.
Definition: ISDOpcodes.h:380
SDValue LowerINSERT_VECTOR(SDValue Op, SelectionDAG &DAG) const
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:338
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:389
unsigned getAlignment() const
static cl::opt< int > MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::ZeroOrMore, cl::init(6), cl::desc("Max #stores to inline memmove"))
#define LLVM_FUNCTION_NAME
Expands to the name of the enclosing function (__func__, or a compiler-specific equivalent).
Definition: Compiler.h:318
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const
static cl::opt< int > MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::ZeroOrMore, cl::init(4), cl::desc("Max #stores to inline memset"))
unsigned AllocateReg(unsigned Reg)
AllocateReg - Attempt to allocate one register.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
Definition: ISDOpcodes.h:506
static cl::opt< bool > EnableFastMath("ffast-math", cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::desc("Enable Fast Math processing"))
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition: Function.cpp:229
unsigned AllocateStack(unsigned Size, unsigned Align)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
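A sketch of how a custom CCAssignFn is usually built on AllocateReg and AllocateStack: try the argument registers first, otherwise take a 4-byte, 4-aligned stack slot. The function name and register list are illustrative, not this file's CC_Hexagon32.
static bool CC_Example32(unsigned ValNo, MVT ValVT, MVT LocVT,
                         CCValAssign::LocInfo LocInfo,
                         ISD::ArgFlagsTy ArgFlags, CCState &State) {
  static const MCPhysReg RegList[] = {
    Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
    Hexagon::R5
  };
  if (unsigned Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;                      // assigned to a register
  }
  unsigned Offset = State.AllocateStack(4, 4);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;                        // assigned to the stack
}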
const BasicBlock * getParent() const
Definition: Instruction.h:72
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the specified vector type.
Definition: ISDOpcodes.h:309
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, SDLoc dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array...
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:203
SDValue getIntPtrConstant(uint64_t Val, SDLoc DL, bool isTarget=false)
LLVMContext & getContext() const
Get the global data context.
Definition: Module.h:265
HexagonTargetLowering(const TargetMachine &TM, const HexagonSubtarget &ST)
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate what to do about it.
uint64_t getZExtValue() const
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:761
unsigned getVectorNumElements() const
getVectorNumElements - Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:225
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
Definition: ISDOpcodes.h:527