1 //===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the XCoreTargetLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "XCoreISelLowering.h"
15 #include "XCore.h"
17 #include "XCoreSubtarget.h"
18 #include "XCoreTargetMachine.h"
19 #include "XCoreTargetObjectFile.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DerivedTypes.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/GlobalAlias.h"
33 #include "llvm/IR/GlobalVariable.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/KnownBits.h"
39 #include <algorithm>
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "xcore-lower"
44 
45 const char *XCoreTargetLowering::
46 getTargetNodeName(unsigned Opcode) const
47 {
48  switch ((XCoreISD::NodeType)Opcode)
49  {
50  case XCoreISD::FIRST_NUMBER : break;
51  case XCoreISD::BL : return "XCoreISD::BL";
52  case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
53  case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
54  case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
55  case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
56  case XCoreISD::STWSP : return "XCoreISD::STWSP";
57  case XCoreISD::RETSP : return "XCoreISD::RETSP";
58  case XCoreISD::LADD : return "XCoreISD::LADD";
59  case XCoreISD::LSUB : return "XCoreISD::LSUB";
60  case XCoreISD::LMUL : return "XCoreISD::LMUL";
61  case XCoreISD::MACCU : return "XCoreISD::MACCU";
62  case XCoreISD::MACCS : return "XCoreISD::MACCS";
63  case XCoreISD::CRC8 : return "XCoreISD::CRC8";
64  case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
65  case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
66  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
67  case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
68  case XCoreISD::MEMBARRIER : return "XCoreISD::MEMBARRIER";
69  }
70  return nullptr;
71 }
72 
73 XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
74  const XCoreSubtarget &Subtarget)
75  : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {
76 
77  // Set up the register classes.
78  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
79 
80  // Compute derived properties from the register classes
81  computeRegisterProperties(Subtarget.getRegisterInfo());
82 
84 
86 
87  // Use i32 for setcc operations results (slt, sgt, ...).
88  setBooleanContents(ZeroOrOneBooleanContent);
89  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
90 
91  // XCore does not have the NodeTypes below.
98 
99  // 64bit
109 
110  // Bit Manipulation
114 
116 
117  // Jump tables.
119 
122 
123  // Conversion of i64 -> double produces constantpool nodes
125 
126  // Loads
127  for (MVT VT : MVT::integer_valuetypes()) {
131 
134  }
135 
136  // Custom expand misaligned loads / stores.
139 
140  // Varargs
145 
146  // Dynamic stack
150 
151  // Exception handling
154 
155  // Atomic operations
156  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
157  // As we are always Sequentially Consistent, an ATOMIC_FENCE becomes a no-op.
161 
162  // TRAMPOLINE is custom lowered.
165 
166  // We want to custom lower some of our intrinsics.
168 
172 
173  // We have target-specific dag combine patterns for the following nodes:
178 
181 }
182 
183 bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
184  if (Val.getOpcode() != ISD::LOAD)
185  return false;
186 
187  EVT VT1 = Val.getValueType();
188  if (!VT1.isSimple() || !VT1.isInteger() ||
189  !VT2.isSimple() || !VT2.isInteger())
190  return false;
191 
192  switch (VT1.getSimpleVT().SimpleTy) {
193  default: break;
194  case MVT::i8:
195  return true;
196  }
197 
198  return false;
199 }
200 
201 SDValue XCoreTargetLowering::
202 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
203  switch (Op.getOpcode())
204  {
205  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
206  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
207  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
208  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
209  case ISD::BR_JT: return LowerBR_JT(Op, DAG);
210  case ISD::LOAD: return LowerLOAD(Op, DAG);
211  case ISD::STORE: return LowerSTORE(Op, DAG);
212  case ISD::VAARG: return LowerVAARG(Op, DAG);
213  case ISD::VASTART: return LowerVASTART(Op, DAG);
214  case ISD::SMUL_LOHI: return LowerSMUL_LOHI(Op, DAG);
215  case ISD::UMUL_LOHI: return LowerUMUL_LOHI(Op, DAG);
216  // FIXME: Remove these when LegalizeDAGTypes lands.
217  case ISD::ADD:
218  case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
219  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
220  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
221  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
222  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
223  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
224  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
225  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
226  case ISD::ATOMIC_LOAD: return LowerATOMIC_LOAD(Op, DAG);
227  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
228  default:
229  llvm_unreachable("unimplemented operand");
230  }
231 }
232 
233 /// ReplaceNodeResults - Replace the results of node with an illegal result
234 /// type with new values built out of custom code.
235 void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
236  SmallVectorImpl<SDValue> &Results,
237  SelectionDAG &DAG) const {
238  switch (N->getOpcode()) {
239  default:
240  llvm_unreachable("Don't know how to custom expand this!");
241  case ISD::ADD:
242  case ISD::SUB:
243  Results.push_back(ExpandADDSUB(N, DAG));
244  return;
245  }
246 }
247 
248 //===----------------------------------------------------------------------===//
249 // Misc Lower Operation implementation
250 //===----------------------------------------------------------------------===//
251 
252 SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
253  const GlobalValue *GV,
254  SelectionDAG &DAG) const {
255  // FIXME there is no actual debug info here
256  SDLoc dl(GA);
257 
258  if (GV->getValueType()->isFunctionTy())
259  return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
260 
261  const auto *GVar = dyn_cast<GlobalVariable>(GV);
262  if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
263  (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
264  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
265 
266  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
267 }
268 
269 static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
270  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
271  return true;
272 
273  Type *ObjType = GV->getValueType();
274  if (!ObjType->isSized())
275  return false;
276 
277  auto &DL = GV->getParent()->getDataLayout();
278  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
279  return ObjSize < CodeModelLargeSize && ObjSize != 0;
280 }
281 
282 SDValue XCoreTargetLowering::
283 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
284 {
285  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
286  const GlobalValue *GV = GN->getGlobal();
287  SDLoc DL(GN);
288  int64_t Offset = GN->getOffset();
289  if (IsSmallObject(GV, *this)) {
290  // We can only fold positive offsets that are a multiple of the word size.
291  int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
292  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
293  GA = getGlobalAddressWrapper(GA, GV, DAG);
294  // Handle the rest of the offset.
295  if (Offset != FoldedOffset) {
296  SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
297  GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
298  }
299  return GA;
300  } else {
301  // Ideally we would not fold in offset with an index <= 11.
302  Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
303  Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
304  Ty = Type::getInt32Ty(*DAG.getContext());
305  Constant *Idx = ConstantInt::get(Ty, Offset);
306  Constant *GAI = ConstantExpr::getGetElementPtr(
307  Type::getInt8Ty(*DAG.getContext()), GA, Idx);
308  SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
309  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
310  DAG.getEntryNode(), CP, MachinePointerInfo());
311  }
312 }
313 
314 SDValue XCoreTargetLowering::
315 LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
316 {
317  SDLoc DL(Op);
318  auto PtrVT = getPointerTy(DAG.getDataLayout());
319  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
320  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);
321 
322  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
323 }
324 
325 SDValue XCoreTargetLowering::
326 LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
327 {
328  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
329  // FIXME there isn't really debug info here
330  SDLoc dl(CP);
331  EVT PtrVT = Op.getValueType();
332  SDValue Res;
333  if (CP->isMachineConstantPoolEntry()) {
334  Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
335  CP->getAlignment(), CP->getOffset());
336  } else {
337  Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
338  CP->getAlignment(), CP->getOffset());
339  }
340  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
341 }
342 
343 unsigned XCoreTargetLowering::getJumpTableEncoding() const {
344  return MachineJumpTableInfo::EK_Inline;
345 }
346 
347 SDValue XCoreTargetLowering::
348 LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
349 {
350  SDValue Chain = Op.getOperand(0);
351  SDValue Table = Op.getOperand(1);
352  SDValue Index = Op.getOperand(2);
353  SDLoc dl(Op);
354  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
355  unsigned JTI = JT->getIndex();
356  MachineFunction &MF = DAG.getMachineFunction();
357  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
358  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
359 
360  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
361  if (NumEntries <= 32) {
362  return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
363  }
364  assert((NumEntries >> 31) == 0);
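 // Larger tables fall through to BR_JT32; the index is doubled by the shift below,
 // presumably because each 32-bit entry occupies two half-words of the inline table.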
365  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
366  DAG.getConstant(1, dl, MVT::i32));
367  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
368  ScaledIndex);
369 }
370 
371 SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
372  const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
373  SelectionDAG &DAG) const {
374  auto PtrVT = getPointerTy(DAG.getDataLayout());
375  if ((Offset & 0x3) == 0) {
376  return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
377  }
378  // Lower to pair of consecutive word aligned loads plus some bit shifting.
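 // For example, Offset == 6 loads the aligned words at offsets 4 and 8 and
 // combines them as (Low >> 16) | (High << 16).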
379  int32_t HighOffset = alignTo(Offset, 4);
380  int32_t LowOffset = HighOffset - 4;
381  SDValue LowAddr, HighAddr;
382  if (GlobalAddressSDNode *GASD =
383  dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
384  LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
385  LowOffset);
386  HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
387  HighOffset);
388  } else {
389  LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
390  DAG.getConstant(LowOffset, DL, MVT::i32));
391  HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
392  DAG.getConstant(HighOffset, DL, MVT::i32));
393  }
394  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
395  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
396 
397  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
398  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
399  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
400  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
401  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
402  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
403  High.getValue(1));
404  SDValue Ops[] = { Result, Chain };
405  return DAG.getMergeValues(Ops, DL);
406 }
407 
408 static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
409 {
410  KnownBits Known;
411  DAG.computeKnownBits(Value, Known);
412  return Known.countMinTrailingZeros() >= 2;
413 }
414 
415 SDValue XCoreTargetLowering::
416 LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
417  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
418  LoadSDNode *LD = cast<LoadSDNode>(Op);
419  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
420  "Unexpected extension type");
421  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
422  if (allowsMisalignedMemoryAccesses(LD->getMemoryVT(),
423  LD->getAddressSpace(),
424  LD->getAlignment()))
425  return SDValue();
426 
427  auto &TD = DAG.getDataLayout();
428  unsigned ABIAlignment = TD.getABITypeAlignment(
429  LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
430  // Leave aligned load alone.
431  if (LD->getAlignment() >= ABIAlignment)
432  return SDValue();
433 
434  SDValue Chain = LD->getChain();
435  SDValue BasePtr = LD->getBasePtr();
436  SDLoc DL(Op);
437 
438  if (!LD->isVolatile()) {
439  const GlobalValue *GV;
440  int64_t Offset = 0;
441  if (DAG.isBaseWithConstantOffset(BasePtr) &&
442  isWordAligned(BasePtr->getOperand(0), DAG)) {
443  SDValue NewBasePtr = BasePtr->getOperand(0);
444  Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
445  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
446  Offset, DAG);
447  }
448  if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
449  MinAlign(GV->getAlignment(), 4) == 4) {
450  SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
451  BasePtr->getValueType(0));
452  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
453  Offset, DAG);
454  }
455  }
456 
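 // A 2-byte aligned load is assembled from two halfword loads: a zero-extending
 // load of the low half and an any-extending load of the high half shifted into place.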
457  if (LD->getAlignment() == 2) {
458  SDValue Low =
459  DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
460  LD->getPointerInfo(), MVT::i16,
461  /* Alignment = */ 2, LD->getMemOperand()->getFlags());
462  SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
463  DAG.getConstant(2, DL, MVT::i32));
464  SDValue High =
465  DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
466  LD->getPointerInfo().getWithOffset(2), MVT::i16,
467  /* Alignment = */ 2, LD->getMemOperand()->getFlags());
468  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
469  DAG.getConstant(16, DL, MVT::i32));
470  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
471  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
472  High.getValue(1));
473  SDValue Ops[] = { Result, Chain };
474  return DAG.getMergeValues(Ops, DL);
475  }
476 
477  // Lower to a call to __misaligned_load(BasePtr).
478  Type *IntPtrTy = TD.getIntPtrType(*DAG.getContext());
479  TargetLowering::ArgListTy Args;
480  TargetLowering::ArgListEntry Entry;
481 
482  Entry.Ty = IntPtrTy;
483  Entry.Node = BasePtr;
484  Args.push_back(Entry);
485 
486  TargetLowering::CallLoweringInfo CLI(DAG);
487  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
488  CallingConv::C, IntPtrTy,
489  DAG.getExternalSymbol("__misaligned_load",
490  getPointerTy(DAG.getDataLayout())),
491  std::move(Args));
492 
493  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
494  SDValue Ops[] = { CallResult.first, CallResult.second };
495  return DAG.getMergeValues(Ops, DL);
496 }
497 
498 SDValue XCoreTargetLowering::
499 LowerSTORE(SDValue Op, SelectionDAG &DAG) const
500 {
501  StoreSDNode *ST = cast<StoreSDNode>(Op);
502  assert(!ST->isTruncatingStore() && "Unexpected store type");
503  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
504  if (allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
505  ST->getAddressSpace(),
506  ST->getAlignment())) {
507  return SDValue();
508  }
509  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
510  ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
511  // Leave aligned store alone.
512  if (ST->getAlignment() >= ABIAlignment) {
513  return SDValue();
514  }
515  SDValue Chain = ST->getChain();
516  SDValue BasePtr = ST->getBasePtr();
517  SDValue Value = ST->getValue();
518  SDLoc dl(Op);
519 
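 // A 2-byte aligned store is split into two halfword truncating stores, low half first.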
520  if (ST->getAlignment() == 2) {
521  SDValue Low = Value;
522  SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
523  DAG.getConstant(16, dl, MVT::i32));
524  SDValue StoreLow = DAG.getTruncStore(
525  Chain, dl, Low, BasePtr, ST->getPointerInfo(), MVT::i16,
526  /* Alignment = */ 2, ST->getMemOperand()->getFlags());
527  SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
528  DAG.getConstant(2, dl, MVT::i32));
529  SDValue StoreHigh = DAG.getTruncStore(
530  Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
531  MVT::i16, /* Alignment = */ 2, ST->getMemOperand()->getFlags());
532  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
533  }
534 
535  // Lower to a call to __misaligned_store(BasePtr, Value).
536  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
537  TargetLowering::ArgListTy Args;
538  TargetLowering::ArgListEntry Entry;
539 
540  Entry.Ty = IntPtrTy;
541  Entry.Node = BasePtr;
542  Args.push_back(Entry);
543 
544  Entry.Node = Value;
545  Args.push_back(Entry);
546 
547  TargetLowering::CallLoweringInfo CLI(DAG);
548  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
549  CallingConv::C, Type::getVoidTy(*DAG.getContext()),
550  DAG.getExternalSymbol("__misaligned_store",
551  getPointerTy(DAG.getDataLayout())),
552  std::move(Args));
553 
554  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
555  return CallResult.second;
556 }
557 
558 SDValue XCoreTargetLowering::
559 LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
560 {
561  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
562  "Unexpected operand to lower!");
563  SDLoc dl(Op);
564  SDValue LHS = Op.getOperand(0);
565  SDValue RHS = Op.getOperand(1);
566  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
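 // As used here, MACCS computes Zero:Zero + LHS * RHS as a 64-bit signed product;
 // result value 0 is the high word and result value 1 the low word.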
567  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
568  DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
569  LHS, RHS);
570  SDValue Lo(Hi.getNode(), 1);
571  SDValue Ops[] = { Lo, Hi };
572  return DAG.getMergeValues(Ops, dl);
573 }
574 
575 SDValue XCoreTargetLowering::
576 LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
577 {
578  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
579  "Unexpected operand to lower!");
580  SDLoc dl(Op);
581  SDValue LHS = Op.getOperand(0);
582  SDValue RHS = Op.getOperand(1);
583  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
584  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
585  DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
586  Zero, Zero);
587  SDValue Lo(Hi.getNode(), 1);
588  SDValue Ops[] = { Lo, Hi };
589  return DAG.getMergeValues(Ops, dl);
590 }
591 
592 /// isADDADDMUL - Return whether Op is in a form that is equivalent to
593 /// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
594 /// each intermediate result in the calculation must also have a single use.
595 /// If the Op is in the correct form the constituent parts are written to Mul0,
596 /// Mul1, Addend0 and Addend1.
597 static bool
598 isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
599  SDValue &Addend1, bool requireIntermediatesHaveOneUse)
600 {
601  if (Op.getOpcode() != ISD::ADD)
602  return false;
603  SDValue N0 = Op.getOperand(0);
604  SDValue N1 = Op.getOperand(1);
605  SDValue AddOp;
606  SDValue OtherOp;
607  if (N0.getOpcode() == ISD::ADD) {
608  AddOp = N0;
609  OtherOp = N1;
610  } else if (N1.getOpcode() == ISD::ADD) {
611  AddOp = N1;
612  OtherOp = N0;
613  } else {
614  return false;
615  }
616  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
617  return false;
618  if (OtherOp.getOpcode() == ISD::MUL) {
619  // add(add(a,b),mul(x,y))
620  if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
621  return false;
622  Mul0 = OtherOp.getOperand(0);
623  Mul1 = OtherOp.getOperand(1);
624  Addend0 = AddOp.getOperand(0);
625  Addend1 = AddOp.getOperand(1);
626  return true;
627  }
628  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
629  // add(add(mul(x,y),a),b)
630  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
631  return false;
632  Mul0 = AddOp.getOperand(0).getOperand(0);
633  Mul1 = AddOp.getOperand(0).getOperand(1);
634  Addend0 = AddOp.getOperand(1);
635  Addend1 = OtherOp;
636  return true;
637  }
638  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
639  // add(add(a,mul(x,y)),b)
640  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
641  return false;
642  Mul0 = AddOp.getOperand(1).getOperand(0);
643  Mul1 = AddOp.getOperand(1).getOperand(1);
644  Addend0 = AddOp.getOperand(0);
645  Addend1 = OtherOp;
646  return true;
647  }
648  return false;
649 }
650 
651 SDValue XCoreTargetLowering::
652 TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
653 {
654  SDValue Mul;
655  SDValue Other;
656  if (N->getOperand(0).getOpcode() == ISD::MUL) {
657  Mul = N->getOperand(0);
658  Other = N->getOperand(1);
659  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
660  Mul = N->getOperand(1);
661  Other = N->getOperand(0);
662  } else {
663  return SDValue();
664  }
665  SDLoc dl(N);
666  SDValue LL, RL, AddendL, AddendH;
667  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
668  Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
669  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
670  Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
671  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
672  Other, DAG.getConstant(0, dl, MVT::i32));
673  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
674  Other, DAG.getConstant(1, dl, MVT::i32));
675  APInt HighMask = APInt::getHighBitsSet(64, 32);
676  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
677  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
678  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
679  DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
680  // The inputs are both zero-extended.
681  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
682  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
683  AddendL, LL, RL);
684  SDValue Lo(Hi.getNode(), 1);
685  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
686  }
687  if (LHSSB > 32 && RHSSB > 32) {
688  // The inputs are both sign-extended.
689  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
690  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
691  AddendL, LL, RL);
692  SDValue Lo(Hi.getNode(), 1);
693  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
694  }
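 // Mixed case: form AddendH:AddendL + LL*RL with MACCU, then add the cross terms
 // LL*RH and LH*RL into the high word (the LH*RH term only affects bits above 63).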
695  SDValue LH, RH;
696  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
697  Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
698  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
699  Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
700  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
701  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
702  AddendL, LL, RL);
703  SDValue Lo(Hi.getNode(), 1);
704  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
705  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
706  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
707  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
708  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
709 }
710 
711 SDValue XCoreTargetLowering::
712 ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
713 {
714  assert(N->getValueType(0) == MVT::i64 &&
715  (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
716  "Unknown operand to lower!");
717 
718  if (N->getOpcode() == ISD::ADD)
719  if (SDValue Result = TryExpandADDWithMul(N, DAG))
720  return Result;
721 
722  SDLoc dl(N);
723 
724  // Extract components
725  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
726  N->getOperand(0),
727  DAG.getConstant(0, dl, MVT::i32));
728  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
729  N->getOperand(0),
730  DAG.getConstant(1, dl, MVT::i32));
731  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
732  N->getOperand(1),
733  DAG.getConstant(0, dl, MVT::i32));
734  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
735  N->getOperand(1),
736  DAG.getConstant(1, dl, MVT::i32));
737 
738  // Expand
739  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
740  XCoreISD::LSUB;
741  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
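 // The low halves are combined first; their carry/borrow (result value 1) feeds the
 // high-half LADD/LSUB.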
742  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
743  LHSL, RHSL, Zero);
744  SDValue Carry(Lo.getNode(), 1);
745 
746  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
747  LHSH, RHSH, Carry);
748  SDValue Ignored(Hi.getNode(), 1);
749  // Merge the pieces
750  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
751 }
752 
753 SDValue XCoreTargetLowering::
754 LowerVAARG(SDValue Op, SelectionDAG &DAG) const
755 {
756  // Whilst llvm does not support aggregate varargs we can ignore
757  // the possibility of the ValueType being an implicit byVal vararg.
758  SDNode *Node = Op.getNode();
759  EVT VT = Node->getValueType(0); // not an aggregate
760  SDValue InChain = Node->getOperand(0);
761  SDValue VAListPtr = Node->getOperand(1);
762  EVT PtrVT = VAListPtr.getValueType();
763  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
764  SDLoc dl(Node);
765  SDValue VAList =
766  DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
767  // Increment the pointer, VAList, to the next vararg
768  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
769  DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
770  dl));
771  // Store the incremented VAList to the legalized pointer
772  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
773  MachinePointerInfo(SV));
774  // Load the actual argument out of the pointer VAList
775  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
776 }
777 
778 SDValue XCoreTargetLowering::
779 LowerVASTART(SDValue Op, SelectionDAG &DAG) const
780 {
781  SDLoc dl(Op);
782  // vastart stores the address of the VarArgsFrameIndex slot into the
783  // memory location argument
784  MachineFunction &MF = DAG.getMachineFunction();
785  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
786  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
787  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
788  MachinePointerInfo());
789 }
790 
791 SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
792  SelectionDAG &DAG) const {
793  // This node represents llvm.frameaddress on the DAG.
794  // It takes one operand, the index of the frame address to return.
795  // An index of zero corresponds to the current function's frame address.
796  // An index of one to the parent's frame address, and so on.
797  // Depths > 0 not supported yet!
798  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
799  return SDValue();
800 
801  MachineFunction &MF = DAG.getMachineFunction();
802  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
803  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
804  RegInfo->getFrameRegister(MF), MVT::i32);
805 }
806 
807 SDValue XCoreTargetLowering::
808 LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
809  // This node represents llvm.returnaddress on the DAG.
810  // It takes one operand, the index of the return address to return.
811  // An index of zero corresponds to the current function's return address.
812  // An index of one to the parent's return address, and so on.
813  // Depths > 0 not supported yet!
814  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
815  return SDValue();
816 
817  MachineFunction &MF = DAG.getMachineFunction();
818  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
819  int FI = XFI->createLRSpillSlot(MF);
820  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
821  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
822  DAG.getEntryNode(), FIN,
823  MachinePointerInfo::getFixedStack(MF, FI));
824 }
825 
826 SDValue XCoreTargetLowering::
827 LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
828  // This node represents offset from frame pointer to first on-stack argument.
829  // This is needed for correct stack adjustment during unwind.
830  // However, we don't know the offset until after the frame has been finalised.
831  // This is done during the XCoreFTAOElim pass.
832  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
833 }
834 
835 SDValue XCoreTargetLowering::
836 LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
837  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
838  // This node represents the 'eh_return' GCC DWARF builtin, which is used to
839  // return from an exception. The general meaning is: adjust the stack by OFFSET and
840  // pass execution to HANDLER.
841  MachineFunction &MF = DAG.getMachineFunction();
842  SDValue Chain = Op.getOperand(0);
843  SDValue Offset = Op.getOperand(1);
844  SDValue Handler = Op.getOperand(2);
845  SDLoc dl(Op);
846 
847  // Absolute SP = (FP + FrameToArgs) + Offset
848  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
849  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
850  RegInfo->getFrameRegister(MF), MVT::i32);
851  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
852  MVT::i32);
853  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
854  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);
855 
856  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
857  // which leaves 2 caller saved registers, R2 & R3 for us to use.
858  unsigned StackReg = XCore::R2;
859  unsigned HandlerReg = XCore::R3;
860 
861  SDValue OutChains[] = {
862  DAG.getCopyToReg(Chain, dl, StackReg, Stack),
863  DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
864  };
865 
866  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
867 
868  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
869  DAG.getRegister(StackReg, MVT::i32),
870  DAG.getRegister(HandlerReg, MVT::i32));
871 
872 }
873 
874 SDValue XCoreTargetLowering::
875 LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
876  return Op.getOperand(0);
877 }
878 
879 SDValue XCoreTargetLowering::
880 LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
881  SDValue Chain = Op.getOperand(0);
882  SDValue Trmp = Op.getOperand(1); // trampoline
883  SDValue FPtr = Op.getOperand(2); // nested function
884  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
885 
886  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
887 
888  // .align 4
889  // LDAPF_u10 r11, nest
890  // LDW_2rus r11, r11[0]
891  // STWSP_ru6 r11, sp[0]
892  // LDAPF_u10 r11, fptr
893  // LDW_2rus r11, r11[0]
894  // BAU_1r r11
895  // nest:
896  // .word nest
897  // fptr:
898  // .word fptr
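 // The word constants stored below are, presumably, the encodings of the instruction
 // sequence above (two 16-bit instructions per 32-bit word), followed by the nest and
 // fptr data words.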
899  SDValue OutChains[5];
900 
901  SDValue Addr = Trmp;
902 
903  SDLoc dl(Op);
904  OutChains[0] =
905  DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
906  MachinePointerInfo(TrmpAddr));
907 
908  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
909  DAG.getConstant(4, dl, MVT::i32));
910  OutChains[1] =
911  DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
912  MachinePointerInfo(TrmpAddr, 4));
913 
914  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
915  DAG.getConstant(8, dl, MVT::i32));
916  OutChains[2] =
917  DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
918  MachinePointerInfo(TrmpAddr, 8));
919 
920  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
921  DAG.getConstant(12, dl, MVT::i32));
922  OutChains[3] =
923  DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));
924 
925  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
926  DAG.getConstant(16, dl, MVT::i32));
927  OutChains[4] =
928  DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));
929 
930  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
931 }
932 
933 SDValue XCoreTargetLowering::
934 LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
935  SDLoc DL(Op);
936  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
937  switch (IntNo) {
938  case Intrinsic::xcore_crc8:
939  EVT VT = Op.getValueType();
940  SDValue Data =
941  DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
942  Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
943  SDValue Crc(Data.getNode(), 1);
944  SDValue Results[] = { Crc, Data };
945  return DAG.getMergeValues(Results, DL);
946  }
947  return SDValue();
948 }
949 
950 SDValue XCoreTargetLowering::
951 LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
952  SDLoc DL(Op);
953  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
954 }
955 
956 SDValue XCoreTargetLowering::
957 LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
958  AtomicSDNode *N = cast<AtomicSDNode>(Op);
959  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
960  assert((N->getOrdering() == AtomicOrdering::Unordered ||
961  N->getOrdering() == AtomicOrdering::Monotonic) &&
962  "setInsertFencesForAtomic(true) expects unordered / monotonic");
963  if (N->getMemoryVT() == MVT::i32) {
964  if (N->getAlignment() < 4)
965  report_fatal_error("atomic load must be aligned");
966  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
967  N->getChain(), N->getBasePtr(), N->getPointerInfo(),
968  N->getAlignment(), N->getMemOperand()->getFlags(),
969  N->getAAInfo(), N->getRanges());
970  }
971  if (N->getMemoryVT() == MVT::i16) {
972  if (N->getAlignment() < 2)
973  report_fatal_error("atomic load must be aligned");
974  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
975  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
976  N->getAlignment(), N->getMemOperand()->getFlags(),
977  N->getAAInfo());
978  }
979  if (N->getMemoryVT() == MVT::i8)
980  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
981  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
982  N->getAlignment(), N->getMemOperand()->getFlags(),
983  N->getAAInfo());
984  return SDValue();
985 }
986 
987 SDValue XCoreTargetLowering::
988 LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
989  AtomicSDNode *N = cast<AtomicSDNode>(Op);
990  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
991  assert((N->getOrdering() == AtomicOrdering::Unordered ||
992  N->getOrdering() == AtomicOrdering::Monotonic) &&
993  "setInsertFencesForAtomic(true) expects unordered / monotonic");
994  if (N->getMemoryVT() == MVT::i32) {
995  if (N->getAlignment() < 4)
996  report_fatal_error("atomic store must be aligned");
997  return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
998  N->getPointerInfo(), N->getAlignment(),
999  N->getMemOperand()->getFlags(), N->getAAInfo());
1000  }
1001  if (N->getMemoryVT() == MVT::i16) {
1002  if (N->getAlignment() < 2)
1003  report_fatal_error("atomic store must be aligned");
1004  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
1005  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
1006  N->getAlignment(), N->getMemOperand()->getFlags(),
1007  N->getAAInfo());
1008  }
1009  if (N->getMemoryVT() == MVT::i8)
1010  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
1011  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
1012  N->getAlignment(), N->getMemOperand()->getFlags(),
1013  N->getAAInfo());
1014  return SDValue();
1015 }
1016 
1017 //===----------------------------------------------------------------------===//
1018 // Calling Convention Implementation
1019 //===----------------------------------------------------------------------===//
1020 
1021 #include "XCoreGenCallingConv.inc"
1022 
1023 //===----------------------------------------------------------------------===//
1024 // Call Calling Convention Implementation
1025 //===----------------------------------------------------------------------===//
1026 
1027 /// XCore call implementation
1028 SDValue
1029 XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1030  SmallVectorImpl<SDValue> &InVals) const {
1031  SelectionDAG &DAG = CLI.DAG;
1032  SDLoc &dl = CLI.DL;
1033  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1034  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1035  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1036  SDValue Chain = CLI.Chain;
1037  SDValue Callee = CLI.Callee;
1038  bool &isTailCall = CLI.IsTailCall;
1039  CallingConv::ID CallConv = CLI.CallConv;
1040  bool isVarArg = CLI.IsVarArg;
1041 
1042  // XCore target does not yet support tail call optimization.
1043  isTailCall = false;
1044 
1045  // For now, only CallingConv::C implemented
1046  switch (CallConv)
1047  {
1048  default:
1049  report_fatal_error("Unsupported calling convention");
1050  case CallingConv::Fast:
1051  case CallingConv::C:
1052  return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
1053  Outs, OutVals, Ins, dl, DAG, InVals);
1054  }
1055 }
1056 
1057 /// LowerCallResult - Lower the result values of a call into the
1058 /// appropriate copies out of physical registers / memory locations.
1059 static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
1060  const SmallVectorImpl<CCValAssign> &RVLocs,
1061  const SDLoc &dl, SelectionDAG &DAG,
1062  SmallVectorImpl<SDValue> &InVals) {
1063  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
1064  // Copy results out of physical registers.
1065  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1066  const CCValAssign &VA = RVLocs[i];
1067  if (VA.isRegLoc()) {
1068  Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
1069  InFlag).getValue(1);
1070  InFlag = Chain.getValue(2);
1071  InVals.push_back(Chain.getValue(0));
1072  } else {
1073  assert(VA.isMemLoc());
1074  ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
1075  InVals.size()));
1076  // Reserve space for this result.
1077  InVals.push_back(SDValue());
1078  }
1079  }
1080 
1081  // Copy results out of memory.
1082  SmallVector<SDValue, 4> MemOpChains;
1083  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
1084  int offset = ResultMemLocs[i].first;
1085  unsigned index = ResultMemLocs[i].second;
1086  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
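 // LDWSP appears to take a word offset from the stack pointer, hence offset / 4 here.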
1087  SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
1088  SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
1089  InVals[index] = load;
1090  MemOpChains.push_back(load.getValue(1));
1091  }
1092 
1093  // Transform all loads nodes into one single node because
1094  // all load nodes are independent of each other.
1095  if (!MemOpChains.empty())
1096  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1097 
1098  return Chain;
1099 }
1100 
1101 /// LowerCCCCallTo - function arguments are copied from virtual
1102 /// regs to (physical regs)/(stack frame); CALLSEQ_START and
1103 /// CALLSEQ_END are emitted.
1104 /// TODO: isTailCall, sret.
1105 SDValue XCoreTargetLowering::LowerCCCCallTo(
1106  SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
1107  bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
1108  const SmallVectorImpl<SDValue> &OutVals,
1109  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1110  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1111 
1112  // Analyze operands of the call, assigning locations to each operand.
1113  SmallVector<CCValAssign, 16> ArgLocs;
1114  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1115  *DAG.getContext());
1116 
1117  // The ABI dictates there should be one stack slot available to the callee
1118  // on function entry (for saving lr).
1119  CCInfo.AllocateStack(4, 4);
1120 
1121  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
1122 
1123  SmallVector<CCValAssign, 16> RVLocs;
1124  // Analyze return values to determine the number of bytes of stack required.
1125  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1126  *DAG.getContext());
1127  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
1128  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
1129 
1130  // Get a count of how many bytes are to be pushed on the stack.
1131  unsigned NumBytes = RetCCInfo.getNextStackOffset();
1132  auto PtrVT = getPointerTy(DAG.getDataLayout());
1133 
1134  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
1135 
1136  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
1137  SmallVector<SDValue, 12> MemOpChains;
1138 
1139  // Walk the register/memloc assignments, inserting copies/loads.
1140  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1141  CCValAssign &VA = ArgLocs[i];
1142  SDValue Arg = OutVals[i];
1143 
1144  // Promote the value if needed.
1145  switch (VA.getLocInfo()) {
1146  default: llvm_unreachable("Unknown loc info!");
1147  case CCValAssign::Full: break;
1148  case CCValAssign::SExt:
1149  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1150  break;
1151  case CCValAssign::ZExt:
1152  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1153  break;
1154  case CCValAssign::AExt:
1155  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1156  break;
1157  }
1158 
1159  // Arguments that can be passed on register must be kept at
1160  // RegsToPass vector
1161  if (VA.isRegLoc()) {
1162  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1163  } else {
1164  assert(VA.isMemLoc());
1165 
1166  int Offset = VA.getLocMemOffset();
1167 
1168  MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
1169  Chain, Arg,
1170  DAG.getConstant(Offset/4, dl,
1171  MVT::i32)));
1172  }
1173  }
1174 
1175  // Transform all store nodes into one single node because
1176  // all store nodes are independent of each other.
1177  if (!MemOpChains.empty())
1178  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1179 
1180  // Build a sequence of copy-to-reg nodes chained together with token
1181  // chain and flag operands which copy the outgoing args into registers.
1182  // The InFlag is necessary since all emitted instructions must be
1183  // stuck together.
1184  SDValue InFlag;
1185  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1186  Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1187  RegsToPass[i].second, InFlag);
1188  InFlag = Chain.getValue(1);
1189  }
1190 
1191  // If the callee is a GlobalAddress node (quite common, every direct call is)
1192  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1193  // Likewise ExternalSymbol -> TargetExternalSymbol.
1194  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1195  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
1196  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1197  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1198 
1199  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
1200  // = Chain, Callee, Reg#1, Reg#2, ...
1201  //
1202  // Returns a chain & a flag for retval copy to use.
1203  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1204  SmallVector<SDValue, 14> Ops;
1205  Ops.push_back(Chain);
1206  Ops.push_back(Callee);
1207 
1208  // Add argument registers to the end of the list so that they are
1209  // known live into the call.
1210  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1211  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1212  RegsToPass[i].second.getValueType()));
1213 
1214  if (InFlag.getNode())
1215  Ops.push_back(InFlag);
1216 
1217  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
1218  InFlag = Chain.getValue(1);
1219 
1220  // Create the CALLSEQ_END node.
1221  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
1222  DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
1223  InFlag = Chain.getValue(1);
1224 
1225  // Handle result values, copying them out of physregs into vregs that we
1226  // return.
1227  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
1228 }
1229 
1230 //===----------------------------------------------------------------------===//
1231 // Formal Arguments Calling Convention Implementation
1232 //===----------------------------------------------------------------------===//
1233 
1234 namespace {
1235  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
1236 }
1237 
1238 /// XCore formal arguments implementation
1239 SDValue XCoreTargetLowering::LowerFormalArguments(
1240  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1241  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1242  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1243  switch (CallConv)
1244  {
1245  default:
1246  report_fatal_error("Unsupported calling convention");
1247  case CallingConv::C:
1248  case CallingConv::Fast:
1249  return LowerCCCArguments(Chain, CallConv, isVarArg,
1250  Ins, dl, DAG, InVals);
1251  }
1252 }
1253 
1254 /// LowerCCCArguments - transform physical registers into
1255 /// virtual registers and generate load operations for
1256 /// arguments placed on the stack.
1257 /// TODO: sret
1258 SDValue XCoreTargetLowering::LowerCCCArguments(
1259  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1260  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1261  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1262  MachineFunction &MF = DAG.getMachineFunction();
1263  MachineFrameInfo &MFI = MF.getFrameInfo();
1264  MachineRegisterInfo &RegInfo = MF.getRegInfo();
1265  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
1266 
1267  // Assign locations to all of the incoming arguments.
1268  SmallVector<CCValAssign, 16> ArgLocs;
1269  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1270  *DAG.getContext());
1271 
1272  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
1273 
1274  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
1275 
1276  unsigned LRSaveSize = StackSlotSize;
1277 
1278  if (!isVarArg)
1279  XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);
1280 
1281  // All getCopyFromReg ops must precede any getMemcpys to prevent the
1282  // scheduler clobbering a register before it has been copied.
1283  // The stages are:
1284  // 1. CopyFromReg (and load) arg & vararg registers.
1285  // 2. Chain CopyFromReg nodes into a TokenFactor.
1286  // 3. Memcpy 'byVal' args & push final InVals.
1287  // 4. Chain mem ops nodes into a TokenFactor.
1288  SmallVector<SDValue, 4> CFRegNode;
1289  SmallVector<ArgDataPair, 4> ArgData;
1290  SmallVector<SDValue, 4> MemOps;
1291 
1292  // 1a. CopyFromReg (and load) arg registers.
1293  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1294 
1295  CCValAssign &VA = ArgLocs[i];
1296  SDValue ArgIn;
1297 
1298  if (VA.isRegLoc()) {
1299  // Arguments passed in registers
1300  EVT RegVT = VA.getLocVT();
1301  switch (RegVT.getSimpleVT().SimpleTy) {
1302  default:
1303  {
1304 #ifndef NDEBUG
1305  errs() << "LowerFormalArguments Unhandled argument type: "
1306  << RegVT.getEVTString() << "\n";
1307 #endif
1308  llvm_unreachable(nullptr);
1309  }
1310  case MVT::i32:
1311  unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1312  RegInfo.addLiveIn(VA.getLocReg(), VReg);
1313  ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
1314  CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
1315  }
1316  } else {
1317  // sanity check
1318  assert(VA.isMemLoc());
1319  // Load the argument to a virtual register
1320  unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
1321  if (ObjSize > StackSlotSize) {
1322  errs() << "LowerFormalArguments Unhandled argument type: "
1323  << EVT(VA.getLocVT()).getEVTString()
1324  << "\n";
1325  }
1326  // Create the frame index object for this incoming parameter...
1327  int FI = MFI.CreateFixedObject(ObjSize,
1328  LRSaveSize + VA.getLocMemOffset(),
1329  true);
1330 
1331  // Create the SelectionDAG nodes corresponding to a load
1332  // from this parameter.
1333  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1334  ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
1335  MachinePointerInfo::getFixedStack(MF, FI));
1336  }
1337  const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
1338  ArgData.push_back(ADP);
1339  }
1340 
1341  // 1b. CopyFromReg vararg registers.
1342  if (isVarArg) {
1343  // Argument registers
1344  static const MCPhysReg ArgRegs[] = {
1345  XCore::R0, XCore::R1, XCore::R2, XCore::R3
1346  };
1348  unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
1349  if (FirstVAReg < array_lengthof(ArgRegs)) {
1350  int offset = 0;
1351  // Save remaining registers, storing higher register numbers at a higher
1352  // address
1353  for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
1354  // Create a stack slot
1355  int FI = MFI.CreateFixedObject(4, offset, true);
1356  if (i == (int)FirstVAReg) {
1357  XFI->setVarArgsFrameIndex(FI);
1358  }
1359  offset -= StackSlotSize;
1360  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1361  // Move argument from phys reg -> virt reg
1362  unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1363  RegInfo.addLiveIn(ArgRegs[i], VReg);
1364  SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1365  CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
1366  // Move argument from virt reg -> stack
1367  SDValue Store =
1368  DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
1369  MemOps.push_back(Store);
1370  }
1371  } else {
1372  // This will point to the next argument passed via stack.
1373  XFI->setVarArgsFrameIndex(
1374  MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
1375  true));
1376  }
1377  }
1378 
1379  // 2. chain CopyFromReg nodes into a TokenFactor.
1380  if (!CFRegNode.empty())
1381  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);
1382 
1383  // 3. Memcpy 'byVal' args & push final InVals.
1384  // Aggregates passed "byVal" need to be copied by the callee.
1385  // The callee will use a pointer to this copy, rather than the original
1386  // pointer.
1387  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
1388  ArgDE = ArgData.end();
1389  ArgDI != ArgDE; ++ArgDI) {
1390  if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
1391  unsigned Size = ArgDI->Flags.getByValSize();
1392  unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
1393  // Create a new object on the stack and copy the pointee into it.
1394  int FI = MFI.CreateStackObject(Size, Align, false);
1395  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1396  InVals.push_back(FIN);
1397  MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
1398  DAG.getConstant(Size, dl, MVT::i32),
1399  Align, false, false, false,
1400  MachinePointerInfo(),
1401  MachinePointerInfo()));
1402  } else {
1403  InVals.push_back(ArgDI->SDV);
1404  }
1405  }
1406 
1407  // 4. Chain mem op nodes into a TokenFactor.
1408  if (!MemOps.empty()) {
1409  MemOps.push_back(Chain);
1410  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
1411  }
1412 
1413  return Chain;
1414 }
1415 
1416 //===----------------------------------------------------------------------===//
1417 // Return Value Calling Convention Implementation
1418 //===----------------------------------------------------------------------===//
1419 
1420 bool XCoreTargetLowering::
1421 CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1422  bool isVarArg,
1423  const SmallVectorImpl<ISD::OutputArg> &Outs,
1424  LLVMContext &Context) const {
1425  SmallVector<CCValAssign, 16> RVLocs;
1426  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1427  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
1428  return false;
1429  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
1430  return false;
1431  return true;
1432 }
1433 
1434 SDValue
1435 XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1436  bool isVarArg,
1437  const SmallVectorImpl<ISD::OutputArg> &Outs,
1438  const SmallVectorImpl<SDValue> &OutVals,
1439  const SDLoc &dl, SelectionDAG &DAG) const {
1440 
1441  XCoreFunctionInfo *XFI =
1442  DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
1443  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
1444 
1445  // CCValAssign - represent the assignment of
1446  // the return value to a location
1447  SmallVector<CCValAssign, 16> RVLocs;
1448 
1449  // CCState - Info about the registers and stack slot.
1450  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1451  *DAG.getContext());
1452 
1453  // Analyze return values.
1454  if (!isVarArg)
1455  CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);
1456 
1457  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
1458 
1459  SDValue Flag;
1460  SmallVector<SDValue, 4> RetOps(1, Chain);
1461 
1462  // Return on XCore is always a "retsp 0"
1463  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));
1464 
1465  SmallVector<SDValue, 4> MemOpChains;
1466  // Handle return values that must be copied to memory.
1467  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1468  CCValAssign &VA = RVLocs[i];
1469  if (VA.isRegLoc())
1470  continue;
1471  assert(VA.isMemLoc());
1472  if (isVarArg) {
1473  report_fatal_error("Can't return value from vararg function in memory");
1474  }
1475 
1476  int Offset = VA.getLocMemOffset();
1477  unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
1478  // Create the frame index object for the memory location.
1479  int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
1480 
1481  // Create a SelectionDAG node corresponding to a store
1482  // to this memory location.
1483  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1484  MemOpChains.push_back(DAG.getStore(
1485  Chain, dl, OutVals[i], FIN,
1486  MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
1487  }
1488 
1489  // Transform all store nodes into one single node because
1490  // all stores are independent of each other.
1491  if (!MemOpChains.empty())
1492  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1493 
1494  // Now handle return values copied to registers.
1495  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1496  CCValAssign &VA = RVLocs[i];
1497  if (!VA.isRegLoc())
1498  continue;
1499  // Copy the result values into the output registers.
1500  Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
1501 
1502  // Guarantee that all emitted copies are stuck together,
1503  // avoiding something bad.
1504  Flag = Chain.getValue(1);
1505  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1506  }
1507 
1508  RetOps[0] = Chain; // Update chain.
1509 
1510  // Add the flag if we have it.
1511  if (Flag.getNode())
1512  RetOps.push_back(Flag);
1513 
1514  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
1515 }
1516 
1517 //===----------------------------------------------------------------------===//
1518 // Other Lowering Code
1519 //===----------------------------------------------------------------------===//
1520 
1521 MachineBasicBlock *
1522 XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1523  MachineBasicBlock *BB) const {
1524  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1525  DebugLoc dl = MI.getDebugLoc();
1526  assert((MI.getOpcode() == XCore::SELECT_CC) &&
1527  "Unexpected instr type to insert");
1528 
1529  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1530  // control-flow pattern. The incoming instruction knows the destination vreg
1531  // to set, the condition code register to branch on, the true/false values to
1532  // select between, and a branch opcode to use.
1533  const BasicBlock *LLVM_BB = BB->getBasicBlock();
1534  MachineFunction::iterator It = ++BB->getIterator();
1535 
1536  // thisMBB:
1537  // ...
1538  // TrueVal = ...
1539  // cmpTY ccX, r1, r2
1540  // bCC copy1MBB
1541  // fallthrough --> copy0MBB
1542  MachineBasicBlock *thisMBB = BB;
1543  MachineFunction *F = BB->getParent();
1544  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1545  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
1546  F->insert(It, copy0MBB);
1547  F->insert(It, sinkMBB);
1548 
1549  // Transfer the remainder of BB and its successor edges to sinkMBB.
1550  sinkMBB->splice(sinkMBB->begin(), BB,
1551  std::next(MachineBasicBlock::iterator(MI)), BB->end());
1552  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
1553 
1554  // Next, add the true and fallthrough blocks as its successors.
1555  BB->addSuccessor(copy0MBB);
1556  BB->addSuccessor(sinkMBB);
1557 
1558  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
1559  .addReg(MI.getOperand(1).getReg())
1560  .addMBB(sinkMBB);
1561 
1562  // copy0MBB:
1563  // %FalseValue = ...
1564  // # fallthrough to sinkMBB
1565  BB = copy0MBB;
1566 
1567  // Update machine-CFG edges
1568  BB->addSuccessor(sinkMBB);
1569 
1570  // sinkMBB:
1571  // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1572  // ...
1573  BB = sinkMBB;
1574  BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
1575  .addReg(MI.getOperand(3).getReg())
1576  .addMBB(copy0MBB)
1577  .addReg(MI.getOperand(2).getReg())
1578  .addMBB(thisMBB);
1579 
1580  MI.eraseFromParent(); // The pseudo instruction is gone now.
1581  return BB;
1582 }
1583 
1584 //===----------------------------------------------------------------------===//
1585 // Target Optimization Hooks
1586 //===----------------------------------------------------------------------===//
1587 
1588 SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
1589  DAGCombinerInfo &DCI) const {
1590  SelectionDAG &DAG = DCI.DAG;
1591  SDLoc dl(N);
1592  switch (N->getOpcode()) {
1593  default: break;
1594  case ISD::INTRINSIC_VOID:
1595  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
1596  case Intrinsic::xcore_outt:
1597  case Intrinsic::xcore_outct:
1598  case Intrinsic::xcore_chkct: {
1599  SDValue OutVal = N->getOperand(3);
1600  // These instructions ignore the high bits.
1601  if (OutVal.hasOneUse()) {
1602  unsigned BitWidth = OutVal.getValueSizeInBits();
1603  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
1604  KnownBits Known;
1605  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1606  !DCI.isBeforeLegalizeOps());
1607  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1608  if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
1609  TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
1610  DCI.CommitTargetLoweringOpt(TLO);
1611  }
1612  break;
1613  }
1614  case Intrinsic::xcore_setpt: {
1615  SDValue Time = N->getOperand(3);
1616  // This instruction ignores the high bits.
1617  if (Time.hasOneUse()) {
1618  unsigned BitWidth = Time.getValueSizeInBits();
1619  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
1620  KnownBits Known;
1621  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1622  !DCI.isBeforeLegalizeOps());
1623  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1624  if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
1625  TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
1626  DCI.CommitTargetLoweringOpt(TLO);
1627  }
1628  break;
1629  }
1630  }
1631  break;
1632  case XCoreISD::LADD: {
1633  SDValue N0 = N->getOperand(0);
1634  SDValue N1 = N->getOperand(1);
1635  SDValue N2 = N->getOperand(2);
1636  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1637  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1638  EVT VT = N0.getValueType();
1639 
1640  // canonicalize constant to RHS
1641  if (N0C && !N1C)
1642  return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);
1643 
1644  // fold (ladd 0, 0, x) -> 0, x & 1
1645  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1646  SDValue Carry = DAG.getConstant(0, dl, VT);
1647  SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
1648  DAG.getConstant(1, dl, VT));
1649  SDValue Ops[] = { Result, Carry };
1650  return DAG.getMergeValues(Ops, dl);
1651  }
1652 
1653  // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
1654  // low bit set
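 // hasNUsesOfValue(0, 1) means the carry output (result #1) is dead. LADD
 // only consumes the low bit of its carry operand (see the (ladd 0, 0, x)
 // fold above), so a plain ADD of x and y is equivalent only when y's upper
 // 31 bits are known zero, which is what the known-bits mask checks.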
1655  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1656  KnownBits Known;
1657  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1658  VT.getSizeInBits() - 1);
1659  DAG.computeKnownBits(N2, Known);
1660  if ((Known.Zero & Mask) == Mask) {
1661  SDValue Carry = DAG.getConstant(0, dl, VT);
1662  SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
1663  SDValue Ops[] = { Result, Carry };
1664  return DAG.getMergeValues(Ops, dl);
1665  }
1666  }
1667  }
1668  break;
1669  case XCoreISD::LSUB: {
1670  SDValue N0 = N->getOperand(0);
1671  SDValue N1 = N->getOperand(1);
1672  SDValue N2 = N->getOperand(2);
1673  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1674  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1675  EVT VT = N0.getValueType();
1676 
1677  // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
1678  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1679  KnownBits Known;
1680  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1681  VT.getSizeInBits() - 1);
1682  DAG.computeKnownBits(N2, Known);
1683  if ((Known.Zero & Mask) == Mask) {
1684  SDValue Borrow = N2;
1685  SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
1686  DAG.getConstant(0, dl, VT), N2);
1687  SDValue Ops[] = { Result, Borrow };
1688  return DAG.getMergeValues(Ops, dl);
1689  }
1690  }
1691 
1692  // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
1693  // low bit set
1694  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1695  KnownBits Known;
1696  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1697  VT.getSizeInBits() - 1);
1698  DAG.computeKnownBits(N2, Known);
1699  if ((Known.Zero & Mask) == Mask) {
1700  SDValue Borrow = DAG.getConstant(0, dl, VT);
1701  SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
1702  SDValue Ops[] = { Result, Borrow };
1703  return DAG.getMergeValues(Ops, dl);
1704  }
1705  }
1706  }
1707  break;
1708  case XCoreISD::LMUL: {
1709  SDValue N0 = N->getOperand(0);
1710  SDValue N1 = N->getOperand(1);
1711  SDValue N2 = N->getOperand(2);
1712  SDValue N3 = N->getOperand(3);
1713  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1714  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1715  EVT VT = N0.getValueType();
1716  // Canonicalize multiplicative constant to RHS. If both multiplicative
1717  // operands are constant canonicalize smallest to RHS.
1718  if ((N0C && !N1C) ||
1719  (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
1720  return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
1721  N1, N0, N2, N3);
1722 
1723  // lmul(x, 0, a, b)
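 // With a zero multiplicand the macc collapses to a + b: the full node value
 // is x*0 + a + b, and for 32-bit unsigned a and b that sum needs at most one
 // extra bit, so when the high word is live the carry result of an LADD is
 // enough to reconstruct it (the general case below).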
1724  if (N1C && N1C->isNullValue()) {
1725  // If the high result is unused fold to add(a, b)
1726  if (N->hasNUsesOfValue(0, 0)) {
1727  SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
1728  SDValue Ops[] = { Lo, Lo };
1729  return DAG.getMergeValues(Ops, dl);
1730  }
1731  // Otherwise fold to ladd(a, b, 0)
1732  SDValue Result =
1733  DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
1734  SDValue Carry(Result.getNode(), 1);
1735  SDValue Ops[] = { Carry, Result };
1736  return DAG.getMergeValues(Ops, dl);
1737  }
1738  }
1739  break;
1740  case ISD::ADD: {
1741  // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
1742  // lmul(x, y, a, b). The high result of lmul will be ignored.
1743  // This is only profitable if the intermediate results are unused
1744  // elsewhere.
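 // For example (i32 throughout, a sketch):
 //   t0 = mul x, y
 //   t1 = add t0, a
 //   t2 = add t1, b
 // becomes the low result (value #1) of lmul(x, y, a, b), provided t0 and t1
 // have no other users, since their values are no longer materialised.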
1745  SDValue Mul0, Mul1, Addend0, Addend1;
1746  if (N->getValueType(0) == MVT::i32 &&
1747  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
1748  SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
1749  DAG.getVTList(MVT::i32, MVT::i32), Mul0,
1750  Mul1, Addend0, Addend1);
1751  SDValue Result(Ignored.getNode(), 1);
1752  return Result;
1753  }
1754  APInt HighMask = APInt::getHighBitsSet(64, 32);
1755  // Fold 64 bit expressions such as add(add(mul(x,y),a),b) ->
1756  // lmul(x, y, a, b) if all operands are zero-extended. We do this
1757  // before type legalization as it is messy to match the operands after
1758  // that.
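 // When every operand is known to fit in 32 bits, lmul on the low halves
 // computes x*y + a + b exactly: the EXTRACT_ELEMENT nodes with index 0 pull
 // out the low i32 of each i64 operand, the macc yields (high, low) as its
 // two results, and BUILD_PAIR stitches them back into the original i64.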
1759  if (N->getValueType(0) == MVT::i64 &&
1760  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
1761  DAG.MaskedValueIsZero(Mul0, HighMask) &&
1762  DAG.MaskedValueIsZero(Mul1, HighMask) &&
1763  DAG.MaskedValueIsZero(Addend0, HighMask) &&
1764  DAG.MaskedValueIsZero(Addend1, HighMask)) {
1765  SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1766  Mul0, DAG.getConstant(0, dl, MVT::i32));
1767  SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1768  Mul1, DAG.getConstant(0, dl, MVT::i32));
1769  SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1770  Addend0, DAG.getConstant(0, dl, MVT::i32));
1771  SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1772  Addend1, DAG.getConstant(0, dl, MVT::i32));
1773  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
1774  DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
1775  Addend0L, Addend1L);
1776  SDValue Lo(Hi.getNode(), 1);
1777  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
1778  }
1779  }
1780  break;
1781  case ISD::STORE: {
1782  // Replace unaligned store of unaligned load with memmove.
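 // XCore cannot perform the misaligned access directly (that is what the
 // allowsMisalignedMemoryAccesses check below establishes), so a store below
 // ABI alignment of a value coming from an equally misaligned load would
 // otherwise be expanded into per-byte sequences twice over. If the load
 // feeds only this store, covers the same memory type, and nothing with side
 // effects sits on the chain in between, copying the bytes with a single
 // memmove is the cheaper lowering; the conditions tested below are the
 // legality checks for that rewrite.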
1783  StoreSDNode *ST = cast<StoreSDNode>(N);
1784  if (!DCI.isBeforeLegalize() ||
1785  allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
1786  ST->getAddressSpace(),
1787  ST->getAlignment()) ||
1788  ST->isVolatile() || ST->isIndexed()) {
1789  break;
1790  }
1791  SDValue Chain = ST->getChain();
1792 
1793  unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1794  assert((StoreBits % 8) == 0 &&
1795  "Store size in bits must be a multiple of 8");
1796  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
1797  ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
1798  unsigned Alignment = ST->getAlignment();
1799  if (Alignment >= ABIAlignment) {
1800  break;
1801  }
1802 
1803  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
1804  if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1805  LD->getAlignment() == Alignment &&
1806  !LD->isVolatile() && !LD->isIndexed() &&
1807  Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
1808  bool isTail = isInTailCallPosition(DAG, ST, Chain);
1809  return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
1810  LD->getBasePtr(),
1811  DAG.getConstant(StoreBits/8, dl, MVT::i32),
1812  Alignment, false, isTail, ST->getPointerInfo(),
1813  LD->getPointerInfo());
1814  }
1815  }
1816  break;
1817  }
1818  }
1819  return SDValue();
1820 }
1821 
1822 void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1823  KnownBits &Known,
1824  const APInt &DemandedElts,
1825  const SelectionDAG &DAG,
1826  unsigned Depth) const {
1827  Known.resetAll();
1828  switch (Op.getOpcode()) {
1829  default: break;
1830  case XCoreISD::LADD:
1831  case XCoreISD::LSUB:
1832  if (Op.getResNo() == 1) {
1833  // Top bits of carry / borrow are clear.
1834  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1835  Known.getBitWidth() - 1);
1836  }
1837  break;
1838  case ISD::INTRINSIC_W_CHAIN:
1839  {
1840  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1841  switch (IntNo) {
1842  case Intrinsic::xcore_getts:
1843  // High bits are known to be zero.
1844  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1845  Known.getBitWidth() - 16);
1846  break;
1847  case Intrinsic::xcore_int:
1848  case Intrinsic::xcore_inct:
1849  // High bits are known to be zero.
1850  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1851  Known.getBitWidth() - 8);
1852  break;
1853  case Intrinsic::xcore_testct:
1854  // Result is either 0 or 1.
1855  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1856  Known.getBitWidth() - 1);
1857  break;
1858  case Intrinsic::xcore_testwct:
1859  // Result is in the range 0 - 4.
1860  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1861  Known.getBitWidth() - 3);
1862  break;
1863  }
1864  }
1865  break;
1866  }
1867 }
1868 
1869 //===----------------------------------------------------------------------===//
1870 // Addressing mode description hooks
1871 //===----------------------------------------------------------------------===//
1872 
1873 static inline bool isImmUs(int64_t val)
1874 {
1875  return (val >= 0 && val <= 11);
1876 }
1877 
1878 static inline bool isImmUs2(int64_t val)
1879 {
1880  return (val%2 == 0 && isImmUs(val/2));
1881 }
1882 
1883 static inline bool isImmUs4(int64_t val)
1884 {
1885  return (val%4 == 0 && isImmUs(val/4));
1886 }
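// Taken together these helpers model XCore's scaled short immediate offsets:
// byte accesses accept 0..11 directly, 16-bit accesses accept the even
// offsets 0..22 (offset/2 must be in 0..11), and word accesses accept the
// multiples of four up to 44. For example, an offset of 20 is legal for a
// 4-byte access (20/4 == 5) but not for a 1-byte access (20 > 11).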
1887 
1888 /// isLegalAddressingMode - Return true if the addressing mode represented
1889 /// by AM is legal for this target, for a load/store of the specified type.
1890 bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1891  const AddrMode &AM, Type *Ty,
1892  unsigned AS,
1893  Instruction *I) const {
1894  if (Ty->getTypeID() == Type::VoidTyID)
1895  return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
1896 
1897  unsigned Size = DL.getTypeAllocSize(Ty);
1898  if (AM.BaseGV) {
1899  return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
1900  AM.BaseOffs%4 == 0;
1901  }
1902 
1903  switch (Size) {
1904  case 1:
1905  // reg + imm
1906  if (AM.Scale == 0) {
1907  return isImmUs(AM.BaseOffs);
1908  }
1909  // reg + reg
1910  return AM.Scale == 1 && AM.BaseOffs == 0;
1911  case 2:
1912  case 3:
1913  // reg + imm
1914  if (AM.Scale == 0) {
1915  return isImmUs2(AM.BaseOffs);
1916  }
1917  // reg + reg<<1
1918  return AM.Scale == 2 && AM.BaseOffs == 0;
1919  default:
1920  // reg + imm
1921  if (AM.Scale == 0) {
1922  return isImmUs4(AM.BaseOffs);
1923  }
1924  // reg + reg<<2
1925  return AM.Scale == 4 && AM.BaseOffs == 0;
1926  }
1927 }
1928 
1929 //===----------------------------------------------------------------------===//
1930 // XCore Inline Assembly Support
1931 //===----------------------------------------------------------------------===//
1932 
1933 std::pair<unsigned, const TargetRegisterClass *>
1934 XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1935  StringRef Constraint,
1936  MVT VT) const {
1937  if (Constraint.size() == 1) {
1938  switch (Constraint[0]) {
1939  default : break;
1940  case 'r':
1941  return std::make_pair(0U, &XCore::GRRegsRegClass);
1942  }
1943  }
1944  // Use the default implementation in TargetLowering to convert the register
1945  // constraint into a member of a register class.
1946  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1947 }
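// A usage sketch (illustrative, not taken from this file): with only the
// single-character 'r' constraint handled above, C-level inline assembly
// such as
//   int sum;
//   asm("add %0, %1, %2" : "=r"(sum) : "r"(a), "r"(b));
// gets all three operands allocated from the general-purpose register class
// GRRegs; any other constraint letter falls through to the default handling.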