1 //===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the XCoreTargetLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "XCoreISelLowering.h"
15 #include "XCore.h"
17 #include "XCoreSubtarget.h"
18 #include "XCoreTargetMachine.h"
19 #include "XCoreTargetObjectFile.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DerivedTypes.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/GlobalAlias.h"
33 #include "llvm/IR/GlobalVariable.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/KnownBits.h"
39 #include <algorithm>
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "xcore-lower"
44 
45 const char *XCoreTargetLowering::
46 getTargetNodeName(unsigned Opcode) const
47 {
48  switch ((XCoreISD::NodeType)Opcode)
49  {
50  case XCoreISD::FIRST_NUMBER : break;
51  case XCoreISD::BL : return "XCoreISD::BL";
52  case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
53  case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
54  case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
55  case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
56  case XCoreISD::STWSP : return "XCoreISD::STWSP";
57  case XCoreISD::RETSP : return "XCoreISD::RETSP";
58  case XCoreISD::LADD : return "XCoreISD::LADD";
59  case XCoreISD::LSUB : return "XCoreISD::LSUB";
60  case XCoreISD::LMUL : return "XCoreISD::LMUL";
61  case XCoreISD::MACCU : return "XCoreISD::MACCU";
62  case XCoreISD::MACCS : return "XCoreISD::MACCS";
63  case XCoreISD::CRC8 : return "XCoreISD::CRC8";
64  case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
65  case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
66  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
67  case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
68  case XCoreISD::MEMBARRIER : return "XCoreISD::MEMBARRIER";
69  }
70  return nullptr;
71 }
72 
73 XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
74  const XCoreSubtarget &Subtarget)
75  : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {
76 
77  // Set up the register classes.
78  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
79 
80  // Compute derived properties from the register classes
81  computeRegisterProperties(Subtarget.getRegisterInfo());
82 
84 
86 
87  // Use i32 for setcc operations results (slt, sgt, ...).
88  setBooleanContents(ZeroOrOneBooleanContent);
89  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
90 
91  // XCore does not have the NodeTypes below.
94 
95  // 64bit
105 
106  // Bit Manipulation
110 
112 
113  // Jump tables.
115 
118 
119  // Conversion of i64 -> double produces constantpool nodes
121 
122  // Loads
123  for (MVT VT : MVT::integer_valuetypes()) {
127 
130  }
131 
132  // Custom expand misaligned loads / stores.
135 
136  // Varargs
141 
142  // Dynamic stack
146 
147  // Exception handling
150 
151  // Atomic operations
152  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
153  // As we are always sequentially consistent, an ATOMIC_FENCE becomes a no-op.
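 // Concretely, LowerATOMIC_FENCE below emits an XCoreISD::MEMBARRIER, and
 // LowerATOMIC_LOAD / LowerATOMIC_STORE turn monotonic or unordered atomic
 // accesses into ordinary (naturally aligned) loads and stores.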
157 
158  // TRAMPOLINE is custom lowered.
161 
162  // We want to custom lower some of our intrinsics.
164 
168 
169  // We have target-specific dag combine patterns for the following nodes:
174 
177 }
178 
179 bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
180  if (Val.getOpcode() != ISD::LOAD)
181  return false;
182 
183  EVT VT1 = Val.getValueType();
184  if (!VT1.isSimple() || !VT1.isInteger() ||
185  !VT2.isSimple() || !VT2.isInteger())
186  return false;
187 
188  switch (VT1.getSimpleVT().SimpleTy) {
189  default: break;
190  case MVT::i8:
191  return true;
192  }
193 
194  return false;
195 }
196 
197 SDValue XCoreTargetLowering::
198 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
199  switch (Op.getOpcode())
200  {
201  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
202  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
203  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
204  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
205  case ISD::BR_JT: return LowerBR_JT(Op, DAG);
206  case ISD::LOAD: return LowerLOAD(Op, DAG);
207  case ISD::STORE: return LowerSTORE(Op, DAG);
208  case ISD::VAARG: return LowerVAARG(Op, DAG);
209  case ISD::VASTART: return LowerVASTART(Op, DAG);
210  case ISD::SMUL_LOHI: return LowerSMUL_LOHI(Op, DAG);
211  case ISD::UMUL_LOHI: return LowerUMUL_LOHI(Op, DAG);
212  // FIXME: Remove these when LegalizeDAGTypes lands.
213  case ISD::ADD:
214  case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
215  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
216  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
217  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
218  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
219  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
220  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
221  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
222  case ISD::ATOMIC_LOAD: return LowerATOMIC_LOAD(Op, DAG);
223  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
224  default:
225  llvm_unreachable("unimplemented operand");
226  }
227 }
228 
229 /// ReplaceNodeResults - Replace the results of node with an illegal result
230 /// type with new values built out of custom code.
231 void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
232  SmallVectorImpl<SDValue> &Results,
233  SelectionDAG &DAG) const {
234  switch (N->getOpcode()) {
235  default:
236  llvm_unreachable("Don't know how to custom expand this!");
237  case ISD::ADD:
238  case ISD::SUB:
239  Results.push_back(ExpandADDSUB(N, DAG));
240  return;
241  }
242 }
243 
244 //===----------------------------------------------------------------------===//
245 // Misc Lower Operation implementation
246 //===----------------------------------------------------------------------===//
247 
248 SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
249  const GlobalValue *GV,
250  SelectionDAG &DAG) const {
251  // FIXME there is no actual debug info here
252  SDLoc dl(GA);
253 
254  if (GV->getValueType()->isFunctionTy())
255  return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
256 
257  const auto *GVar = dyn_cast<GlobalVariable>(GV);
258  if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
259  (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
260  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
261 
262  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
263 }
264 
265 static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
266  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
267  return true;
268 
269  Type *ObjType = GV->getValueType();
270  if (!ObjType->isSized())
271  return false;
272 
273  auto &DL = GV->getParent()->getDataLayout();
274  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
275  return ObjSize < CodeModelLargeSize && ObjSize != 0;
276 }
277 
278 SDValue XCoreTargetLowering::
279 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
280 {
281  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
282  const GlobalValue *GV = GN->getGlobal();
283  SDLoc DL(GN);
284  int64_t Offset = GN->getOffset();
285  if (IsSmallObject(GV, *this)) {
286  // We can only fold positive offsets that are a multiple of the word size.
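 // For example, Offset = 7 folds as FoldedOffset = 4; the remaining 3 bytes
 // are added with the separate ADD node below.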
287  int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
288  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
289  GA = getGlobalAddressWrapper(GA, GV, DAG);
290  // Handle the rest of the offset.
291  if (Offset != FoldedOffset) {
292  SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
293  GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
294  }
295  return GA;
296  } else {
297  // Ideally we would not fold in offset with an index <= 11.
298  Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
299  Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
300  Ty = Type::getInt32Ty(*DAG.getContext());
301  Constant *Idx = ConstantInt::get(Ty, Offset);
302  Constant *GAI = ConstantExpr::getGetElementPtr(
303  Type::getInt8Ty(*DAG.getContext()), GA, Idx);
304  SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
305  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
306  DAG.getEntryNode(), CP, MachinePointerInfo());
307  }
308 }
309 
310 SDValue XCoreTargetLowering::
311 LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
312 {
313  SDLoc DL(Op);
314  auto PtrVT = getPointerTy(DAG.getDataLayout());
315  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
316  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);
317 
318  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
319 }
320 
321 SDValue XCoreTargetLowering::
322 LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
323 {
324  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
325  // FIXME there isn't really debug info here
326  SDLoc dl(CP);
327  EVT PtrVT = Op.getValueType();
328  SDValue Res;
329  if (CP->isMachineConstantPoolEntry()) {
330  Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
331  CP->getAlignment(), CP->getOffset());
332  } else {
333  Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
334  CP->getAlignment(), CP->getOffset());
335  }
336  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
337 }
338 
339 unsigned XCoreTargetLowering::getJumpTableEncoding() const {
340  return MachineJumpTableInfo::EK_Inline;
341 }
342 
343 SDValue XCoreTargetLowering::
344 LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
345 {
346  SDValue Chain = Op.getOperand(0);
347  SDValue Table = Op.getOperand(1);
348  SDValue Index = Op.getOperand(2);
349  SDLoc dl(Op);
350  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
351  unsigned JTI = JT->getIndex();
352  MachineFunction &MF = DAG.getMachineFunction();
353  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
354  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
355 
356  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
357  if (NumEntries <= 32) {
358  return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
359  }
360  assert((NumEntries >> 31) == 0);
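 // Larger tables use the 32-bit form: the index is scaled by 2 (shifted left
 // by one) before being handed to BR_JT32.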
361  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
362  DAG.getConstant(1, dl, MVT::i32));
363  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
364  ScaledIndex);
365 }
366 
367 SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
368  const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
369  SelectionDAG &DAG) const {
370  auto PtrVT = getPointerTy(DAG.getDataLayout());
371  if ((Offset & 0x3) == 0) {
372  return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
373  }
374  // Lower to pair of consecutive word aligned loads plus some bit shifting.
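 // For example, with Offset = 6: LowOffset = 4, HighOffset = 8, and the result
 // is (load(Base+4) >> 16) | (load(Base+8) << 16).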
375  int32_t HighOffset = alignTo(Offset, 4);
376  int32_t LowOffset = HighOffset - 4;
377  SDValue LowAddr, HighAddr;
378  if (GlobalAddressSDNode *GASD =
379  dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
380  LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
381  LowOffset);
382  HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
383  HighOffset);
384  } else {
385  LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
386  DAG.getConstant(LowOffset, DL, MVT::i32));
387  HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
388  DAG.getConstant(HighOffset, DL, MVT::i32));
389  }
390  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
391  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
392 
393  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
394  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
395  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
396  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
397  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
398  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
399  High.getValue(1));
400  SDValue Ops[] = { Result, Chain };
401  return DAG.getMergeValues(Ops, DL);
402 }
403 
404 static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
405 {
406  KnownBits Known;
407  DAG.computeKnownBits(Value, Known);
408  return Known.countMinTrailingZeros() >= 2;
409 }
410 
411 SDValue XCoreTargetLowering::
412 LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
413  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
414  LoadSDNode *LD = cast<LoadSDNode>(Op);
415  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
416  "Unexpected extension type");
417  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
418  if (allowsMisalignedMemoryAccesses(LD->getMemoryVT(),
419  LD->getAddressSpace(),
420  LD->getAlignment()))
421  return SDValue();
422 
423  auto &TD = DAG.getDataLayout();
424  unsigned ABIAlignment = TD.getABITypeAlignment(
425  LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
426  // Leave aligned load alone.
427  if (LD->getAlignment() >= ABIAlignment)
428  return SDValue();
429 
430  SDValue Chain = LD->getChain();
431  SDValue BasePtr = LD->getBasePtr();
432  SDLoc DL(Op);
433 
434  if (!LD->isVolatile()) {
435  const GlobalValue *GV;
436  int64_t Offset = 0;
437  if (DAG.isBaseWithConstantOffset(BasePtr) &&
438  isWordAligned(BasePtr->getOperand(0), DAG)) {
439  SDValue NewBasePtr = BasePtr->getOperand(0);
440  Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
441  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
442  Offset, DAG);
443  }
444  if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
445  MinAlign(GV->getAlignment(), 4) == 4) {
446  SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
447  BasePtr->getValueType(0));
448  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
449  Offset, DAG);
450  }
451  }
452 
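 // A 2-byte-aligned i32 load is split into two halfword loads: the low half is
 // zero-extended from BasePtr, the high half is loaded from BasePtr+2 and
 // shifted left by 16, and the two are OR'd together.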
453  if (LD->getAlignment() == 2) {
454  SDValue Low =
455  DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
456  LD->getPointerInfo(), MVT::i16,
457  /* Alignment = */ 2, LD->getMemOperand()->getFlags());
458  SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
459  DAG.getConstant(2, DL, MVT::i32));
460  SDValue High =
461  DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
462  LD->getPointerInfo().getWithOffset(2), MVT::i16,
463  /* Alignment = */ 2, LD->getMemOperand()->getFlags());
464  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
465  DAG.getConstant(16, DL, MVT::i32));
466  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
467  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
468  High.getValue(1));
469  SDValue Ops[] = { Result, Chain };
470  return DAG.getMergeValues(Ops, DL);
471  }
472 
473  // Lower to a call to __misaligned_load(BasePtr).
474  Type *IntPtrTy = TD.getIntPtrType(*DAG.getContext());
475  TargetLowering::ArgListTy Args;
476  TargetLowering::ArgListEntry Entry;
477 
478  Entry.Ty = IntPtrTy;
479  Entry.Node = BasePtr;
480  Args.push_back(Entry);
481 
482  TargetLowering::CallLoweringInfo CLI(DAG);
483  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
484  CallingConv::C, IntPtrTy,
485  DAG.getExternalSymbol("__misaligned_load",
486  getPointerTy(DAG.getDataLayout())),
487  std::move(Args));
488 
489  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
490  SDValue Ops[] = { CallResult.first, CallResult.second };
491  return DAG.getMergeValues(Ops, DL);
492 }
493 
494 SDValue XCoreTargetLowering::
495 LowerSTORE(SDValue Op, SelectionDAG &DAG) const
496 {
497  StoreSDNode *ST = cast<StoreSDNode>(Op);
498  assert(!ST->isTruncatingStore() && "Unexpected store type");
499  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
500  if (allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
501  ST->getAddressSpace(),
502  ST->getAlignment())) {
503  return SDValue();
504  }
505  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
506  ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
507  // Leave aligned store alone.
508  if (ST->getAlignment() >= ABIAlignment) {
509  return SDValue();
510  }
511  SDValue Chain = ST->getChain();
512  SDValue BasePtr = ST->getBasePtr();
513  SDValue Value = ST->getValue();
514  SDLoc dl(Op);
515 
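 // A 2-byte-aligned i32 store is split into two truncating halfword stores:
 // the low 16 bits go to BasePtr and the high 16 bits (Value >> 16) go to
 // BasePtr+2.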
516  if (ST->getAlignment() == 2) {
517  SDValue Low = Value;
518  SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
519  DAG.getConstant(16, dl, MVT::i32));
520  SDValue StoreLow = DAG.getTruncStore(
521  Chain, dl, Low, BasePtr, ST->getPointerInfo(), MVT::i16,
522  /* Alignment = */ 2, ST->getMemOperand()->getFlags());
523  SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
524  DAG.getConstant(2, dl, MVT::i32));
525  SDValue StoreHigh = DAG.getTruncStore(
526  Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
527  MVT::i16, /* Alignment = */ 2, ST->getMemOperand()->getFlags());
528  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
529  }
530 
531  // Lower to a call to __misaligned_store(BasePtr, Value).
532  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
533  TargetLowering::ArgListTy Args;
534  TargetLowering::ArgListEntry Entry;
535 
536  Entry.Ty = IntPtrTy;
537  Entry.Node = BasePtr;
538  Args.push_back(Entry);
539 
540  Entry.Node = Value;
541  Args.push_back(Entry);
542 
543  TargetLowering::CallLoweringInfo CLI(DAG);
544  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
545  CallingConv::C, Type::getVoidTy(*DAG.getContext()),
546  DAG.getExternalSymbol("__misaligned_store",
547  getPointerTy(DAG.getDataLayout())),
548  std::move(Args));
549 
550  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
551  return CallResult.second;
552 }
553 
554 SDValue XCoreTargetLowering::
555 LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
556 {
557  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
558  "Unexpected operand to lower!");
559  SDLoc dl(Op);
560  SDValue LHS = Op.getOperand(0);
561  SDValue RHS = Op.getOperand(1);
562  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
563  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
564  DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
565  LHS, RHS);
566  SDValue Lo(Hi.getNode(), 1);
567  SDValue Ops[] = { Lo, Hi };
568  return DAG.getMergeValues(Ops, dl);
569 }
570 
571 SDValue XCoreTargetLowering::
572 LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
573 {
574  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
575  "Unexpected operand to lower!");
576  SDLoc dl(Op);
577  SDValue LHS = Op.getOperand(0);
578  SDValue RHS = Op.getOperand(1);
579  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
580  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
581  DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
582  Zero, Zero);
583  SDValue Lo(Hi.getNode(), 1);
584  SDValue Ops[] = { Lo, Hi };
585  return DAG.getMergeValues(Ops, dl);
586 }
587 
588 /// isADDADDMUL - Return whether Op is in a form that is equivalent to
589 /// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
590 /// each intermediate result in the calculation must also have a single use.
591 /// If the Op is in the correct form the constituent parts are written to Mul0,
592 /// Mul1, Addend0 and Addend1.
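/// For example, (a + b) + (x * y), ((x * y) + a) + b and (a + (x * y)) + b all
/// match, yielding Mul0 = x, Mul1 = y and the two addends in Addend0/Addend1.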
593 static bool
594 isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
595  SDValue &Addend1, bool requireIntermediatesHaveOneUse)
596 {
597  if (Op.getOpcode() != ISD::ADD)
598  return false;
599  SDValue N0 = Op.getOperand(0);
600  SDValue N1 = Op.getOperand(1);
601  SDValue AddOp;
602  SDValue OtherOp;
603  if (N0.getOpcode() == ISD::ADD) {
604  AddOp = N0;
605  OtherOp = N1;
606  } else if (N1.getOpcode() == ISD::ADD) {
607  AddOp = N1;
608  OtherOp = N0;
609  } else {
610  return false;
611  }
612  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
613  return false;
614  if (OtherOp.getOpcode() == ISD::MUL) {
615  // add(add(a,b),mul(x,y))
616  if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
617  return false;
618  Mul0 = OtherOp.getOperand(0);
619  Mul1 = OtherOp.getOperand(1);
620  Addend0 = AddOp.getOperand(0);
621  Addend1 = AddOp.getOperand(1);
622  return true;
623  }
624  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
625  // add(add(mul(x,y),a),b)
626  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
627  return false;
628  Mul0 = AddOp.getOperand(0).getOperand(0);
629  Mul1 = AddOp.getOperand(0).getOperand(1);
630  Addend0 = AddOp.getOperand(1);
631  Addend1 = OtherOp;
632  return true;
633  }
634  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
635  // add(add(a,mul(x,y)),b)
636  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
637  return false;
638  Mul0 = AddOp.getOperand(1).getOperand(0);
639  Mul1 = AddOp.getOperand(1).getOperand(1);
640  Addend0 = AddOp.getOperand(0);
641  Addend1 = OtherOp;
642  return true;
643  }
644  return false;
645 }
646 
647 SDValue XCoreTargetLowering::
648 TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
649 {
650  SDValue Mul;
651  SDValue Other;
652  if (N->getOperand(0).getOpcode() == ISD::MUL) {
653  Mul = N->getOperand(0);
654  Other = N->getOperand(1);
655  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
656  Mul = N->getOperand(1);
657  Other = N->getOperand(0);
658  } else {
659  return SDValue();
660  }
661  SDLoc dl(N);
662  SDValue LL, RL, AddendL, AddendH;
663  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
664  Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
665  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
666  Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
667  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
668  Other, DAG.getConstant(0, dl, MVT::i32));
669  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
670  Other, DAG.getConstant(1, dl, MVT::i32));
671  APInt HighMask = APInt::getHighBitsSet(64, 32);
672  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
673  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
674  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
675  DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
676  // The inputs are both zero-extended.
677  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
678  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
679  AddendL, LL, RL);
680  SDValue Lo(Hi.getNode(), 1);
681  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
682  }
683  if (LHSSB > 32 && RHSSB > 32) {
684  // The inputs are both sign-extended.
685  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
686  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
687  AddendL, LL, RL);
688  SDValue Lo(Hi.getNode(), 1);
689  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
690  }
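 // Otherwise fall back to the general case: multiply-accumulate the low halves
 // with MACCU, then add the two 32x32 cross products (LL*RH and LH*RL) into
 // the high word of the result.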
691  SDValue LH, RH;
692  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
693  Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
694  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
695  Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
696  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
697  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
698  AddendL, LL, RL);
699  SDValue Lo(Hi.getNode(), 1);
700  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
701  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
702  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
703  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
704  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
705 }
706 
707 SDValue XCoreTargetLowering::
708 ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
709 {
710  assert(N->getValueType(0) == MVT::i64 &&
711  (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
712  "Unknown operand to lower!");
713 
714  if (N->getOpcode() == ISD::ADD)
715  if (SDValue Result = TryExpandADDWithMul(N, DAG))
716  return Result;
717 
718  SDLoc dl(N);
719 
720  // Extract components
721  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
722  N->getOperand(0),
723  DAG.getConstant(0, dl, MVT::i32));
724  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
725  N->getOperand(0),
726  DAG.getConstant(1, dl, MVT::i32));
727  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
728  N->getOperand(1),
729  DAG.getConstant(0, dl, MVT::i32));
730  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
731  N->getOperand(1),
732  DAG.getConstant(1, dl, MVT::i32));
733 
734  // Expand
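 // The 64-bit add/sub is performed as two 32-bit LADD/LSUB operations: the low
 // words are combined first and the carry (or borrow) output feeds the
 // high-word operation.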
735  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
736  XCoreISD::LSUB;
737  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
738  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
739  LHSL, RHSL, Zero);
740  SDValue Carry(Lo.getNode(), 1);
741 
742  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
743  LHSH, RHSH, Carry);
744  SDValue Ignored(Hi.getNode(), 1);
745  // Merge the pieces
746  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
747 }
748 
749 SDValue XCoreTargetLowering::
750 LowerVAARG(SDValue Op, SelectionDAG &DAG) const
751 {
752  // Whilst llvm does not support aggregate varargs we can ignore
753  // the possibility of the ValueType being an implicit byVal vararg.
754  SDNode *Node = Op.getNode();
755  EVT VT = Node->getValueType(0); // not an aggregate
756  SDValue InChain = Node->getOperand(0);
757  SDValue VAListPtr = Node->getOperand(1);
758  EVT PtrVT = VAListPtr.getValueType();
759  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
760  SDLoc dl(Node);
761  SDValue VAList =
762  DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
763  // Increment the pointer, VAList, to the next vararg
764  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
765  DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
766  dl));
767  // Store the incremented VAList to the legalized pointer
768  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
769  MachinePointerInfo(SV));
770  // Load the actual argument out of the pointer VAList
771  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
772 }
773 
774 SDValue XCoreTargetLowering::
775 LowerVASTART(SDValue Op, SelectionDAG &DAG) const
776 {
777  SDLoc dl(Op);
778  // vastart stores the address of the VarArgsFrameIndex slot into the
779  // memory location argument
780  MachineFunction &MF = DAG.getMachineFunction();
781  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
782  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
783  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
784  MachinePointerInfo());
785 }
786 
787 SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
788  SelectionDAG &DAG) const {
789  // This node represents llvm.frameaddress on the DAG.
790  // It takes one operand, the index of the frame address to return.
791  // An index of zero corresponds to the current function's frame address.
792  // An index of one to the parent's frame address, and so on.
793  // Depths > 0 not supported yet!
794  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
795  return SDValue();
796 
797  MachineFunction &MF = DAG.getMachineFunction();
798  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
799  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
800  RegInfo->getFrameRegister(MF), MVT::i32);
801 }
802 
803 SDValue XCoreTargetLowering::
804 LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
805  // This node represents llvm.returnaddress on the DAG.
806  // It takes one operand, the index of the return address to return.
807  // An index of zero corresponds to the current function's return address.
808  // An index of one to the parent's return address, and so on.
809  // Depths > 0 not supported yet!
810  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
811  return SDValue();
812 
813  MachineFunction &MF = DAG.getMachineFunction();
814  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
815  int FI = XFI->createLRSpillSlot(MF);
816  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
817  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
818  DAG.getEntryNode(), FIN,
819  MachinePointerInfo::getFixedStack(MF, FI));
820 }
821 
822 SDValue XCoreTargetLowering::
823 LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
824  // This node represents offset from frame pointer to first on-stack argument.
825  // This is needed for correct stack adjustment during unwind.
826  // However, we don't know the offset until after the frame has been finalised.
827  // This is done during the XCoreFTAOElim pass.
828  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
829 }
830 
831 SDValue XCoreTargetLowering::
832 LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
833  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
834  // This node represents the 'eh_return' GCC DWARF builtin, which is used to
835  // return from an exception. The general meaning is: adjust the stack by OFFSET and
836  // pass execution to HANDLER.
837  MachineFunction &MF = DAG.getMachineFunction();
838  SDValue Chain = Op.getOperand(0);
839  SDValue Offset = Op.getOperand(1);
840  SDValue Handler = Op.getOperand(2);
841  SDLoc dl(Op);
842 
843  // Absolute SP = (FP + FrameToArgs) + Offset
844  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
845  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
846  RegInfo->getFrameRegister(MF), MVT::i32);
847  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
848  MVT::i32);
849  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
850  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);
851 
852  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
853  // which leaves 2 caller-saved registers, R2 & R3, for us to use.
854  unsigned StackReg = XCore::R2;
855  unsigned HandlerReg = XCore::R3;
856 
857  SDValue OutChains[] = {
858  DAG.getCopyToReg(Chain, dl, StackReg, Stack),
859  DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
860  };
861 
862  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
863 
864  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
865  DAG.getRegister(StackReg, MVT::i32),
866  DAG.getRegister(HandlerReg, MVT::i32));
867 
868 }
869 
870 SDValue XCoreTargetLowering::
871 LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
872  return Op.getOperand(0);
873 }
874 
875 SDValue XCoreTargetLowering::
876 LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
877  SDValue Chain = Op.getOperand(0);
878  SDValue Trmp = Op.getOperand(1); // trampoline
879  SDValue FPtr = Op.getOperand(2); // nested function
880  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
881 
882  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
883 
884  // .align 4
885  // LDAPF_u10 r11, nest
886  // LDW_2rus r11, r11[0]
887  // STWSP_ru6 r11, sp[0]
888  // LDAPF_u10 r11, fptr
889  // LDW_2rus r11, r11[0]
890  // BAU_1r r11
891  // nest:
892  // .word nest
893  // fptr:
894  // .word fptr
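 // The three 32-bit constants stored below are assumed to encode the
 // instruction sequence above, two 16-bit instructions per word; the nest and
 // fptr words are then written at offsets 12 and 16.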
895  SDValue OutChains[5];
896 
897  SDValue Addr = Trmp;
898 
899  SDLoc dl(Op);
900  OutChains[0] =
901  DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
902  MachinePointerInfo(TrmpAddr));
903 
904  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
905  DAG.getConstant(4, dl, MVT::i32));
906  OutChains[1] =
907  DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
908  MachinePointerInfo(TrmpAddr, 4));
909 
910  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
911  DAG.getConstant(8, dl, MVT::i32));
912  OutChains[2] =
913  DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
914  MachinePointerInfo(TrmpAddr, 8));
915 
916  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
917  DAG.getConstant(12, dl, MVT::i32));
918  OutChains[3] =
919  DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));
920 
921  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
922  DAG.getConstant(16, dl, MVT::i32));
923  OutChains[4] =
924  DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));
925 
926  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
927 }
928 
929 SDValue XCoreTargetLowering::
930 LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
931  SDLoc DL(Op);
932  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
933  switch (IntNo) {
934  case Intrinsic::xcore_crc8:
935  EVT VT = Op.getValueType();
936  SDValue Data =
937  DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
938  Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
939  SDValue Crc(Data.getNode(), 1);
940  SDValue Results[] = { Crc, Data };
941  return DAG.getMergeValues(Results, DL);
942  }
943  return SDValue();
944 }
945 
946 SDValue XCoreTargetLowering::
947 LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
948  SDLoc DL(Op);
949  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
950 }
951 
952 SDValue XCoreTargetLowering::
953 LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
954  AtomicSDNode *N = cast<AtomicSDNode>(Op);
955  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
956  assert((N->getOrdering() == AtomicOrdering::Unordered ||
957  N->getOrdering() == AtomicOrdering::Monotonic) &&
958  "setInsertFencesForAtomic(true) expects unordered / monotonic");
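 // Because fences are inserted around atomic operations (see the comment in
 // the constructor), a monotonic or unordered atomic load can be lowered to a
 // plain load of the same width (an extending load for i8/i16), provided it is
 // naturally aligned.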
959  if (N->getMemoryVT() == MVT::i32) {
960  if (N->getAlignment() < 4)
961  report_fatal_error("atomic load must be aligned");
962  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
963  N->getChain(), N->getBasePtr(), N->getPointerInfo(),
964  N->getAlignment(), N->getMemOperand()->getFlags(),
965  N->getAAInfo(), N->getRanges());
966  }
967  if (N->getMemoryVT() == MVT::i16) {
968  if (N->getAlignment() < 2)
969  report_fatal_error("atomic load must be aligned");
970  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
971  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
972  N->getAlignment(), N->getMemOperand()->getFlags(),
973  N->getAAInfo());
974  }
975  if (N->getMemoryVT() == MVT::i8)
976  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
977  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
978  N->getAlignment(), N->getMemOperand()->getFlags(),
979  N->getAAInfo());
980  return SDValue();
981 }
982 
983 SDValue XCoreTargetLowering::
984 LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
985  AtomicSDNode *N = cast<AtomicSDNode>(Op);
986  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
987  assert((N->getOrdering() == AtomicOrdering::Unordered ||
988  N->getOrdering() == AtomicOrdering::Monotonic) &&
989  "setInsertFencesForAtomic(true) expects unordered / monotonic");
990  if (N->getMemoryVT() == MVT::i32) {
991  if (N->getAlignment() < 4)
992  report_fatal_error("atomic store must be aligned");
993  return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
994  N->getPointerInfo(), N->getAlignment(),
995  N->getMemOperand()->getFlags(), N->getAAInfo());
996  }
997  if (N->getMemoryVT() == MVT::i16) {
998  if (N->getAlignment() < 2)
999  report_fatal_error("atomic store must be aligned");
1000  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
1001  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
1002  N->getAlignment(), N->getMemOperand()->getFlags(),
1003  N->getAAInfo());
1004  }
1005  if (N->getMemoryVT() == MVT::i8)
1006  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
1007  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
1008  N->getAlignment(), N->getMemOperand()->getFlags(),
1009  N->getAAInfo());
1010  return SDValue();
1011 }
1012 
1013 //===----------------------------------------------------------------------===//
1014 // Calling Convention Implementation
1015 //===----------------------------------------------------------------------===//
1016 
1017 #include "XCoreGenCallingConv.inc"
1018 
1019 //===----------------------------------------------------------------------===//
1020 // Call Calling Convention Implementation
1021 //===----------------------------------------------------------------------===//
1022 
1023 /// XCore call implementation
1024 SDValue
1025 XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1026  SmallVectorImpl<SDValue> &InVals) const {
1027  SelectionDAG &DAG = CLI.DAG;
1028  SDLoc &dl = CLI.DL;
1029  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1030  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1031  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1032  SDValue Chain = CLI.Chain;
1033  SDValue Callee = CLI.Callee;
1034  bool &isTailCall = CLI.IsTailCall;
1035  CallingConv::ID CallConv = CLI.CallConv;
1036  bool isVarArg = CLI.IsVarArg;
1037 
1038  // XCore target does not yet support tail call optimization.
1039  isTailCall = false;
1040 
1041  // For now, only CallingConv::C implemented
1042  switch (CallConv)
1043  {
1044  default:
1045  report_fatal_error("Unsupported calling convention");
1046  case CallingConv::Fast:
1047  case CallingConv::C:
1048  return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
1049  Outs, OutVals, Ins, dl, DAG, InVals);
1050  }
1051 }
1052 
1053 /// LowerCallResult - Lower the result values of a call into the
1054 /// appropriate copies out of appropriate physical registers / memory locations.
1055 static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
1056  const SmallVectorImpl<CCValAssign> &RVLocs,
1057  const SDLoc &dl, SelectionDAG &DAG,
1058  SmallVectorImpl<SDValue> &InVals) {
1059  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
1060  // Copy results out of physical registers.
1061  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1062  const CCValAssign &VA = RVLocs[i];
1063  if (VA.isRegLoc()) {
1064  Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
1065  InFlag).getValue(1);
1066  InFlag = Chain.getValue(2);
1067  InVals.push_back(Chain.getValue(0));
1068  } else {
1069  assert(VA.isMemLoc());
1070  ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
1071  InVals.size()));
1072  // Reserve space for this result.
1073  InVals.push_back(SDValue());
1074  }
1075  }
1076 
1077  // Copy results out of memory.
1078  SmallVector<SDValue, 4> MemOpChains;
1079  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
1080  int offset = ResultMemLocs[i].first;
1081  unsigned index = ResultMemLocs[i].second;
1082  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
1083  SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
1084  SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
1085  InVals[index] = load;
1086  MemOpChains.push_back(load.getValue(1));
1087  }
1088 
1089  // Transform all load nodes into one single node because
1090  // all load nodes are independent of each other.
1091  if (!MemOpChains.empty())
1092  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1093 
1094  return Chain;
1095 }
1096 
1097 /// LowerCCCCallTo - function arguments are copied from virtual
1098 /// regs to (physical regs)/(stack frame); CALLSEQ_START and
1099 /// CALLSEQ_END are emitted.
1100 /// TODO: isTailCall, sret.
1101 SDValue XCoreTargetLowering::LowerCCCCallTo(
1102  SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
1103  bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
1104  const SmallVectorImpl<SDValue> &OutVals,
1105  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1106  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1107 
1108  // Analyze operands of the call, assigning locations to each operand.
1109  SmallVector<CCValAssign, 16> ArgLocs;
1110  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1111  *DAG.getContext());
1112 
1113  // The ABI dictates there should be one stack slot available to the callee
1114  // on function entry (for saving lr).
1115  CCInfo.AllocateStack(4, 4);
1116 
1117  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
1118 
1119  SmallVector<CCValAssign, 16> RVLocs;
1120  // Analyze return values to determine the number of bytes of stack required.
1121  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1122  *DAG.getContext());
1123  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
1124  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
1125 
1126  // Get a count of how many bytes are to be pushed on the stack.
1127  unsigned NumBytes = RetCCInfo.getNextStackOffset();
1128  auto PtrVT = getPointerTy(DAG.getDataLayout());
1129 
1130  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
1131 
1132  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
1133  SmallVector<SDValue, 12> MemOpChains;
1134 
1135  // Walk the register/memloc assignments, inserting copies/loads.
1136  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1137  CCValAssign &VA = ArgLocs[i];
1138  SDValue Arg = OutVals[i];
1139 
1140  // Promote the value if needed.
1141  switch (VA.getLocInfo()) {
1142  default: llvm_unreachable("Unknown loc info!");
1143  case CCValAssign::Full: break;
1144  case CCValAssign::SExt:
1145  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1146  break;
1147  case CCValAssign::ZExt:
1148  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1149  break;
1150  case CCValAssign::AExt:
1151  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1152  break;
1153  }
1154 
1155  // Arguments that can be passed in a register are kept in the
1156  // RegsToPass vector.
1157  if (VA.isRegLoc()) {
1158  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1159  } else {
1160  assert(VA.isMemLoc());
1161 
1162  int Offset = VA.getLocMemOffset();
1163 
1164  MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
1165  Chain, Arg,
1166  DAG.getConstant(Offset/4, dl,
1167  MVT::i32)));
1168  }
1169  }
1170 
1171  // Transform all store nodes into one single node because
1172  // all store nodes are independent of each other.
1173  if (!MemOpChains.empty())
1174  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1175 
1176  // Build a sequence of copy-to-reg nodes chained together with token
1177  // chain and flag operands which copy the outgoing args into registers.
1178  // The InFlag is necessary since all emitted instructions must be
1179  // stuck together.
1180  SDValue InFlag;
1181  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1182  Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1183  RegsToPass[i].second, InFlag);
1184  InFlag = Chain.getValue(1);
1185  }
1186 
1187  // If the callee is a GlobalAddress node (quite common, every direct call is)
1188  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1189  // Likewise ExternalSymbol -> TargetExternalSymbol.
1190  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1191  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
1192  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1193  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1194 
1195  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
1196  // = Chain, Callee, Reg#1, Reg#2, ...
1197  //
1198  // Returns a chain & a flag for retval copy to use.
1199  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1200  SmallVector<SDValue, 14> Ops;
1201  Ops.push_back(Chain);
1202  Ops.push_back(Callee);
1203 
1204  // Add argument registers to the end of the list so that they are
1205  // known live into the call.
1206  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1207  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1208  RegsToPass[i].second.getValueType()));
1209 
1210  if (InFlag.getNode())
1211  Ops.push_back(InFlag);
1212 
1213  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
1214  InFlag = Chain.getValue(1);
1215 
1216  // Create the CALLSEQ_END node.
1217  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
1218  DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
1219  InFlag = Chain.getValue(1);
1220 
1221  // Handle result values, copying them out of physregs into vregs that we
1222  // return.
1223  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
1224 }
1225 
1226 //===----------------------------------------------------------------------===//
1227 // Formal Arguments Calling Convention Implementation
1228 //===----------------------------------------------------------------------===//
1229 
1230 namespace {
1231  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
1232 }
1233 
1234 /// XCore formal arguments implementation
1235 SDValue XCoreTargetLowering::LowerFormalArguments(
1236  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1237  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1238  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1239  switch (CallConv)
1240  {
1241  default:
1242  report_fatal_error("Unsupported calling convention");
1243  case CallingConv::C:
1244  case CallingConv::Fast:
1245  return LowerCCCArguments(Chain, CallConv, isVarArg,
1246  Ins, dl, DAG, InVals);
1247  }
1248 }
1249 
1250 /// LowerCCCArguments - transform physical registers into
1251 /// virtual registers and generate load operations for
1252 /// arguments placed on the stack.
1253 /// TODO: sret
1254 SDValue XCoreTargetLowering::LowerCCCArguments(
1255  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1256  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1257  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1258  MachineFunction &MF = DAG.getMachineFunction();
1259  MachineFrameInfo &MFI = MF.getFrameInfo();
1260  MachineRegisterInfo &RegInfo = MF.getRegInfo();
1261  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
1262 
1263  // Assign locations to all of the incoming arguments.
1264  SmallVector<CCValAssign, 16> ArgLocs;
1265  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1266  *DAG.getContext());
1267 
1268  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
1269 
1270  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
1271 
1272  unsigned LRSaveSize = StackSlotSize;
1273 
1274  if (!isVarArg)
1275  XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);
1276 
1277  // All getCopyFromReg ops must precede any getMemcpys to prevent the
1278  // scheduler clobbering a register before it has been copied.
1279  // The stages are:
1280  // 1. CopyFromReg (and load) arg & vararg registers.
1281  // 2. Chain CopyFromReg nodes into a TokenFactor.
1282  // 3. Memcpy 'byVal' args & push final InVals.
1283  // 4. Chain mem ops nodes into a TokenFactor.
1284  SmallVector<SDValue, 4> CFRegNode;
1285  SmallVector<ArgDataPair, 4> ArgData;
1286  SmallVector<SDValue, 4> MemOps;
1287 
1288  // 1a. CopyFromReg (and load) arg registers.
1289  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1290 
1291  CCValAssign &VA = ArgLocs[i];
1292  SDValue ArgIn;
1293 
1294  if (VA.isRegLoc()) {
1295  // Arguments passed in registers
1296  EVT RegVT = VA.getLocVT();
1297  switch (RegVT.getSimpleVT().SimpleTy) {
1298  default:
1299  {
1300 #ifndef NDEBUG
1301  errs() << "LowerFormalArguments Unhandled argument type: "
1302  << RegVT.getEVTString() << "\n";
1303 #endif
1304  llvm_unreachable(nullptr);
1305  }
1306  case MVT::i32:
1307  unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1308  RegInfo.addLiveIn(VA.getLocReg(), VReg);
1309  ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
1310  CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
1311  }
1312  } else {
1313  // sanity check
1314  assert(VA.isMemLoc());
1315  // Load the argument to a virtual register
1316  unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
1317  if (ObjSize > StackSlotSize) {
1318  errs() << "LowerFormalArguments Unhandled argument type: "
1319  << EVT(VA.getLocVT()).getEVTString()
1320  << "\n";
1321  }
1322  // Create the frame index object for this incoming parameter...
1323  int FI = MFI.CreateFixedObject(ObjSize,
1324  LRSaveSize + VA.getLocMemOffset(),
1325  true);
1326 
1327  // Create the SelectionDAG nodes corresponding to a load
1328  // from this parameter.
1329  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1330  ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
1331  MachinePointerInfo::getFixedStack(MF, FI));
1332  }
1333  const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
1334  ArgData.push_back(ADP);
1335  }
1336 
1337  // 1b. CopyFromReg vararg registers.
1338  if (isVarArg) {
1339  // Argument registers
1340  static const MCPhysReg ArgRegs[] = {
1341  XCore::R0, XCore::R1, XCore::R2, XCore::R3
1342  };
1343  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
1344  unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
1345  if (FirstVAReg < array_lengthof(ArgRegs)) {
1346  int offset = 0;
1347  // Save remaining registers, storing higher register numbers at a higher
1348  // address
1349  for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
1350  // Create a stack slot
1351  int FI = MFI.CreateFixedObject(4, offset, true);
1352  if (i == (int)FirstVAReg) {
1353  XFI->setVarArgsFrameIndex(FI);
1354  }
1355  offset -= StackSlotSize;
1356  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1357  // Move argument from phys reg -> virt reg
1358  unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1359  RegInfo.addLiveIn(ArgRegs[i], VReg);
1360  SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1361  CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
1362  // Move argument from virt reg -> stack
1363  SDValue Store =
1364  DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
1365  MemOps.push_back(Store);
1366  }
1367  } else {
1368  // This will point to the next argument passed via stack.
1369  XFI->setVarArgsFrameIndex(
1370  MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
1371  true));
1372  }
1373  }
1374 
1375  // 2. chain CopyFromReg nodes into a TokenFactor.
1376  if (!CFRegNode.empty())
1377  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);
1378 
1379  // 3. Memcpy 'byVal' args & push final InVals.
1380  // Aggregates passed "byVal" need to be copied by the callee.
1381  // The callee will use a pointer to this copy, rather than the original
1382  // pointer.
1383  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
1384  ArgDE = ArgData.end();
1385  ArgDI != ArgDE; ++ArgDI) {
1386  if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
1387  unsigned Size = ArgDI->Flags.getByValSize();
1388  unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
1389  // Create a new object on the stack and copy the pointee into it.
1390  int FI = MFI.CreateStackObject(Size, Align, false);
1391  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1392  InVals.push_back(FIN);
1393  MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
1394  DAG.getConstant(Size, dl, MVT::i32),
1395  Align, false, false, false,
1396  MachinePointerInfo(),
1397  MachinePointerInfo()));
1398  } else {
1399  InVals.push_back(ArgDI->SDV);
1400  }
1401  }
1402 
1403  // 4. Chain mem ops nodes into a TokenFactor.
1404  if (!MemOps.empty()) {
1405  MemOps.push_back(Chain);
1406  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
1407  }
1408 
1409  return Chain;
1410 }
1411 
1412 //===----------------------------------------------------------------------===//
1413 // Return Value Calling Convention Implementation
1414 //===----------------------------------------------------------------------===//
1415 
1416 bool XCoreTargetLowering::
1417 CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1418  bool isVarArg,
1419  const SmallVectorImpl<ISD::OutputArg> &Outs,
1420  LLVMContext &Context) const {
1421  SmallVector<CCValAssign, 16> RVLocs;
1422  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1423  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
1424  return false;
1425  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
1426  return false;
1427  return true;
1428 }
1429 
1430 SDValue
1431 XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1432  bool isVarArg,
1433  const SmallVectorImpl<ISD::OutputArg> &Outs,
1434  const SmallVectorImpl<SDValue> &OutVals,
1435  const SDLoc &dl, SelectionDAG &DAG) const {
1436 
1437  XCoreFunctionInfo *XFI =
1438  DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
1439  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
1440 
1441  // CCValAssign - represent the assignment of
1442  // the return value to a location
1443  SmallVector<CCValAssign, 16> RVLocs;
1444 
1445  // CCState - Info about the registers and stack slot.
1446  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1447  *DAG.getContext());
1448 
1449  // Analyze return values.
1450  if (!isVarArg)
1451  CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);
1452 
1453  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
1454 
1455  SDValue Flag;
1456  SmallVector<SDValue, 4> RetOps(1, Chain);
1457 
1458  // Return on XCore is always a "retsp 0"
1459  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));
1460 
1461  SmallVector<SDValue, 4> MemOpChains;
1462  // Handle return values that must be copied to memory.
1463  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1464  CCValAssign &VA = RVLocs[i];
1465  if (VA.isRegLoc())
1466  continue;
1467  assert(VA.isMemLoc());
1468  if (isVarArg) {
1469  report_fatal_error("Can't return value from vararg function in memory");
1470  }
1471 
1472  int Offset = VA.getLocMemOffset();
1473  unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
1474  // Create the frame index object for the memory location.
1475  int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
1476 
1477  // Create a SelectionDAG node corresponding to a store
1478  // to this memory location.
1479  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1480  MemOpChains.push_back(DAG.getStore(
1481  Chain, dl, OutVals[i], FIN,
1482  MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
1483  }
1484 
1485  // Transform all store nodes into one single node because
1486  // all stores are independent of each other.
1487  if (!MemOpChains.empty())
1488  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1489 
1490  // Now handle return values copied to registers.
1491  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1492  CCValAssign &VA = RVLocs[i];
1493  if (!VA.isRegLoc())
1494  continue;
1495  // Copy the result values into the output registers.
1496  Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
1497 
1498  // guarantee that all emitted copies are
1499  // stuck together, avoiding something bad
1500  Flag = Chain.getValue(1);
1501  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1502  }
1503 
1504  RetOps[0] = Chain; // Update chain.
1505 
1506  // Add the flag if we have it.
1507  if (Flag.getNode())
1508  RetOps.push_back(Flag);
1509 
1510  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
1511 }
1512 
1513 //===----------------------------------------------------------------------===//
1514 // Other Lowering Code
1515 //===----------------------------------------------------------------------===//
1516 
1517 MachineBasicBlock *
1518 XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1519  MachineBasicBlock *BB) const {
1520  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1521  DebugLoc dl = MI.getDebugLoc();
1522  assert((MI.getOpcode() == XCore::SELECT_CC) &&
1523  "Unexpected instr type to insert");
1524 
1525  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1526  // control-flow pattern. The incoming instruction knows the destination vreg
1527  // to set, the condition code register to branch on, the true/false values to
1528  // select between, and a branch opcode to use.
1529  const BasicBlock *LLVM_BB = BB->getBasicBlock();
1530  MachineFunction::iterator It = ++BB->getIterator();
1531 
1532  // thisMBB:
1533  // ...
1534  // TrueVal = ...
1535  // cmpTY ccX, r1, r2
1536  // bCC copy1MBB
1537  // fallthrough --> copy0MBB
1538  MachineBasicBlock *thisMBB = BB;
1539  MachineFunction *F = BB->getParent();
1540  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1541  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
1542  F->insert(It, copy0MBB);
1543  F->insert(It, sinkMBB);
1544 
1545  // Transfer the remainder of BB and its successor edges to sinkMBB.
1546  sinkMBB->splice(sinkMBB->begin(), BB,
1547  std::next(MachineBasicBlock::iterator(MI)), BB->end());
1548  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
1549 
1550  // Next, add the true and fallthrough blocks as its successors.
1551  BB->addSuccessor(copy0MBB);
1552  BB->addSuccessor(sinkMBB);
1553 
1554  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
1555  .addReg(MI.getOperand(1).getReg())
1556  .addMBB(sinkMBB);
1557 
1558  // copy0MBB:
1559  // %FalseValue = ...
1560  // # fallthrough to sinkMBB
1561  BB = copy0MBB;
1562 
1563  // Update machine-CFG edges
1564  BB->addSuccessor(sinkMBB);
1565 
1566  // sinkMBB:
1567  // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1568  // ...
1569  BB = sinkMBB;
1570  BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
1571  .addReg(MI.getOperand(3).getReg())
1572  .addMBB(copy0MBB)
1573  .addReg(MI.getOperand(2).getReg())
1574  .addMBB(thisMBB);
1575 
1576  MI.eraseFromParent(); // The pseudo instruction is gone now.
1577  return BB;
1578 }
1579 
1580 //===----------------------------------------------------------------------===//
1581 // Target Optimization Hooks
1582 //===----------------------------------------------------------------------===//
1583 
1584 SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
1585  DAGCombinerInfo &DCI) const {
1586  SelectionDAG &DAG = DCI.DAG;
1587  SDLoc dl(N);
1588  switch (N->getOpcode()) {
1589  default: break;
1590  case ISD::INTRINSIC_VOID:
1591  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
1592  case Intrinsic::xcore_outt:
1593  case Intrinsic::xcore_outct:
1594  case Intrinsic::xcore_chkct: {
1595  SDValue OutVal = N->getOperand(3);
1596  // These instructions ignore the high bits.
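 // Only the low 8 bits are demanded, so constants feeding the operand can be
 // shrunk and computation of the unused high bits dropped.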
1597  if (OutVal.hasOneUse()) {
1598  unsigned BitWidth = OutVal.getValueSizeInBits();
1599  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
1600  KnownBits Known;
1601  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1602  !DCI.isBeforeLegalizeOps());
1603  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1604  if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
1605  TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
1606  DCI.CommitTargetLoweringOpt(TLO);
1607  }
1608  break;
1609  }
1610  case Intrinsic::xcore_setpt: {
1611  SDValue Time = N->getOperand(3);
1612  // This instruction ignores the high bits.
1613  if (Time.hasOneUse()) {
1614  unsigned BitWidth = Time.getValueSizeInBits();
1615  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
1616  KnownBits Known;
1617  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1618  !DCI.isBeforeLegalizeOps());
1619  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1620  if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
1621  TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
1622  DCI.CommitTargetLoweringOpt(TLO);
1623  }
1624  break;
1625  }
1626  }
1627  break;
1628  case XCoreISD::LADD: {
1629  SDValue N0 = N->getOperand(0);
1630  SDValue N1 = N->getOperand(1);
1631  SDValue N2 = N->getOperand(2);
1632  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1633  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1634  EVT VT = N0.getValueType();
1635 
1636  // canonicalize constant to RHS
1637  if (N0C && !N1C)
1638  return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);
1639 
1640  // fold (ladd 0, 0, x) -> 0, x & 1
1641  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1642  SDValue Carry = DAG.getConstant(0, dl, VT);
1643  SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
1644  DAG.getConstant(1, dl, VT));
1645  SDValue Ops[] = { Result, Carry };
1646  return DAG.getMergeValues(Ops, dl);
1647  }
1648 
1649  // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
1650  // low bit set
1651  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1652  KnownBits Known;
1653  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1654  VT.getSizeInBits() - 1);
1655  DAG.computeKnownBits(N2, Known);
1656  if ((Known.Zero & Mask) == Mask) {
1657  SDValue Carry = DAG.getConstant(0, dl, VT);
1658  SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
1659  SDValue Ops[] = { Result, Carry };
1660  return DAG.getMergeValues(Ops, dl);
1661  }
1662  }
1663  }
1664  break;
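// [Editorial note, not in the original source] Worked example for the LADD
// folds above, assuming long-add semantics {carry, sum} = a + b + (cin & 1)
// on i32 operands:
//   ladd(0, 0, x):  sum = x & 1, carry-out = 0 (e.g. x = 5 gives {0, 1})
//   ladd(x, 0, y) with y known to be 0 or 1 and the carry-out unused:
//     sum = x + y, so a plain ISD::ADD suffices (e.g. x = 0xFFFFFFFF, y = 1
//     wraps to 0; the carry that is lost has no users).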
1665  case XCoreISD::LSUB: {
1666  SDValue N0 = N->getOperand(0);
1667  SDValue N1 = N->getOperand(1);
1668  SDValue N2 = N->getOperand(2);
1669  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1670  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1671  EVT VT = N0.getValueType();
1672 
1673  // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
1674  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1675  KnownBits Known;
1676  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1677  VT.getSizeInBits() - 1);
1678  DAG.computeKnownBits(N2, Known);
1679  if ((Known.Zero & Mask) == Mask) {
1680  SDValue Borrow = N2;
1681  SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
1682  DAG.getConstant(0, dl, VT), N2);
1683  SDValue Ops[] = { Result, Borrow };
1684  return DAG.getMergeValues(Ops, dl);
1685  }
1686  }
1687 
1688  // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
1689  // low bit set
1690  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1691  KnownBits Known;
1692  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1693  VT.getSizeInBits() - 1);
1694  DAG.computeKnownBits(N2, Known);
1695  if ((Known.Zero & Mask) == Mask) {
1696  SDValue Borrow = DAG.getConstant(0, dl, VT);
1697  SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
1698  SDValue Ops[] = { Result, Borrow };
1699  return DAG.getMergeValues(Ops, dl);
1700  }
1701  }
1702  }
1703  break;
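// [Editorial note, not in the original source] Worked example for the LSUB
// folds above, assuming {borrow, diff} = a - b - (bin & 1) on i32 operands:
//   lsub(0, 0, x) with x known to be 0 or 1:  diff = -x, borrow-out = x
//     (x = 1 gives {1, 0xFFFFFFFF}; x = 0 gives {0, 0})
//   lsub(x, 0, y) with y known to be 0 or 1 and the borrow-out unused:
//     diff = x - y, so a plain ISD::SUB suffices.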
1704  case XCoreISD::LMUL: {
1705  SDValue N0 = N->getOperand(0);
1706  SDValue N1 = N->getOperand(1);
1707  SDValue N2 = N->getOperand(2);
1708  SDValue N3 = N->getOperand(3);
1709  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1710  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1711  EVT VT = N0.getValueType();
1712  // Canonicalize multiplicative constant to RHS. If both multiplicative
1713  // operands are constant canonicalize smallest to RHS.
1714  if ((N0C && !N1C) ||
1715  (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
1716  return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
1717  N1, N0, N2, N3);
1718 
1719  // lmul(x, 0, a, b)
1720  if (N1C && N1C->isNullValue()) {
1721  // If the high result is unused fold to add(a, b)
1722  if (N->hasNUsesOfValue(0, 0)) {
1723  SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
1724  SDValue Ops[] = { Lo, Lo };
1725  return DAG.getMergeValues(Ops, dl);
1726  }
1727  // Otherwise fold to ladd(a, b, 0)
1728  SDValue Result =
1729  DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
1730  SDValue Carry(Result.getNode(), 1);
1731  SDValue Ops[] = { Carry, Result };
1732  return DAG.getMergeValues(Ops, dl);
1733  }
1734  }
1735  break;
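// [Editorial note, not in the original source] For the LMUL fold above,
// assuming {hi, lo} = x*y + a + b with all i32 operands zero-extended:
// when y == 0 the value is just a + b, which fits in 33 bits. If the high
// word is unused, a plain ISD::ADD yields the low word; otherwise
// ladd(a, b, 0) produces both the low word and the carry (the 33rd bit),
// which is exactly what the replacement node built above returns.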
1736  case ISD::ADD: {
1737  // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
1738  // lmul(x, y, a, b). The high result of lmul will be ignored.
1739  // This is only profitable if the intermediate results are unused
1740  // elsewhere.
1741  SDValue Mul0, Mul1, Addend0, Addend1;
1742  if (N->getValueType(0) == MVT::i32 &&
1743  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
1744  SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
1745  DAG.getVTList(MVT::i32, MVT::i32), Mul0,
1746  Mul1, Addend0, Addend1);
1747  SDValue Result(Ignored.getNode(), 1);
1748  return Result;
1749  }
1750  APInt HighMask = APInt::getHighBitsSet(64, 32);
1751  // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
1752  // lmul(x, y, a, b) if all operands are zero-extended. We do this
1753  // before type legalization as it is messy to match the operands after
1754  // that.
1755  if (N->getValueType(0) == MVT::i64 &&
1756  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
1757  DAG.MaskedValueIsZero(Mul0, HighMask) &&
1758  DAG.MaskedValueIsZero(Mul1, HighMask) &&
1759  DAG.MaskedValueIsZero(Addend0, HighMask) &&
1760  DAG.MaskedValueIsZero(Addend1, HighMask)) {
1761  SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1762  Mul0, DAG.getConstant(0, dl, MVT::i32));
1763  SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1764  Mul1, DAG.getConstant(0, dl, MVT::i32));
1765  SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1766  Addend0, DAG.getConstant(0, dl, MVT::i32));
1767  SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1768  Addend1, DAG.getConstant(0, dl, MVT::i32));
1769  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
1770  DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
1771  Addend0L, Addend1L);
1772  SDValue Lo(Hi.getNode(), 1);
1773  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
1774  }
1775  }
1776  break;
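// [Editorial note, not in the original source] Why the ADD combine above is
// sound: on i32, the low word of lmul(x, y, a, b) is (x*y + a + b) mod 2^32,
// i.e. exactly add(add(mul(x, y), a), b), so the high word can be dropped.
// On i64, if x, y, a and b all fit in 32 bits, then x*y + a + b is at most
// (2^32-1)^2 + 2*(2^32-1) = 2^64 - 1, so one lmul of the low halves,
// reassembled with BUILD_PAIR, reproduces the full 64-bit sum.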
1777  case ISD::STORE: {
1778  // Replace unaligned store of unaligned load with memmove.
1779  StoreSDNode *ST = cast<StoreSDNode>(N);
1780  if (!DCI.isBeforeLegalize() ||
1781  allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
1782  ST->getAddressSpace(),
1783  ST->getAlignment()) ||
1784  ST->isVolatile() || ST->isIndexed()) {
1785  break;
1786  }
1787  SDValue Chain = ST->getChain();
1788 
1789  unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1790  assert((StoreBits % 8) == 0 &&
1791  "Store size in bits must be a multiple of 8");
1792  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
1793  ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
1794  unsigned Alignment = ST->getAlignment();
1795  if (Alignment >= ABIAlignment) {
1796  break;
1797  }
1798 
1799  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
1800  if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1801  LD->getAlignment() == Alignment &&
1802  !LD->isVolatile() && !LD->isIndexed() &&
1803  Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
1804  bool isTail = isInTailCallPosition(DAG, ST, Chain);
1805  return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
1806  LD->getBasePtr(),
1807  DAG.getConstant(StoreBits/8, dl, MVT::i32),
1808  Alignment, false, isTail, ST->getPointerInfo(),
1809  LD->getPointerInfo());
1810  }
1811  }
1812  break;
1813  }
1814  }
1815  return SDValue();
1816 }
1817 
1818 void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1819  KnownBits &Known,
1820  const APInt &DemandedElts,
1821  const SelectionDAG &DAG,
1822  unsigned Depth) const {
1823  Known.resetAll();
1824  switch (Op.getOpcode()) {
1825  default: break;
1826  case XCoreISD::LADD:
1827  case XCoreISD::LSUB:
1828  if (Op.getResNo() == 1) {
1829  // Top bits of carry / borrow are clear.
1830  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1831  Known.getBitWidth() - 1);
1832  }
1833  break;
1834  case ISD::INTRINSIC_W_CHAIN:
1835  {
1836  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1837  switch (IntNo) {
1838  case Intrinsic::xcore_getts:
1839  // High bits are known to be zero.
1840  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1841  Known.getBitWidth() - 16);
1842  break;
1843  case Intrinsic::xcore_int:
1844  case Intrinsic::xcore_inct:
1845  // High bits are known to be zero.
1846  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1847  Known.getBitWidth() - 8);
1848  break;
1849  case Intrinsic::xcore_testct:
1850  // Result is either 0 or 1.
1851  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1852  Known.getBitWidth() - 1);
1853  break;
1854  case Intrinsic::xcore_testwct:
1855  // Result is in the range 0 - 4.
1856  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1857  Known.getBitWidth() - 3);
1858  break;
1859  }
1860  }
1861  break;
1862  }
1863 }
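 
// [Editorial note, not in the original source] The hook above lets generic
// DAG combines exploit target facts: e.g. because the carry result of
// XCoreISD::LADD has bits [31:1] known zero, (and (ladd ...):1, 1) can fold
// to the carry itself, and a value returned by xcore_testwct (range 0-4)
// only ever occupies its low three bits.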
1864 
1865 //===----------------------------------------------------------------------===//
1866 // Addressing mode description hooks
1867 //===----------------------------------------------------------------------===//
1868 
1869 static inline bool isImmUs(int64_t val)
1870 {
1871  return (val >= 0 && val <= 11);
1872 }
1873 
1874 static inline bool isImmUs2(int64_t val)
1875 {
1876  return (val%2 == 0 && isImmUs(val/2));
1877 }
1878 
1879 static inline bool isImmUs4(int64_t val)
1880 {
1881  return (val%4 == 0 && isImmUs(val/4));
1882 }
1883 
1884 /// isLegalAddressingMode - Return true if the addressing mode represented
1885 /// by AM is legal for this target, for a load/store of the specified type.
1886 bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1887  const AddrMode &AM, Type *Ty,
1888  unsigned AS,
1889  Instruction *I) const {
1890  if (Ty->getTypeID() == Type::VoidTyID)
1891  return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
1892 
1893  unsigned Size = DL.getTypeAllocSize(Ty);
1894  if (AM.BaseGV) {
1895  return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
1896  AM.BaseOffs%4 == 0;
1897  }
1898 
1899  switch (Size) {
1900  case 1:
1901  // reg + imm
1902  if (AM.Scale == 0) {
1903  return isImmUs(AM.BaseOffs);
1904  }
1905  // reg + reg
1906  return AM.Scale == 1 && AM.BaseOffs == 0;
1907  case 2:
1908  case 3:
1909  // reg + imm
1910  if (AM.Scale == 0) {
1911  return isImmUs2(AM.BaseOffs);
1912  }
1913  // reg + reg<<1
1914  return AM.Scale == 2 && AM.BaseOffs == 0;
1915  default:
1916  // reg + imm
1917  if (AM.Scale == 0) {
1918  return isImmUs4(AM.BaseOffs);
1919  }
1920  // reg + reg<<2
1921  return AM.Scale == 4 && AM.BaseOffs == 0;
1922  }
1923 }
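 
// [Editorial note, not in the original source] Concrete examples of the
// addressing-mode rules above (the 0-11 range matches the short scaled
// immediates accepted by the XCore load/store forms):
//   1-byte access:  base + imm with imm in [0, 11],        or base + 1*index
//   2-byte access:  base + even imm in [0, 22],            or base + 2*index
//   4-byte access:  base + multiple of 4 in [0, 44],       or base + 4*index
//   Global-relative: only "GV + 4*k" with no base register, and only for
//   accesses of at least 4 bytes.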
1924 
1925 //===----------------------------------------------------------------------===//
1926 // XCore Inline Assembly Support
1927 //===----------------------------------------------------------------------===//
1928 
1929 std::pair<unsigned, const TargetRegisterClass *>
1930 XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1931  StringRef Constraint,
1932  MVT VT) const {
1933  if (Constraint.size() == 1) {
1934  switch (Constraint[0]) {
1935  default : break;
1936  case 'r':
1937  return std::make_pair(0U, &XCore::GRRegsRegClass);
1938  }
1939  }
1940  // Use the default implementation in TargetLowering to convert the register
1941  // constraint into a member of a register class.
1942  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1943 }