1 //===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the XCoreTargetLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "XCoreISelLowering.h"
15 #include "XCore.h"
16 #include "XCoreMachineFunctionInfo.h"
17 #include "XCoreSubtarget.h"
18 #include "XCoreTargetMachine.h"
19 #include "XCoreTargetObjectFile.h"
20 #include "llvm/CodeGen/CallingConvLower.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineJumpTableInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/SelectionDAGISel.h"
27 #include "llvm/CodeGen/ValueTypes.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DerivedTypes.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/GlobalAlias.h"
33 #include "llvm/IR/GlobalVariable.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/Support/raw_ostream.h"
38 #include <algorithm>
39 
40 using namespace llvm;
41 
42 #define DEBUG_TYPE "xcore-lower"
43 
44 const char *XCoreTargetLowering::
45 getTargetNodeName(unsigned Opcode) const
46 {
47  switch ((XCoreISD::NodeType)Opcode)
48  {
49  case XCoreISD::FIRST_NUMBER : break;
50  case XCoreISD::BL : return "XCoreISD::BL";
51  case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
52  case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
53  case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
54  case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
55  case XCoreISD::STWSP : return "XCoreISD::STWSP";
56  case XCoreISD::RETSP : return "XCoreISD::RETSP";
57  case XCoreISD::LADD : return "XCoreISD::LADD";
58  case XCoreISD::LSUB : return "XCoreISD::LSUB";
59  case XCoreISD::LMUL : return "XCoreISD::LMUL";
60  case XCoreISD::MACCU : return "XCoreISD::MACCU";
61  case XCoreISD::MACCS : return "XCoreISD::MACCS";
62  case XCoreISD::CRC8 : return "XCoreISD::CRC8";
63  case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
64  case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
65  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
66  case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
67  case XCoreISD::MEMBARRIER : return "XCoreISD::MEMBARRIER";
68  }
69  return nullptr;
70 }
71 
72 XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
73  const XCoreSubtarget &Subtarget)
74  : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {
75 
76  // Set up the register classes.
77  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
78 
79  // Compute derived properties from the register classes
80  computeRegisterProperties(Subtarget.getRegisterInfo());
81 
83 
85 
86  // Use i32 for setcc operations results (slt, sgt, ...).
87  setBooleanContents(ZeroOrOneBooleanContent);
88  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
89 
90  // XCore does not have the NodeTypes below.
97 
98  // 64bit
108 
109  // Bit Manipulation
113 
115 
116  // Jump tables.
117  setOperationAction(ISD::BR_JT, MVT::Other, Custom);
118 
121 
122  // Conversion of i64 -> double produces constantpool nodes
123  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
124 
125  // Loads
126  for (MVT VT : MVT::integer_valuetypes()) {
127  setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
128  setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
129  setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
130 
131  setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
132  setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
133  }
134 
135  // Custom expand misaligned loads / stores.
136  setOperationAction(ISD::LOAD, MVT::i32, Custom);
137  setOperationAction(ISD::STORE, MVT::i32, Custom);
138 
139  // Varargs
144 
145  // Dynamic stack
149 
150  // Exception handling
153 
154  // Atomic operations
155  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
156  // As we are always sequentially consistent, an ATOMIC_FENCE becomes a no-op.
160 
161  // TRAMPOLINE is custom lowered.
162  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
163  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
164 
165  // We want to custom lower some of our intrinsics.
166  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
167 
171 
172  // We have target-specific dag combine patterns for the following nodes:
177 
180 }
181 
182 bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
183  if (Val.getOpcode() != ISD::LOAD)
184  return false;
185 
186  EVT VT1 = Val.getValueType();
187  if (!VT1.isSimple() || !VT1.isInteger() ||
188  !VT2.isSimple() || !VT2.isInteger())
189  return false;
190 
191  switch (VT1.getSimpleVT().SimpleTy) {
192  default: break;
193  case MVT::i8:
194  return true;
195  }
196 
197  return false;
198 }
199 
200 SDValue XCoreTargetLowering::
201 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
202  switch (Op.getOpcode())
203  {
204  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
205  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
206  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
207  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
208  case ISD::BR_JT: return LowerBR_JT(Op, DAG);
209  case ISD::LOAD: return LowerLOAD(Op, DAG);
210  case ISD::STORE: return LowerSTORE(Op, DAG);
211  case ISD::VAARG: return LowerVAARG(Op, DAG);
212  case ISD::VASTART: return LowerVASTART(Op, DAG);
213  case ISD::SMUL_LOHI: return LowerSMUL_LOHI(Op, DAG);
214  case ISD::UMUL_LOHI: return LowerUMUL_LOHI(Op, DAG);
215  // FIXME: Remove these when LegalizeDAGTypes lands.
216  case ISD::ADD:
217  case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
218  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
219  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
220  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
221  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
222  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
223  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
224  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
225  case ISD::ATOMIC_LOAD: return LowerATOMIC_LOAD(Op, DAG);
226  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
227  default:
228  llvm_unreachable("unimplemented operand");
229  }
230 }
231 
232 /// ReplaceNodeResults - Replace the results of node with an illegal result
233 /// type with new values built out of custom code.
234 void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
235  SmallVectorImpl<SDValue> &Results,
236  SelectionDAG &DAG) const {
237  switch (N->getOpcode()) {
238  default:
239  llvm_unreachable("Don't know how to custom expand this!");
240  case ISD::ADD:
241  case ISD::SUB:
242  Results.push_back(ExpandADDSUB(N, DAG));
243  return;
244  }
245 }
246 
247 //===----------------------------------------------------------------------===//
248 // Misc Lower Operation implementation
249 //===----------------------------------------------------------------------===//
250 
251 SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
252  const GlobalValue *GV,
253  SelectionDAG &DAG) const {
254  // FIXME there is no actual debug info here
255  SDLoc dl(GA);
256 
257  if (GV->getValueType()->isFunctionTy())
258  return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
259 
260  const auto *GVar = dyn_cast<GlobalVariable>(GV);
261  if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
262  (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
263  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
264 
265  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
266 }
267 
268 static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
269  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
270  return true;
271 
272  Type *ObjType = GV->getValueType();
273  if (!ObjType->isSized())
274  return false;
275 
276  auto &DL = GV->getParent()->getDataLayout();
277  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
278  return ObjSize < CodeModelLargeSize && ObjSize != 0;
279 }
280 
281 SDValue XCoreTargetLowering::
282 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
283 {
284  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
285  const GlobalValue *GV = GN->getGlobal();
286  SDLoc DL(GN);
287  int64_t Offset = GN->getOffset();
288  if (IsSmallObject(GV, *this)) {
289  // We can only fold positive offsets that are a multiple of the word size.
290  int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
291  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
292  GA = getGlobalAddressWrapper(GA, GV, DAG);
293  // Handle the rest of the offset.
294  if (Offset != FoldedOffset) {
295  SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
296  GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
297  }
298  return GA;
299  } else {
300  // Ideally we would not fold in offset with an index <= 11.
301  Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
302  Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
303  Ty = Type::getInt32Ty(*DAG.getContext());
304  Constant *Idx = ConstantInt::get(Ty, Offset);
305  Constant *GAI = ConstantExpr::getGetElementPtr(
306  Type::getInt8Ty(*DAG.getContext()), GA, Idx);
307  SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
308  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
309  DAG.getEntryNode(), CP, MachinePointerInfo());
310  }
311 }
312 
313 SDValue XCoreTargetLowering::
314 LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
315 {
316  SDLoc DL(Op);
317  auto PtrVT = getPointerTy(DAG.getDataLayout());
318  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
319  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);
320 
321  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
322 }
323 
324 SDValue XCoreTargetLowering::
325 LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
326 {
327  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
328  // FIXME there isn't really debug info here
329  SDLoc dl(CP);
330  EVT PtrVT = Op.getValueType();
331  SDValue Res;
332  if (CP->isMachineConstantPoolEntry()) {
333  Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
334  CP->getAlignment(), CP->getOffset());
335  } else {
336  Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
337  CP->getAlignment(), CP->getOffset());
338  }
339  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
340 }
341 
342 unsigned XCoreTargetLowering::getJumpTableEncoding() const {
343  return MachineJumpTableInfo::EK_Inline;
344 }
345 
346 SDValue XCoreTargetLowering::
347 LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
348 {
349  SDValue Chain = Op.getOperand(0);
350  SDValue Table = Op.getOperand(1);
351  SDValue Index = Op.getOperand(2);
352  SDLoc dl(Op);
353  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
354  unsigned JTI = JT->getIndex();
355  MachineFunction &MF = DAG.getMachineFunction();
356  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
357  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
358 
359  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
360  if (NumEntries <= 32) {
361  return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
362  }
363  assert((NumEntries >> 31) == 0);
364  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
365  DAG.getConstant(1, dl, MVT::i32));
366  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
367  ScaledIndex);
368 }
369 
370 SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
371  const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
372  SelectionDAG &DAG) const {
373  auto PtrVT = getPointerTy(DAG.getDataLayout());
374  if ((Offset & 0x3) == 0) {
375  return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
376  }
377  // Lower to pair of consecutive word aligned loads plus some bit shifting.
378  int32_t HighOffset = alignTo(Offset, 4);
379  int32_t LowOffset = HighOffset - 4;
380  SDValue LowAddr, HighAddr;
381  if (GlobalAddressSDNode *GASD =
382  dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
383  LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
384  LowOffset);
385  HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
386  HighOffset);
387  } else {
388  LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
389  DAG.getConstant(LowOffset, DL, MVT::i32));
390  HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
391  DAG.getConstant(HighOffset, DL, MVT::i32));
392  }
393  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
394  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
395 
396  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
397  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
398  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
399  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
400  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
401  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
402  High.getValue(1));
403  SDValue Ops[] = { Result, Chain };
404  return DAG.getMergeValues(Ops, DL);
405 }
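// Illustrative sketch (not used by the code in this file): the scalar
// computation that the shift-and-or sequence above produces, assuming a
// little-endian memory image and a genuinely misaligned offset
// (Offset & 3 != 0). The helper name and signature are invented here.
static uint32_t loadMisalignedWordSketch(const uint32_t *Words, int64_t Offset) {
  int64_t HighOffset = (Offset + 3) & ~int64_t(3); // alignTo(Offset, 4)
  int64_t LowOffset = HighOffset - 4;
  uint32_t Low = Words[LowOffset / 4];   // first aligned word
  uint32_t High = Words[HighOffset / 4]; // second aligned word
  // Merge the bytes straddling the two words, exactly as the DAG nodes do.
  return (Low >> ((Offset - LowOffset) * 8)) |
         (High << ((HighOffset - Offset) * 8));
}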
406 
407 static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
408 {
409  APInt KnownZero, KnownOne;
410  DAG.computeKnownBits(Value, KnownZero, KnownOne);
411  return KnownZero.countTrailingOnes() >= 2;
412 }
413 
414 SDValue XCoreTargetLowering::
415 LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
416  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
417  LoadSDNode *LD = cast<LoadSDNode>(Op);
418  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
419  "Unexpected extension type");
420  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
421  if (allowsMisalignedMemoryAccesses(LD->getMemoryVT(),
422  LD->getAddressSpace(),
423  LD->getAlignment()))
424  return SDValue();
425 
426  auto &TD = DAG.getDataLayout();
427  unsigned ABIAlignment = TD.getABITypeAlignment(
428  LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
429  // Leave aligned load alone.
430  if (LD->getAlignment() >= ABIAlignment)
431  return SDValue();
432 
433  SDValue Chain = LD->getChain();
434  SDValue BasePtr = LD->getBasePtr();
435  SDLoc DL(Op);
436 
437  if (!LD->isVolatile()) {
438  const GlobalValue *GV;
439  int64_t Offset = 0;
440  if (DAG.isBaseWithConstantOffset(BasePtr) &&
441  isWordAligned(BasePtr->getOperand(0), DAG)) {
442  SDValue NewBasePtr = BasePtr->getOperand(0);
443  Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
444  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
445  Offset, DAG);
446  }
447  if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
448  MinAlign(GV->getAlignment(), 4) == 4) {
449  SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
450  BasePtr->getValueType(0));
451  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
452  Offset, DAG);
453  }
454  }
455 
456  if (LD->getAlignment() == 2) {
457  SDValue Low =
458  DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
459  LD->getPointerInfo(), MVT::i16,
460  /* Alignment = */ 2, LD->getMemOperand()->getFlags());
461  SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
462  DAG.getConstant(2, DL, MVT::i32));
463  SDValue High =
464  DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
465  LD->getPointerInfo().getWithOffset(2), MVT::i16,
466  /* Alignment = */ 2, LD->getMemOperand()->getFlags());
467  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
468  DAG.getConstant(16, DL, MVT::i32));
469  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
470  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
471  High.getValue(1));
472  SDValue Ops[] = { Result, Chain };
473  return DAG.getMergeValues(Ops, DL);
474  }
475 
476  // Lower to a call to __misaligned_load(BasePtr).
477  Type *IntPtrTy = TD.getIntPtrType(*DAG.getContext());
478  TargetLowering::ArgListTy Args;
479  TargetLowering::ArgListEntry Entry;
480 
481  Entry.Ty = IntPtrTy;
482  Entry.Node = BasePtr;
483  Args.push_back(Entry);
484 
485  TargetLowering::CallLoweringInfo CLI(DAG);
486  CLI.setDebugLoc(DL).setChain(Chain).setCallee(
487  CallingConv::C, IntPtrTy,
488  DAG.getExternalSymbol("__misaligned_load",
489  getPointerTy(DAG.getDataLayout())),
490  std::move(Args));
491 
492  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
493  SDValue Ops[] = { CallResult.first, CallResult.second };
494  return DAG.getMergeValues(Ops, DL);
495 }
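// The "__misaligned_load" callee named above is provided by the XCore runtime
// rather than by this file. From the call setup it takes a single
// pointer-sized argument (the address) and returns the loaded word, i.e. it
// is assumed to behave roughly like this little-endian sketch:
//   uint32_t __misaligned_load(const uint8_t *P) {
//     return P[0] | (P[1] << 8) | (P[2] << 16) | ((uint32_t)P[3] << 24);
//   }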
496 
497 SDValue XCoreTargetLowering::
498 LowerSTORE(SDValue Op, SelectionDAG &DAG) const
499 {
500  StoreSDNode *ST = cast<StoreSDNode>(Op);
501  assert(!ST->isTruncatingStore() && "Unexpected store type");
502  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
503  if (allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
504  ST->getAddressSpace(),
505  ST->getAlignment())) {
506  return SDValue();
507  }
508  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
509  ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
510  // Leave aligned store alone.
511  if (ST->getAlignment() >= ABIAlignment) {
512  return SDValue();
513  }
514  SDValue Chain = ST->getChain();
515  SDValue BasePtr = ST->getBasePtr();
516  SDValue Value = ST->getValue();
517  SDLoc dl(Op);
518 
519  if (ST->getAlignment() == 2) {
520  SDValue Low = Value;
521  SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
522  DAG.getConstant(16, dl, MVT::i32));
523  SDValue StoreLow = DAG.getTruncStore(
524  Chain, dl, Low, BasePtr, ST->getPointerInfo(), MVT::i16,
525  /* Alignment = */ 2, ST->getMemOperand()->getFlags());
526  SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
527  DAG.getConstant(2, dl, MVT::i32));
528  SDValue StoreHigh = DAG.getTruncStore(
529  Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
530  MVT::i16, /* Alignment = */ 2, ST->getMemOperand()->getFlags());
531  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
532  }
533 
534  // Lower to a call to __misaligned_store(BasePtr, Value).
535  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
536  TargetLowering::ArgListTy Args;
537  TargetLowering::ArgListEntry Entry;
538 
539  Entry.Ty = IntPtrTy;
540  Entry.Node = BasePtr;
541  Args.push_back(Entry);
542 
543  Entry.Node = Value;
544  Args.push_back(Entry);
545 
546  TargetLowering::CallLoweringInfo CLI(DAG);
547  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
548  CallingConv::C, Type::getVoidTy(*DAG.getContext()),
549  DAG.getExternalSymbol("__misaligned_store",
550  getPointerTy(DAG.getDataLayout())),
551  std::move(Args));
552 
553  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
554  return CallResult.second;
555 }
556 
557 SDValue XCoreTargetLowering::
558 LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
559 {
560  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
561  "Unexpected operand to lower!");
562  SDLoc dl(Op);
563  SDValue LHS = Op.getOperand(0);
564  SDValue RHS = Op.getOperand(1);
565  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
566  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
567  DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
568  LHS, RHS);
569  SDValue Lo(Hi.getNode(), 1);
570  SDValue Ops[] = { Lo, Hi };
571  return DAG.getMergeValues(Ops, dl);
572 }
573 
574 SDValue XCoreTargetLowering::
575 LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
576 {
577  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
578  "Unexpected operand to lower!");
579  SDLoc dl(Op);
580  SDValue LHS = Op.getOperand(0);
581  SDValue RHS = Op.getOperand(1);
582  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
583  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
584  DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
585  Zero, Zero);
586  SDValue Lo(Hi.getNode(), 1);
587  SDValue Ops[] = { Lo, Hi };
588  return DAG.getMergeValues(Ops, dl);
589 }
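// Illustrative host-side reference (not used by the code in this file) for
// what the LMUL node above is assumed to compute: the full 64-bit product of
// two 32-bit operands plus two 32-bit addends, split into {Hi, Lo}.
static void lmulReferenceSketch(uint32_t A, uint32_t B, uint32_t C, uint32_t D,
                                uint32_t &Hi, uint32_t &Lo) {
  uint64_t Product = (uint64_t)A * B + C + D; // cannot overflow 64 bits
  Hi = (uint32_t)(Product >> 32);
  Lo = (uint32_t)Product;
}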
590 
591 /// isADDADDMUL - Return whether Op is in a form that is equivalent to
592 /// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
593 /// each intermediate result in the calculation must also have a single use.
594 /// If the Op is in the correct form the constituent parts are written to Mul0,
595 /// Mul1, Addend0 and Addend1.
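/// For example, both of the following shapes are accepted (the function below
/// also handles the commuted positions of the mul within the inner add):
///   add(add(mul(x,y), a), b)   and   add(add(a, b), mul(x,y))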
596 static bool
597 isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
598  SDValue &Addend1, bool requireIntermediatesHaveOneUse)
599 {
600  if (Op.getOpcode() != ISD::ADD)
601  return false;
602  SDValue N0 = Op.getOperand(0);
603  SDValue N1 = Op.getOperand(1);
604  SDValue AddOp;
605  SDValue OtherOp;
606  if (N0.getOpcode() == ISD::ADD) {
607  AddOp = N0;
608  OtherOp = N1;
609  } else if (N1.getOpcode() == ISD::ADD) {
610  AddOp = N1;
611  OtherOp = N0;
612  } else {
613  return false;
614  }
615  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
616  return false;
617  if (OtherOp.getOpcode() == ISD::MUL) {
618  // add(add(a,b),mul(x,y))
619  if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
620  return false;
621  Mul0 = OtherOp.getOperand(0);
622  Mul1 = OtherOp.getOperand(1);
623  Addend0 = AddOp.getOperand(0);
624  Addend1 = AddOp.getOperand(1);
625  return true;
626  }
627  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
628  // add(add(mul(x,y),a),b)
629  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
630  return false;
631  Mul0 = AddOp.getOperand(0).getOperand(0);
632  Mul1 = AddOp.getOperand(0).getOperand(1);
633  Addend0 = AddOp.getOperand(1);
634  Addend1 = OtherOp;
635  return true;
636  }
637  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
638  // add(add(a,mul(x,y)),b)
639  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
640  return false;
641  Mul0 = AddOp.getOperand(1).getOperand(0);
642  Mul1 = AddOp.getOperand(1).getOperand(1);
643  Addend0 = AddOp.getOperand(0);
644  Addend1 = OtherOp;
645  return true;
646  }
647  return false;
648 }
649 
650 SDValue XCoreTargetLowering::
651 TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
652 {
653  SDValue Mul;
654  SDValue Other;
655  if (N->getOperand(0).getOpcode() == ISD::MUL) {
656  Mul = N->getOperand(0);
657  Other = N->getOperand(1);
658  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
659  Mul = N->getOperand(1);
660  Other = N->getOperand(0);
661  } else {
662  return SDValue();
663  }
664  SDLoc dl(N);
665  SDValue LL, RL, AddendL, AddendH;
666  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
667  Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
668  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
669  Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
670  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
671  Other, DAG.getConstant(0, dl, MVT::i32));
672  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
673  Other, DAG.getConstant(1, dl, MVT::i32));
674  APInt HighMask = APInt::getHighBitsSet(64, 32);
675  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
676  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
677  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
678  DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
679  // The inputs are both zero-extended.
680  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
681  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
682  AddendL, LL, RL);
683  SDValue Lo(Hi.getNode(), 1);
684  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
685  }
686  if (LHSSB > 32 && RHSSB > 32) {
687  // The inputs are both sign-extended.
688  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
689  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
690  AddendL, LL, RL);
691  SDValue Lo(Hi.getNode(), 1);
692  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
693  }
694  SDValue LH, RH;
695  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
696  Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
697  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
698  Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
699  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
700  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
701  AddendL, LL, RL);
702  SDValue Lo(Hi.getNode(), 1);
703  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
704  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
705  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
706  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
707  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
708 }
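// Illustrative host-side reference (not used by the code in this file) for
// the general case handled above: modulo 2^64,
//   A * B + Addend == Addend + LL*RL + ((LL*RH + LH*RL) << 32),
// which is what the MACCU node plus the two cross-term MULs compute.
static uint64_t mulAddReferenceSketch(uint64_t A, uint64_t B, uint64_t Addend) {
  uint32_t LL = (uint32_t)A, LH = (uint32_t)(A >> 32);
  uint32_t RL = (uint32_t)B, RH = (uint32_t)(B >> 32);
  uint64_t Acc = (uint64_t)LL * RL + Addend;               // the MACCU part
  uint32_t Hi = (uint32_t)(Acc >> 32) + LL * RH + LH * RL; // cross terms
  return ((uint64_t)Hi << 32) | (uint32_t)Acc;
}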
709 
710 SDValue XCoreTargetLowering::
711 ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
712 {
713  assert(N->getValueType(0) == MVT::i64 &&
714  (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
715  "Unknown operand to lower!");
716 
717  if (N->getOpcode() == ISD::ADD)
718  if (SDValue Result = TryExpandADDWithMul(N, DAG))
719  return Result;
720 
721  SDLoc dl(N);
722 
723  // Extract components
724  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
725  N->getOperand(0),
726  DAG.getConstant(0, dl, MVT::i32));
727  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
728  N->getOperand(0),
729  DAG.getConstant(1, dl, MVT::i32));
730  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
731  N->getOperand(1),
732  DAG.getConstant(0, dl, MVT::i32));
733  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
734  N->getOperand(1),
735  DAG.getConstant(1, dl, MVT::i32));
736 
737  // Expand
738  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
739  XCoreISD::LSUB;
740  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
741  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
742  LHSL, RHSL, Zero);
743  SDValue Carry(Lo.getNode(), 1);
744 
745  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
746  LHSH, RHSH, Carry);
747  SDValue Ignored(Hi.getNode(), 1);
748  // Merge the pieces
749  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
750 }
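// Illustrative host-side reference (not used by the code in this file) for
// the i64 ADD expansion above: the first LADD yields the low word plus a
// carry, the second LADD consumes that carry while adding the high words.
static uint64_t add64ReferenceSketch(uint64_t A, uint64_t B) {
  uint32_t Lo = (uint32_t)A + (uint32_t)B;
  uint32_t Carry = Lo < (uint32_t)A; // carry out of the low-word add
  uint32_t Hi = (uint32_t)(A >> 32) + (uint32_t)(B >> 32) + Carry;
  return ((uint64_t)Hi << 32) | Lo;
}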
751 
752 SDValue XCoreTargetLowering::
753 LowerVAARG(SDValue Op, SelectionDAG &DAG) const
754 {
755  // Whilst LLVM does not support aggregate varargs we can ignore
756  // the possibility of the ValueType being an implicit byVal vararg.
757  SDNode *Node = Op.getNode();
758  EVT VT = Node->getValueType(0); // not an aggregate
759  SDValue InChain = Node->getOperand(0);
760  SDValue VAListPtr = Node->getOperand(1);
761  EVT PtrVT = VAListPtr.getValueType();
762  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
763  SDLoc dl(Node);
764  SDValue VAList =
765  DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
766  // Increment the pointer, VAList, to the next vararg
767  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
768  DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
769  dl));
770  // Store the incremented VAList to the legalized pointer
771  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
772  MachinePointerInfo(SV));
773  // Load the actual argument out of the pointer VAList
774  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
775 }
776 
777 SDValue XCoreTargetLowering::
778 LowerVASTART(SDValue Op, SelectionDAG &DAG) const
779 {
780  SDLoc dl(Op);
781  // vastart stores the address of the VarArgsFrameIndex slot into the
782  // memory location argument
783  MachineFunction &MF = DAG.getMachineFunction();
784  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
785  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
786  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
787  MachinePointerInfo());
788 }
789 
790 SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
791  SelectionDAG &DAG) const {
792  // This node represents llvm.frameaddress on the DAG.
793  // It takes one operand, the index of the frame address to return.
794  // An index of zero corresponds to the current function's frame address.
795  // An index of one to the parent's frame address, and so on.
796  // Depths > 0 not supported yet!
797  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
798  return SDValue();
799 
800  MachineFunction &MF = DAG.getMachineFunction();
801  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
802  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
803  RegInfo->getFrameRegister(MF), MVT::i32);
804 }
805 
806 SDValue XCoreTargetLowering::
807 LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
808  // This node represents llvm.returnaddress on the DAG.
809  // It takes one operand, the index of the return address to return.
810  // An index of zero corresponds to the current function's return address.
811  // An index of one to the parent's return address, and so on.
812  // Depths > 0 not supported yet!
813  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
814  return SDValue();
815 
816  MachineFunction &MF = DAG.getMachineFunction();
817  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
818  int FI = XFI->createLRSpillSlot(MF);
819  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
820  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
821  DAG.getEntryNode(), FIN,
822  MachinePointerInfo::getFixedStack(MF, FI));
823 }
824 
825 SDValue XCoreTargetLowering::
826 LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
827  // This node represents offset from frame pointer to first on-stack argument.
828  // This is needed for correct stack adjustment during unwind.
829  // However, we don't know the offset until after the frame has been finalised.
830  // This is done during the XCoreFTAOElim pass.
831  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
832 }
833 
834 SDValue XCoreTargetLowering::
835 LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
836  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
837  // This node represents 'eh_return' gcc dwarf builtin, which is used to
838  // return from exception. The general meaning is: adjust stack by OFFSET and
839  // pass execution to HANDLER.
840  MachineFunction &MF = DAG.getMachineFunction();
841  SDValue Chain = Op.getOperand(0);
842  SDValue Offset = Op.getOperand(1);
843  SDValue Handler = Op.getOperand(2);
844  SDLoc dl(Op);
845 
846  // Absolute SP = (FP + FrameToArgs) + Offset
847  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
848  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
849  RegInfo->getFrameRegister(MF), MVT::i32);
850  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
851  MVT::i32);
852  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
853  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);
854 
855  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
856  // which leaves 2 caller saved registers, R2 & R3 for us to use.
857  unsigned StackReg = XCore::R2;
858  unsigned HandlerReg = XCore::R3;
859 
860  SDValue OutChains[] = {
861  DAG.getCopyToReg(Chain, dl, StackReg, Stack),
862  DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
863  };
864 
865  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
866 
867  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
868  DAG.getRegister(StackReg, MVT::i32),
869  DAG.getRegister(HandlerReg, MVT::i32));
870 
871 }
872 
873 SDValue XCoreTargetLowering::
874 LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
875  return Op.getOperand(0);
876 }
877 
878 SDValue XCoreTargetLowering::
879 LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
880  SDValue Chain = Op.getOperand(0);
881  SDValue Trmp = Op.getOperand(1); // trampoline
882  SDValue FPtr = Op.getOperand(2); // nested function
883  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
884 
885  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
886 
887  // .align 4
888  // LDAPF_u10 r11, nest
889  // LDW_2rus r11, r11[0]
890  // STWSP_ru6 r11, sp[0]
891  // LDAPF_u10 r11, fptr
892  // LDW_2rus r11, r11[0]
893  // BAU_1r r11
894  // nest:
895  // .word nest
896  // fptr:
897  // .word fptr
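 // In total the trampoline written below is five words long: three words of
 // code at offsets 0, 4 and 8, the 'nest' value at offset 12 and the function
 // pointer at offset 16, matching the five stores that follow.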
898  SDValue OutChains[5];
899 
900  SDValue Addr = Trmp;
901 
902  SDLoc dl(Op);
903  OutChains[0] =
904  DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
905  MachinePointerInfo(TrmpAddr));
906 
907  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
908  DAG.getConstant(4, dl, MVT::i32));
909  OutChains[1] =
910  DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
911  MachinePointerInfo(TrmpAddr, 4));
912 
913  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
914  DAG.getConstant(8, dl, MVT::i32));
915  OutChains[2] =
916  DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
917  MachinePointerInfo(TrmpAddr, 8));
918 
919  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
920  DAG.getConstant(12, dl, MVT::i32));
921  OutChains[3] =
922  DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));
923 
924  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
925  DAG.getConstant(16, dl, MVT::i32));
926  OutChains[4] =
927  DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));
928 
929  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
930 }
931 
932 SDValue XCoreTargetLowering::
933 LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
934  SDLoc DL(Op);
935  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
936  switch (IntNo) {
937  case Intrinsic::xcore_crc8:
938  EVT VT = Op.getValueType();
939  SDValue Data =
940  DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
941  Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
942  SDValue Crc(Data.getNode(), 1);
943  SDValue Results[] = { Crc, Data };
944  return DAG.getMergeValues(Results, DL);
945  }
946  return SDValue();
947 }
948 
949 SDValue XCoreTargetLowering::
950 LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
951  SDLoc DL(Op);
952  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
953 }
954 
955 SDValue XCoreTargetLowering::
956 LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
957  AtomicSDNode *N = cast<AtomicSDNode>(Op);
958  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
959  assert((N->getOrdering() == AtomicOrdering::Unordered ||
960  N->getOrdering() == AtomicOrdering::Monotonic) &&
961  "setInsertFencesForAtomic(true) expects unordered / monotonic");
962  if (N->getMemoryVT() == MVT::i32) {
963  if (N->getAlignment() < 4)
964  report_fatal_error("atomic load must be aligned");
965  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
966  N->getChain(), N->getBasePtr(), N->getPointerInfo(),
967  N->getAlignment(), N->getMemOperand()->getFlags(),
968  N->getAAInfo(), N->getRanges());
969  }
970  if (N->getMemoryVT() == MVT::i16) {
971  if (N->getAlignment() < 2)
972  report_fatal_error("atomic load must be aligned");
973  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
974  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
975  N->getAlignment(), N->getMemOperand()->getFlags(),
976  N->getAAInfo());
977  }
978  if (N->getMemoryVT() == MVT::i8)
979  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
980  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
981  N->getAlignment(), N->getMemOperand()->getFlags(),
982  N->getAAInfo());
983  return SDValue();
984 }
985 
986 SDValue XCoreTargetLowering::
987 LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
988  AtomicSDNode *N = cast<AtomicSDNode>(Op);
989  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
990  assert((N->getOrdering() == AtomicOrdering::Unordered ||
991  N->getOrdering() == AtomicOrdering::Monotonic) &&
992  "setInsertFencesForAtomic(true) expects unordered / monotonic");
993  if (N->getMemoryVT() == MVT::i32) {
994  if (N->getAlignment() < 4)
995  report_fatal_error("atomic store must be aligned");
996  return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
997  N->getPointerInfo(), N->getAlignment(),
998  N->getMemOperand()->getFlags(), N->getAAInfo());
999  }
1000  if (N->getMemoryVT() == MVT::i16) {
1001  if (N->getAlignment() < 2)
1002  report_fatal_error("atomic store must be aligned");
1003  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
1004  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
1005  N->getAlignment(), N->getMemOperand()->getFlags(),
1006  N->getAAInfo());
1007  }
1008  if (N->getMemoryVT() == MVT::i8)
1009  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
1010  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
1011  N->getAlignment(), N->getMemOperand()->getFlags(),
1012  N->getAAInfo());
1013  return SDValue();
1014 }
1015 
1016 //===----------------------------------------------------------------------===//
1017 // Calling Convention Implementation
1018 //===----------------------------------------------------------------------===//
1019 
1020 #include "XCoreGenCallingConv.inc"
1021 
1022 //===----------------------------------------------------------------------===//
1023 // Call Calling Convention Implementation
1024 //===----------------------------------------------------------------------===//
1025 
1026 /// XCore call implementation
1027 SDValue
1028 XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1029  SmallVectorImpl<SDValue> &InVals) const {
1030  SelectionDAG &DAG = CLI.DAG;
1031  SDLoc &dl = CLI.DL;
1032  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1033  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1034  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1035  SDValue Chain = CLI.Chain;
1036  SDValue Callee = CLI.Callee;
1037  bool &isTailCall = CLI.IsTailCall;
1038  CallingConv::ID CallConv = CLI.CallConv;
1039  bool isVarArg = CLI.IsVarArg;
1040 
1041  // XCore target does not yet support tail call optimization.
1042  isTailCall = false;
1043 
1044  // For now, only CallingConv::C implemented
1045  switch (CallConv)
1046  {
1047  default:
1048  llvm_unreachable("Unsupported calling convention");
1049  case CallingConv::Fast:
1050  case CallingConv::C:
1051  return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
1052  Outs, OutVals, Ins, dl, DAG, InVals);
1053  }
1054 }
1055 
1056 /// LowerCallResult - Lower the result values of a call into the
1057 /// appropriate copies out of appropriate physical registers / memory locations.
1058 static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
1059  const SmallVectorImpl<CCValAssign> &RVLocs,
1060  const SDLoc &dl, SelectionDAG &DAG,
1061  SmallVectorImpl<SDValue> &InVals) {
1062  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
1063  // Copy results out of physical registers.
1064  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1065  const CCValAssign &VA = RVLocs[i];
1066  if (VA.isRegLoc()) {
1067  Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
1068  InFlag).getValue(1);
1069  InFlag = Chain.getValue(2);
1070  InVals.push_back(Chain.getValue(0));
1071  } else {
1072  assert(VA.isMemLoc());
1073  ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
1074  InVals.size()));
1075  // Reserve space for this result.
1076  InVals.push_back(SDValue());
1077  }
1078  }
1079 
1080  // Copy results out of memory.
1081  SmallVector<SDValue, 4> MemOpChains;
1082  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
1083  int offset = ResultMemLocs[i].first;
1084  unsigned index = ResultMemLocs[i].second;
1085  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
1086  SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
1087  SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
1088  InVals[index] = load;
1089  MemOpChains.push_back(load.getValue(1));
1090  }
1091 
1092  // Transform all loads nodes into one single node because
1093  // all load nodes are independent of each other.
1094  if (!MemOpChains.empty())
1095  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1096 
1097  return Chain;
1098 }
1099 
1100 /// LowerCCCCallTo - functions arguments are copied from virtual
1101 /// regs to (physical regs)/(stack frame), CALLSEQ_START and
1102 /// CALLSEQ_END are emitted.
1103 /// TODO: isTailCall, sret.
1104 SDValue XCoreTargetLowering::LowerCCCCallTo(
1105  SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
1106  bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
1107  const SmallVectorImpl<SDValue> &OutVals,
1108  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1109  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1110 
1111  // Analyze operands of the call, assigning locations to each operand.
1112  SmallVector<CCValAssign, 16> ArgLocs;
1113  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1114  *DAG.getContext());
1115 
1116  // The ABI dictates there should be one stack slot available to the callee
1117  // on function entry (for saving lr).
1118  CCInfo.AllocateStack(4, 4);
1119 
1120  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
1121 
1122  SmallVector<CCValAssign, 16> RVLocs;
1123  // Analyze return values to determine the number of bytes of stack required.
1124  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1125  *DAG.getContext());
1126  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
1127  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
1128 
1129  // Get a count of how many bytes are to be pushed on the stack.
1130  unsigned NumBytes = RetCCInfo.getNextStackOffset();
1131  auto PtrVT = getPointerTy(DAG.getDataLayout());
1132 
1133  Chain = DAG.getCALLSEQ_START(Chain,
1134  DAG.getConstant(NumBytes, dl, PtrVT, true), dl);
1135 
1136  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
1137  SmallVector<SDValue, 12> MemOpChains;
1138 
1139  // Walk the register/memloc assignments, inserting copies/loads.
1140  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1141  CCValAssign &VA = ArgLocs[i];
1142  SDValue Arg = OutVals[i];
1143 
1144  // Promote the value if needed.
1145  switch (VA.getLocInfo()) {
1146  default: llvm_unreachable("Unknown loc info!");
1147  case CCValAssign::Full: break;
1148  case CCValAssign::SExt:
1149  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1150  break;
1151  case CCValAssign::ZExt:
1152  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1153  break;
1154  case CCValAssign::AExt:
1155  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1156  break;
1157  }
1158 
1159  // Arguments that can be passed in a register are kept in the
1160  // RegsToPass vector.
1161  if (VA.isRegLoc()) {
1162  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1163  } else {
1164  assert(VA.isMemLoc());
1165 
1166  int Offset = VA.getLocMemOffset();
1167 
1168  MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
1169  Chain, Arg,
1170  DAG.getConstant(Offset/4, dl,
1171  MVT::i32)));
1172  }
1173  }
1174 
1175  // Transform all store nodes into one single node because
1176  // all store nodes are independent of each other.
1177  if (!MemOpChains.empty())
1178  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1179 
1180  // Build a sequence of copy-to-reg nodes chained together with token
1181  // chain and flag operands which copy the outgoing args into registers.
1182  // The InFlag is necessary since all emitted instructions must be
1183  // stuck together.
1184  SDValue InFlag;
1185  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1186  Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1187  RegsToPass[i].second, InFlag);
1188  InFlag = Chain.getValue(1);
1189  }
1190 
1191  // If the callee is a GlobalAddress node (quite common, every direct call is)
1192  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1193  // Likewise ExternalSymbol -> TargetExternalSymbol.
1194  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1195  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
1196  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1197  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1198 
1199  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
1200  // = Chain, Callee, Reg#1, Reg#2, ...
1201  //
1202  // Returns a chain & a flag for retval copy to use.
1203  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1204  SmallVector<SDValue, 14> Ops;
1205  Ops.push_back(Chain);
1206  Ops.push_back(Callee);
1207 
1208  // Add argument registers to the end of the list so that they are
1209  // known live into the call.
1210  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1211  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1212  RegsToPass[i].second.getValueType()));
1213 
1214  if (InFlag.getNode())
1215  Ops.push_back(InFlag);
1216 
1217  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
1218  InFlag = Chain.getValue(1);
1219 
1220  // Create the CALLSEQ_END node.
1221  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
1222  DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
1223  InFlag = Chain.getValue(1);
1224 
1225  // Handle result values, copying them out of physregs into vregs that we
1226  // return.
1227  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
1228 }
1229 
1230 //===----------------------------------------------------------------------===//
1231 // Formal Arguments Calling Convention Implementation
1232 //===----------------------------------------------------------------------===//
1233 
1234 namespace {
1235  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
1236 }
1237 
1238 /// XCore formal arguments implementation
1239 SDValue XCoreTargetLowering::LowerFormalArguments(
1240  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1241  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1242  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1243  switch (CallConv)
1244  {
1245  default:
1246  llvm_unreachable("Unsupported calling convention");
1247  case CallingConv::C:
1248  case CallingConv::Fast:
1249  return LowerCCCArguments(Chain, CallConv, isVarArg,
1250  Ins, dl, DAG, InVals);
1251  }
1252 }
1253 
1254 /// LowerCCCArguments - transform physical registers into
1255 /// virtual registers and generate load operations for
1256 /// arguments placed on the stack.
1257 /// TODO: sret
1258 SDValue XCoreTargetLowering::LowerCCCArguments(
1259  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1260  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1261  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1262  MachineFunction &MF = DAG.getMachineFunction();
1263  MachineFrameInfo &MFI = MF.getFrameInfo();
1264  MachineRegisterInfo &RegInfo = MF.getRegInfo();
1265  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
1266 
1267  // Assign locations to all of the incoming arguments.
1268  SmallVector<CCValAssign, 16> ArgLocs;
1269  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1270  *DAG.getContext());
1271 
1272  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
1273 
1274  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
1275 
1276  unsigned LRSaveSize = StackSlotSize;
1277 
1278  if (!isVarArg)
1279  XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);
1280 
1281  // All getCopyFromReg ops must precede any getMemcpys to prevent the
1282  // scheduler clobbering a register before it has been copied.
1283  // The stages are:
1284  // 1. CopyFromReg (and load) arg & vararg registers.
1285  // 2. Chain CopyFromReg nodes into a TokenFactor.
1286  // 3. Memcpy 'byVal' args & push final InVals.
1287  // 4. Chain mem ops nodes into a TokenFactor.
1288  SmallVector<SDValue, 4> CFRegNode;
1289  SmallVector<ArgDataPair, 4> ArgData;
1290  SmallVector<SDValue, 4> MemOps;
1291 
1292  // 1a. CopyFromReg (and load) arg registers.
1293  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1294 
1295  CCValAssign &VA = ArgLocs[i];
1296  SDValue ArgIn;
1297 
1298  if (VA.isRegLoc()) {
1299  // Arguments passed in registers
1300  EVT RegVT = VA.getLocVT();
1301  switch (RegVT.getSimpleVT().SimpleTy) {
1302  default:
1303  {
1304 #ifndef NDEBUG
1305  errs() << "LowerFormalArguments Unhandled argument type: "
1306  << RegVT.getEVTString() << "\n";
1307 #endif
1308  llvm_unreachable(nullptr);
1309  }
1310  case MVT::i32:
1311  unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1312  RegInfo.addLiveIn(VA.getLocReg(), VReg);
1313  ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
1314  CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
1315  }
1316  } else {
1317  // sanity check
1318  assert(VA.isMemLoc());
1319  // Load the argument to a virtual register
1320  unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
1321  if (ObjSize > StackSlotSize) {
1322  errs() << "LowerFormalArguments Unhandled argument type: "
1323  << EVT(VA.getLocVT()).getEVTString()
1324  << "\n";
1325  }
1326  // Create the frame index object for this incoming parameter...
1327  int FI = MFI.CreateFixedObject(ObjSize,
1328  LRSaveSize + VA.getLocMemOffset(),
1329  true);
1330 
1331  // Create the SelectionDAG nodes corresponding to a load
1332  // from this parameter.
1333  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1334  ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
1335  MachinePointerInfo::getFixedStack(MF, FI));
1336  }
1337  const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
1338  ArgData.push_back(ADP);
1339  }
1340 
1341  // 1b. CopyFromReg vararg registers.
1342  if (isVarArg) {
1343  // Argument registers
1344  static const MCPhysReg ArgRegs[] = {
1345  XCore::R0, XCore::R1, XCore::R2, XCore::R3
1346  };
1348  unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
1349  if (FirstVAReg < array_lengthof(ArgRegs)) {
1350  int offset = 0;
1351  // Save remaining registers, storing higher register numbers at a higher
1352  // address
1353  for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
1354  // Create a stack slot
1355  int FI = MFI.CreateFixedObject(4, offset, true);
1356  if (i == (int)FirstVAReg) {
1357  XFI->setVarArgsFrameIndex(FI);
1358  }
1359  offset -= StackSlotSize;
1360  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1361  // Move argument from phys reg -> virt reg
1362  unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1363  RegInfo.addLiveIn(ArgRegs[i], VReg);
1364  SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1365  CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
1366  // Move argument from virt reg -> stack
1367  SDValue Store =
1368  DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
1369  MemOps.push_back(Store);
1370  }
1371  } else {
1372  // This will point to the next argument passed via stack.
1373  XFI->setVarArgsFrameIndex(
1374  MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
1375  true));
1376  }
1377  }
1378 
1379  // 2. chain CopyFromReg nodes into a TokenFactor.
1380  if (!CFRegNode.empty())
1381  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);
1382 
1383  // 3. Memcpy 'byVal' args & push final InVals.
1384  // Aggregates passed "byVal" need to be copied by the callee.
1385  // The callee will use a pointer to this copy, rather than the original
1386  // pointer.
1387  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
1388  ArgDE = ArgData.end();
1389  ArgDI != ArgDE; ++ArgDI) {
1390  if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
1391  unsigned Size = ArgDI->Flags.getByValSize();
1392  unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
1393  // Create a new object on the stack and copy the pointee into it.
1394  int FI = MFI.CreateStackObject(Size, Align, false);
1395  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1396  InVals.push_back(FIN);
1397  MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
1398  DAG.getConstant(Size, dl, MVT::i32),
1399  Align, false, false, false,
1400  MachinePointerInfo(),
1401  MachinePointerInfo()));
1402  } else {
1403  InVals.push_back(ArgDI->SDV);
1404  }
1405  }
1406 
1407  // 4. Chain mem ops nodes into a TokenFactor.
1408  if (!MemOps.empty()) {
1409  MemOps.push_back(Chain);
1410  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
1411  }
1412 
1413  return Chain;
1414 }
1415 
1416 //===----------------------------------------------------------------------===//
1417 // Return Value Calling Convention Implementation
1418 //===----------------------------------------------------------------------===//
1419 
1420 bool XCoreTargetLowering::
1421 CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1422  bool isVarArg,
1423  const SmallVectorImpl<ISD::OutputArg> &Outs,
1424  LLVMContext &Context) const {
1425  SmallVector<CCValAssign, 16> RVLocs;
1426  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1427  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
1428  return false;
1429  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
1430  return false;
1431  return true;
1432 }
1433 
1434 SDValue
1435 XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1436  bool isVarArg,
1437  const SmallVectorImpl<ISD::OutputArg> &Outs,
1438  const SmallVectorImpl<SDValue> &OutVals,
1439  const SDLoc &dl, SelectionDAG &DAG) const {
1440 
1441  XCoreFunctionInfo *XFI =
1442  DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
1443  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
1444 
1445  // CCValAssign - represent the assignment of
1446  // the return value to a location
1447  SmallVector<CCValAssign, 16> RVLocs;
1448 
1449  // CCState - Info about the registers and stack slot.
1450  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1451  *DAG.getContext());
1452 
1453  // Analyze return values.
1454  if (!isVarArg)
1455  CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);
1456 
1457  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
1458 
1459  SDValue Flag;
1460  SmallVector<SDValue, 4> RetOps(1, Chain);
1461 
1462  // Return on XCore is always a "retsp 0"
1463  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));
1464 
1465  SmallVector<SDValue, 4> MemOpChains;
1466  // Handle return values that must be copied to memory.
1467  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1468  CCValAssign &VA = RVLocs[i];
1469  if (VA.isRegLoc())
1470  continue;
1471  assert(VA.isMemLoc());
1472  if (isVarArg) {
1473  report_fatal_error("Can't return value from vararg function in memory");
1474  }
1475 
1476  int Offset = VA.getLocMemOffset();
1477  unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
1478  // Create the frame index object for the memory location.
1479  int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
1480 
1481  // Create a SelectionDAG node corresponding to a store
1482  // to this memory location.
1483  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1484  MemOpChains.push_back(DAG.getStore(
1485  Chain, dl, OutVals[i], FIN,
1486  MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
1487  }
1488 
1489  // Transform all store nodes into one single node because
1490  // all stores are independent of each other.
1491  if (!MemOpChains.empty())
1492  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1493 
1494  // Now handle return values copied to registers.
1495  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1496  CCValAssign &VA = RVLocs[i];
1497  if (!VA.isRegLoc())
1498  continue;
1499  // Copy the result values into the output registers.
1500  Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
1501 
1502  // guarantee that all emitted copies are
1503  // stuck together, avoiding something bad
1504  Flag = Chain.getValue(1);
1505  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1506  }
1507 
1508  RetOps[0] = Chain; // Update chain.
1509 
1510  // Add the flag if we have it.
1511  if (Flag.getNode())
1512  RetOps.push_back(Flag);
1513 
1514  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
1515 }
1516 
1517 //===----------------------------------------------------------------------===//
1518 // Other Lowering Code
1519 //===----------------------------------------------------------------------===//
1520 
1521 MachineBasicBlock *
1522 XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1523  MachineBasicBlock *BB) const {
1524  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1525  DebugLoc dl = MI.getDebugLoc();
1526  assert((MI.getOpcode() == XCore::SELECT_CC) &&
1527  "Unexpected instr type to insert");
1528 
1529  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1530  // control-flow pattern. The incoming instruction knows the destination vreg
1531  // to set, the condition code register to branch on, the true/false values to
1532  // select between, and a branch opcode to use.
1533  const BasicBlock *LLVM_BB = BB->getBasicBlock();
1534  MachineFunction::iterator It = ++BB->getIterator();
1535 
1536  // thisMBB:
1537  // ...
1538  // TrueVal = ...
1539  // cmpTY ccX, r1, r2
1540  // bCC copy1MBB
1541  // fallthrough --> copy0MBB
1542  MachineBasicBlock *thisMBB = BB;
1543  MachineFunction *F = BB->getParent();
1544  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1545  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
1546  F->insert(It, copy0MBB);
1547  F->insert(It, sinkMBB);
1548 
1549  // Transfer the remainder of BB and its successor edges to sinkMBB.
1550  sinkMBB->splice(sinkMBB->begin(), BB,
1551  std::next(MachineBasicBlock::iterator(MI)), BB->end());
1552  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
1553 
1554  // Next, add the true and fallthrough blocks as its successors.
1555  BB->addSuccessor(copy0MBB);
1556  BB->addSuccessor(sinkMBB);
1557 
1558  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
1559  .addReg(MI.getOperand(1).getReg())
1560  .addMBB(sinkMBB);
1561 
1562  // copy0MBB:
1563  // %FalseValue = ...
1564  // # fallthrough to sinkMBB
1565  BB = copy0MBB;
1566 
1567  // Update machine-CFG edges
1568  BB->addSuccessor(sinkMBB);
1569 
1570  // sinkMBB:
1571  // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1572  // ...
1573  BB = sinkMBB;
1574  BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
1575  .addReg(MI.getOperand(3).getReg())
1576  .addMBB(copy0MBB)
1577  .addReg(MI.getOperand(2).getReg())
1578  .addMBB(thisMBB);
1579 
1580  MI.eraseFromParent(); // The pseudo instruction is gone now.
1581  return BB;
1582 }
1583 
1584 //===----------------------------------------------------------------------===//
1585 // Target Optimization Hooks
1586 //===----------------------------------------------------------------------===//
1587 
1588 SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
1589  DAGCombinerInfo &DCI) const {
1590  SelectionDAG &DAG = DCI.DAG;
1591  SDLoc dl(N);
1592  switch (N->getOpcode()) {
1593  default: break;
1594  case ISD::INTRINSIC_VOID:
1595  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
1596  case Intrinsic::xcore_outt:
1597  case Intrinsic::xcore_outct:
1598  case Intrinsic::xcore_chkct: {
1599  SDValue OutVal = N->getOperand(3);
1600  // These instructions ignore the high bits.
1601  if (OutVal.hasOneUse()) {
1602  unsigned BitWidth = OutVal.getValueSizeInBits();
1603  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
1604  APInt KnownZero, KnownOne;
1605  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1606  !DCI.isBeforeLegalizeOps());
1607  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1608  if (TLO.ShrinkDemandedConstant(OutVal, DemandedMask) ||
1609  TLI.SimplifyDemandedBits(OutVal, DemandedMask, KnownZero, KnownOne,
1610  TLO))
1611  DCI.CommitTargetLoweringOpt(TLO);
1612  }
1613  break;
1614  }
1615  case Intrinsic::xcore_setpt: {
1616  SDValue Time = N->getOperand(3);
1617  // This instruction ignores the high bits.
1618  if (Time.hasOneUse()) {
1619  unsigned BitWidth = Time.getValueSizeInBits();
1620  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
1621  APInt KnownZero, KnownOne;
1622  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1623  !DCI.isBeforeLegalizeOps());
1624  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1625  if (TLO.ShrinkDemandedConstant(Time, DemandedMask) ||
1626  TLI.SimplifyDemandedBits(Time, DemandedMask, KnownZero, KnownOne,
1627  TLO))
1628  DCI.CommitTargetLoweringOpt(TLO);
1629  }
1630  break;
1631  }
1632  }
1633  break;
1634  case XCoreISD::LADD: {
1635  SDValue N0 = N->getOperand(0);
1636  SDValue N1 = N->getOperand(1);
1637  SDValue N2 = N->getOperand(2);
1638  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1639  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1640  EVT VT = N0.getValueType();
1641 
1642  // canonicalize constant to RHS
1643  if (N0C && !N1C)
1644  return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);
1645 
1646  // fold (ladd 0, 0, x) -> 0, x & 1
1647  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1648  SDValue Carry = DAG.getConstant(0, dl, VT);
1649  SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
1650  DAG.getConstant(1, dl, VT));
1651  SDValue Ops[] = { Result, Carry };
1652  return DAG.getMergeValues(Ops, dl);
1653  }
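// Concretely, ladd(0, 0, x) only adds the incoming carry, so the low result is
// the low bit of x and the outgoing carry is 0, which is exactly the AND-with-1
// and constant-0 pair built above.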
1654 
1655  // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
1656  // low bit set
1657  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1658  APInt KnownZero, KnownOne;
1659  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1660  VT.getSizeInBits() - 1);
1661  DAG.computeKnownBits(N2, KnownZero, KnownOne);
1662  if ((KnownZero & Mask) == Mask) {
1663  SDValue Carry = DAG.getConstant(0, dl, VT);
1664  SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
1665  SDValue Ops[] = { Result, Carry };
1666  return DAG.getMergeValues(Ops, dl);
1667  }
1668  }
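// The mask above covers every bit except bit 0, so this fires only when
// known-bits analysis proves y is 0 or 1; since the carry output is also
// unused, the low result of the long add is simply x + y (note on the check).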
1669  }
1670  break;
1671  case XCoreISD::LSUB: {
1672  SDValue N0 = N->getOperand(0);
1673  SDValue N1 = N->getOperand(1);
1674  SDValue N2 = N->getOperand(2);
1675  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1676  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1677  EVT VT = N0.getValueType();
1678 
1679  // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
1680  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1681  APInt KnownZero, KnownOne;
1682  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1683  VT.getSizeInBits() - 1);
1684  DAG.computeKnownBits(N2, KnownZero, KnownOne);
1685  if ((KnownZero & Mask) == Mask) {
1686  SDValue Borrow = N2;
1687  SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
1688  DAG.getConstant(0, dl, VT), N2);
1689  SDValue Ops[] = { Result, Borrow };
1690  return DAG.getMergeValues(Ops, dl);
1691  }
1692  }
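// Concretely, lsub(0, 0, b) with b proven to be 0 or 1 computes 0 - b, so the
// result is -b and the borrow out equals b itself, which is why N2 is reused
// directly as the Borrow value above.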
1693 
1694  // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
1695  // low bit set
1696  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1697  APInt KnownZero, KnownOne;
1698  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1699  VT.getSizeInBits() - 1);
1700  DAG.computeKnownBits(N2, KnownZero, KnownOne);
1701  if ((KnownZero & Mask) == Mask) {
1702  SDValue Borrow = DAG.getConstant(0, dl, VT);
1703  SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
1704  SDValue Ops[] = { Result, Borrow };
1705  return DAG.getMergeValues(Ops, dl);
1706  }
1707  }
1708  }
1709  break;
1710  case XCoreISD::LMUL: {
1711  SDValue N0 = N->getOperand(0);
1712  SDValue N1 = N->getOperand(1);
1713  SDValue N2 = N->getOperand(2);
1714  SDValue N3 = N->getOperand(3);
1715  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1716  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1717  EVT VT = N0.getValueType();
1718  // Canonicalize multiplicative constant to RHS. If both multiplicative
1719  // operands are constant canonicalize smallest to RHS.
1720  if ((N0C && !N1C) ||
1721  (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
1722  return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
1723  N1, N0, N2, N3);
1724 
1725  // lmul(x, 0, a, b)
1726  if (N1C && N1C->isNullValue()) {
1727  // If the high result is unused fold to add(a, b)
1728  if (N->hasNUsesOfValue(0, 0)) {
1729  SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
1730  SDValue Ops[] = { Lo, Lo };
1731  return DAG.getMergeValues(Ops, dl);
1732  }
1733  // Otherwise fold to ladd(a, b, 0)
1734  SDValue Result =
1735  DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
1736  SDValue Carry(Result.getNode(), 1);
1737  SDValue Ops[] = { Carry, Result };
1738  return DAG.getMergeValues(Ops, dl);
1739  }
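// LMUL forms x*y + a + b as a (high, low) pair, so with a zero multiplicand it
// reduces to a + b: a plain ADD when the high half is unused, otherwise an LADD
// whose carry output supplies the high word (summary of the two folds above).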
1740  }
1741  break;
1742  case ISD::ADD: {
1743  // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
1744  // lmul(x, y, a, b). The high result of lmul will be ignored.
1745  // This is only profitable if the intermediate results are unused
1746  // elsewhere.
1747  SDValue Mul0, Mul1, Addend0, Addend1;
1748  if (N->getValueType(0) == MVT::i32 &&
1749  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
1750  SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
1751  DAG.getVTList(MVT::i32, MVT::i32), Mul0,
1752  Mul1, Addend0, Addend1);
1753  SDValue Result(Ignored.getNode(), 1);
1754  return Result;
1755  }
1756  APInt HighMask = APInt::getHighBitsSet(64, 32);
1757  // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
1758  // lmul(x, y, a, b) if all operands are zero-extended. We do this
1759  // before type legalization as it is messy to match the operands after
1760  // that.
1761  if (N->getValueType(0) == MVT::i64 &&
1762  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
1763  DAG.MaskedValueIsZero(Mul0, HighMask) &&
1764  DAG.MaskedValueIsZero(Mul1, HighMask) &&
1765  DAG.MaskedValueIsZero(Addend0, HighMask) &&
1766  DAG.MaskedValueIsZero(Addend1, HighMask)) {
1767  SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1768  Mul0, DAG.getConstant(0, dl, MVT::i32));
1769  SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1770  Mul1, DAG.getConstant(0, dl, MVT::i32));
1771  SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1772  Addend0, DAG.getConstant(0, dl, MVT::i32));
1773  SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1774  Addend1, DAG.getConstant(0, dl, MVT::i32));
1775  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
1776  DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
1777  Addend0L, Addend1L);
1778  SDValue Lo(Hi.getNode(), 1);
1779  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
1780  }
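// For instance, when the high 32 bits of all four i64 operands are known zero,
// add(add(mul(x,y),a),b) equals x*y + a + b, which a single LMUL of the low
// halves computes exactly; the BUILD_PAIR above then reassembles its
// (high, low) results into the i64 (sketch of the transformation).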
1781  }
1782  break;
1783  case ISD::STORE: {
1784  // Replace unaligned store of unaligned load with memmove.
1785  StoreSDNode *ST = cast<StoreSDNode>(N);
1786  if (!DCI.isBeforeLegalize() ||
1787  allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
1788  ST->getAddressSpace(),
1789  ST->getAlignment()) ||
1790  ST->isVolatile() || ST->isIndexed()) {
1791  break;
1792  }
1793  SDValue Chain = ST->getChain();
1794 
1795  unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1796  assert((StoreBits % 8) == 0 &&
1797  "Store size in bits must be a multiple of 8");
1798  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
1799  ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
1800  unsigned Alignment = ST->getAlignment();
1801  if (Alignment >= ABIAlignment) {
1802  break;
1803  }
1804 
1805  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
1806  if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1807  LD->getAlignment() == Alignment &&
1808  !LD->isVolatile() && !LD->isIndexed() &&
1809  Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
1810  bool isTail = isInTailCallPosition(DAG, ST, Chain);
1811  return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
1812  LD->getBasePtr(),
1813  DAG.getConstant(StoreBits/8, dl, MVT::i32),
1814  Alignment, false, isTail, ST->getPointerInfo(),
1815  LD->getPointerInfo());
1816  }
1817  }
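// In effect, an unaligned word store whose value is an unaligned load from
// another address becomes a memmove of StoreBits/8 bytes between the two
// pointers, provided the load has no other users and nothing with side effects
// sits on the chain between load and store (summary of the conditions above).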
1818  break;
1819  }
1820  }
1821  return SDValue();
1822 }
1823 
1824 void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1825  APInt &KnownZero,
1826  APInt &KnownOne,
1827  const SelectionDAG &DAG,
1828  unsigned Depth) const {
1829  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
1830  switch (Op.getOpcode()) {
1831  default: break;
1832  case XCoreISD::LADD:
1833  case XCoreISD::LSUB:
1834  if (Op.getResNo() == 1) {
1835  // Top bits of carry / borrow are clear.
1836  KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
1837  KnownZero.getBitWidth() - 1);
1838  }
1839  break;
1840  case ISD::INTRINSIC_W_CHAIN:
1841  {
1842  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1843  switch (IntNo) {
1844  case Intrinsic::xcore_getts:
1845  // High bits are known to be zero.
1846  KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
1847  KnownZero.getBitWidth() - 16);
1848  break;
1849  case Intrinsic::xcore_int:
1850  case Intrinsic::xcore_inct:
1851  // High bits are known to be zero.
1852  KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
1853  KnownZero.getBitWidth() - 8);
1854  break;
1855  case Intrinsic::xcore_testct:
1856  // Result is either 0 or 1.
1857  KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
1858  KnownZero.getBitWidth() - 1);
1859  break;
1860  case Intrinsic::xcore_testwct:
1861  // Result is in the range 0 - 4.
1862  KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
1863  KnownZero.getBitWidth() - 3);
1864  break;
1865  }
1866  }
1867  break;
1868  }
1869 }
1870 
1871 //===----------------------------------------------------------------------===//
1872 // Addressing mode description hooks
1873 //===----------------------------------------------------------------------===//
1874 
1875 static inline bool isImmUs(int64_t val)
1876 {
1877  return (val >= 0 && val <= 11);
1878 }
1879 
1880 static inline bool isImmUs2(int64_t val)
1881 {
1882  return (val%2 == 0 && isImmUs(val/2));
1883 }
1884 
1885 static inline bool isImmUs4(int64_t val)
1886 {
1887  return (val%4 == 0 && isImmUs(val/4));
1888 }
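// isImmUs accepts the unscaled range 0..11; isImmUs2 and isImmUs4 accept that
// range scaled by the access size, so e.g. byte offset 7, halfword offset 14
// (7*2) and word offset 28 (7*4) all pass the respective checks (illustrative
// values).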
1889 
1890 /// isLegalAddressingMode - Return true if the addressing mode represented
1891 /// by AM is legal for this target, for a load/store of the specified type.
1892 bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1893  const AddrMode &AM, Type *Ty,
1894  unsigned AS) const {
1895  if (Ty->getTypeID() == Type::VoidTyID)
1896  return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
1897 
1898  unsigned Size = DL.getTypeAllocSize(Ty);
1899  if (AM.BaseGV) {
1900  return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
1901  AM.BaseOffs%4 == 0;
1902  }
1903 
1904  switch (Size) {
1905  case 1:
1906  // reg + imm
1907  if (AM.Scale == 0) {
1908  return isImmUs(AM.BaseOffs);
1909  }
1910  // reg + reg
1911  return AM.Scale == 1 && AM.BaseOffs == 0;
1912  case 2:
1913  case 3:
1914  // reg + imm
1915  if (AM.Scale == 0) {
1916  return isImmUs2(AM.BaseOffs);
1917  }
1918  // reg + reg<<1
1919  return AM.Scale == 2 && AM.BaseOffs == 0;
1920  default:
1921  // reg + imm
1922  if (AM.Scale == 0) {
1923  return isImmUs4(AM.BaseOffs);
1924  }
1925  // reg + reg<<2
1926  return AM.Scale == 4 && AM.BaseOffs == 0;
1927  }
1928 }
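// For example, for a word-sized access "base + 28" is accepted (Scale == 0,
// 28 = 7*4) and so is "base + index*4" (Scale == 4, zero offset), while
// "base + 6" or a scaled index combined with a nonzero offset is rejected
// (worked examples of the switch above).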
1929 
1930 //===----------------------------------------------------------------------===//
1931 // XCore Inline Assembly Support
1932 //===----------------------------------------------------------------------===//
1933 
1934 std::pair<unsigned, const TargetRegisterClass *>
1935 XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1936  StringRef Constraint,
1937  MVT VT) const {
1938  if (Constraint.size() == 1) {
1939  switch (Constraint[0]) {
1940  default : break;
1941  case 'r':
1942  return std::make_pair(0U, &XCore::GRRegsRegClass);
1943  }
1944  }
1945  // Use the default implementation in TargetLowering to convert the register
1946  // constraint into a member of a register class.
1947  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1948 }
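// As an illustrative note, inline assembly written against the constraint
// string "=r,r,r" has each operand placed in a register from GRRegsRegClass by
// this hook; any other single-letter constraint falls through to the generic
// TargetLowering handling.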