LLVM  10.0.0svn
XCoreISelLowering.cpp
Go to the documentation of this file.
1 //===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the XCoreTargetLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "XCoreISelLowering.h"
14 #include "XCore.h"
16 #include "XCoreSubtarget.h"
17 #include "XCoreTargetMachine.h"
18 #include "XCoreTargetObjectFile.h"
27 #include "llvm/IR/CallingConv.h"
28 #include "llvm/IR/Constants.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/GlobalAlias.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/IR/Intrinsics.h"
34 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/KnownBits.h"
38 #include <algorithm>
39 
40 using namespace llvm;
41 
42 #define DEBUG_TYPE "xcore-lower"
43 
44 const char *XCoreTargetLowering::
45 getTargetNodeName(unsigned Opcode) const
46 {
47  switch ((XCoreISD::NodeType)Opcode)
48  {
49  case XCoreISD::FIRST_NUMBER : break;
50  case XCoreISD::BL : return "XCoreISD::BL";
51  case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
52  case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
53  case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
54  case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
55  case XCoreISD::STWSP : return "XCoreISD::STWSP";
56  case XCoreISD::RETSP : return "XCoreISD::RETSP";
57  case XCoreISD::LADD : return "XCoreISD::LADD";
58  case XCoreISD::LSUB : return "XCoreISD::LSUB";
59  case XCoreISD::LMUL : return "XCoreISD::LMUL";
60  case XCoreISD::MACCU : return "XCoreISD::MACCU";
61  case XCoreISD::MACCS : return "XCoreISD::MACCS";
62  case XCoreISD::CRC8 : return "XCoreISD::CRC8";
63  case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
64  case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
65  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
66  case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
67  case XCoreISD::MEMBARRIER : return "XCoreISD::MEMBARRIER";
68  }
69  return nullptr;
70 }
71 
// NOTE(review): doxygen-scrape artifact — the constructor's opening line
// (original line 72) and most of its setOperationAction / setLoadExtAction /
// setTargetDAGCombine statements are elided here; only surviving comments and
// a few statements remain. The body below is NOT compilable as-is.
73  const XCoreSubtarget &Subtarget)
74  : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {
75 
76  // Set up the register classes.
77  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
78 
79  // Compute derived properties from the register classes
81 
83 
85 
86  // Use i32 for setcc operations results (slt, sgt, ...).
88  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
89 
90  // XCore does not have the NodeTypes below.
93 
94  // 64bit
104 
105  // Bit Manipulation
109 
111 
112  // Jump tables.
114 
117 
118  // Conversion of i64 -> double produces constantpool nodes
120 
121  // Loads
122  for (MVT VT : MVT::integer_valuetypes()) {
126 
129  }
130 
131  // Custom expand misaligned loads / stores.
134 
135  // Varargs
140 
141  // Dynamic stack
145 
146  // Exception handling
149 
150  // Atomic operations
151  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
152  // As we are always Sequential Consistent, an ATOMIC_FENCE becomes a no OP.
156 
157  // TRAMPOLINE is custom lowered.
160 
161  // We want to custom lower some of our intrinsics.
163 
167 
168  // We have target-specific dag combine patterns for the following nodes:
173 
176 }
// NOTE(review): the body below (original lines 179-194) has lost its
// signature line (original 178); presumably this is
// XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) — confirm against the
// upstream file. Visible logic: true only when Val is an i8 LOAD and both
// types are simple integers.
179  if (Val.getOpcode() != ISD::LOAD)
180  return false;
181 
182  EVT VT1 = Val.getValueType();
183  if (!VT1.isSimple() || !VT1.isInteger() ||
184  !VT2.isSimple() || !VT2.isInteger())
185  return false;
186 
187  switch (VT1.getSimpleVT().SimpleTy) {
188  default: break;
189  case MVT::i8:
190  return true;
191  }
192 
193  return false;
194 }
195 
// NOTE(review): the signature of LowerOperation (original lines 196-197) is
// elided by this scrape. The switch dispatches every custom-lowered opcode to
// its Lower* helper; anything not listed is a lowering bug (unreachable).
198  switch (Op.getOpcode())
199  {
200  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
201  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
202  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
203  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
204  case ISD::BR_JT: return LowerBR_JT(Op, DAG);
205  case ISD::LOAD: return LowerLOAD(Op, DAG);
206  case ISD::STORE: return LowerSTORE(Op, DAG);
207  case ISD::VAARG: return LowerVAARG(Op, DAG);
208  case ISD::VASTART: return LowerVASTART(Op, DAG);
209  case ISD::SMUL_LOHI: return LowerSMUL_LOHI(Op, DAG);
210  case ISD::UMUL_LOHI: return LowerUMUL_LOHI(Op, DAG);
211  // FIXME: Remove these when LegalizeDAGTypes lands.
212  case ISD::ADD:
213  case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
214  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
215  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
216  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
217  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
218  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
219  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
220  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
221  case ISD::ATOMIC_LOAD: return LowerATOMIC_LOAD(Op, DAG);
222  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
223  default:
224  llvm_unreachable("unimplemented operand");
225  }
226 }
227 
228 /// ReplaceNodeResults - Replace the results of node with an illegal result
229 /// type with new values built out of custom code.
// NOTE(review): the ReplaceNodeResults signature (original lines 230-231) is
// elided; only i64 ADD/SUB are custom-expanded here, via ExpandADDSUB.
232  SelectionDAG &DAG) const {
233  switch (N->getOpcode()) {
234  default:
235  llvm_unreachable("Don't know how to custom expand this!");
236  case ISD::ADD:
237  case ISD::SUB:
238  Results.push_back(ExpandADDSUB(N, DAG));
239  return;
240  }
241 }
242 
243 //===----------------------------------------------------------------------===//
244 // Misc Lower Operation implementation
245 //===----------------------------------------------------------------------===//
246 
247 SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
248  const GlobalValue *GV,
249  SelectionDAG &DAG) const {
250  // FIXME there is no actual debug info here
251  SDLoc dl(GA);
252 
253  if (GV->getValueType()->isFunctionTy())
254  return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
255 
256  const auto *GVar = dyn_cast<GlobalVariable>(GV);
257  if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
258  (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
259  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
260 
261  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
262 }
263 
// NOTE(review): original line 265 (the condition guarding the early
// "return true" — presumably a code-model check via XTL) is elided by this
// scrape; confirm against the upstream file before relying on this body.
// Otherwise: an object is "small" when its allocated size is known, non-zero
// and below CodeModelLargeSize.
264 static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
266  return true;
267 
268  Type *ObjType = GV->getValueType();
269  if (!ObjType->isSized())
270  return false;
271 
272  auto &DL = GV->getParent()->getDataLayout();
273  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
274  return ObjSize < CodeModelLargeSize && ObjSize != 0;
275 }
276 
// NOTE(review): original lines 301 and 305 are elided by this scrape — 301 is
// presumably the ConstantExpr::getGetElementPtr(...) head producing GAI, and
// 305 the remaining DAG.getLoad arguments; confirm against upstream.
277 SDValue XCoreTargetLowering::
278 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
279 {
280  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
281  const GlobalValue *GV = GN->getGlobal();
282  SDLoc DL(GN);
283  int64_t Offset = GN->getOffset();
284  if (IsSmallObject(GV, *this)) {
285  // We can only fold positive offsets that are a multiple of the word size.
286  int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
287  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
288  GA = getGlobalAddressWrapper(GA, GV, DAG);
289  // Handle the rest of the offset.
290  if (Offset != FoldedOffset) {
291  SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
292  GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
293  }
294  return GA;
295  } else {
296  // Ideally we would not fold in offset with an index <= 11.
// Large objects: materialize the (global + offset) address as a constant-pool
// entry and load it, rather than folding into the wrapper.
297  Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
298  Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
299  Ty = Type::getInt32Ty(*DAG.getContext());
300  Constant *Idx = ConstantInt::get(Ty, Offset);
302  Type::getInt8Ty(*DAG.getContext()), GA, Idx);
303  SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
304  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
306  }
307 }
308 
309 SDValue XCoreTargetLowering::
310 LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
311 {
312  SDLoc DL(Op);
313  auto PtrVT = getPointerTy(DAG.getDataLayout());
314  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
315  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);
316 
317  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
318 }
319 
320 SDValue XCoreTargetLowering::
321 LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
322 {
323  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
324  // FIXME there isn't really debug info here
325  SDLoc dl(CP);
326  EVT PtrVT = Op.getValueType();
327  SDValue Res;
328  if (CP->isMachineConstantPoolEntry()) {
329  Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
330  CP->getAlignment(), CP->getOffset());
331  } else {
332  Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
333  CP->getAlignment(), CP->getOffset());
334  }
335  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
336 }
337 
// NOTE(review): the brace below closes a function whose body (original lines
// 337-339, presumably getJumpTableEncoding) is elided by this scrape.
340 }
341 
// NOTE(review): original line 351 is elided — judging by the use of MF on the
// next line it presumably declared
// "MachineFunction &MF = DAG.getMachineFunction();" — confirm upstream.
// Small tables (<= 32 entries) use BR_JT directly; larger ones scale the
// index by 2 (SHL by 1) and use BR_JT32.
342 SDValue XCoreTargetLowering::
343 LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
344 {
345  SDValue Chain = Op.getOperand(0);
346  SDValue Table = Op.getOperand(1);
347  SDValue Index = Op.getOperand(2);
348  SDLoc dl(Op);
349  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
350  unsigned JTI = JT->getIndex();
352  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
353  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
354 
355  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
356  if (NumEntries <= 32) {
357  return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
358  }
359  assert((NumEntries >> 31) == 0);
360  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
361  DAG.getConstant(1, dl, MVT::i32));
362  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
363  ScaledIndex);
364 }
365 
366 SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
367  const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
368  SelectionDAG &DAG) const {
369  auto PtrVT = getPointerTy(DAG.getDataLayout());
370  if ((Offset & 0x3) == 0) {
371  return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
372  }
373  // Lower to pair of consecutive word aligned loads plus some bit shifting.
374  int32_t HighOffset = alignTo(Offset, 4);
375  int32_t LowOffset = HighOffset - 4;
376  SDValue LowAddr, HighAddr;
377  if (GlobalAddressSDNode *GASD =
378  dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
379  LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
380  LowOffset);
381  HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
382  HighOffset);
383  } else {
384  LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
385  DAG.getConstant(LowOffset, DL, MVT::i32));
386  HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
387  DAG.getConstant(HighOffset, DL, MVT::i32));
388  }
389  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
390  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
391 
392  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
393  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
394  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
395  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
396  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
397  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
398  High.getValue(1));
399  SDValue Ops[] = { Result, Chain };
400  return DAG.getMergeValues(Ops, DL);
401 }
402 
// NOTE(review): the signature line (original 403) is elided by this scrape;
// presumably "static bool isWordAligned(SDValue Value, SelectionDAG &DAG)" —
// confirm upstream. True when the low two bits of Value are known zero,
// i.e. the value is a multiple of 4.
404 {
405  KnownBits Known = DAG.computeKnownBits(Value);
406  return Known.countMinTrailingZeros() >= 2;
407 }
408 
// NOTE(review): original lines 413, 453, 466 and 473 are elided by this
// scrape (an assert head, the High load's pointer-info argument, the
// "TargetLowering::ArgListTy Args;" declaration and the
// "TargetLowering::CallLoweringInfo CLI(DAG);" declaration, presumably —
// confirm upstream). Strategy: legal accesses are left alone; word-aligned
// base+offset forms are split into aligned word loads; 2-aligned loads become
// two halfword loads; everything else calls __misaligned_load.
409 SDValue XCoreTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
410  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
411  LLVMContext &Context = *DAG.getContext();
412  LoadSDNode *LD = cast<LoadSDNode>(Op);
414  "Unexpected extension type");
415  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
416 
417  if (allowsMemoryAccess(Context, DAG.getDataLayout(), LD->getMemoryVT(),
418  *LD->getMemOperand()))
419  return SDValue();
420 
421  SDValue Chain = LD->getChain();
422  SDValue BasePtr = LD->getBasePtr();
423  SDLoc DL(Op);
424 
425  if (!LD->isVolatile()) {
426  const GlobalValue *GV;
427  int64_t Offset = 0;
428  if (DAG.isBaseWithConstantOffset(BasePtr) &&
429  isWordAligned(BasePtr->getOperand(0), DAG)) {
430  SDValue NewBasePtr = BasePtr->getOperand(0);
431  Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
432  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
433  Offset, DAG);
434  }
435  if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
436  MinAlign(GV->getAlignment(), 4) == 4) {
437  SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
438  BasePtr->getValueType(0));
439  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
440  Offset, DAG);
441  }
442  }
443 
444  if (LD->getAlignment() == 2) {
445  SDValue Low =
446  DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
447  LD->getPointerInfo(), MVT::i16,
448  /* Alignment = */ 2, LD->getMemOperand()->getFlags())
449  SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
450  DAG.getConstant(2, DL, MVT::i32));
451  SDValue High =
452  DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
454  /* Alignment = */ 2, LD->getMemOperand()->getFlags());
455  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
456  DAG.getConstant(16, DL, MVT::i32));
457  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
458  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
459  High.getValue(1));
460  SDValue Ops[] = { Result, Chain };
461  return DAG.getMergeValues(Ops, DL);
462  }
463 
464  // Lower to a call to __misaligned_load(BasePtr).
465  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
467  TargetLowering::ArgListEntry Entry;
468 
469  Entry.Ty = IntPtrTy;
470  Entry.Node = BasePtr;
471  Args.push_back(Entry);
472 
474  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
475  CallingConv::C, IntPtrTy,
476  DAG.getExternalSymbol("__misaligned_load",
477  getPointerTy(DAG.getDataLayout())),
478  std::move(Args));
479 
480  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
481  SDValue Ops[] = { CallResult.first, CallResult.second };
482  return DAG.getMergeValues(Ops, DL);
483 }
484 
// NOTE(review): original lines 517, 527 and 529 are elided by this scrape
// (presumably "TargetLowering::ArgListTy Args;", the CallLoweringInfo
// declaration and the calling-convention argument of setCallee — confirm
// upstream). Strategy mirrors LowerLOAD: legal accesses untouched, 2-aligned
// stores become two halfword truncating stores, everything else calls
// __misaligned_store(BasePtr, Value).
485 SDValue XCoreTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
486  LLVMContext &Context = *DAG.getContext();
487  StoreSDNode *ST = cast<StoreSDNode>(Op);
488  assert(!ST->isTruncatingStore() && "Unexpected store type");
489  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
490 
491  if (allowsMemoryAccess(Context, DAG.getDataLayout(), ST->getMemoryVT(),
492  *ST->getMemOperand()))
493  return SDValue();
494 
495  SDValue Chain = ST->getChain();
496  SDValue BasePtr = ST->getBasePtr();
497  SDValue Value = ST->getValue();
498  SDLoc dl(Op);
499 
500  if (ST->getAlignment() == 2) {
501  SDValue Low = Value;
502  SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
503  DAG.getConstant(16, dl, MVT::i32));
504  SDValue StoreLow = DAG.getTruncStore(
505  Chain, dl, Low, BasePtr, ST->getPointerInfo(), MVT::i16,
506  /* Alignment = */ 2, ST->getMemOperand()->getFlags());
507  SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
508  DAG.getConstant(2, dl, MVT::i32));
509  SDValue StoreHigh = DAG.getTruncStore(
510  Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
511  MVT::i16, /* Alignment = */ 2, ST->getMemOperand()->getFlags());
512  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
513  }
514 
515  // Lower to a call to __misaligned_store(BasePtr, Value).
516  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
518  TargetLowering::ArgListEntry Entry;
519 
520  Entry.Ty = IntPtrTy;
521  Entry.Node = BasePtr;
522  Args.push_back(Entry);
523 
524  Entry.Node = Value;
525  Args.push_back(Entry);
526 
528  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
530  DAG.getExternalSymbol("__misaligned_store",
531  getPointerTy(DAG.getDataLayout())),
532  std::move(Args));
533 
534  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
535  return CallResult.second;
536 }
537 
538 SDValue XCoreTargetLowering::
539 LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
540 {
541  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
542  "Unexpected operand to lower!");
543  SDLoc dl(Op);
544  SDValue LHS = Op.getOperand(0);
545  SDValue RHS = Op.getOperand(1);
546  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
547  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
548  DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
549  LHS, RHS);
550  SDValue Lo(Hi.getNode(), 1);
551  SDValue Ops[] = { Lo, Hi };
552  return DAG.getMergeValues(Ops, dl);
553 }
554 
555 SDValue XCoreTargetLowering::
556 LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
557 {
558  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
559  "Unexpected operand to lower!");
560  SDLoc dl(Op);
561  SDValue LHS = Op.getOperand(0);
562  SDValue RHS = Op.getOperand(1);
563  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
564  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
565  DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
566  Zero, Zero);
567  SDValue Lo(Hi.getNode(), 1);
568  SDValue Ops[] = { Lo, Hi };
569  return DAG.getMergeValues(Ops, dl);
570 }
571 
572 /// isADDADDMUL - Return whether Op is in a form that is equivalent to
573 /// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
574 /// each intermediate result in the calculation must also have a single use.
575 /// If the Op is in the correct form the constituent parts are written to Mul0,
576 /// Mul1, Addend0 and Addend1.
577 static bool
578 isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
579  SDValue &Addend1, bool requireIntermediatesHaveOneUse)
580 {
581  if (Op.getOpcode() != ISD::ADD)
582  return false;
583  SDValue N0 = Op.getOperand(0);
584  SDValue N1 = Op.getOperand(1);
585  SDValue AddOp;
586  SDValue OtherOp;
587  if (N0.getOpcode() == ISD::ADD) {
588  AddOp = N0;
589  OtherOp = N1;
590  } else if (N1.getOpcode() == ISD::ADD) {
591  AddOp = N1;
592  OtherOp = N0;
593  } else {
594  return false;
595  }
596  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
597  return false;
598  if (OtherOp.getOpcode() == ISD::MUL) {
599  // add(add(a,b),mul(x,y))
600  if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
601  return false;
602  Mul0 = OtherOp.getOperand(0);
603  Mul1 = OtherOp.getOperand(1);
604  Addend0 = AddOp.getOperand(0);
605  Addend1 = AddOp.getOperand(1);
606  return true;
607  }
608  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
609  // add(add(mul(x,y),a),b)
610  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
611  return false;
612  Mul0 = AddOp.getOperand(0).getOperand(0);
613  Mul1 = AddOp.getOperand(0).getOperand(1);
614  Addend0 = AddOp.getOperand(1);
615  Addend1 = OtherOp;
616  return true;
617  }
618  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
619  // add(add(a,mul(x,y)),b)
620  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
621  return false;
622  Mul0 = AddOp.getOperand(1).getOperand(0);
623  Mul1 = AddOp.getOperand(1).getOperand(1);
624  Addend0 = AddOp.getOperand(0);
625  Addend1 = OtherOp;
626  return true;
627  }
628  return false;
629 }
630 
631 SDValue XCoreTargetLowering::
632 TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
633 {
634  SDValue Mul;
635  SDValue Other;
636  if (N->getOperand(0).getOpcode() == ISD::MUL) {
637  Mul = N->getOperand(0);
638  Other = N->getOperand(1);
639  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
640  Mul = N->getOperand(1);
641  Other = N->getOperand(0);
642  } else {
643  return SDValue();
644  }
645  SDLoc dl(N);
646  SDValue LL, RL, AddendL, AddendH;
647  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
648  Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
649  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
650  Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
651  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
652  Other, DAG.getConstant(0, dl, MVT::i32));
653  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
654  Other, DAG.getConstant(1, dl, MVT::i32));
655  APInt HighMask = APInt::getHighBitsSet(64, 32);
656  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
657  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
658  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
659  DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
660  // The inputs are both zero-extended.
661  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
662  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
663  AddendL, LL, RL);
664  SDValue Lo(Hi.getNode(), 1);
665  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
666  }
667  if (LHSSB > 32 && RHSSB > 32) {
668  // The inputs are both sign-extended.
669  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
670  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
671  AddendL, LL, RL);
672  SDValue Lo(Hi.getNode(), 1);
673  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
674  }
675  SDValue LH, RH;
676  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
677  Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
678  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
679  Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
680  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
681  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
682  AddendL, LL, RL);
683  SDValue Lo(Hi.getNode(), 1);
684  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
685  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
686  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
687  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
688  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
689 }
690 
// NOTE(review): original lines 705, 708, 711, 714 (the heads of the four
// EXTRACT_ELEMENT statements defining LHSL/LHSH/RHSL/RHSH, presumably) and
// 720 (the "XCoreISD::LSUB;" second half of the ternary) are elided by this
// scrape — confirm upstream. Expands an i64 ADD/SUB into two chained 32-bit
// LADD/LSUB ops with carry threaded from low word to high word.
691 SDValue XCoreTargetLowering::
692 ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
693 {
694  assert(N->getValueType(0) == MVT::i64 &&
695  (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
696  "Unknown operand to lower!");
697 
698  if (N->getOpcode() == ISD::ADD)
699  if (SDValue Result = TryExpandADDWithMul(N, DAG))
700  return Result;
701 
702  SDLoc dl(N);
703 
704  // Extract components
706  N->getOperand(0),
707  DAG.getConstant(0, dl, MVT::i32));
709  N->getOperand(0),
710  DAG.getConstant(1, dl, MVT::i32));
712  N->getOperand(1),
713  DAG.getConstant(0, dl, MVT::i32));
715  N->getOperand(1),
716  DAG.getConstant(1, dl, MVT::i32));
717 
718  // Expand
719  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
721  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
722  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
723  LHSL, RHSL, Zero);
724  SDValue Carry(Lo.getNode(), 1);
725 
726  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
727  LHSH, RHSH, Carry);
728  SDValue Ignored(Hi.getNode(), 1);
729  // Merge the pieces
730  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
731 }
732 
733 SDValue XCoreTargetLowering::
734 LowerVAARG(SDValue Op, SelectionDAG &DAG) const
735 {
736  // Whist llvm does not support aggregate varargs we can ignore
737  // the possibility of the ValueType being an implicit byVal vararg.
738  SDNode *Node = Op.getNode();
739  EVT VT = Node->getValueType(0); // not an aggregate
740  SDValue InChain = Node->getOperand(0);
741  SDValue VAListPtr = Node->getOperand(1);
742  EVT PtrVT = VAListPtr.getValueType();
743  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
744  SDLoc dl(Node);
745  SDValue VAList =
746  DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
747  // Increment the pointer, VAList, to the next vararg
748  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
749  DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
750  dl));
751  // Store the incremented VAList to the legalized pointer
752  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
753  MachinePointerInfo(SV));
754  // Load the actual argument out of the pointer VAList
755  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
756 }
757 
// NOTE(review): original lines 764-766 and 768 of LowerVASTART are elided by
// this scrape (presumably the MachineFunction/XCoreFunctionInfo lookup, the
// frame-index Addr computation, and the MachinePointerInfo argument of
// getStore) — confirm upstream.
758 SDValue XCoreTargetLowering::
759 LowerVASTART(SDValue Op, SelectionDAG &DAG) const
760 {
761  SDLoc dl(Op);
762  // vastart stores the address of the VarArgsFrameIndex slot into the
763  // memory location argument
767  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
769 }
770 
// NOTE(review): original line 781 is elided — presumably
// "MachineFunction &MF = DAG.getMachineFunction();" given MF's use below.
771 SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
772  SelectionDAG &DAG) const {
773  // This nodes represent llvm.frameaddress on the DAG.
774  // It takes one operand, the index of the frame address to return.
775  // An index of zero corresponds to the current function's frame address.
776  // An index of one to the parent's frame address, and so on.
777  // Depths > 0 not supported yet!
778  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
779  return SDValue();
780 
782  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
783  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
784  RegInfo->getFrameRegister(MF), MVT::i32);
785 }
786 
// NOTE(review): original lines 797-798 (the MachineFunction / XFI
// declarations, presumably) and 803 (the load's MachinePointerInfo argument)
// are elided — confirm upstream. Loads the return address from the LR spill
// slot created by the function info.
787 SDValue XCoreTargetLowering::
788 LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
789  // This nodes represent llvm.returnaddress on the DAG.
790  // It takes one operand, the index of the return address to return.
791  // An index of zero corresponds to the current function's return address.
792  // An index of one to the parent's return address, and so on.
793  // Depths > 0 not supported yet!
794  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
795  return SDValue();
796 
799  int FI = XFI->createLRSpillSlot(MF);
800  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
801  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
802  DAG.getEntryNode(), FIN,
804 }
805 
// NOTE(review): original line 812 (the body's return statement, presumably
// emitting an XCoreISD::FRAME_TO_ARGS_OFFSET node) is elided — confirm
// upstream.
806 SDValue XCoreTargetLowering::
807 LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
808  // This node represents offset from frame pointer to first on-stack argument.
809  // This is needed for correct stack adjustment during unwind.
810  // However, we don't know the offset until after the frame has be finalised.
811  // This is done during the XCoreFTAOElim pass.
813 }
814 
// NOTE(review): original line 821 is elided by this scrape — presumably
// "MachineFunction &MF = DAG.getMachineFunction();" given MF's use below;
// confirm upstream.
815 SDValue XCoreTargetLowering::
816 LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
817  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
818  // This node represents 'eh_return' gcc dwarf builtin, which is used to
819  // return from exception. The general meaning is: adjust stack by OFFSET and
820  // pass execution to HANDLER.
822  SDValue Chain = Op.getOperand(0);
823  SDValue Offset = Op.getOperand(1);
824  SDValue Handler = Op.getOperand(2);
825  SDLoc dl(Op);
826 
827  // Absolute SP = (FP + FrameToArgs) + Offset
828  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
829  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
830  RegInfo->getFrameRegister(MF), MVT::i32);
831  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
832  MVT::i32);
833  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
834  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);
835 
836  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
837  // which leaves 2 caller saved registers, R2 & R3 for us to use.
838  unsigned StackReg = XCore::R2;
839  unsigned HandlerReg = XCore::R3;
840 
841  SDValue OutChains[] = {
842  DAG.getCopyToReg(Chain, dl, StackReg, Stack),
843  DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
844  };
845 
846  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
847 
848  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
849  DAG.getRegister(StackReg, MVT::i32),
850  DAG.getRegister(HandlerReg, MVT::i32));
851 
852 }
853 
854 SDValue XCoreTargetLowering::
855 LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
856  return Op.getOperand(0);
857 }
858 
859 SDValue XCoreTargetLowering::
860 LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
861  SDValue Chain = Op.getOperand(0);
862  SDValue Trmp = Op.getOperand(1); // trampoline
863  SDValue FPtr = Op.getOperand(2); // nested function
864  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
865 
866  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
867 
868  // .align 4
869  // LDAPF_u10 r11, nest
870  // LDW_2rus r11, r11[0]
871  // STWSP_ru6 r11, sp[0]
872  // LDAPF_u10 r11, fptr
873  // LDW_2rus r11, r11[0]
874  // BAU_1r r11
875  // nest:
876  // .word nest
877  // fptr:
878  // .word fptr
879  SDValue OutChains[5];
880 
881  SDValue Addr = Trmp;
882 
883  SDLoc dl(Op);
884  OutChains[0] =
885  DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
886  MachinePointerInfo(TrmpAddr));
887 
888  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
889  DAG.getConstant(4, dl, MVT::i32));
890  OutChains[1] =
891  DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
892  MachinePointerInfo(TrmpAddr, 4));
893 
894  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
895  DAG.getConstant(8, dl, MVT::i32));
896  OutChains[2] =
897  DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
898  MachinePointerInfo(TrmpAddr, 8));
899 
900  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
901  DAG.getConstant(12, dl, MVT::i32));
902  OutChains[3] =
903  DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));
904 
905  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
906  DAG.getConstant(16, dl, MVT::i32));
907  OutChains[4] =
908  DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));
909 
910  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
911 }
912 
913 SDValue XCoreTargetLowering::
914 LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
915  SDLoc DL(Op);
916  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
917  switch (IntNo) {
918  case Intrinsic::xcore_crc8:
919  EVT VT = Op.getValueType();
920  SDValue Data =
921  DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
922  Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
923  SDValue Crc(Data.getNode(), 1);
924  SDValue Results[] = { Crc, Data };
925  return DAG.getMergeValues(Results, DL);
926  }
927  return SDValue();
928 }
929 
930 SDValue XCoreTargetLowering::
931 LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
932  SDLoc DL(Op);
933  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
934 }
935 
// NOTE(review): the first lines of an assert (original 940-941, presumably
// checking the atomic ordering is unordered/monotonic, matching the string on
// the next line) are elided by this scrape — confirm upstream. With fences
// inserted separately, a monotonic atomic load of i32/i16/i8 lowers to a
// plain (extending) load; anything else is left unhandled.
936 SDValue XCoreTargetLowering::
937 LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
938  AtomicSDNode *N = cast<AtomicSDNode>(Op);
939  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
942  "setInsertFencesForAtomic(true) expects unordered / monotonic");
943  if (N->getMemoryVT() == MVT::i32) {
944  if (N->getAlignment() < 4)
945  report_fatal_error("atomic load must be aligned");
946  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
947  N->getChain(), N->getBasePtr(), N->getPointerInfo(),
948  N->getAlignment(), N->getMemOperand()->getFlags(),
949  N->getAAInfo(), N->getRanges());
950  }
951  if (N->getMemoryVT() == MVT::i16) {
952  if (N->getAlignment() < 2)
953  report_fatal_error("atomic load must be aligned");
954  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
955  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
956  N->getAlignment(), N->getMemOperand()->getFlags(),
957  N->getAAInfo());
958  }
959  if (N->getMemoryVT() == MVT::i8)
960  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
961  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
962  N->getAlignment(), N->getMemOperand()->getFlags(),
963  N->getAAInfo());
964  return SDValue();
965 }
966 
// NOTE(review): as above, the head of the ordering assert (original 971-972)
// is elided. A monotonic atomic store of i32/i16/i8 lowers to a plain
// (truncating) store.
967 SDValue XCoreTargetLowering::
968 LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
969  AtomicSDNode *N = cast<AtomicSDNode>(Op);
970  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
973  "setInsertFencesForAtomic(true) expects unordered / monotonic");
974  if (N->getMemoryVT() == MVT::i32) {
975  if (N->getAlignment() < 4)
976  report_fatal_error("atomic store must be aligned");
977  return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
978  N->getPointerInfo(), N->getAlignment(),
979  N->getMemOperand()->getFlags(), N->getAAInfo());
980  }
981  if (N->getMemoryVT() == MVT::i16) {
982  if (N->getAlignment() < 2)
983  report_fatal_error("atomic store must be aligned");
984  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
985  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
986  N->getAlignment(), N->getMemOperand()->getFlags(),
987  N->getAAInfo());
988  }
989  if (N->getMemoryVT() == MVT::i8)
990  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
991  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
992  N->getAlignment(), N->getMemOperand()->getFlags(),
993  N->getAAInfo());
994  return SDValue();
995 }
996 
998 XCoreTargetLowering::getMMOFlags(const Instruction &I) const {
999  // Because of how we convert atomic_load and atomic_store to normal loads and
1000  // stores in the DAG, we need to ensure that the MMOs are marked volatile
1001  // since DAGCombine hasn't been updated to account for atomic, but non
1002  // volatile loads. (See D57601)
1003  if (auto *SI = dyn_cast<StoreInst>(&I))
1004  if (SI->isAtomic())
1006  if (auto *LI = dyn_cast<LoadInst>(&I))
1007  if (LI->isAtomic())
1009  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
1010  if (AI->isAtomic())
1012  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
1013  if (AI->isAtomic())
1016 }
1017 
1018 //===----------------------------------------------------------------------===//
1019 // Calling Convention Implementation
1020 //===----------------------------------------------------------------------===//
1021 
1022 #include "XCoreGenCallingConv.inc"
1023 
1024 //===----------------------------------------------------------------------===//
1025 // Call Calling Convention Implementation
1026 //===----------------------------------------------------------------------===//
1027 
1028 /// XCore call implementation
1029 SDValue
1030 XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1031  SmallVectorImpl<SDValue> &InVals) const {
1032  SelectionDAG &DAG = CLI.DAG;
1033  SDLoc &dl = CLI.DL;
1035  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1037  SDValue Chain = CLI.Chain;
1038  SDValue Callee = CLI.Callee;
1039  bool &isTailCall = CLI.IsTailCall;
1040  CallingConv::ID CallConv = CLI.CallConv;
1041  bool isVarArg = CLI.IsVarArg;
1042 
1043  // XCore target does not yet support tail call optimization.
1044  isTailCall = false;
1045 
1046  // For now, only CallingConv::C implemented
1047  switch (CallConv)
1048  {
1049  default:
1050  report_fatal_error("Unsupported calling convention");
1051  case CallingConv::Fast:
1052  case CallingConv::C:
1053  return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
1054  Outs, OutVals, Ins, dl, DAG, InVals);
1055  }
1056 }
1057 
1058 /// LowerCallResult - Lower the result values of a call into the
1059 /// appropriate copies out of appropriate physical registers / memory locations.
1061  const SmallVectorImpl<CCValAssign> &RVLocs,
1062  const SDLoc &dl, SelectionDAG &DAG,
1063  SmallVectorImpl<SDValue> &InVals) {
1064  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
1065  // Copy results out of physical registers.
1066  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1067  const CCValAssign &VA = RVLocs[i];
1068  if (VA.isRegLoc()) {
1069  Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
1070  InFlag).getValue(1);
1071  InFlag = Chain.getValue(2);
1072  InVals.push_back(Chain.getValue(0));
1073  } else {
1074  assert(VA.isMemLoc());
1075  ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
1076  InVals.size()));
1077  // Reserve space for this result.
1078  InVals.push_back(SDValue());
1079  }
1080  }
1081 
1082  // Copy results out of memory.
1083  SmallVector<SDValue, 4> MemOpChains;
1084  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
1085  int offset = ResultMemLocs[i].first;
1086  unsigned index = ResultMemLocs[i].second;
1087  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
1088  SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
1089  SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
1090  InVals[index] = load;
1091  MemOpChains.push_back(load.getValue(1));
1092  }
1093 
1094  // Transform all loads nodes into one single node because
1095  // all load nodes are independent of each other.
1096  if (!MemOpChains.empty())
1097  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1098 
1099  return Chain;
1100 }
1101 
1102 /// LowerCCCCallTo - functions arguments are copied from virtual
1103 /// regs to (physical regs)/(stack frame), CALLSEQ_START and
1104 /// CALLSEQ_END are emitted.
1105 /// TODO: isTailCall, sret.
1106 SDValue XCoreTargetLowering::LowerCCCCallTo(
1107  SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
1108  bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
1109  const SmallVectorImpl<SDValue> &OutVals,
1110  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1111  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1112 
1113  // Analyze operands of the call, assigning locations to each operand.
1115  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1116  *DAG.getContext());
1117 
1118  // The ABI dictates there should be one stack slot available to the callee
1119  // on function entry (for saving lr).
1120  CCInfo.AllocateStack(4, 4);
1121 
1122  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
1123 
1125  // Analyze return values to determine the number of bytes of stack required.
1126  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1127  *DAG.getContext());
1128  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
1129  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
1130 
1131  // Get a count of how many bytes are to be pushed on the stack.
1132  unsigned NumBytes = RetCCInfo.getNextStackOffset();
1133  auto PtrVT = getPointerTy(DAG.getDataLayout());
1134 
1135  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
1136 
1138  SmallVector<SDValue, 12> MemOpChains;
1139 
1140  // Walk the register/memloc assignments, inserting copies/loads.
1141  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1142  CCValAssign &VA = ArgLocs[i];
1143  SDValue Arg = OutVals[i];
1144 
1145  // Promote the value if needed.
1146  switch (VA.getLocInfo()) {
1147  default: llvm_unreachable("Unknown loc info!");
1148  case CCValAssign::Full: break;
1149  case CCValAssign::SExt:
1150  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1151  break;
1152  case CCValAssign::ZExt:
1153  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1154  break;
1155  case CCValAssign::AExt:
1156  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1157  break;
1158  }
1159 
1160  // Arguments that can be passed on register must be kept at
1161  // RegsToPass vector
1162  if (VA.isRegLoc()) {
1163  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1164  } else {
1165  assert(VA.isMemLoc());
1166 
1167  int Offset = VA.getLocMemOffset();
1168 
1169  MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
1170  Chain, Arg,
1171  DAG.getConstant(Offset/4, dl,
1172  MVT::i32)));
1173  }
1174  }
1175 
1176  // Transform all store nodes into one single node because
1177  // all store nodes are independent of each other.
1178  if (!MemOpChains.empty())
1179  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1180 
1181  // Build a sequence of copy-to-reg nodes chained together with token
1182  // chain and flag operands which copy the outgoing args into registers.
1183  // The InFlag in necessary since all emitted instructions must be
1184  // stuck together.
1185  SDValue InFlag;
1186  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1187  Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1188  RegsToPass[i].second, InFlag);
1189  InFlag = Chain.getValue(1);
1190  }
1191 
1192  // If the callee is a GlobalAddress node (quite common, every direct call is)
1193  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1194  // Likewise ExternalSymbol -> TargetExternalSymbol.
1195  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1196  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
1197  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1198  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1199 
1200  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
1201  // = Chain, Callee, Reg#1, Reg#2, ...
1202  //
1203  // Returns a chain & a flag for retval copy to use.
1204  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1206  Ops.push_back(Chain);
1207  Ops.push_back(Callee);
1208 
1209  // Add argument registers to the end of the list so that they are
1210  // known live into the call.
1211  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1212  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1213  RegsToPass[i].second.getValueType()));
1214 
1215  if (InFlag.getNode())
1216  Ops.push_back(InFlag);
1217 
1218  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
1219  InFlag = Chain.getValue(1);
1220 
1221  // Create the CALLSEQ_END node.
1222  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
1223  DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
1224  InFlag = Chain.getValue(1);
1225 
1226  // Handle result values, copying them out of physregs into vregs that we
1227  // return.
1228  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
1229 }
1230 
1231 //===----------------------------------------------------------------------===//
1232 // Formal Arguments Calling Convention Implementation
1233 //===----------------------------------------------------------------------===//
1234 
namespace {
  // Pairs a lowered formal-argument value with its ISD argument flags, so the
  // byval-copy stage of LowerCCCArguments can consult the flags after all
  // CopyFromReg nodes have been emitted.
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
}
1238 
1239 /// XCore formal arguments implementation
1240 SDValue XCoreTargetLowering::LowerFormalArguments(
1241  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1242  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1243  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1244  switch (CallConv)
1245  {
1246  default:
1247  report_fatal_error("Unsupported calling convention");
1248  case CallingConv::C:
1249  case CallingConv::Fast:
1250  return LowerCCCArguments(Chain, CallConv, isVarArg,
1251  Ins, dl, DAG, InVals);
1252  }
1253 }
1254 
1255 /// LowerCCCArguments - transform physical registers into
1256 /// virtual registers and generate load operations for
1257 /// arguments places on the stack.
1258 /// TODO: sret
1259 SDValue XCoreTargetLowering::LowerCCCArguments(
1260  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1261  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1262  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1263  MachineFunction &MF = DAG.getMachineFunction();
1264  MachineFrameInfo &MFI = MF.getFrameInfo();
1265  MachineRegisterInfo &RegInfo = MF.getRegInfo();
1267 
1268  // Assign locations to all of the incoming arguments.
1270  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1271  *DAG.getContext());
1272 
1273  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
1274 
1275  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
1276 
1277  unsigned LRSaveSize = StackSlotSize;
1278 
1279  if (!isVarArg)
1280  XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);
1281 
1282  // All getCopyFromReg ops must precede any getMemcpys to prevent the
1283  // scheduler clobbering a register before it has been copied.
1284  // The stages are:
1285  // 1. CopyFromReg (and load) arg & vararg registers.
1286  // 2. Chain CopyFromReg nodes into a TokenFactor.
1287  // 3. Memcpy 'byVal' args & push final InVals.
1288  // 4. Chain mem ops nodes into a TokenFactor.
1289  SmallVector<SDValue, 4> CFRegNode;
1291  SmallVector<SDValue, 4> MemOps;
1292 
1293  // 1a. CopyFromReg (and load) arg registers.
1294  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1295 
1296  CCValAssign &VA = ArgLocs[i];
1297  SDValue ArgIn;
1298 
1299  if (VA.isRegLoc()) {
1300  // Arguments passed in registers
1301  EVT RegVT = VA.getLocVT();
1302  switch (RegVT.getSimpleVT().SimpleTy) {
1303  default:
1304  {
1305 #ifndef NDEBUG
1306  errs() << "LowerFormalArguments Unhandled argument type: "
1307  << RegVT.getEVTString() << "\n";
1308 #endif
1309  llvm_unreachable(nullptr);
1310  }
1311  case MVT::i32:
1312  Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1313  RegInfo.addLiveIn(VA.getLocReg(), VReg);
1314  ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
1315  CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
1316  }
1317  } else {
1318  // sanity check
1319  assert(VA.isMemLoc());
1320  // Load the argument to a virtual register
1321  unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
1322  if (ObjSize > StackSlotSize) {
1323  errs() << "LowerFormalArguments Unhandled argument type: "
1324  << EVT(VA.getLocVT()).getEVTString()
1325  << "\n";
1326  }
1327  // Create the frame index object for this incoming parameter...
1328  int FI = MFI.CreateFixedObject(ObjSize,
1329  LRSaveSize + VA.getLocMemOffset(),
1330  true);
1331 
1332  // Create the SelectionDAG nodes corresponding to a load
1333  //from this parameter
1334  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1335  ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
1337  }
1338  const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
1339  ArgData.push_back(ADP);
1340  }
1341 
1342  // 1b. CopyFromReg vararg registers.
1343  if (isVarArg) {
1344  // Argument registers
1345  static const MCPhysReg ArgRegs[] = {
1346  XCore::R0, XCore::R1, XCore::R2, XCore::R3
1347  };
1349  unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
1350  if (FirstVAReg < array_lengthof(ArgRegs)) {
1351  int offset = 0;
1352  // Save remaining registers, storing higher register numbers at a higher
1353  // address
1354  for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
1355  // Create a stack slot
1356  int FI = MFI.CreateFixedObject(4, offset, true);
1357  if (i == (int)FirstVAReg) {
1358  XFI->setVarArgsFrameIndex(FI);
1359  }
1360  offset -= StackSlotSize;
1361  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1362  // Move argument from phys reg -> virt reg
1363  Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1364  RegInfo.addLiveIn(ArgRegs[i], VReg);
1365  SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1366  CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
1367  // Move argument from virt reg -> stack
1368  SDValue Store =
1369  DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
1370  MemOps.push_back(Store);
1371  }
1372  } else {
1373  // This will point to the next argument passed via stack.
1374  XFI->setVarArgsFrameIndex(
1375  MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
1376  true));
1377  }
1378  }
1379 
1380  // 2. chain CopyFromReg nodes into a TokenFactor.
1381  if (!CFRegNode.empty())
1382  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);
1383 
1384  // 3. Memcpy 'byVal' args & push final InVals.
1385  // Aggregates passed "byVal" need to be copied by the callee.
1386  // The callee will use a pointer to this copy, rather than the original
1387  // pointer.
1388  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
1389  ArgDE = ArgData.end();
1390  ArgDI != ArgDE; ++ArgDI) {
1391  if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
1392  unsigned Size = ArgDI->Flags.getByValSize();
1393  unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
1394  // Create a new object on the stack and copy the pointee into it.
1395  int FI = MFI.CreateStackObject(Size, Align, false);
1396  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1397  InVals.push_back(FIN);
1398  MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
1399  DAG.getConstant(Size, dl, MVT::i32),
1400  Align, false, false, false,
1402  MachinePointerInfo()));
1403  } else {
1404  InVals.push_back(ArgDI->SDV);
1405  }
1406  }
1407 
1408  // 4, chain mem ops nodes into a TokenFactor.
1409  if (!MemOps.empty()) {
1410  MemOps.push_back(Chain);
1411  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
1412  }
1413 
1414  return Chain;
1415 }
1416 
1417 //===----------------------------------------------------------------------===//
1418 // Return Value Calling Convention Implementation
1419 //===----------------------------------------------------------------------===//
1420 
1421 bool XCoreTargetLowering::
1422 CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1423  bool isVarArg,
1424  const SmallVectorImpl<ISD::OutputArg> &Outs,
1425  LLVMContext &Context) const {
1427  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1428  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
1429  return false;
1430  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
1431  return false;
1432  return true;
1433 }
1434 
1435 SDValue
1436 XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1437  bool isVarArg,
1438  const SmallVectorImpl<ISD::OutputArg> &Outs,
1439  const SmallVectorImpl<SDValue> &OutVals,
1440  const SDLoc &dl, SelectionDAG &DAG) const {
1441 
1442  XCoreFunctionInfo *XFI =
1445 
1446  // CCValAssign - represent the assignment of
1447  // the return value to a location
1449 
1450  // CCState - Info about the registers and stack slot.
1451  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1452  *DAG.getContext());
1453 
1454  // Analyze return values.
1455  if (!isVarArg)
1456  CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);
1457 
1458  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
1459 
1460  SDValue Flag;
1461  SmallVector<SDValue, 4> RetOps(1, Chain);
1462 
1463  // Return on XCore is always a "retsp 0"
1464  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));
1465 
1466  SmallVector<SDValue, 4> MemOpChains;
1467  // Handle return values that must be copied to memory.
1468  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1469  CCValAssign &VA = RVLocs[i];
1470  if (VA.isRegLoc())
1471  continue;
1472  assert(VA.isMemLoc());
1473  if (isVarArg) {
1474  report_fatal_error("Can't return value from vararg function in memory");
1475  }
1476 
1477  int Offset = VA.getLocMemOffset();
1478  unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
1479  // Create the frame index object for the memory location.
1480  int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
1481 
1482  // Create a SelectionDAG node corresponding to a store
1483  // to this memory location.
1484  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1485  MemOpChains.push_back(DAG.getStore(
1486  Chain, dl, OutVals[i], FIN,
1488  }
1489 
1490  // Transform all store nodes into one single node because
1491  // all stores are independent of each other.
1492  if (!MemOpChains.empty())
1493  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1494 
1495  // Now handle return values copied to registers.
1496  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1497  CCValAssign &VA = RVLocs[i];
1498  if (!VA.isRegLoc())
1499  continue;
1500  // Copy the result values into the output registers.
1501  Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
1502 
1503  // guarantee that all emitted copies are
1504  // stuck together, avoiding something bad
1505  Flag = Chain.getValue(1);
1506  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1507  }
1508 
1509  RetOps[0] = Chain; // Update chain.
1510 
1511  // Add the flag if we have it.
1512  if (Flag.getNode())
1513  RetOps.push_back(Flag);
1514 
1515  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
1516 }
1517 
1518 //===----------------------------------------------------------------------===//
1519 // Other Lowering Code
1520 //===----------------------------------------------------------------------===//
1521 
1524  MachineBasicBlock *BB) const {
1525  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1526  DebugLoc dl = MI.getDebugLoc();
1527  assert((MI.getOpcode() == XCore::SELECT_CC) &&
1528  "Unexpected instr type to insert");
1529 
1530  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1531  // control-flow pattern. The incoming instruction knows the destination vreg
1532  // to set, the condition code register to branch on, the true/false values to
1533  // select between, and a branch opcode to use.
1534  const BasicBlock *LLVM_BB = BB->getBasicBlock();
1536 
1537  // thisMBB:
1538  // ...
1539  // TrueVal = ...
1540  // cmpTY ccX, r1, r2
1541  // bCC copy1MBB
1542  // fallthrough --> copy0MBB
1543  MachineBasicBlock *thisMBB = BB;
1544  MachineFunction *F = BB->getParent();
1545  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1546  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
1547  F->insert(It, copy0MBB);
1548  F->insert(It, sinkMBB);
1549 
1550  // Transfer the remainder of BB and its successor edges to sinkMBB.
1551  sinkMBB->splice(sinkMBB->begin(), BB,
1552  std::next(MachineBasicBlock::iterator(MI)), BB->end());
1553  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
1554 
1555  // Next, add the true and fallthrough blocks as its successors.
1556  BB->addSuccessor(copy0MBB);
1557  BB->addSuccessor(sinkMBB);
1558 
1559  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
1560  .addReg(MI.getOperand(1).getReg())
1561  .addMBB(sinkMBB);
1562 
1563  // copy0MBB:
1564  // %FalseValue = ...
1565  // # fallthrough to sinkMBB
1566  BB = copy0MBB;
1567 
1568  // Update machine-CFG edges
1569  BB->addSuccessor(sinkMBB);
1570 
1571  // sinkMBB:
1572  // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1573  // ...
1574  BB = sinkMBB;
1575  BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
1576  .addReg(MI.getOperand(3).getReg())
1577  .addMBB(copy0MBB)
1578  .addReg(MI.getOperand(2).getReg())
1579  .addMBB(thisMBB);
1580 
1581  MI.eraseFromParent(); // The pseudo instruction is gone now.
1582  return BB;
1583 }
1584 
1585 //===----------------------------------------------------------------------===//
1586 // Target Optimization Hooks
1587 //===----------------------------------------------------------------------===//
1588 
1589 SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
1590  DAGCombinerInfo &DCI) const {
1591  SelectionDAG &DAG = DCI.DAG;
1592  SDLoc dl(N);
1593  switch (N->getOpcode()) {
1594  default: break;
1595  case ISD::INTRINSIC_VOID:
1596  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
1597  case Intrinsic::xcore_outt:
1598  case Intrinsic::xcore_outct:
1599  case Intrinsic::xcore_chkct: {
1600  SDValue OutVal = N->getOperand(3);
1601  // These instructions ignore the high bits.
1602  if (OutVal.hasOneUse()) {
1603  unsigned BitWidth = OutVal.getValueSizeInBits();
1604  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
1605  KnownBits Known;
1607  !DCI.isBeforeLegalizeOps());
1608  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1609  if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
1610  TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
1611  DCI.CommitTargetLoweringOpt(TLO);
1612  }
1613  break;
1614  }
1615  case Intrinsic::xcore_setpt: {
1616  SDValue Time = N->getOperand(3);
1617  // This instruction ignores the high bits.
1618  if (Time.hasOneUse()) {
1619  unsigned BitWidth = Time.getValueSizeInBits();
1620  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
1621  KnownBits Known;
1623  !DCI.isBeforeLegalizeOps());
1624  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1625  if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
1626  TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
1627  DCI.CommitTargetLoweringOpt(TLO);
1628  }
1629  break;
1630  }
1631  }
1632  break;
1633  case XCoreISD::LADD: {
1634  SDValue N0 = N->getOperand(0);
1635  SDValue N1 = N->getOperand(1);
1636  SDValue N2 = N->getOperand(2);
1639  EVT VT = N0.getValueType();
1640 
1641  // canonicalize constant to RHS
1642  if (N0C && !N1C)
1643  return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);
1644 
1645  // fold (ladd 0, 0, x) -> 0, x & 1
1646  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1647  SDValue Carry = DAG.getConstant(0, dl, VT);
1648  SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
1649  DAG.getConstant(1, dl, VT));
1650  SDValue Ops[] = { Result, Carry };
1651  return DAG.getMergeValues(Ops, dl);
1652  }
1653 
1654  // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
1655  // low bit set
1656  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1657  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1658  VT.getSizeInBits() - 1);
1659  KnownBits Known = DAG.computeKnownBits(N2);
1660  if ((Known.Zero & Mask) == Mask) {
1661  SDValue Carry = DAG.getConstant(0, dl, VT);
1662  SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
1663  SDValue Ops[] = { Result, Carry };
1664  return DAG.getMergeValues(Ops, dl);
1665  }
1666  }
1667  }
1668  break;
1669  case XCoreISD::LSUB: {
1670  SDValue N0 = N->getOperand(0);
1671  SDValue N1 = N->getOperand(1);
1672  SDValue N2 = N->getOperand(2);
1675  EVT VT = N0.getValueType();
1676 
1677  // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
1678  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1679  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1680  VT.getSizeInBits() - 1);
1681  KnownBits Known = DAG.computeKnownBits(N2);
1682  if ((Known.Zero & Mask) == Mask) {
1683  SDValue Borrow = N2;
1684  SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
1685  DAG.getConstant(0, dl, VT), N2);
1686  SDValue Ops[] = { Result, Borrow };
1687  return DAG.getMergeValues(Ops, dl);
1688  }
1689  }
1690 
1691  // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
1692  // low bit set
1693  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1694  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1695  VT.getSizeInBits() - 1);
1696  KnownBits Known = DAG.computeKnownBits(N2);
1697  if ((Known.Zero & Mask) == Mask) {
1698  SDValue Borrow = DAG.getConstant(0, dl, VT);
1699  SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
1700  SDValue Ops[] = { Result, Borrow };
1701  return DAG.getMergeValues(Ops, dl);
1702  }
1703  }
1704  }
1705  break;
1706  case XCoreISD::LMUL: {
1707  SDValue N0 = N->getOperand(0);
1708  SDValue N1 = N->getOperand(1);
1709  SDValue N2 = N->getOperand(2);
1710  SDValue N3 = N->getOperand(3);
1713  EVT VT = N0.getValueType();
1714  // Canonicalize multiplicative constant to RHS. If both multiplicative
1715  // operands are constant canonicalize smallest to RHS.
1716  if ((N0C && !N1C) ||
1717  (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
1718  return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
1719  N1, N0, N2, N3);
1720 
1721  // lmul(x, 0, a, b)
1722  if (N1C && N1C->isNullValue()) {
1723  // If the high result is unused fold to add(a, b)
1724  if (N->hasNUsesOfValue(0, 0)) {
1725  SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
1726  SDValue Ops[] = { Lo, Lo };
1727  return DAG.getMergeValues(Ops, dl);
1728  }
1729  // Otherwise fold to ladd(a, b, 0)
1730  SDValue Result =
1731  DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
1732  SDValue Carry(Result.getNode(), 1);
1733  SDValue Ops[] = { Carry, Result };
1734  return DAG.getMergeValues(Ops, dl);
1735  }
1736  }
1737  break;
1738  case ISD::ADD: {
1739  // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
1740  // lmul(x, y, a, b). The high result of lmul will be ignored.
1741  // This is only profitable if the intermediate results are unused
1742  // elsewhere.
1743  SDValue Mul0, Mul1, Addend0, Addend1;
1744  if (N->getValueType(0) == MVT::i32 &&
1745  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
1746  SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
1747  DAG.getVTList(MVT::i32, MVT::i32), Mul0,
1748  Mul1, Addend0, Addend1);
1749  SDValue Result(Ignored.getNode(), 1);
1750  return Result;
1751  }
1752  APInt HighMask = APInt::getHighBitsSet(64, 32);
1753  // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
1754  // lmul(x, y, a, b) if all operands are zero-extended. We do this
1755  // before type legalization as it is messy to match the operands after
1756  // that.
1757  if (N->getValueType(0) == MVT::i64 &&
1758  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
1759  DAG.MaskedValueIsZero(Mul0, HighMask) &&
1760  DAG.MaskedValueIsZero(Mul1, HighMask) &&
1761  DAG.MaskedValueIsZero(Addend0, HighMask) &&
1762  DAG.MaskedValueIsZero(Addend1, HighMask)) {
1763  SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1764  Mul0, DAG.getConstant(0, dl, MVT::i32));
1765  SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1766  Mul1, DAG.getConstant(0, dl, MVT::i32));
1767  SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1768  Addend0, DAG.getConstant(0, dl, MVT::i32));
1769  SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1770  Addend1, DAG.getConstant(0, dl, MVT::i32));
1771  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
1772  DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
1773  Addend0L, Addend1L);
1774  SDValue Lo(Hi.getNode(), 1);
1775  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
1776  }
1777  }
1778  break;
1779  case ISD::STORE: {
1780  // Replace unaligned store of unaligned load with memmove.
1781  StoreSDNode *ST = cast<StoreSDNode>(N);
1782  if (!DCI.isBeforeLegalize() ||
1784  ST->getMemoryVT(), *ST->getMemOperand()) ||
1785  ST->isVolatile() || ST->isIndexed()) {
1786  break;
1787  }
1788  SDValue Chain = ST->getChain();
1789 
1790  unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1791  assert((StoreBits % 8) == 0 &&
1792  "Store size in bits must be a multiple of 8");
1793  unsigned Alignment = ST->getAlignment();
1794 
1795  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
1796  if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1797  LD->getAlignment() == Alignment &&
1798  !LD->isVolatile() && !LD->isIndexed() &&
1800  bool isTail = isInTailCallPosition(DAG, ST, Chain);
1801  return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
1802  LD->getBasePtr(),
1803  DAG.getConstant(StoreBits/8, dl, MVT::i32),
1804  Alignment, false, isTail, ST->getPointerInfo(),
1805  LD->getPointerInfo());
1806  }
1807  }
1808  break;
1809  }
1810  }
1811  return SDValue();
1812 }
1813 
1814 void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1815  KnownBits &Known,
1816  const APInt &DemandedElts,
1817  const SelectionDAG &DAG,
1818  unsigned Depth) const {
1819  Known.resetAll();
1820  switch (Op.getOpcode()) {
1821  default: break;
1822  case XCoreISD::LADD:
1823  case XCoreISD::LSUB:
1824  if (Op.getResNo() == 1) {
1825  // Top bits of carry / borrow are clear.
1826  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1827  Known.getBitWidth() - 1);
1828  }
1829  break;
1831  {
1832  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1833  switch (IntNo) {
1834  case Intrinsic::xcore_getts:
1835  // High bits are known to be zero.
1836  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1837  Known.getBitWidth() - 16);
1838  break;
1839  case Intrinsic::xcore_int:
1840  case Intrinsic::xcore_inct:
1841  // High bits are known to be zero.
1842  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1843  Known.getBitWidth() - 8);
1844  break;
1845  case Intrinsic::xcore_testct:
1846  // Result is either 0 or 1.
1847  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1848  Known.getBitWidth() - 1);
1849  break;
1850  case Intrinsic::xcore_testwct:
1851  // Result is in the range 0 - 4.
1852  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1853  Known.getBitWidth() - 3);
1854  break;
1855  }
1856  }
1857  break;
1858  }
1859 }
1860 
1861 //===----------------------------------------------------------------------===//
1862 // Addressing mode description hooks
1863 //===----------------------------------------------------------------------===//
1864 
1865 static inline bool isImmUs(int64_t val)
1866 {
1867  return (val >= 0 && val <= 11);
1868 }
1869 
1870 static inline bool isImmUs2(int64_t val)
1871 {
1872  return (val%2 == 0 && isImmUs(val/2));
1873 }
1874 
1875 static inline bool isImmUs4(int64_t val)
1876 {
1877  return (val%4 == 0 && isImmUs(val/4));
1878 }
1879 
1880 /// isLegalAddressingMode - Return true if the addressing mode represented
1881 /// by AM is legal for this target, for a load/store of the specified type.
1883  const AddrMode &AM, Type *Ty,
1884  unsigned AS,
1885  Instruction *I) const {
1886  if (Ty->getTypeID() == Type::VoidTyID)
1887  return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
1888 
1889  unsigned Size = DL.getTypeAllocSize(Ty);
1890  if (AM.BaseGV) {
1891  return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
1892  AM.BaseOffs%4 == 0;
1893  }
1894 
1895  switch (Size) {
1896  case 1:
1897  // reg + imm
1898  if (AM.Scale == 0) {
1899  return isImmUs(AM.BaseOffs);
1900  }
1901  // reg + reg
1902  return AM.Scale == 1 && AM.BaseOffs == 0;
1903  case 2:
1904  case 3:
1905  // reg + imm
1906  if (AM.Scale == 0) {
1907  return isImmUs2(AM.BaseOffs);
1908  }
1909  // reg + reg<<1
1910  return AM.Scale == 2 && AM.BaseOffs == 0;
1911  default:
1912  // reg + imm
1913  if (AM.Scale == 0) {
1914  return isImmUs4(AM.BaseOffs);
1915  }
1916  // reg + reg<<2
1917  return AM.Scale == 4 && AM.BaseOffs == 0;
1918  }
1919 }
1920 
1921 //===----------------------------------------------------------------------===//
1922 // XCore Inline Assembly Support
1923 //===----------------------------------------------------------------------===//
1924 
1925 std::pair<unsigned, const TargetRegisterClass *>
1926 XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1927  StringRef Constraint,
1928  MVT VT) const {
1929  if (Constraint.size() == 1) {
1930  switch (Constraint[0]) {
1931  default : break;
1932  case 'r':
1933  return std::make_pair(0U, &XCore::GRRegsRegClass);
1934  }
1935  }
1936  // Use the default implementation in TargetLowering to convert the register
1937  // constraint into a member of a register class.
1938  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1939 }
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
bool isMachineConstantPoolEntry() const
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
Definition: ISDOpcodes.h:795
int createLRSpillSlot(MachineFunction &MF)
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
EVT getValueType() const
Return the ValueType of the referenced return value.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
static bool isImmUs2(int64_t val)
const std::vector< MachineJumpTableEntry > & getJumpTables() const
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool hasLocalLinkage() const
Definition: GlobalValue.h:445
const GlobalValue * getGlobal() const
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
Definition: ISDOpcodes.h:183
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVMContext & Context
typename SuperClass::const_iterator const_iterator
Definition: SmallVector.h:320
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
Definition: SelectionDAG.h:858
C - The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:139
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:678
This class represents lattice values for constants.
Definition: AllocatorList.h:23
Register getLocReg() const
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant *> IdxList, bool InBounds=false, Optional< unsigned > InRangeIndex=None, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition: Constants.h:1153
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:264
XCoreTargetLowering(const TargetMachine &TM, const XCoreSubtarget &Subtarget)
const SDValue & getVal() const
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
const SDValue & getBasePtr() const
LLVM_NODISCARD bool startswith(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:256
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:385
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
const SDValue & getValue() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
Definition: ISDOpcodes.h:730
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
EK_Inline - Jump table entries are emitted inline at their point of use.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
Definition: APInt.h:647
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:252
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
const SDValue & getBasePtr() const
const SDValue & getChain() const
Function Alias Analysis Results
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
unsigned getAlignment() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
unsigned second
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
unsigned const TargetRegisterInfo * TRI
A debug info location.
Definition: DebugLoc.h:33
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:140
F(f)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
#define R2(n)
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offs=0, bool isT=false, unsigned TargetFlags=0)
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:39
uint64_t High
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:812
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:459
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getExternalSymbol(const char *Sym, EVT VT)
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:158
bool isTruncatingStore() const
Return true if the op does a truncation before store.
bool isMemLoc() const
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition: KnownBits.h:146
static bool isImmUs(int64_t val)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
static int stackSlotSize()
Stack slot size (4 bytes)
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
Definition: ISDOpcodes.h:480
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
The address of a basic block.
Definition: Constants.h:839
bool hasSection() const
Definition: GlobalValue.h:273
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset...
static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL)
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:369
const HexagonInstrInfo * TII
TypeID getTypeID() const
Return the type id for the type.
Definition: Type.h:137
Shift and rotation operations.
Definition: ISDOpcodes.h:434
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:190
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
void eraseFromParent()
Unlink &#39;this&#39; from the containing basic block and delete it.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:648
SimpleValueType SimpleTy
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
static const unsigned CodeModelLargeSize
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:411
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:473
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:413
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Definition: ISDOpcodes.h:72
This is an SDNode representing atomic operations.
LocInfo getLocInfo() const
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
ELFYAML::ELF_STO Other
Definition: ELFYAML.cpp:877
AtomicOrdering getOrdering() const
Return the atomic ordering requirements for this memory operation.
This represents a list of ValueTypes that has been interned by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SmallVector< ISD::InputArg, 32 > Ins
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:726
FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to first (possible) on-stack ar...
Definition: ISDOpcodes.h:90
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
unsigned getSizeInBits() const
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:291
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
void setReturnStackOffset(unsigned value)
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:410
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:200
SmallVector< ISD::OutputArg, 32 > Outs
LLVM_NODISCARD size_t size() const
size - Get the string size.
Definition: StringRef.h:130
unsigned getAlignment() const
Definition: Globals.cpp:97
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the &#39;usesCustomInserter&#39; fla...
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:150
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
This class is used to represent ISD::STORE nodes.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition: MCRegister.h:19
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:131
TargetInstrInfo - Interface to description of machine instruction set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
Definition: APInt.h:635
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits...
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:1791
The memory access is volatile.
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
Definition: MathExtras.h:614
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
Definition: DataLayout.cpp:772
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
0: type with no size
Definition: Type.h:56
const SDValue & getBasePtr() const
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:165
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
Machine Value Type.
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:41
void resetAll()
Resets the known state of all bits.
Definition: KnownBits.h:65
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
Definition: ISDOpcodes.h:759
const SDValue & getOperand(unsigned Num) const
This file contains the declarations for the subclasses of Constant, which represent the different fla...
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
Definition: SelectionDAG.h:846
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
static Type * getVoidTy(LLVMContext &C)
Definition: Type.cpp:160
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
Definition: ISDOpcodes.h:789
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side...
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:798
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
std::string getEVTString() const
This function returns value type as a string, e.g. "i32".
Definition: ValueTypes.cpp:114
void setPrefFunctionAlignment(unsigned Align)
Set the target&#39;s preferred function alignment.
self_iterator getIterator()
Definition: ilist_node.h:81
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:750
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
XCoreFunctionInfo - This class is derived from MachineFunction private XCore target-specific informat...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
std::vector< ArgListEntry > ArgListTy
Extended Value Type.
Definition: ValueTypes.h:33
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This structure contains all information that is necessary for lowering calls.
size_t size() const
Definition: SmallVector.h:52
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:219
bool isVolatile() const
const TargetMachine & getTargetMachine() const
StringRef getSection() const
Definition: Globals.cpp:165
This class contains a discriminated union of information about pointers in memory operands...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:40
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:686
unsigned first
TokenFactor - This node takes multiple tokens as input and produces a single token result...
Definition: ISDOpcodes.h:49
const TargetLowering & getTargetLoweringInfo() const
Definition: SelectionDAG.h:416
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:654
Iterator for intrusive lists based on ilist_node.
unsigned getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
Definition: ValueTypes.h:309
CCState - This class holds information needed while lowering arguments and return values...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:638
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
virtual Register getFrameRegister(const MachineFunction &MF) const =0
Debug information queries.
CCValAssign - Represent assignment of one arg/retval to a location.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
Definition: STLExtras.h:1035
const DataFlowGraph & G
Definition: RDFGraph.cpp:202
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
const Constant * getConstVal() const
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:640
Represents one node in the SelectionDAG.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:691
static mvt_range integer_valuetypes()
MachinePointerInfo getWithOffset(int64_t O) const
EVT getMemoryVT() const
Return the type of the in-memory value.
Class for arbitrary precision integers.
Definition: APInt.h:69
CodeModel::Model getCodeModel() const
Returns the code model.
static bool isImmUs4(int64_t val)
amdgpu Simplify well known AMD library false FunctionCallee Callee
void setMinFunctionAlignment(unsigned Align)
Set the target&#39;s minimum function alignment (in log2(bytes))
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:492
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:495
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
Flags
Flags values. These may be or&#39;d together.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:470
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:666
Representation of each machine instruction.
Definition: MachineInstr.h:64
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Definition: ISDOpcodes.h:755
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
SmallVector< SDValue, 32 > OutVals
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:126
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
unsigned AllocateStack(unsigned Size, unsigned Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition: Type.h:214
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:411
const TargetRegisterInfo * getRegisterInfo() const override
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:717
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:175
unsigned getLocMemOffset() const
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:205
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Definition: MCInstrInfo.h:44
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:642
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
Flags getFlags() const
Return the raw flags of the source value,.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:332
Type * getValueType() const
Definition: GlobalValue.h:279
uint32_t Size
Definition: Profile.cpp:46
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName - This method returns the name of a target specific
static bool isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0, SDValue &Addend1, bool requireIntermediatesHaveOneUse)
isADDADDMUL - Return whether Op is in a form that is equivalent to add(add(mul(x,y),a),b).
unsigned getOpcode() const
SDValue getValue(unsigned R) const
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing an...
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin...
Definition: ISDOpcodes.h:101
bool isRegLoc() const
const MachinePointerInfo & getPointerInfo() const
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if &#39;Op & Mask&#39; is known to be zero.
MachineConstantPoolValue * getMachineCPVal() const
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:575
LLVM Value Representation.
Definition: Value.h:73
SDValue getRegister(unsigned Reg, EVT VT)
unsigned getResNo() const
get the index which selects a specific result in the SDNode
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E&#39;s largest value.
Definition: BitmaskEnum.h:80
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:65
IRTranslator LLVM IR MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
Register getReg() const
getReg - Returns the register number.
Conversion operators.
Definition: ISDOpcodes.h:489
const SDValue & getOperand(unsigned i) const
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
Definition: ISDOpcodes.h:820
uint64_t getZExtValue() const
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:125
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:416
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, unsigned Alignment=1, MachineMemOperand::Flags Flags=MachineMemOperand::MONone, bool *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
static IntegerType * getInt8Ty(LLVMContext &C)
Definition: Type.cpp:173
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
Definition: ISDOpcodes.h:816
LLVMContext * getContext() const
Definition: SelectionDAG.h:420
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:404
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
Definition: ISDOpcodes.h:651