1 //===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the XCoreTargetLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "XCoreISelLowering.h"
14 #include "XCore.h"
16 #include "XCoreSubtarget.h"
17 #include "XCoreTargetMachine.h"
18 #include "XCoreTargetObjectFile.h"
27 #include "llvm/IR/CallingConv.h"
28 #include "llvm/IR/Constants.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/GlobalAlias.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/IR/Intrinsics.h"
34 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/KnownBits.h"
38 #include <algorithm>
39 
40 using namespace llvm;
41 
42 #define DEBUG_TYPE "xcore-lower"
43 
44 const char *XCoreTargetLowering::
45 getTargetNodeName(unsigned Opcode) const
46 {
47  switch ((XCoreISD::NodeType)Opcode)
48  {
49  case XCoreISD::FIRST_NUMBER : break;
50  case XCoreISD::BL : return "XCoreISD::BL";
51  case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
52  case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
53  case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
54  case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
55  case XCoreISD::STWSP : return "XCoreISD::STWSP";
56  case XCoreISD::RETSP : return "XCoreISD::RETSP";
57  case XCoreISD::LADD : return "XCoreISD::LADD";
58  case XCoreISD::LSUB : return "XCoreISD::LSUB";
59  case XCoreISD::LMUL : return "XCoreISD::LMUL";
60  case XCoreISD::MACCU : return "XCoreISD::MACCU";
61  case XCoreISD::MACCS : return "XCoreISD::MACCS";
62  case XCoreISD::CRC8 : return "XCoreISD::CRC8";
63  case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
64  case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
65  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
66  case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
67  case XCoreISD::MEMBARRIER : return "XCoreISD::MEMBARRIER";
68  }
69  return nullptr;
70 }
71 
72 XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
73  const XCoreSubtarget &Subtarget)
74  : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {
75 
76  // Set up the register classes.
77  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);
78 
79  // Compute derived properties from the register classes
81 
83 
85 
86  // Use i32 for setcc operations results (slt, sgt, ...).
88  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
89 
90  // XCore does not have the NodeTypes below.
93 
94  // 64bit
104 
105  // Bit Manipulation
109 
111 
112  // Jump tables.
114 
117 
118  // Conversion of i64 -> double produces constantpool nodes
120 
121  // Loads
122  for (MVT VT : MVT::integer_valuetypes()) {
126 
129  }
130 
131  // Custom expand misaligned loads / stores.
134 
135  // Varargs
140 
141  // Dynamic stack
145 
146  // Exception handling
149 
150  // Atomic operations
151  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
152  // As we are always sequentially consistent, an ATOMIC_FENCE becomes a no-op.
156 
157  // TRAMPOLINE is custom lowered.
160 
161  // We want to custom lower some of our intrinsics.
163 
167 
168  // We have target-specific dag combine patterns for the following nodes:
173 
176 }
177 
178 bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
179  if (Val.getOpcode() != ISD::LOAD)
180  return false;
181 
182  EVT VT1 = Val.getValueType();
183  if (!VT1.isSimple() || !VT1.isInteger() ||
184  !VT2.isSimple() || !VT2.isInteger())
185  return false;
186 
187  switch (VT1.getSimpleVT().SimpleTy) {
188  default: break;
189  case MVT::i8:
190  return true;
191  }
192 
193  return false;
194 }
195 
196 SDValue XCoreTargetLowering::
197 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
198  switch (Op.getOpcode())
199  {
200  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
201  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
202  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
203  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
204  case ISD::BR_JT: return LowerBR_JT(Op, DAG);
205  case ISD::LOAD: return LowerLOAD(Op, DAG);
206  case ISD::STORE: return LowerSTORE(Op, DAG);
207  case ISD::VAARG: return LowerVAARG(Op, DAG);
208  case ISD::VASTART: return LowerVASTART(Op, DAG);
209  case ISD::SMUL_LOHI: return LowerSMUL_LOHI(Op, DAG);
210  case ISD::UMUL_LOHI: return LowerUMUL_LOHI(Op, DAG);
211  // FIXME: Remove these when LegalizeDAGTypes lands.
212  case ISD::ADD:
213  case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
214  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
215  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
216  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
217  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
218  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
219  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
220  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
221  case ISD::ATOMIC_LOAD: return LowerATOMIC_LOAD(Op, DAG);
222  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
223  default:
224  llvm_unreachable("unimplemented operand");
225  }
226 }
227 
228 /// ReplaceNodeResults - Replace the results of a node with an illegal result
229 /// type with new values built out of custom code.
232  SelectionDAG &DAG) const {
233  switch (N->getOpcode()) {
234  default:
235  llvm_unreachable("Don't know how to custom expand this!");
236  case ISD::ADD:
237  case ISD::SUB:
238  Results.push_back(ExpandADDSUB(N, DAG));
239  return;
240  }
241 }
242 
243 //===----------------------------------------------------------------------===//
244 // Misc Lower Operation implementation
245 //===----------------------------------------------------------------------===//
246 
247 SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
248  const GlobalValue *GV,
249  SelectionDAG &DAG) const {
250  // FIXME there is no actual debug info here
251  SDLoc dl(GA);
252 
253  if (GV->getValueType()->isFunctionTy())
254  return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
255 
256  const auto *GVar = dyn_cast<GlobalVariable>(GV);
257  if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
258  (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
259  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
260 
261  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
262 }
263 
264 static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
265  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
266  return true;
267 
268  Type *ObjType = GV->getValueType();
269  if (!ObjType->isSized())
270  return false;
271 
272  auto &DL = GV->getParent()->getDataLayout();
273  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
274  return ObjSize < CodeModelLargeSize && ObjSize != 0;
275 }
276 
277 SDValue XCoreTargetLowering::
278 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
279 {
280  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
281  const GlobalValue *GV = GN->getGlobal();
282  SDLoc DL(GN);
283  int64_t Offset = GN->getOffset();
284  if (IsSmallObject(GV, *this)) {
285  // We can only fold positive offsets that are a multiple of the word size.
286  int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
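  // For example, an Offset of 10 folds 8 into the address and the remaining 2
  // is added below; a negative Offset folds nothing and is added in full.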
287  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
288  GA = getGlobalAddressWrapper(GA, GV, DAG);
289  // Handle the rest of the offset.
290  if (Offset != FoldedOffset) {
291  SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
292  GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
293  }
294  return GA;
295  } else {
296  // Ideally we would not fold in offset with an index <= 11.
297  Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
298  Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
299  Ty = Type::getInt32Ty(*DAG.getContext());
300  Constant *Idx = ConstantInt::get(Ty, Offset);
302  Type::getInt8Ty(*DAG.getContext()), GA, Idx);
303  SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
304  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
306  }
307 }
308 
309 SDValue XCoreTargetLowering::
310 LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
311 {
312  SDLoc DL(Op);
313  auto PtrVT = getPointerTy(DAG.getDataLayout());
314  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
315  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);
316 
317  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
318 }
319 
320 SDValue XCoreTargetLowering::
321 LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
322 {
323  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
324  // FIXME there isn't really debug info here
325  SDLoc dl(CP);
326  EVT PtrVT = Op.getValueType();
327  SDValue Res;
328  if (CP->isMachineConstantPoolEntry()) {
329  Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
330  CP->getAlignment(), CP->getOffset());
331  } else {
332  Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
333  CP->getAlignment(), CP->getOffset());
334  }
335  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
336 }
337 
340 }
341 
342 SDValue XCoreTargetLowering::
343 LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
344 {
345  SDValue Chain = Op.getOperand(0);
346  SDValue Table = Op.getOperand(1);
347  SDValue Index = Op.getOperand(2);
348  SDLoc dl(Op);
349  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
350  unsigned JTI = JT->getIndex();
352  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
353  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
354 
355  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
356  if (NumEntries <= 32) {
357  return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
358  }
359  assert((NumEntries >> 31) == 0);
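  // Larger tables use the BR_JT32 form, so scale the index by two (one shift
  // left) before branching.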
360  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
361  DAG.getConstant(1, dl, MVT::i32));
362  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
363  ScaledIndex);
364 }
365 
366 SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
367  const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
368  SelectionDAG &DAG) const {
369  auto PtrVT = getPointerTy(DAG.getDataLayout());
370  if ((Offset & 0x3) == 0) {
371  return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
372  }
373  // Lower to a pair of consecutive word-aligned loads plus some bit shifting.
374  int32_t HighOffset = alignTo(Offset, 4);
375  int32_t LowOffset = HighOffset - 4;
376  SDValue LowAddr, HighAddr;
377  if (GlobalAddressSDNode *GASD =
378  dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
379  LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
380  LowOffset);
381  HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
382  HighOffset);
383  } else {
384  LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
385  DAG.getConstant(LowOffset, DL, MVT::i32));
386  HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
387  DAG.getConstant(HighOffset, DL, MVT::i32));
388  }
389  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
390  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
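  // For example, with Offset == 6: LowOffset == 4, HighOffset == 8, and the
  // result is (load[4] >> 16) | (load[8] << 16), i.e. the misaligned word
  // reassembled from the two aligned loads.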
391 
392  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
393  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
394  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
395  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
396  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
397  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
398  High.getValue(1));
399  SDValue Ops[] = { Result, Chain };
400  return DAG.getMergeValues(Ops, DL);
401 }
402 
404 {
405  KnownBits Known = DAG.computeKnownBits(Value);
406  return Known.countMinTrailingZeros() >= 2;
407 }
408 
409 SDValue XCoreTargetLowering::
410 LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
411  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
412  LoadSDNode *LD = cast<LoadSDNode>(Op);
414  "Unexpected extension type");
415  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
417  LD->getAddressSpace(),
418  LD->getAlignment()))
419  return SDValue();
420 
421  auto &TD = DAG.getDataLayout();
422  unsigned ABIAlignment = TD.getABITypeAlignment(
423  LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
424  // Leave aligned load alone.
425  if (LD->getAlignment() >= ABIAlignment)
426  return SDValue();
427 
428  SDValue Chain = LD->getChain();
429  SDValue BasePtr = LD->getBasePtr();
430  SDLoc DL(Op);
431 
432  if (!LD->isVolatile()) {
433  const GlobalValue *GV;
434  int64_t Offset = 0;
435  if (DAG.isBaseWithConstantOffset(BasePtr) &&
436  isWordAligned(BasePtr->getOperand(0), DAG)) {
437  SDValue NewBasePtr = BasePtr->getOperand(0);
438  Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
439  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
440  Offset, DAG);
441  }
442  if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
443  MinAlign(GV->getAlignment(), 4) == 4) {
444  SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
445  BasePtr->getValueType(0));
446  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
447  Offset, DAG);
448  }
449  }
450 
451  if (LD->getAlignment() == 2) {
452  SDValue Low =
453  DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
454  LD->getPointerInfo(), MVT::i16,
455  /* Alignment = */ 2, LD->getMemOperand()->getFlags());
456  SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
457  DAG.getConstant(2, DL, MVT::i32));
458  SDValue High =
459  DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
461  /* Alignment = */ 2, LD->getMemOperand()->getFlags());
462  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
463  DAG.getConstant(16, DL, MVT::i32));
464  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
465  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
466  High.getValue(1));
467  SDValue Ops[] = { Result, Chain };
468  return DAG.getMergeValues(Ops, DL);
469  }
470 
471  // Lower to a call to __misaligned_load(BasePtr).
472  Type *IntPtrTy = TD.getIntPtrType(*DAG.getContext());
474  TargetLowering::ArgListEntry Entry;
475 
476  Entry.Ty = IntPtrTy;
477  Entry.Node = BasePtr;
478  Args.push_back(Entry);
479 
481  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
482  CallingConv::C, IntPtrTy,
483  DAG.getExternalSymbol("__misaligned_load",
484  getPointerTy(DAG.getDataLayout())),
485  std::move(Args));
486 
487  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
488  SDValue Ops[] = { CallResult.first, CallResult.second };
489  return DAG.getMergeValues(Ops, DL);
490 }
491 
492 SDValue XCoreTargetLowering::
493 LowerSTORE(SDValue Op, SelectionDAG &DAG) const
494 {
495  StoreSDNode *ST = cast<StoreSDNode>(Op);
496  assert(!ST->isTruncatingStore() && "Unexpected store type");
497  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
499  ST->getAddressSpace(),
500  ST->getAlignment())) {
501  return SDValue();
502  }
503  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
504  ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
505  // Leave aligned store alone.
506  if (ST->getAlignment() >= ABIAlignment) {
507  return SDValue();
508  }
509  SDValue Chain = ST->getChain();
510  SDValue BasePtr = ST->getBasePtr();
511  SDValue Value = ST->getValue();
512  SDLoc dl(Op);
513 
514  if (ST->getAlignment() == 2) {
515  SDValue Low = Value;
516  SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
517  DAG.getConstant(16, dl, MVT::i32));
518  SDValue StoreLow = DAG.getTruncStore(
519  Chain, dl, Low, BasePtr, ST->getPointerInfo(), MVT::i16,
520  /* Alignment = */ 2, ST->getMemOperand()->getFlags());
521  SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
522  DAG.getConstant(2, dl, MVT::i32));
523  SDValue StoreHigh = DAG.getTruncStore(
524  Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
525  MVT::i16, /* Alignment = */ 2, ST->getMemOperand()->getFlags());
526  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
527  }
528 
529  // Lower to a call to __misaligned_store(BasePtr, Value).
530  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
532  TargetLowering::ArgListEntry Entry;
533 
534  Entry.Ty = IntPtrTy;
535  Entry.Node = BasePtr;
536  Args.push_back(Entry);
537 
538  Entry.Node = Value;
539  Args.push_back(Entry);
540 
542  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
544  DAG.getExternalSymbol("__misaligned_store",
545  getPointerTy(DAG.getDataLayout())),
546  std::move(Args));
547 
548  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
549  return CallResult.second;
550 }
551 
552 SDValue XCoreTargetLowering::
553 LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
554 {
555  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
556  "Unexpected operand to lower!");
557  SDLoc dl(Op);
558  SDValue LHS = Op.getOperand(0);
559  SDValue RHS = Op.getOperand(1);
560  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
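  // MACCS with zero addends yields the full 64-bit signed product; the high
  // word is result 0 and the low word is result 1.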
561  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
562  DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
563  LHS, RHS);
564  SDValue Lo(Hi.getNode(), 1);
565  SDValue Ops[] = { Lo, Hi };
566  return DAG.getMergeValues(Ops, dl);
567 }
568 
569 SDValue XCoreTargetLowering::
570 LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
571 {
572  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
573  "Unexpected operand to lower!");
574  SDLoc dl(Op);
575  SDValue LHS = Op.getOperand(0);
576  SDValue RHS = Op.getOperand(1);
577  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
578  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
579  DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
580  Zero, Zero);
581  SDValue Lo(Hi.getNode(), 1);
582  SDValue Ops[] = { Lo, Hi };
583  return DAG.getMergeValues(Ops, dl);
584 }
585 
586 /// isADDADDMUL - Return whether Op is in a form that is equivalent to
587 /// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
588 /// each intermediate result in the calculation must also have a single use.
589 /// If the Op is in the correct form the constituent parts are written to Mul0,
590 /// Mul1, Addend0 and Addend1.
591 static bool
592 isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
593  SDValue &Addend1, bool requireIntermediatesHaveOneUse)
594 {
595  if (Op.getOpcode() != ISD::ADD)
596  return false;
597  SDValue N0 = Op.getOperand(0);
598  SDValue N1 = Op.getOperand(1);
599  SDValue AddOp;
600  SDValue OtherOp;
601  if (N0.getOpcode() == ISD::ADD) {
602  AddOp = N0;
603  OtherOp = N1;
604  } else if (N1.getOpcode() == ISD::ADD) {
605  AddOp = N1;
606  OtherOp = N0;
607  } else {
608  return false;
609  }
610  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
611  return false;
612  if (OtherOp.getOpcode() == ISD::MUL) {
613  // add(add(a,b),mul(x,y))
614  if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
615  return false;
616  Mul0 = OtherOp.getOperand(0);
617  Mul1 = OtherOp.getOperand(1);
618  Addend0 = AddOp.getOperand(0);
619  Addend1 = AddOp.getOperand(1);
620  return true;
621  }
622  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
623  // add(add(mul(x,y),a),b)
624  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
625  return false;
626  Mul0 = AddOp.getOperand(0).getOperand(0);
627  Mul1 = AddOp.getOperand(0).getOperand(1);
628  Addend0 = AddOp.getOperand(1);
629  Addend1 = OtherOp;
630  return true;
631  }
632  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
633  // add(add(a,mul(x,y)),b)
634  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
635  return false;
636  Mul0 = AddOp.getOperand(1).getOperand(0);
637  Mul1 = AddOp.getOperand(1).getOperand(1);
638  Addend0 = AddOp.getOperand(0);
639  Addend1 = OtherOp;
640  return true;
641  }
642  return false;
643 }
644 
645 SDValue XCoreTargetLowering::
646 TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
647 {
648  SDValue Mul;
649  SDValue Other;
650  if (N->getOperand(0).getOpcode() == ISD::MUL) {
651  Mul = N->getOperand(0);
652  Other = N->getOperand(1);
653  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
654  Mul = N->getOperand(1);
655  Other = N->getOperand(0);
656  } else {
657  return SDValue();
658  }
659  SDLoc dl(N);
660  SDValue LL, RL, AddendL, AddendH;
661  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
662  Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
663  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
664  Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
665  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
666  Other, DAG.getConstant(0, dl, MVT::i32));
667  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
668  Other, DAG.getConstant(1, dl, MVT::i32));
669  APInt HighMask = APInt::getHighBitsSet(64, 32);
670  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
671  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
672  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
673  DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
674  // The inputs are both zero-extended.
675  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
676  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
677  AddendL, LL, RL);
678  SDValue Lo(Hi.getNode(), 1);
679  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
680  }
681  if (LHSSB > 32 && RHSSB > 32) {
682  // The inputs are both sign-extended.
683  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
684  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
685  AddendL, LL, RL);
686  SDValue Lo(Hi.getNode(), 1);
687  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
688  }
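  // General case: only the low 64 bits of the result are needed, so compute
  // Addend + LL*RL with MACCU and then add the cross products LL*RH and LH*RL
  // into the high word.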
689  SDValue LH, RH;
690  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
691  Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
692  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
693  Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
694  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
695  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
696  AddendL, LL, RL);
697  SDValue Lo(Hi.getNode(), 1);
698  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
699  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
700  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
701  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
702  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
703 }
704 
705 SDValue XCoreTargetLowering::
706 ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
707 {
708  assert(N->getValueType(0) == MVT::i64 &&
709  (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
710  "Unknown operand to lower!");
711 
712  if (N->getOpcode() == ISD::ADD)
713  if (SDValue Result = TryExpandADDWithMul(N, DAG))
714  return Result;
715 
716  SDLoc dl(N);
717 
718  // Extract components
720  N->getOperand(0),
721  DAG.getConstant(0, dl, MVT::i32));
723  N->getOperand(0),
724  DAG.getConstant(1, dl, MVT::i32));
726  N->getOperand(1),
727  DAG.getConstant(0, dl, MVT::i32));
729  N->getOperand(1),
730  DAG.getConstant(1, dl, MVT::i32));
731 
732  // Expand
733  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
735  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
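  // LADD/LSUB produce a second result carrying the carry/borrow out: the low
  // halves combine with a zero carry-in and the resulting carry feeds the
  // high-half operation.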
736  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
737  LHSL, RHSL, Zero);
738  SDValue Carry(Lo.getNode(), 1);
739 
740  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
741  LHSH, RHSH, Carry);
742  SDValue Ignored(Hi.getNode(), 1);
743  // Merge the pieces
744  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
745 }
746 
747 SDValue XCoreTargetLowering::
748 LowerVAARG(SDValue Op, SelectionDAG &DAG) const
749 {
750  // Whilst LLVM does not support aggregate varargs we can ignore
751  // the possibility of the ValueType being an implicit byVal vararg.
752  SDNode *Node = Op.getNode();
753  EVT VT = Node->getValueType(0); // not an aggregate
754  SDValue InChain = Node->getOperand(0);
755  SDValue VAListPtr = Node->getOperand(1);
756  EVT PtrVT = VAListPtr.getValueType();
757  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
758  SDLoc dl(Node);
759  SDValue VAList =
760  DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
761  // Increment the pointer, VAList, to the next vararg
762  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
763  DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
764  dl));
765  // Store the incremented VAList to the legalized pointer
766  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
767  MachinePointerInfo(SV));
768  // Load the actual argument out of the pointer VAList
769  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
770 }
771 
772 SDValue XCoreTargetLowering::
773 LowerVASTART(SDValue Op, SelectionDAG &DAG) const
774 {
775  SDLoc dl(Op);
776  // vastart stores the address of the VarArgsFrameIndex slot into the
777  // memory location argument
781  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
783 }
784 
785 SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
786  SelectionDAG &DAG) const {
787  // This node represents llvm.frameaddress on the DAG.
788  // It takes one operand, the index of the frame address to return.
789  // An index of zero corresponds to the current function's frame address.
790  // An index of one corresponds to the parent's frame address, and so on.
791  // Depths > 0 not supported yet!
792  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
793  return SDValue();
794 
796  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
797  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
798  RegInfo->getFrameRegister(MF), MVT::i32);
799 }
800 
801 SDValue XCoreTargetLowering::
802 LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
803  // This node represents llvm.returnaddress on the DAG.
804  // It takes one operand, the index of the return address to return.
805  // An index of zero corresponds to the current function's return address.
806  // An index of one corresponds to the parent's return address, and so on.
807  // Depths > 0 not supported yet!
808  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
809  return SDValue();
810 
813  int FI = XFI->createLRSpillSlot(MF);
814  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
815  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
816  DAG.getEntryNode(), FIN,
818 }
819 
820 SDValue XCoreTargetLowering::
821 LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
822  // This node represents the offset from the frame pointer to the first on-stack argument.
823  // This is needed for correct stack adjustment during unwind.
824  // However, we don't know the offset until after the frame has been finalised.
825  // This is done during the XCoreFTAOElim pass.
827 }
828 
829 SDValue XCoreTargetLowering::
830 LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
831  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
832  // This node represents the 'eh_return' GCC DWARF builtin, which is used to
833  // return from an exception. The general meaning is: adjust the stack by OFFSET and
834  // pass execution to HANDLER.
836  SDValue Chain = Op.getOperand(0);
837  SDValue Offset = Op.getOperand(1);
838  SDValue Handler = Op.getOperand(2);
839  SDLoc dl(Op);
840 
841  // Absolute SP = (FP + FrameToArgs) + Offset
842  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
843  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
844  RegInfo->getFrameRegister(MF), MVT::i32);
845  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
846  MVT::i32);
847  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
848  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);
849 
850  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
851  // which leaves 2 caller saved registers, R2 & R3 for us to use.
852  unsigned StackReg = XCore::R2;
853  unsigned HandlerReg = XCore::R3;
854 
855  SDValue OutChains[] = {
856  DAG.getCopyToReg(Chain, dl, StackReg, Stack),
857  DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
858  };
859 
860  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
861 
862  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
863  DAG.getRegister(StackReg, MVT::i32),
864  DAG.getRegister(HandlerReg, MVT::i32));
865 
866 }
867 
868 SDValue XCoreTargetLowering::
869 LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
870  return Op.getOperand(0);
871 }
872 
873 SDValue XCoreTargetLowering::
874 LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
875  SDValue Chain = Op.getOperand(0);
876  SDValue Trmp = Op.getOperand(1); // trampoline
877  SDValue FPtr = Op.getOperand(2); // nested function
878  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
879 
880  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
881 
882  // .align 4
883  // LDAPF_u10 r11, nest
884  // LDW_2rus r11, r11[0]
885  // STWSP_ru6 r11, sp[0]
886  // LDAPF_u10 r11, fptr
887  // LDW_2rus r11, r11[0]
888  // BAU_1r r11
889  // nest:
890  // .word nest
891  // fptr:
892  // .word fptr
893  SDValue OutChains[5];
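  // The first three stores write the instruction words shown above; the last
  // two store the nest value and the function pointer at offsets 12 and 16.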
894 
895  SDValue Addr = Trmp;
896 
897  SDLoc dl(Op);
898  OutChains[0] =
899  DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
900  MachinePointerInfo(TrmpAddr));
901 
902  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
903  DAG.getConstant(4, dl, MVT::i32));
904  OutChains[1] =
905  DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
906  MachinePointerInfo(TrmpAddr, 4));
907 
908  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
909  DAG.getConstant(8, dl, MVT::i32));
910  OutChains[2] =
911  DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
912  MachinePointerInfo(TrmpAddr, 8));
913 
914  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
915  DAG.getConstant(12, dl, MVT::i32));
916  OutChains[3] =
917  DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));
918 
919  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
920  DAG.getConstant(16, dl, MVT::i32));
921  OutChains[4] =
922  DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));
923 
924  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
925 }
926 
927 SDValue XCoreTargetLowering::
928 LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
929  SDLoc DL(Op);
930  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
931  switch (IntNo) {
932  case Intrinsic::xcore_crc8:
933  EVT VT = Op.getValueType();
934  SDValue Data =
935  DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
936  Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
937  SDValue Crc(Data.getNode(), 1);
938  SDValue Results[] = { Crc, Data };
939  return DAG.getMergeValues(Results, DL);
940  }
941  return SDValue();
942 }
943 
944 SDValue XCoreTargetLowering::
945 LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
946  SDLoc DL(Op);
947  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
948 }
949 
950 SDValue XCoreTargetLowering::
951 LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
952  AtomicSDNode *N = cast<AtomicSDNode>(Op);
953  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
956  "setInsertFencesForAtomic(true) expects unordered / monotonic");
957  if (N->getMemoryVT() == MVT::i32) {
958  if (N->getAlignment() < 4)
959  report_fatal_error("atomic load must be aligned");
960  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
961  N->getChain(), N->getBasePtr(), N->getPointerInfo(),
962  N->getAlignment(), N->getMemOperand()->getFlags(),
963  N->getAAInfo(), N->getRanges());
964  }
965  if (N->getMemoryVT() == MVT::i16) {
966  if (N->getAlignment() < 2)
967  report_fatal_error("atomic load must be aligned");
968  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
969  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
970  N->getAlignment(), N->getMemOperand()->getFlags(),
971  N->getAAInfo());
972  }
973  if (N->getMemoryVT() == MVT::i8)
974  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
975  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
976  N->getAlignment(), N->getMemOperand()->getFlags(),
977  N->getAAInfo());
978  return SDValue();
979 }
980 
981 SDValue XCoreTargetLowering::
982 LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
983  AtomicSDNode *N = cast<AtomicSDNode>(Op);
984  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
987  "setInsertFencesForAtomic(true) expects unordered / monotonic");
988  if (N->getMemoryVT() == MVT::i32) {
989  if (N->getAlignment() < 4)
990  report_fatal_error("atomic store must be aligned");
991  return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
992  N->getPointerInfo(), N->getAlignment(),
993  N->getMemOperand()->getFlags(), N->getAAInfo());
994  }
995  if (N->getMemoryVT() == MVT::i16) {
996  if (N->getAlignment() < 2)
997  report_fatal_error("atomic store must be aligned");
998  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
999  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
1000  N->getAlignment(), N->getMemOperand()->getFlags(),
1001  N->getAAInfo());
1002  }
1003  if (N->getMemoryVT() == MVT::i8)
1004  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
1005  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
1006  N->getAlignment(), N->getMemOperand()->getFlags(),
1007  N->getAAInfo());
1008  return SDValue();
1009 }
1010 
1011 //===----------------------------------------------------------------------===//
1012 // Calling Convention Implementation
1013 //===----------------------------------------------------------------------===//
1014 
1015 #include "XCoreGenCallingConv.inc"
1016 
1017 //===----------------------------------------------------------------------===//
1018 // Call Calling Convention Implementation
1019 //===----------------------------------------------------------------------===//
1020 
1021 /// XCore call implementation
1022 SDValue
1023 XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1024  SmallVectorImpl<SDValue> &InVals) const {
1025  SelectionDAG &DAG = CLI.DAG;
1026  SDLoc &dl = CLI.DL;
1028  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1030  SDValue Chain = CLI.Chain;
1031  SDValue Callee = CLI.Callee;
1032  bool &isTailCall = CLI.IsTailCall;
1033  CallingConv::ID CallConv = CLI.CallConv;
1034  bool isVarArg = CLI.IsVarArg;
1035 
1036  // XCore target does not yet support tail call optimization.
1037  isTailCall = false;
1038 
1039  // For now, only CallingConv::C implemented
1040  switch (CallConv)
1041  {
1042  default:
1043  report_fatal_error("Unsupported calling convention");
1044  case CallingConv::Fast:
1045  case CallingConv::C:
1046  return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
1047  Outs, OutVals, Ins, dl, DAG, InVals);
1048  }
1049 }
1050 
1051 /// LowerCallResult - Lower the result values of a call into the
1052 /// appropriate copies out of physical registers / memory locations.
1054  const SmallVectorImpl<CCValAssign> &RVLocs,
1055  const SDLoc &dl, SelectionDAG &DAG,
1056  SmallVectorImpl<SDValue> &InVals) {
1057  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
1058  // Copy results out of physical registers.
1059  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1060  const CCValAssign &VA = RVLocs[i];
1061  if (VA.isRegLoc()) {
1062  Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
1063  InFlag).getValue(1);
1064  InFlag = Chain.getValue(2);
1065  InVals.push_back(Chain.getValue(0));
1066  } else {
1067  assert(VA.isMemLoc());
1068  ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
1069  InVals.size()));
1070  // Reserve space for this result.
1071  InVals.push_back(SDValue());
1072  }
1073  }
1074 
1075  // Copy results out of memory.
1076  SmallVector<SDValue, 4> MemOpChains;
1077  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
1078  int offset = ResultMemLocs[i].first;
1079  unsigned index = ResultMemLocs[i].second;
1080  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
1081  SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
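  // Memory results are reloaded from the stack with LDWSP, which takes a word
  // offset rather than a byte offset, hence the division by 4.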
1082  SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
1083  InVals[index] = load;
1084  MemOpChains.push_back(load.getValue(1));
1085  }
1086 
1087  // Transform all loads nodes into one single node because
1088  // all load nodes are independent of each other.
1089  if (!MemOpChains.empty())
1090  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1091 
1092  return Chain;
1093 }
1094 
1095 /// LowerCCCCallTo - function arguments are copied from virtual
1096 /// registers to physical registers or the stack frame; CALLSEQ_START and
1097 /// CALLSEQ_END are emitted.
1098 /// TODO: isTailCall, sret.
1099 SDValue XCoreTargetLowering::LowerCCCCallTo(
1100  SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
1101  bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
1102  const SmallVectorImpl<SDValue> &OutVals,
1103  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1104  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1105 
1106  // Analyze operands of the call, assigning locations to each operand.
1108  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1109  *DAG.getContext());
1110 
1111  // The ABI dictates there should be one stack slot available to the callee
1112  // on function entry (for saving lr).
1113  CCInfo.AllocateStack(4, 4);
1114 
1115  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
1116 
1118  // Analyze return values to determine the number of bytes of stack required.
1119  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1120  *DAG.getContext());
1121  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
1122  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
1123 
1124  // Get a count of how many bytes are to be pushed on the stack.
1125  unsigned NumBytes = RetCCInfo.getNextStackOffset();
1126  auto PtrVT = getPointerTy(DAG.getDataLayout());
1127 
1128  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
1129 
1131  SmallVector<SDValue, 12> MemOpChains;
1132 
1133  // Walk the register/memloc assignments, inserting copies/loads.
1134  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1135  CCValAssign &VA = ArgLocs[i];
1136  SDValue Arg = OutVals[i];
1137 
1138  // Promote the value if needed.
1139  switch (VA.getLocInfo()) {
1140  default: llvm_unreachable("Unknown loc info!");
1141  case CCValAssign::Full: break;
1142  case CCValAssign::SExt:
1143  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1144  break;
1145  case CCValAssign::ZExt:
1146  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1147  break;
1148  case CCValAssign::AExt:
1149  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1150  break;
1151  }
1152 
1153  // Arguments that can be passed on register must be kept at
1154  // RegsToPass vector
1155  if (VA.isRegLoc()) {
1156  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1157  } else {
1158  assert(VA.isMemLoc());
1159 
1160  int Offset = VA.getLocMemOffset();
1161 
1162  MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
1163  Chain, Arg,
1164  DAG.getConstant(Offset/4, dl,
1165  MVT::i32)));
1166  }
1167  }
1168 
1169  // Transform all store nodes into one single node because
1170  // all store nodes are independent of each other.
1171  if (!MemOpChains.empty())
1172  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1173 
1174  // Build a sequence of copy-to-reg nodes chained together with token
1175  // chain and flag operands which copy the outgoing args into registers.
1176  // The InFlag is necessary since all emitted instructions must be
1177  // stuck together.
1178  SDValue InFlag;
1179  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1180  Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1181  RegsToPass[i].second, InFlag);
1182  InFlag = Chain.getValue(1);
1183  }
1184 
1185  // If the callee is a GlobalAddress node (quite common, every direct call is)
1186  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1187  // Likewise ExternalSymbol -> TargetExternalSymbol.
1188  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1189  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
1190  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1191  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1192 
1193  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
1194  // = Chain, Callee, Reg#1, Reg#2, ...
1195  //
1196  // Returns a chain & a flag for retval copy to use.
1197  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1199  Ops.push_back(Chain);
1200  Ops.push_back(Callee);
1201 
1202  // Add argument registers to the end of the list so that they are
1203  // known live into the call.
1204  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1205  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1206  RegsToPass[i].second.getValueType()));
1207 
1208  if (InFlag.getNode())
1209  Ops.push_back(InFlag);
1210 
1211  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
1212  InFlag = Chain.getValue(1);
1213 
1214  // Create the CALLSEQ_END node.
1215  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
1216  DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
1217  InFlag = Chain.getValue(1);
1218 
1219  // Handle result values, copying them out of physregs into vregs that we
1220  // return.
1221  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
1222 }
1223 
1224 //===----------------------------------------------------------------------===//
1225 // Formal Arguments Calling Convention Implementation
1226 //===----------------------------------------------------------------------===//
1227 
1228 namespace {
1229  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
1230 }
1231 
1232 /// XCore formal arguments implementation
1233 SDValue XCoreTargetLowering::LowerFormalArguments(
1234  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1235  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1236  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1237  switch (CallConv)
1238  {
1239  default:
1240  report_fatal_error("Unsupported calling convention");
1241  case CallingConv::C:
1242  case CallingConv::Fast:
1243  return LowerCCCArguments(Chain, CallConv, isVarArg,
1244  Ins, dl, DAG, InVals);
1245  }
1246 }
1247 
1248 /// LowerCCCArguments - transform physical registers into
1249 /// virtual registers and generate load operations for
1250 /// arguments placed on the stack.
1251 /// TODO: sret
1252 SDValue XCoreTargetLowering::LowerCCCArguments(
1253  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1254  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1255  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1256  MachineFunction &MF = DAG.getMachineFunction();
1257  MachineFrameInfo &MFI = MF.getFrameInfo();
1258  MachineRegisterInfo &RegInfo = MF.getRegInfo();
1260 
1261  // Assign locations to all of the incoming arguments.
1263  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1264  *DAG.getContext());
1265 
1266  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
1267 
1268  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
1269 
1270  unsigned LRSaveSize = StackSlotSize;
1271 
1272  if (!isVarArg)
1273  XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);
1274 
1275  // All getCopyFromReg ops must precede any getMemcpys to prevent the
1276  // scheduler clobbering a register before it has been copied.
1277  // The stages are:
1278  // 1. CopyFromReg (and load) arg & vararg registers.
1279  // 2. Chain CopyFromReg nodes into a TokenFactor.
1280  // 3. Memcpy 'byVal' args & push final InVals.
1281  // 4. Chain mem ops nodes into a TokenFactor.
1282  SmallVector<SDValue, 4> CFRegNode;
1284  SmallVector<SDValue, 4> MemOps;
1285 
1286  // 1a. CopyFromReg (and load) arg registers.
1287  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1288 
1289  CCValAssign &VA = ArgLocs[i];
1290  SDValue ArgIn;
1291 
1292  if (VA.isRegLoc()) {
1293  // Arguments passed in registers
1294  EVT RegVT = VA.getLocVT();
1295  switch (RegVT.getSimpleVT().SimpleTy) {
1296  default:
1297  {
1298 #ifndef NDEBUG
1299  errs() << "LowerFormalArguments Unhandled argument type: "
1300  << RegVT.getEVTString() << "\n";
1301 #endif
1302  llvm_unreachable(nullptr);
1303  }
1304  case MVT::i32:
1305  unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1306  RegInfo.addLiveIn(VA.getLocReg(), VReg);
1307  ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
1308  CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
1309  }
1310  } else {
1311  // sanity check
1312  assert(VA.isMemLoc());
1313  // Load the argument to a virtual register
1314  unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
1315  if (ObjSize > StackSlotSize) {
1316  errs() << "LowerFormalArguments Unhandled argument type: "
1317  << EVT(VA.getLocVT()).getEVTString()
1318  << "\n";
1319  }
1320  // Create the frame index object for this incoming parameter...
1321  int FI = MFI.CreateFixedObject(ObjSize,
1322  LRSaveSize + VA.getLocMemOffset(),
1323  true);
1324 
1325  // Create the SelectionDAG nodes corresponding to a load
1326  // from this parameter
1327  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1328  ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
1330  }
1331  const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
1332  ArgData.push_back(ADP);
1333  }
1334 
1335  // 1b. CopyFromReg vararg registers.
1336  if (isVarArg) {
1337  // Argument registers
1338  static const MCPhysReg ArgRegs[] = {
1339  XCore::R0, XCore::R1, XCore::R2, XCore::R3
1340  };
1342  unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
1343  if (FirstVAReg < array_lengthof(ArgRegs)) {
1344  int offset = 0;
1345  // Save remaining registers, storing higher register numbers at a higher
1346  // address
1347  for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
1348  // Create a stack slot
1349  int FI = MFI.CreateFixedObject(4, offset, true);
1350  if (i == (int)FirstVAReg) {
1351  XFI->setVarArgsFrameIndex(FI);
1352  }
1353  offset -= StackSlotSize;
1354  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1355  // Move argument from phys reg -> virt reg
1356  unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1357  RegInfo.addLiveIn(ArgRegs[i], VReg);
1358  SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1359  CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
1360  // Move argument from virt reg -> stack
1361  SDValue Store =
1362  DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
1363  MemOps.push_back(Store);
1364  }
1365  } else {
1366  // This will point to the next argument passed via stack.
1367  XFI->setVarArgsFrameIndex(
1368  MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
1369  true));
1370  }
1371  }
1372 
1373  // 2. Chain CopyFromReg nodes into a TokenFactor.
1374  if (!CFRegNode.empty())
1375  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);
1376 
1377  // 3. Memcpy 'byVal' args & push final InVals.
1378  // Aggregates passed "byVal" need to be copied by the callee.
1379  // The callee will use a pointer to this copy, rather than the original
1380  // pointer.
1381  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
1382  ArgDE = ArgData.end();
1383  ArgDI != ArgDE; ++ArgDI) {
1384  if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
1385  unsigned Size = ArgDI->Flags.getByValSize();
1386  unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
1387  // Create a new object on the stack and copy the pointee into it.
1388  int FI = MFI.CreateStackObject(Size, Align, false);
1389  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1390  InVals.push_back(FIN);
1391  MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
1392  DAG.getConstant(Size, dl, MVT::i32),
1393  Align, false, false, false,
1395  MachinePointerInfo()));
1396  } else {
1397  InVals.push_back(ArgDI->SDV);
1398  }
1399  }
1400 
1401  // 4. Chain mem ops nodes into a TokenFactor.
1402  if (!MemOps.empty()) {
1403  MemOps.push_back(Chain);
1404  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
1405  }
1406 
1407  return Chain;
1408 }
1409 
1410 //===----------------------------------------------------------------------===//
1411 // Return Value Calling Convention Implementation
1412 //===----------------------------------------------------------------------===//
1413 
1414 bool XCoreTargetLowering::
1415 CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1416  bool isVarArg,
1417  const SmallVectorImpl<ISD::OutputArg> &Outs,
1418  LLVMContext &Context) const {
1420  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1421  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
1422  return false;
1423  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
1424  return false;
1425  return true;
1426 }
1427 
1428 SDValue
1429 XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1430  bool isVarArg,
1431  const SmallVectorImpl<ISD::OutputArg> &Outs,
1432  const SmallVectorImpl<SDValue> &OutVals,
1433  const SDLoc &dl, SelectionDAG &DAG) const {
1434 
1435  XCoreFunctionInfo *XFI =
1438 
1439  // CCValAssign - represent the assignment of
1440  // the return value to a location
1442 
1443  // CCState - Info about the registers and stack slot.
1444  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1445  *DAG.getContext());
1446 
1447  // Analyze return values.
1448  if (!isVarArg)
1449  CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);
1450 
1451  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
1452 
1453  SDValue Flag;
1454  SmallVector<SDValue, 4> RetOps(1, Chain);
1455 
1456  // Return on XCore is always a "retsp 0"
1457  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));
1458 
1459  SmallVector<SDValue, 4> MemOpChains;
1460  // Handle return values that must be copied to memory.
1461  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1462  CCValAssign &VA = RVLocs[i];
1463  if (VA.isRegLoc())
1464  continue;
1465  assert(VA.isMemLoc());
1466  if (isVarArg) {
1467  report_fatal_error("Can't return value from vararg function in memory");
1468  }
1469 
1470  int Offset = VA.getLocMemOffset();
1471  unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
1472  // Create the frame index object for the memory location.
1473  int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
1474 
1475  // Create a SelectionDAG node corresponding to a store
1476  // to this memory location.
1477  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1478  MemOpChains.push_back(DAG.getStore(
1479  Chain, dl, OutVals[i], FIN,
1481  }
1482 
1483  // Transform all store nodes into one single node because
1484  // all stores are independent of each other.
1485  if (!MemOpChains.empty())
1486  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1487 
1488  // Now handle return values copied to registers.
1489  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1490  CCValAssign &VA = RVLocs[i];
1491  if (!VA.isRegLoc())
1492  continue;
1493  // Copy the result values into the output registers.
1494  Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
1495 
1496  // Guarantee that all emitted copies are
1497  // stuck together, so nothing can be scheduled between them.
1498  Flag = Chain.getValue(1);
1499  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1500  }
1501 
1502  RetOps[0] = Chain; // Update chain.
1503 
1504  // Add the flag if we have it.
1505  if (Flag.getNode())
1506  RetOps.push_back(Flag);
1507 
1508  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
1509 }
1510 
1511 //===----------------------------------------------------------------------===//
1512 // Other Lowering Code
1513 //===----------------------------------------------------------------------===//
1514 
1517  MachineBasicBlock *BB) const {
1518  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1519  DebugLoc dl = MI.getDebugLoc();
1520  assert((MI.getOpcode() == XCore::SELECT_CC) &&
1521  "Unexpected instr type to insert");
1522 
1523  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1524  // control-flow pattern. The incoming instruction knows the destination vreg
1525  // to set, the condition code register to branch on, the true/false values to
1526  // select between, and a branch opcode to use.
1527  const BasicBlock *LLVM_BB = BB->getBasicBlock();
1529 
1530  // thisMBB:
1531  // ...
1532  // TrueVal = ...
1533  // cmpTY ccX, r1, r2
1534  // bCC copy1MBB
1535  // fallthrough --> copy0MBB
1536  MachineBasicBlock *thisMBB = BB;
1537  MachineFunction *F = BB->getParent();
1538  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1539  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
1540  F->insert(It, copy0MBB);
1541  F->insert(It, sinkMBB);
1542 
1543  // Transfer the remainder of BB and its successor edges to sinkMBB.
1544  sinkMBB->splice(sinkMBB->begin(), BB,
1545  std::next(MachineBasicBlock::iterator(MI)), BB->end());
1546  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
1547 
1548  // Next, add the true and fallthrough blocks as its successors.
1549  BB->addSuccessor(copy0MBB);
1550  BB->addSuccessor(sinkMBB);
1551 
1552  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
1553  .addReg(MI.getOperand(1).getReg())
1554  .addMBB(sinkMBB);
1555 
1556  // copy0MBB:
1557  // %FalseValue = ...
1558  // # fallthrough to sinkMBB
1559  BB = copy0MBB;
1560 
1561  // Update machine-CFG edges
1562  BB->addSuccessor(sinkMBB);
1563 
1564  // sinkMBB:
1565  // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1566  // ...
1567  BB = sinkMBB;
1568  BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
1569  .addReg(MI.getOperand(3).getReg())
1570  .addMBB(copy0MBB)
1571  .addReg(MI.getOperand(2).getReg())
1572  .addMBB(thisMBB);
1573 
1574  MI.eraseFromParent(); // The pseudo instruction is gone now.
1575  return BB;
1576 }
1577 
1578 //===----------------------------------------------------------------------===//
1579 // Target Optimization Hooks
1580 //===----------------------------------------------------------------------===//
1581 
1582 SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
1583  DAGCombinerInfo &DCI) const {
1584  SelectionDAG &DAG = DCI.DAG;
1585  SDLoc dl(N);
1586  switch (N->getOpcode()) {
1587  default: break;
1588  case ISD::INTRINSIC_VOID:
1589  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
1590  case Intrinsic::xcore_outt:
1591  case Intrinsic::xcore_outct:
1592  case Intrinsic::xcore_chkct: {
1593  SDValue OutVal = N->getOperand(3);
1594  // These instructions ignore the high bits.
1595  if (OutVal.hasOneUse()) {
1596  unsigned BitWidth = OutVal.getValueSizeInBits();
1597  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
1598  KnownBits Known;
1600  !DCI.isBeforeLegalizeOps());
1601  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1602  if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
1603  TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
1604  DCI.CommitTargetLoweringOpt(TLO);
1605  }
1606  break;
1607  }
1608  case Intrinsic::xcore_setpt: {
1609  SDValue Time = N->getOperand(3);
1610  // This instruction ignores the high bits.
1611  if (Time.hasOneUse()) {
1612  unsigned BitWidth = Time.getValueSizeInBits();
1613  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
1614  KnownBits Known;
1615  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1616  !DCI.isBeforeLegalizeOps());
1617  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1618  if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
1619  TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
1620  DCI.CommitTargetLoweringOpt(TLO);
1621  }
1622  break;
1623  }
1624  }
1625  break;
1626  case XCoreISD::LADD: {
1627  SDValue N0 = N->getOperand(0);
1628  SDValue N1 = N->getOperand(1);
1629  SDValue N2 = N->getOperand(2);
1630  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1631  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1632  EVT VT = N0.getValueType();
1633 
1634  // canonicalize constant to RHS
1635  if (N0C && !N1C)
1636  return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);
1637 
1638  // fold (ladd 0, 0, x) -> 0, x & 1
1639  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1640  SDValue Carry = DAG.getConstant(0, dl, VT);
1641  SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
1642  DAG.getConstant(1, dl, VT));
1643  SDValue Ops[] = { Result, Carry };
1644  return DAG.getMergeValues(Ops, dl);
1645  }
1646 
1647  // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
1648  // low bit set
1649  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1650  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1651  VT.getSizeInBits() - 1);
1652  KnownBits Known = DAG.computeKnownBits(N2);
1653  if ((Known.Zero & Mask) == Mask) {
1654  SDValue Carry = DAG.getConstant(0, dl, VT);
1655  SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
1656  SDValue Ops[] = { Result, Carry };
1657  return DAG.getMergeValues(Ops, dl);
1658  }
1659  }
1660  }
1661  break;
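// Editor's note (illustrative): XCoreISD::LADD is treated here as a long
// addition producing {sum, carry-out} from a + b + carry-in. Thus
// ladd(0, 0, x) is just the low bit of the carry-in with a zero carry-out,
// and when the carry-in is known to be 0 or 1 and the carry-out has no uses,
// ladd(x, 0, y) reduces to an ordinary ADD of x and y.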
1662  case XCoreISD::LSUB: {
1663  SDValue N0 = N->getOperand(0);
1664  SDValue N1 = N->getOperand(1);
1665  SDValue N2 = N->getOperand(2);
1666  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1667  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1668  EVT VT = N0.getValueType();
1669 
1670  // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
1671  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1672  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1673  VT.getSizeInBits() - 1);
1674  KnownBits Known = DAG.computeKnownBits(N2);
1675  if ((Known.Zero & Mask) == Mask) {
1676  SDValue Borrow = N2;
1677  SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
1678  DAG.getConstant(0, dl, VT), N2);
1679  SDValue Ops[] = { Result, Borrow };
1680  return DAG.getMergeValues(Ops, dl);
1681  }
1682  }
1683 
1684  // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
1685  // low bit set
1686  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1687  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1688  VT.getSizeInBits() - 1);
1689  KnownBits Known = DAG.computeKnownBits(N2);
1690  if ((Known.Zero & Mask) == Mask) {
1691  SDValue Borrow = DAG.getConstant(0, dl, VT);
1692  SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
1693  SDValue Ops[] = { Result, Borrow };
1694  return DAG.getMergeValues(Ops, dl);
1695  }
1696  }
1697  }
1698  break;
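// Editor's note (illustrative): symmetrically, XCoreISD::LSUB is a long
// subtraction producing {difference, borrow-out} from a - b - borrow-in, so
// lsub(0, 0, x) with x known to be 0 or 1 is simply the negation of x (and
// the borrow-out equals x), while lsub(x, 0, y) becomes a plain SUB once the
// borrow-out is provably unused.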
1699  case XCoreISD::LMUL: {
1700  SDValue N0 = N->getOperand(0);
1701  SDValue N1 = N->getOperand(1);
1702  SDValue N2 = N->getOperand(2);
1703  SDValue N3 = N->getOperand(3);
1704  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1705  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1706  EVT VT = N0.getValueType();
1707  // Canonicalize multiplicative constant to RHS. If both multiplicative
1708  // operands are constant canonicalize smallest to RHS.
1709  if ((N0C && !N1C) ||
1710  (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
1711  return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
1712  N1, N0, N2, N3);
1713 
1714  // lmul(x, 0, a, b)
1715  if (N1C && N1C->isNullValue()) {
1716  // If the high result is unused fold to add(a, b)
1717  if (N->hasNUsesOfValue(0, 0)) {
1718  SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
1719  SDValue Ops[] = { Lo, Lo };
1720  return DAG.getMergeValues(Ops, dl);
1721  }
1722  // Otherwise fold to ladd(a, b, 0)
1723  SDValue Result =
1724  DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
1725  SDValue Carry(Result.getNode(), 1);
1726  SDValue Ops[] = { Carry, Result };
1727  return DAG.getMergeValues(Ops, dl);
1728  }
1729  }
1730  break;
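// Editor's note (illustrative): XCoreISD::LMUL mirrors the lmul instruction
// and is assumed here to compute the 64-bit value a*b + c + d, returned as a
// {high, low} pair. With b == 0 that collapses to c + d, which is why the
// combine above can use a single ADD when the high word is dead, or an LADD
// that still feeds any carry into the high word otherwise.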
1731  case ISD::ADD: {
1732  // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
1733  // lmul(x, y, a, b). The high result of lmul will be ignored.
1734  // This is only profitable if the intermediate results are unused
1735  // elsewhere.
1736  SDValue Mul0, Mul1, Addend0, Addend1;
1737  if (N->getValueType(0) == MVT::i32 &&
1738  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
1739  SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
1740  DAG.getVTList(MVT::i32, MVT::i32), Mul0,
1741  Mul1, Addend0, Addend1);
1742  SDValue Result(Ignored.getNode(), 1);
1743  return Result;
1744  }
1745  APInt HighMask = APInt::getHighBitsSet(64, 32);
1746  // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
1747  // lmul(x, y, a, b) if all operands are zero-extended. We do this
1748  // before type legalization as it is messy to match the operands after
1749  // that.
1750  if (N->getValueType(0) == MVT::i64 &&
1751  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
1752  DAG.MaskedValueIsZero(Mul0, HighMask) &&
1753  DAG.MaskedValueIsZero(Mul1, HighMask) &&
1754  DAG.MaskedValueIsZero(Addend0, HighMask) &&
1755  DAG.MaskedValueIsZero(Addend1, HighMask)) {
1756  SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1757  Mul0, DAG.getConstant(0, dl, MVT::i32));
1758  SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1759  Mul1, DAG.getConstant(0, dl, MVT::i32));
1760  SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1761  Addend0, DAG.getConstant(0, dl, MVT::i32));
1762  SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1763  Addend1, DAG.getConstant(0, dl, MVT::i32));
1764  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
1765  DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
1766  Addend0L, Addend1L);
1767  SDValue Lo(Hi.getNode(), 1);
1768  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
1769  }
1770  }
1771  break;
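// Editor's note (illustrative): the 64-bit pattern above matches expressions
// like "(uint64_t)x * y + a + b" where x, y, a and b were zero-extended from
// 32 bits; with every high word known to be zero, the whole computation fits
// one lmul of the low words, and the {high, low} results are reassembled
// with BUILD_PAIR.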
1772  case ISD::STORE: {
1773  // Replace unaligned store of unaligned load with memmove.
1774  StoreSDNode *ST = cast<StoreSDNode>(N);
1775  if (!DCI.isBeforeLegalize() ||
1776  allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
1777  ST->getAddressSpace(),
1778  ST->getAlignment()) ||
1779  ST->isVolatile() || ST->isIndexed()) {
1780  break;
1781  }
1782  SDValue Chain = ST->getChain();
1783 
1784  unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1785  assert((StoreBits % 8) == 0 &&
1786  "Store size in bits must be a multiple of 8");
1787  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
1788  ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
1789  unsigned Alignment = ST->getAlignment();
1790  if (Alignment >= ABIAlignment) {
1791  break;
1792  }
1793 
1794  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
1795  if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1796  LD->getAlignment() == Alignment &&
1797  !LD->isVolatile() && !LD->isIndexed() &&
1798  Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
1799  bool isTail = isInTailCallPosition(DAG, ST, Chain);
1800  return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
1801  LD->getBasePtr(),
1802  DAG.getConstant(StoreBits/8, dl, MVT::i32),
1803  Alignment, false, isTail, ST->getPointerInfo(),
1804  LD->getPointerInfo());
1805  }
1806  }
1807  break;
1808  }
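// Editor's note (illustrative): the STORE combine rewrites a misaligned
// "store (load p), q" pair (copying a packed or under-aligned object, for
// example) into a single memmove of the same width, but only before
// legalization, only when the access is below ABI alignment, and only when
// the load's chain reaches the store without intervening side effects.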
1809  }
1810  return SDValue();
1811 }
1812 
1813 void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1814  KnownBits &Known,
1815  const APInt &DemandedElts,
1816  const SelectionDAG &DAG,
1817  unsigned Depth) const {
1818  Known.resetAll();
1819  switch (Op.getOpcode()) {
1820  default: break;
1821  case XCoreISD::LADD:
1822  case XCoreISD::LSUB:
1823  if (Op.getResNo() == 1) {
1824  // Top bits of carry / borrow are clear.
1825  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1826  Known.getBitWidth() - 1);
1827  }
1828  break;
1829  case ISD::INTRINSIC_W_CHAIN:
1830  {
1831  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1832  switch (IntNo) {
1833  case Intrinsic::xcore_getts:
1834  // High bits are known to be zero.
1835  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1836  Known.getBitWidth() - 16);
1837  break;
1838  case Intrinsic::xcore_int:
1839  case Intrinsic::xcore_inct:
1840  // High bits are known to be zero.
1841  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1842  Known.getBitWidth() - 8);
1843  break;
1844  case Intrinsic::xcore_testct:
1845  // Result is either 0 or 1.
1846  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1847  Known.getBitWidth() - 1);
1848  break;
1849  case Intrinsic::xcore_testwct:
1850  // Result is in the range 0 - 4.
1851  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1852  Known.getBitWidth() - 3);
1853  break;
1854  }
1855  }
1856  break;
1857  }
1858 }
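// Editor's note (illustrative): exposing these known-bits facts lets generic
// DAG combines reason about the target nodes; for instance, knowing that the
// carry result of LADD fits in a single bit is presumably what allows the
// (ladd x, 0, y) fold earlier in this file to fire when y is a chained
// carry-out.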
1859 
1860 //===----------------------------------------------------------------------===//
1861 // Addressing mode description hooks
1862 //===----------------------------------------------------------------------===//
1863 
1864 static inline bool isImmUs(int64_t val)
1865 {
1866  return (val >= 0 && val <= 11);
1867 }
1868 
1869 static inline bool isImmUs2(int64_t val)
1870 {
1871  return (val%2 == 0 && isImmUs(val/2));
1872 }
1873 
1874 static inline bool isImmUs4(int64_t val)
1875 {
1876  return (val%4 == 0 && isImmUs(val/4));
1877 }
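// Editor's note (illustrative): these predicates model the small scaled
// immediate range 0..11 used by the load/store offset forms. For example
// isImmUs2(22) holds (22/2 == 11) but isImmUs2(24) does not, and
// isImmUs4(44) holds while isImmUs4(48) is out of range.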
1878 
1879 /// isLegalAddressingMode - Return true if the addressing mode represented
1880 /// by AM is legal for this target, for a load/store of the specified type.
1881 bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1882  const AddrMode &AM, Type *Ty,
1883  unsigned AS,
1884  Instruction *I) const {
1885  if (Ty->getTypeID() == Type::VoidTyID)
1886  return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
1887 
1888  unsigned Size = DL.getTypeAllocSize(Ty);
1889  if (AM.BaseGV) {
1890  return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
1891  AM.BaseOffs%4 == 0;
1892  }
1893 
1894  switch (Size) {
1895  case 1:
1896  // reg + imm
1897  if (AM.Scale == 0) {
1898  return isImmUs(AM.BaseOffs);
1899  }
1900  // reg + reg
1901  return AM.Scale == 1 && AM.BaseOffs == 0;
1902  case 2:
1903  case 3:
1904  // reg + imm
1905  if (AM.Scale == 0) {
1906  return isImmUs2(AM.BaseOffs);
1907  }
1908  // reg + reg<<1
1909  return AM.Scale == 2 && AM.BaseOffs == 0;
1910  default:
1911  // reg + imm
1912  if (AM.Scale == 0) {
1913  return isImmUs4(AM.BaseOffs);
1914  }
1915  // reg + reg<<2
1916  return AM.Scale == 4 && AM.BaseOffs == 0;
1917  }
1918 }
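// Editor's note (illustrative): taken together, the cases above mean a
// word-sized (or larger) access may use "base + imm" with the offset a
// multiple of 4 in 0..44, or "base + index" with the index scaled by 4;
// accesses of 2 or 3 bytes take even offsets up to 22 or an index scaled by
// 2; byte accesses take offsets 0..11 or an unscaled index; and
// global-relative addressing is only legal for word-sized or larger objects
// with a 4-byte-aligned offset and no extra base register.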
1919 
1920 //===----------------------------------------------------------------------===//
1921 // XCore Inline Assembly Support
1922 //===----------------------------------------------------------------------===//
1923 
1924 std::pair<unsigned, const TargetRegisterClass *>
1925 XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1926  StringRef Constraint,
1927  MVT VT) const {
1928  if (Constraint.size() == 1) {
1929  switch (Constraint[0]) {
1930  default : break;
1931  case 'r':
1932  return std::make_pair(0U, &XCore::GRRegsRegClass);
1933  }
1934  }
1935  // Use the default implementation in TargetLowering to convert the register
1936  // constraint into a member of a register class.
1937  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1938 }