1 //===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the XCoreTargetLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12 
#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "xcore-lower"

44 const char *XCoreTargetLowering::
45 getTargetNodeName(unsigned Opcode) const
46 {
47  switch ((XCoreISD::NodeType)Opcode)
48  {
49  case XCoreISD::FIRST_NUMBER : break;
50  case XCoreISD::BL : return "XCoreISD::BL";
51  case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
52  case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
53  case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
54  case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
55  case XCoreISD::STWSP : return "XCoreISD::STWSP";
56  case XCoreISD::RETSP : return "XCoreISD::RETSP";
57  case XCoreISD::LADD : return "XCoreISD::LADD";
58  case XCoreISD::LSUB : return "XCoreISD::LSUB";
59  case XCoreISD::LMUL : return "XCoreISD::LMUL";
60  case XCoreISD::MACCU : return "XCoreISD::MACCU";
61  case XCoreISD::MACCS : return "XCoreISD::MACCS";
62  case XCoreISD::CRC8 : return "XCoreISD::CRC8";
63  case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
64  case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
65  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
66  case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
67  case XCoreISD::MEMBARRIER : return "XCoreISD::MEMBARRIER";
68  }
69  return nullptr;
70 }

XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
                                         const XCoreSubtarget &Subtarget)
    : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

90  // XCore does not have the NodeTypes below.
93 
94  // 64bit
104 
105  // Bit Manipulation
109 
111 
112  // Jump tables.
114 
117 
118  // Conversion of i64 -> double produces constantpool nodes
120 
121  // Loads
122  for (MVT VT : MVT::integer_valuetypes()) {
126 
129  }
130 
131  // Custom expand misaligned loads / stores.
134 
135  // Varargs
140 
141  // Dynamic stack
145 
146  // Exception handling
149 
150  // Atomic operations
151  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
  // As we are always Sequentially Consistent, an ATOMIC_FENCE becomes a no-op.
156 
157  // TRAMPOLINE is custom lowered.
160 
161  // We want to custom lower some of our intrinsics.
163 
167 
168  // We have target-specific dag combine patterns for the following nodes:
173 
176 }

bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
180  return false;
181 
182  EVT VT1 = Val.getValueType();
183  if (!VT1.isSimple() || !VT1.isInteger() ||
184  !VT2.isSimple() || !VT2.isInteger())
185  return false;
186 
187  switch (VT1.getSimpleVT().SimpleTy) {
188  default: break;
189  case MVT::i8:
190  return true;
191  }
192 
193  return false;
194 }

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
199  {
200  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
201  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
202  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
203  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
204  case ISD::BR_JT: return LowerBR_JT(Op, DAG);
205  case ISD::LOAD: return LowerLOAD(Op, DAG);
206  case ISD::STORE: return LowerSTORE(Op, DAG);
207  case ISD::VAARG: return LowerVAARG(Op, DAG);
208  case ISD::VASTART: return LowerVASTART(Op, DAG);
209  case ISD::SMUL_LOHI: return LowerSMUL_LOHI(Op, DAG);
210  case ISD::UMUL_LOHI: return LowerUMUL_LOHI(Op, DAG);
211  // FIXME: Remove these when LegalizeDAGTypes lands.
212  case ISD::ADD:
213  case ISD::SUB: return ExpandADDSUB(Op.getNode(), DAG);
214  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
215  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
216  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
217  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
218  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
219  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
220  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
221  case ISD::ATOMIC_LOAD: return LowerATOMIC_LOAD(Op, DAG);
222  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
223  default:
224  llvm_unreachable("unimplemented operand");
225  }
226 }
227 
228 /// ReplaceNodeResults - Replace the results of node with an illegal result
229 /// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
233  switch (N->getOpcode()) {
234  default:
235  llvm_unreachable("Don't know how to custom expand this!");
236  case ISD::ADD:
237  case ISD::SUB:
238  Results.push_back(ExpandADDSUB(N, DAG));
239  return;
240  }
241 }
242 
243 //===----------------------------------------------------------------------===//
244 // Misc Lower Operation implementation
245 //===----------------------------------------------------------------------===//
246 
247 SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
248  const GlobalValue *GV,
249  SelectionDAG &DAG) const {
250  // FIXME there is no actual debug info here
251  SDLoc dl(GA);
252 
253  if (GV->getValueType()->isFunctionTy())
254  return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
255 
256  const auto *GVar = dyn_cast<GlobalVariable>(GV);
257  if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
258  (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
259  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
260 
261  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
262 }
263 
264 static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
    return true;
267 
268  Type *ObjType = GV->getValueType();
269  if (!ObjType->isSized())
270  return false;
271 
272  auto &DL = GV->getParent()->getDataLayout();
273  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
274  return ObjSize < CodeModelLargeSize && ObjSize != 0;
275 }
276 
277 SDValue XCoreTargetLowering::
278 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
279 {
280  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
281  const GlobalValue *GV = GN->getGlobal();
282  SDLoc DL(GN);
283  int64_t Offset = GN->getOffset();
284  if (IsSmallObject(GV, *this)) {
285  // We can only fold positive offsets that are a multiple of the word size.
286  int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
287  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
288  GA = getGlobalAddressWrapper(GA, GV, DAG);
289  // Handle the rest of the offset.
290  if (Offset != FoldedOffset) {
291  SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
292  GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
293  }
294  return GA;
295  } else {
296  // Ideally we would not fold in offset with an index <= 11.
297  Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
298  Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
299  Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *Idx = ConstantInt::get(Ty, Offset);
    Constant *GAI = ConstantExpr::getGetElementPtr(
        Type::getInt8Ty(*DAG.getContext()), GA, Idx);
    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
                       DAG.getEntryNode(), CP, MachinePointerInfo());
  }
307 }
308 
309 SDValue XCoreTargetLowering::
310 LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
311 {
312  SDLoc DL(Op);
313  auto PtrVT = getPointerTy(DAG.getDataLayout());
314  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
315  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);
316 
317  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
318 }
319 
320 SDValue XCoreTargetLowering::
321 LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
322 {
323  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
324  // FIXME there isn't really debug info here
325  SDLoc dl(CP);
326  EVT PtrVT = Op.getValueType();
327  SDValue Res;
328  if (CP->isMachineConstantPoolEntry()) {
329  Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
330  CP->getAlignment(), CP->getOffset());
331  } else {
332  Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
333  CP->getAlignment(), CP->getOffset());
334  }
335  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
336 }
337 

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}
341 
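/// Lower an ISD::BR_JT node. Jump tables with at most 32 entries are emitted
/// directly as an XCoreISD::BR_JT; larger tables scale the index by two and
/// use the 32-bit entry form, XCoreISD::BR_JT32.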
342 SDValue XCoreTargetLowering::
343 LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
344 {
345  SDValue Chain = Op.getOperand(0);
346  SDValue Table = Op.getOperand(1);
347  SDValue Index = Op.getOperand(2);
348  SDLoc dl(Op);
349  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
350  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
353  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
354 
355  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
356  if (NumEntries <= 32) {
357  return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
358  }
359  assert((NumEntries >> 31) == 0);
360  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
361  DAG.getConstant(1, dl, MVT::i32));
362  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
363  ScaledIndex);
364 }
365 
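/// Load a word from (Base + Offset), where Base is known to be word aligned
/// but Offset may not be. If the offset is word aligned this is a single
/// load; otherwise the result is assembled from the two surrounding aligned
/// words using shifts and an OR.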
366 SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
367  const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
368  SelectionDAG &DAG) const {
369  auto PtrVT = getPointerTy(DAG.getDataLayout());
370  if ((Offset & 0x3) == 0) {
371  return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
372  }
373  // Lower to pair of consecutive word aligned loads plus some bit shifting.
374  int32_t HighOffset = alignTo(Offset, 4);
375  int32_t LowOffset = HighOffset - 4;
376  SDValue LowAddr, HighAddr;
377  if (GlobalAddressSDNode *GASD =
378  dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
379  LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
380  LowOffset);
381  HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
382  HighOffset);
383  } else {
384  LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
385  DAG.getConstant(LowOffset, DL, MVT::i32));
386  HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
387  DAG.getConstant(HighOffset, DL, MVT::i32));
388  }
389  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
390  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
391 
392  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
393  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
394  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
395  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
396  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
397  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
398  High.getValue(1));
399  SDValue Ops[] = { Result, Chain };
400  return DAG.getMergeValues(Ops, DL);
401 }
402 
static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
405  KnownBits Known = DAG.computeKnownBits(Value);
406  return Known.countMinTrailingZeros() >= 2;
407 }
408 
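/// Custom lowering for misaligned i32 loads. Loads from a base known to be
/// word aligned are handled by lowerLoadWordFromAlignedBasePlusOffset,
/// 2-byte aligned loads become a pair of halfword loads, and everything else
/// is lowered to a call to __misaligned_load.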
409 SDValue XCoreTargetLowering::
410 LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
411  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsMisalignedMemoryAccesses(LD->getMemoryVT(),
                                     LD->getAddressSpace(),
                                     LD->getAlignment()))
    return SDValue();
420 
421  auto &TD = DAG.getDataLayout();
422  unsigned ABIAlignment = TD.getABITypeAlignment(
423  LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
424  // Leave aligned load alone.
425  if (LD->getAlignment() >= ABIAlignment)
426  return SDValue();
427 
428  SDValue Chain = LD->getChain();
429  SDValue BasePtr = LD->getBasePtr();
430  SDLoc DL(Op);
431 
432  if (!LD->isVolatile()) {
433  const GlobalValue *GV;
434  int64_t Offset = 0;
435  if (DAG.isBaseWithConstantOffset(BasePtr) &&
436  isWordAligned(BasePtr->getOperand(0), DAG)) {
437  SDValue NewBasePtr = BasePtr->getOperand(0);
438  Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
439  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
440  Offset, DAG);
441  }
442  if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
443  MinAlign(GV->getAlignment(), 4) == 4) {
444  SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
445  BasePtr->getValueType(0));
446  return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
447  Offset, DAG);
448  }
449  }
450 
451  if (LD->getAlignment() == 2) {
452  SDValue Low =
453  DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
454  LD->getPointerInfo(), MVT::i16,
455  /* Alignment = */ 2, LD->getMemOperand()->getFlags());
456  SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
457  DAG.getConstant(2, DL, MVT::i32));
458  SDValue High =
        DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
                       LD->getPointerInfo().getWithOffset(2), MVT::i16,
                       /* Alignment = */ 2, LD->getMemOperand()->getFlags());
462  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
463  DAG.getConstant(16, DL, MVT::i32));
464  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
465  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
466  High.getValue(1));
467  SDValue Ops[] = { Result, Chain };
468  return DAG.getMergeValues(Ops, DL);
469  }
470 
471  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = TD.getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
482  CallingConv::C, IntPtrTy,
483  DAG.getExternalSymbol("__misaligned_load",
484  getPointerTy(DAG.getDataLayout())),
485  std::move(Args));
486 
487  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
488  SDValue Ops[] = { CallResult.first, CallResult.second };
489  return DAG.getMergeValues(Ops, DL);
490 }
491 
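/// Custom lowering for misaligned i32 stores: 2-byte aligned stores are split
/// into two halfword stores, anything else is lowered to a call to
/// __misaligned_store.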
492 SDValue XCoreTargetLowering::
493 LowerSTORE(SDValue Op, SelectionDAG &DAG) const
494 {
495  StoreSDNode *ST = cast<StoreSDNode>(Op);
496  assert(!ST->isTruncatingStore() && "Unexpected store type");
497  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
                                     ST->getAddressSpace(),
                                     ST->getAlignment())) {
501  return SDValue();
502  }
503  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
504  ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
505  // Leave aligned store alone.
506  if (ST->getAlignment() >= ABIAlignment) {
507  return SDValue();
508  }
509  SDValue Chain = ST->getChain();
510  SDValue BasePtr = ST->getBasePtr();
511  SDValue Value = ST->getValue();
512  SDLoc dl(Op);
513 
514  if (ST->getAlignment() == 2) {
515  SDValue Low = Value;
516  SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
517  DAG.getConstant(16, dl, MVT::i32));
518  SDValue StoreLow = DAG.getTruncStore(
519  Chain, dl, Low, BasePtr, ST->getPointerInfo(), MVT::i16,
520  /* Alignment = */ 2, ST->getMemOperand()->getFlags());
521  SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
522  DAG.getConstant(2, dl, MVT::i32));
523  SDValue StoreHigh = DAG.getTruncStore(
524  Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
525  MVT::i16, /* Alignment = */ 2, ST->getMemOperand()->getFlags());
526  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
527  }
528 
529  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
533 
534  Entry.Ty = IntPtrTy;
535  Entry.Node = BasePtr;
536  Args.push_back(Entry);
537 
538  Entry.Node = Value;
539  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
      CallingConv::C, Type::getVoidTy(*DAG.getContext()),
      DAG.getExternalSymbol("__misaligned_store",
545  getPointerTy(DAG.getDataLayout())),
546  std::move(Args));
547 
548  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
549  return CallResult.second;
550 }
551 
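/// Lower SMUL_LOHI to a single XCoreISD::MACCS node with zero addends; the
/// node produces the high and low halves of the signed 32x32->64 product.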
552 SDValue XCoreTargetLowering::
553 LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
554 {
555  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
556  "Unexpected operand to lower!");
557  SDLoc dl(Op);
558  SDValue LHS = Op.getOperand(0);
559  SDValue RHS = Op.getOperand(1);
560  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
561  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
562  DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
563  LHS, RHS);
564  SDValue Lo(Hi.getNode(), 1);
565  SDValue Ops[] = { Lo, Hi };
566  return DAG.getMergeValues(Ops, dl);
567 }
568 
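/// Lower UMUL_LOHI to a single XCoreISD::LMUL node with zero addends; the
/// node produces the high and low halves of the unsigned 32x32->64 product.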
569 SDValue XCoreTargetLowering::
570 LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
571 {
572  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
573  "Unexpected operand to lower!");
574  SDLoc dl(Op);
575  SDValue LHS = Op.getOperand(0);
576  SDValue RHS = Op.getOperand(1);
577  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
578  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
579  DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
580  Zero, Zero);
581  SDValue Lo(Hi.getNode(), 1);
582  SDValue Ops[] = { Lo, Hi };
583  return DAG.getMergeValues(Ops, dl);
584 }
585 
586 /// isADDADDMUL - Return whether Op is in a form that is equivalent to
587 /// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
588 /// each intermediate result in the calculation must also have a single use.
589 /// If the Op is in the correct form the constituent parts are written to Mul0,
590 /// Mul1, Addend0 and Addend1.
591 static bool
592 isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
593  SDValue &Addend1, bool requireIntermediatesHaveOneUse)
594 {
595  if (Op.getOpcode() != ISD::ADD)
596  return false;
597  SDValue N0 = Op.getOperand(0);
598  SDValue N1 = Op.getOperand(1);
599  SDValue AddOp;
600  SDValue OtherOp;
601  if (N0.getOpcode() == ISD::ADD) {
602  AddOp = N0;
603  OtherOp = N1;
604  } else if (N1.getOpcode() == ISD::ADD) {
605  AddOp = N1;
606  OtherOp = N0;
607  } else {
608  return false;
609  }
610  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
611  return false;
612  if (OtherOp.getOpcode() == ISD::MUL) {
613  // add(add(a,b),mul(x,y))
614  if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
615  return false;
616  Mul0 = OtherOp.getOperand(0);
617  Mul1 = OtherOp.getOperand(1);
618  Addend0 = AddOp.getOperand(0);
619  Addend1 = AddOp.getOperand(1);
620  return true;
621  }
622  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
623  // add(add(mul(x,y),a),b)
624  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
625  return false;
626  Mul0 = AddOp.getOperand(0).getOperand(0);
627  Mul1 = AddOp.getOperand(0).getOperand(1);
628  Addend0 = AddOp.getOperand(1);
629  Addend1 = OtherOp;
630  return true;
631  }
632  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
633  // add(add(a,mul(x,y)),b)
634  if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
635  return false;
636  Mul0 = AddOp.getOperand(1).getOperand(0);
637  Mul1 = AddOp.getOperand(1).getOperand(1);
638  Addend0 = AddOp.getOperand(0);
639  Addend1 = OtherOp;
640  return true;
641  }
642  return false;
643 }
644 
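/// Try to lower a 64-bit add of a 64-bit multiply to a single MACCU / MACCS
/// node. This pays off when both multiply operands are known to fit in 32
/// bits (zero- or sign-extended); otherwise the missing cross terms are
/// formed with 32-bit multiplies and folded into the high word.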
645 SDValue XCoreTargetLowering::
646 TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
647 {
648  SDValue Mul;
649  SDValue Other;
650  if (N->getOperand(0).getOpcode() == ISD::MUL) {
651  Mul = N->getOperand(0);
652  Other = N->getOperand(1);
653  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
654  Mul = N->getOperand(1);
655  Other = N->getOperand(0);
656  } else {
657  return SDValue();
658  }
659  SDLoc dl(N);
660  SDValue LL, RL, AddendL, AddendH;
661  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
662  Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
663  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
664  Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
665  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
666  Other, DAG.getConstant(0, dl, MVT::i32));
667  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
668  Other, DAG.getConstant(1, dl, MVT::i32));
669  APInt HighMask = APInt::getHighBitsSet(64, 32);
670  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
671  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
672  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
673  DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
674  // The inputs are both zero-extended.
675  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
676  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
677  AddendL, LL, RL);
678  SDValue Lo(Hi.getNode(), 1);
679  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
680  }
681  if (LHSSB > 32 && RHSSB > 32) {
682  // The inputs are both sign-extended.
683  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
684  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
685  AddendL, LL, RL);
686  SDValue Lo(Hi.getNode(), 1);
687  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
688  }
689  SDValue LH, RH;
690  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
691  Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
692  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
693  Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
694  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
695  DAG.getVTList(MVT::i32, MVT::i32), AddendH,
696  AddendL, LL, RL);
697  SDValue Lo(Hi.getNode(), 1);
698  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
699  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
700  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
701  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
702  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
703 }
704 
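/// Expand a 64-bit add or subtract into a pair of 32-bit LADD / LSUB nodes,
/// threading the carry (or borrow) produced by the low half into the high
/// half.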
705 SDValue XCoreTargetLowering::
706 ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
707 {
708  assert(N->getValueType(0) == MVT::i64 &&
709  (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
710  "Unknown operand to lower!");
711 
712  if (N->getOpcode() == ISD::ADD)
713  if (SDValue Result = TryExpandADDWithMul(N, DAG))
714  return Result;
715 
716  SDLoc dl(N);
717 
  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(1, dl, MVT::i32));
731 
732  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
736  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
737  LHSL, RHSL, Zero);
738  SDValue Carry(Lo.getNode(), 1);
739 
740  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
741  LHSH, RHSH, Carry);
742  SDValue Ignored(Hi.getNode(), 1);
743  // Merge the pieces
744  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
745 }
746 
747 SDValue XCoreTargetLowering::
748 LowerVAARG(SDValue Op, SelectionDAG &DAG) const
749 {
  // Whilst llvm does not support aggregate varargs we can ignore
751  // the possibility of the ValueType being an implicit byVal vararg.
752  SDNode *Node = Op.getNode();
753  EVT VT = Node->getValueType(0); // not an aggregate
754  SDValue InChain = Node->getOperand(0);
755  SDValue VAListPtr = Node->getOperand(1);
756  EVT PtrVT = VAListPtr.getValueType();
757  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
758  SDLoc dl(Node);
759  SDValue VAList =
760  DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
761  // Increment the pointer, VAList, to the next vararg
762  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
763  DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
764  dl));
765  // Store the incremented VAList to the legalized pointer
766  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
767  MachinePointerInfo(SV));
768  // Load the actual argument out of the pointer VAList
769  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
770 }
771 
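/// Lower VASTART by storing the frame index of the vararg save area (set up
/// in LowerCCCArguments) to the va_list slot passed as the second operand.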
772 SDValue XCoreTargetLowering::
773 LowerVASTART(SDValue Op, SelectionDAG &DAG) const
774 {
775  SDLoc dl(Op);
776  // vastart stores the address of the VarArgsFrameIndex slot into the
777  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo());
}
784 
785 SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
786  SelectionDAG &DAG) const {
  // This node represents llvm.frameaddress on the DAG.
788  // It takes one operand, the index of the frame address to return.
789  // An index of zero corresponds to the current function's frame address.
790  // An index of one to the parent's frame address, and so on.
791  // Depths > 0 not supported yet!
792  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
793  return SDValue();
794 
  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
797  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
798  RegInfo->getFrameRegister(MF), MVT::i32);
799 }
800 
801 SDValue XCoreTargetLowering::
802 LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  // This node represents llvm.returnaddress on the DAG.
804  // It takes one operand, the index of the return address to return.
805  // An index of zero corresponds to the current function's return address.
806  // An index of one to the parent's return address, and so on.
807  // Depths > 0 not supported yet!
808  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
809  return SDValue();
810 
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  int FI = XFI->createLRSpillSlot(MF);
814  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
815  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                     DAG.getEntryNode(), FIN,
                     MachinePointerInfo::getFixedStack(MF, FI));
}
819 
820 SDValue XCoreTargetLowering::
821 LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
822  // This node represents offset from frame pointer to first on-stack argument.
823  // This is needed for correct stack adjustment during unwind.
  // However, we don't know the offset until after the frame has been finalised.
  // This is done during the XCoreFTAOElim pass.
  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
}
828 
829 SDValue XCoreTargetLowering::
830 LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
831  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
832  // This node represents 'eh_return' gcc dwarf builtin, which is used to
833  // return from exception. The general meaning is: adjust stack by OFFSET and
834  // pass execution to HANDLER.
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
837  SDValue Offset = Op.getOperand(1);
838  SDValue Handler = Op.getOperand(2);
839  SDLoc dl(Op);
840 
841  // Absolute SP = (FP + FrameToArgs) + Offset
842  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
843  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
844  RegInfo->getFrameRegister(MF), MVT::i32);
845  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
846  MVT::i32);
847  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
848  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);
849 
850  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
851  // which leaves 2 caller saved registers, R2 & R3 for us to use.
852  unsigned StackReg = XCore::R2;
853  unsigned HandlerReg = XCore::R3;
854 
855  SDValue OutChains[] = {
856  DAG.getCopyToReg(Chain, dl, StackReg, Stack),
857  DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
858  };
859 
860  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
861 
862  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
863  DAG.getRegister(StackReg, MVT::i32),
864  DAG.getRegister(HandlerReg, MVT::i32));
865 
866 }
867 
868 SDValue XCoreTargetLowering::
869 LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
870  return Op.getOperand(0);
871 }
872 
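/// Materialise an on-stack trampoline: five word stores write a small thunk
/// (the instruction sequence listed below) that passes the 'nest' value and
/// branches to the nested function.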
873 SDValue XCoreTargetLowering::
874 LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
875  SDValue Chain = Op.getOperand(0);
876  SDValue Trmp = Op.getOperand(1); // trampoline
877  SDValue FPtr = Op.getOperand(2); // nested function
878  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
879 
880  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
881 
882  // .align 4
883  // LDAPF_u10 r11, nest
884  // LDW_2rus r11, r11[0]
885  // STWSP_ru6 r11, sp[0]
886  // LDAPF_u10 r11, fptr
887  // LDW_2rus r11, r11[0]
888  // BAU_1r r11
889  // nest:
890  // .word nest
891  // fptr:
892  // .word fptr
893  SDValue OutChains[5];
894 
895  SDValue Addr = Trmp;
896 
897  SDLoc dl(Op);
898  OutChains[0] =
899  DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
900  MachinePointerInfo(TrmpAddr));
901 
902  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
903  DAG.getConstant(4, dl, MVT::i32));
904  OutChains[1] =
905  DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
906  MachinePointerInfo(TrmpAddr, 4));
907 
908  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
909  DAG.getConstant(8, dl, MVT::i32));
910  OutChains[2] =
911  DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
912  MachinePointerInfo(TrmpAddr, 8));
913 
914  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
915  DAG.getConstant(12, dl, MVT::i32));
916  OutChains[3] =
917  DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));
918 
919  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
920  DAG.getConstant(16, dl, MVT::i32));
921  OutChains[4] =
922  DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));
923 
924  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
925 }
926 
927 SDValue XCoreTargetLowering::
928 LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
929  SDLoc DL(Op);
930  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
931  switch (IntNo) {
932  case Intrinsic::xcore_crc8:
933  EVT VT = Op.getValueType();
934  SDValue Data =
935  DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
936  Op.getOperand(1), Op.getOperand(2) , Op.getOperand(3));
937  SDValue Crc(Data.getNode(), 1);
938  SDValue Results[] = { Crc, Data };
939  return DAG.getMergeValues(Results, DL);
940  }
941  return SDValue();
942 }
943 
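/// An ATOMIC_FENCE is lowered to a single XCoreISD::MEMBARRIER node.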
944 SDValue XCoreTargetLowering::
945 LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
946  SDLoc DL(Op);
947  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
948 }
949 
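/// Lower naturally aligned atomic loads to ordinary (extending) loads; the
/// fences requested via setInsertFencesForAtomic provide the ordering, so
/// only unordered / monotonic accesses are expected here.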
950 SDValue XCoreTargetLowering::
951 LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
952  AtomicSDNode *N = cast<AtomicSDNode>(Op);
953  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
  assert((N->getOrdering() == AtomicOrdering::Unordered ||
          N->getOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
957  if (N->getMemoryVT() == MVT::i32) {
958  if (N->getAlignment() < 4)
959  report_fatal_error("atomic load must be aligned");
960  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
961  N->getChain(), N->getBasePtr(), N->getPointerInfo(),
962  N->getAlignment(), N->getMemOperand()->getFlags(),
963  N->getAAInfo(), N->getRanges());
964  }
965  if (N->getMemoryVT() == MVT::i16) {
966  if (N->getAlignment() < 2)
967  report_fatal_error("atomic load must be aligned");
968  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
969  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
970  N->getAlignment(), N->getMemOperand()->getFlags(),
971  N->getAAInfo());
972  }
973  if (N->getMemoryVT() == MVT::i8)
974  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
975  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
976  N->getAlignment(), N->getMemOperand()->getFlags(),
977  N->getAAInfo());
978  return SDValue();
979 }
980 
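/// Lower naturally aligned atomic stores to ordinary (truncating) stores,
/// mirroring LowerATOMIC_LOAD above.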
981 SDValue XCoreTargetLowering::
982 LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
983  AtomicSDNode *N = cast<AtomicSDNode>(Op);
984  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
  assert((N->getOrdering() == AtomicOrdering::Unordered ||
          N->getOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
988  if (N->getMemoryVT() == MVT::i32) {
989  if (N->getAlignment() < 4)
990  report_fatal_error("atomic store must be aligned");
991  return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
992  N->getPointerInfo(), N->getAlignment(),
993  N->getMemOperand()->getFlags(), N->getAAInfo());
994  }
995  if (N->getMemoryVT() == MVT::i16) {
996  if (N->getAlignment() < 2)
997  report_fatal_error("atomic store must be aligned");
998  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
999  N->getBasePtr(), N->getPointerInfo(), MVT::i16,
1000  N->getAlignment(), N->getMemOperand()->getFlags(),
1001  N->getAAInfo());
1002  }
1003  if (N->getMemoryVT() == MVT::i8)
1004  return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
1005  N->getBasePtr(), N->getPointerInfo(), MVT::i8,
1006  N->getAlignment(), N->getMemOperand()->getFlags(),
1007  N->getAAInfo());
1008  return SDValue();
1009 }
1010 
MachineMemOperand::Flags
XCoreTargetLowering::getMMOFlags(const Instruction &I) const {
  // Because of how we convert atomic_load and atomic_store to normal loads and
  // stores in the DAG, we need to ensure that the MMOs are marked volatile
  // since DAGCombine hasn't been updated to account for atomic, but non
  // volatile loads. (See D57601)
  if (auto *SI = dyn_cast<StoreInst>(&I))
    if (SI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *LI = dyn_cast<LoadInst>(&I))
    if (LI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    if (AI->isAtomic())
      return MachineMemOperand::MOVolatile;

  return MachineMemOperand::MONone;
}
1031 
1032 //===----------------------------------------------------------------------===//
1033 // Calling Convention Implementation
1034 //===----------------------------------------------------------------------===//
1035 
1036 #include "XCoreGenCallingConv.inc"
1037 
1038 //===----------------------------------------------------------------------===//
1039 // Call Calling Convention Implementation
1040 //===----------------------------------------------------------------------===//
1041 
1042 /// XCore call implementation
1043 SDValue
1044 XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1045  SmallVectorImpl<SDValue> &InVals) const {
1046  SelectionDAG &DAG = CLI.DAG;
1047  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
1052  SDValue Callee = CLI.Callee;
1053  bool &isTailCall = CLI.IsTailCall;
1054  CallingConv::ID CallConv = CLI.CallConv;
1055  bool isVarArg = CLI.IsVarArg;
1056 
1057  // XCore target does not yet support tail call optimization.
1058  isTailCall = false;
1059 
1060  // For now, only CallingConv::C implemented
1061  switch (CallConv)
1062  {
1063  default:
1064  report_fatal_error("Unsupported calling convention");
1065  case CallingConv::Fast:
1066  case CallingConv::C:
1067  return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
1068  Outs, OutVals, Ins, dl, DAG, InVals);
1069  }
1070 }
1071 
1072 /// LowerCallResult - Lower the result values of a call into the
1073 /// appropriate copies out of appropriate physical registers / memory locations.
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
1076  const SDLoc &dl, SelectionDAG &DAG,
1077  SmallVectorImpl<SDValue> &InVals) {
1078  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
1079  // Copy results out of physical registers.
1080  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1081  const CCValAssign &VA = RVLocs[i];
1082  if (VA.isRegLoc()) {
1083  Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
1084  InFlag).getValue(1);
1085  InFlag = Chain.getValue(2);
1086  InVals.push_back(Chain.getValue(0));
1087  } else {
1088  assert(VA.isMemLoc());
1089  ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
1090  InVals.size()));
1091  // Reserve space for this result.
1092  InVals.push_back(SDValue());
1093  }
1094  }
1095 
1096  // Copy results out of memory.
1097  SmallVector<SDValue, 4> MemOpChains;
1098  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
1099  int offset = ResultMemLocs[i].first;
1100  unsigned index = ResultMemLocs[i].second;
1101  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
1102  SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
1103  SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
1104  InVals[index] = load;
1105  MemOpChains.push_back(load.getValue(1));
1106  }
1107 
  // Transform all load nodes into one single node because
1109  // all load nodes are independent of each other.
1110  if (!MemOpChains.empty())
1111  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1112 
1113  return Chain;
1114 }
1115 
1116 /// LowerCCCCallTo - functions arguments are copied from virtual
1117 /// regs to (physical regs)/(stack frame), CALLSEQ_START and
1118 /// CALLSEQ_END are emitted.
1119 /// TODO: isTailCall, sret.
1120 SDValue XCoreTargetLowering::LowerCCCCallTo(
1121  SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
1122  bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
1123  const SmallVectorImpl<SDValue> &OutVals,
1124  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1125  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1126 
1127  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1130  *DAG.getContext());
1131 
1132  // The ABI dictates there should be one stack slot available to the callee
1133  // on function entry (for saving lr).
1134  CCInfo.AllocateStack(4, 4);
1135 
1136  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
1137 

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
1140  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1141  *DAG.getContext());
1142  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
1143  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
1144 
1145  // Get a count of how many bytes are to be pushed on the stack.
1146  unsigned NumBytes = RetCCInfo.getNextStackOffset();
1147  auto PtrVT = getPointerTy(DAG.getDataLayout());
1148 
1149  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
1150 

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;
1153 
1154  // Walk the register/memloc assignments, inserting copies/loads.
1155  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1156  CCValAssign &VA = ArgLocs[i];
1157  SDValue Arg = OutVals[i];
1158 
1159  // Promote the value if needed.
1160  switch (VA.getLocInfo()) {
1161  default: llvm_unreachable("Unknown loc info!");
1162  case CCValAssign::Full: break;
1163  case CCValAssign::SExt:
1164  Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1165  break;
1166  case CCValAssign::ZExt:
1167  Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1168  break;
1169  case CCValAssign::AExt:
1170  Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1171  break;
1172  }
1173 
1174  // Arguments that can be passed on register must be kept at
1175  // RegsToPass vector
1176  if (VA.isRegLoc()) {
1177  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1178  } else {
1179  assert(VA.isMemLoc());
1180 
1181  int Offset = VA.getLocMemOffset();
1182 
1183  MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
1184  Chain, Arg,
1185  DAG.getConstant(Offset/4, dl,
1186  MVT::i32)));
1187  }
1188  }
1189 
1190  // Transform all store nodes into one single node because
1191  // all store nodes are independent of each other.
1192  if (!MemOpChains.empty())
1193  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1194 
1195  // Build a sequence of copy-to-reg nodes chained together with token
1196  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
1198  // stuck together.
1199  SDValue InFlag;
1200  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1201  Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1202  RegsToPass[i].second, InFlag);
1203  InFlag = Chain.getValue(1);
1204  }
1205 
1206  // If the callee is a GlobalAddress node (quite common, every direct call is)
1207  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1208  // Likewise ExternalSymbol -> TargetExternalSymbol.
1209  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1210  Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
1211  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1212  Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1213 
1214  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
1215  // = Chain, Callee, Reg#1, Reg#2, ...
1216  //
1217  // Returns a chain & a flag for retval copy to use.
1218  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 4> Ops;
  Ops.push_back(Chain);
1221  Ops.push_back(Callee);
1222 
1223  // Add argument registers to the end of the list so that they are
1224  // known live into the call.
1225  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1226  Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1227  RegsToPass[i].second.getValueType()));
1228 
1229  if (InFlag.getNode())
1230  Ops.push_back(InFlag);
1231 
1232  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
1233  InFlag = Chain.getValue(1);
1234 
1235  // Create the CALLSEQ_END node.
1236  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
1237  DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
1238  InFlag = Chain.getValue(1);
1239 
1240  // Handle result values, copying them out of physregs into vregs that we
1241  // return.
1242  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
1243 }
1244 
1245 //===----------------------------------------------------------------------===//
1246 // Formal Arguments Calling Convention Implementation
1247 //===----------------------------------------------------------------------===//
1248 
1249 namespace {
1250  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
1251 }
1252 
1253 /// XCore formal arguments implementation
1254 SDValue XCoreTargetLowering::LowerFormalArguments(
1255  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1256  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1257  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1258  switch (CallConv)
1259  {
1260  default:
1261  report_fatal_error("Unsupported calling convention");
1262  case CallingConv::C:
1263  case CallingConv::Fast:
1264  return LowerCCCArguments(Chain, CallConv, isVarArg,
1265  Ins, dl, DAG, InVals);
1266  }
1267 }
1268 
1269 /// LowerCCCArguments - transform physical registers into
1270 /// virtual registers and generate load operations for
1271 /// arguments places on the stack.
1272 /// TODO: sret
1273 SDValue XCoreTargetLowering::LowerCCCArguments(
1274  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1275  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1276  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1277  MachineFunction &MF = DAG.getMachineFunction();
1278  MachineFrameInfo &MFI = MF.getFrameInfo();
1279  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

1282  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1285  *DAG.getContext());
1286 
1287  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
1288 
1289  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
1290 
1291  unsigned LRSaveSize = StackSlotSize;
1292 
1293  if (!isVarArg)
1294  XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);
1295 
1296  // All getCopyFromReg ops must precede any getMemcpys to prevent the
1297  // scheduler clobbering a register before it has been copied.
1298  // The stages are:
1299  // 1. CopyFromReg (and load) arg & vararg registers.
1300  // 2. Chain CopyFromReg nodes into a TokenFactor.
1301  // 3. Memcpy 'byVal' args & push final InVals.
1302  // 4. Chain mem ops nodes into a TokenFactor.
1303  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;
1306 
1307  // 1a. CopyFromReg (and load) arg registers.
1308  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1309 
1310  CCValAssign &VA = ArgLocs[i];
1311  SDValue ArgIn;
1312 
1313  if (VA.isRegLoc()) {
1314  // Arguments passed in registers
1315  EVT RegVT = VA.getLocVT();
1316  switch (RegVT.getSimpleVT().SimpleTy) {
1317  default:
1318  {
1319 #ifndef NDEBUG
1320  errs() << "LowerFormalArguments Unhandled argument type: "
1321  << RegVT.getEVTString() << "\n";
1322 #endif
1323  llvm_unreachable(nullptr);
1324  }
1325  case MVT::i32:
1326  unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1327  RegInfo.addLiveIn(VA.getLocReg(), VReg);
1328  ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
1329  CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
1330  }
1331  } else {
1332  // sanity check
1333  assert(VA.isMemLoc());
1334  // Load the argument to a virtual register
1335  unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
1336  if (ObjSize > StackSlotSize) {
1337  errs() << "LowerFormalArguments Unhandled argument type: "
1338  << EVT(VA.getLocVT()).getEVTString()
1339  << "\n";
1340  }
1341  // Create the frame index object for this incoming parameter...
1342  int FI = MFI.CreateFixedObject(ObjSize,
1343  LRSaveSize + VA.getLocMemOffset(),
1344  true);
1345 
      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI));
    }
1352  const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
1353  ArgData.push_back(ADP);
1354  }
1355 
1356  // 1b. CopyFromReg vararg registers.
1357  if (isVarArg) {
1358  // Argument registers
1359  static const MCPhysReg ArgRegs[] = {
1360  XCore::R0, XCore::R1, XCore::R2, XCore::R3
1361  };
1363  unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
1364  if (FirstVAReg < array_lengthof(ArgRegs)) {
1365  int offset = 0;
1366  // Save remaining registers, storing higher register numbers at a higher
1367  // address
1368  for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
1369  // Create a stack slot
1370  int FI = MFI.CreateFixedObject(4, offset, true);
1371  if (i == (int)FirstVAReg) {
1372  XFI->setVarArgsFrameIndex(FI);
1373  }
1374  offset -= StackSlotSize;
1375  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1376  // Move argument from phys reg -> virt reg
1377  unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
1378  RegInfo.addLiveIn(ArgRegs[i], VReg);
1379  SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
1380  CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
1381  // Move argument from virt reg -> stack
1382  SDValue Store =
1383  DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
1384  MemOps.push_back(Store);
1385  }
1386  } else {
1387  // This will point to the next argument passed via stack.
1388  XFI->setVarArgsFrameIndex(
1389  MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
1390  true));
1391  }
1392  }
1393 
1394  // 2. chain CopyFromReg nodes into a TokenFactor.
1395  if (!CFRegNode.empty())
1396  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);
1397 
1398  // 3. Memcpy 'byVal' args & push final InVals.
1399  // Aggregates passed "byVal" need to be copied by the callee.
1400  // The callee will use a pointer to this copy, rather than the original
1401  // pointer.
1402  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
1403  ArgDE = ArgData.end();
1404  ArgDI != ArgDE; ++ArgDI) {
1405  if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
1406  unsigned Size = ArgDI->Flags.getByValSize();
1407  unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
1408  // Create a new object on the stack and copy the pointee into it.
1409  int FI = MFI.CreateStackObject(Size, Align, false);
1410  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1411  InVals.push_back(FIN);
1412  MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
1413  DAG.getConstant(Size, dl, MVT::i32),
                                     Align, false, false, false,
                                     MachinePointerInfo(),
                                     MachinePointerInfo()));
1417  } else {
1418  InVals.push_back(ArgDI->SDV);
1419  }
1420  }
1421 
  // 4. Chain mem ops nodes into a TokenFactor.
1423  if (!MemOps.empty()) {
1424  MemOps.push_back(Chain);
1425  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
1426  }
1427 
1428  return Chain;
1429 }
1430 
1431 //===----------------------------------------------------------------------===//
1432 // Return Value Calling Convention Implementation
1433 //===----------------------------------------------------------------------===//
1434 
1435 bool XCoreTargetLowering::
1436 CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1437  bool isVarArg,
1438  const SmallVectorImpl<ISD::OutputArg> &Outs,
1439  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1442  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
1443  return false;
1444  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
1445  return false;
1446  return true;
1447 }
1448 
1449 SDValue
1450 XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1451  bool isVarArg,
1452  const SmallVectorImpl<ISD::OutputArg> &Outs,
1453  const SmallVectorImpl<SDValue> &OutVals,
1454  const SDLoc &dl, SelectionDAG &DAG) const {
1455 
  XCoreFunctionInfo *XFI =
      DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

1460  // CCValAssign - represent the assignment of
1461  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

1464  // CCState - Info about the registers and stack slot.
1465  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1466  *DAG.getContext());
1467 
1468  // Analyze return values.
1469  if (!isVarArg)
1470  CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);
1471 
1472  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
1473 
1474  SDValue Flag;
1475  SmallVector<SDValue, 4> RetOps(1, Chain);
1476 
1477  // Return on XCore is always a "retsp 0"
1478  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));
1479 
1480  SmallVector<SDValue, 4> MemOpChains;
1481  // Handle return values that must be copied to memory.
1482  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1483  CCValAssign &VA = RVLocs[i];
1484  if (VA.isRegLoc())
1485  continue;
1486  assert(VA.isMemLoc());
1487  if (isVarArg) {
1488  report_fatal_error("Can't return value from vararg function in memory");
1489  }
1490 
1491  int Offset = VA.getLocMemOffset();
1492  unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
1493  // Create the frame index object for the memory location.
1494  int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
1495 
1496  // Create a SelectionDAG node corresponding to a store
1497  // to this memory location.
1498  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1499  MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }
1503 
1504  // Transform all store nodes into one single node because
1505  // all stores are independent of each other.
1506  if (!MemOpChains.empty())
1507  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1508 
1509  // Now handle return values copied to registers.
1510  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1511  CCValAssign &VA = RVLocs[i];
1512  if (!VA.isRegLoc())
1513  continue;
1514  // Copy the result values into the output registers.
1515  Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
1516 
1517  // guarantee that all emitted copies are
1518  // stuck together, avoiding something bad
1519  Flag = Chain.getValue(1);
1520  RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1521  }
1522 
1523  RetOps[0] = Chain; // Update chain.
1524 
1525  // Add the flag if we have it.
1526  if (Flag.getNode())
1527  RetOps.push_back(Flag);
1528 
1529  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
1530 }
1531 
1532 //===----------------------------------------------------------------------===//
1533 // Other Lowering Code
1534 //===----------------------------------------------------------------------===//
1535 
MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
1539  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1540  DebugLoc dl = MI.getDebugLoc();
1541  assert((MI.getOpcode() == XCore::SELECT_CC) &&
1542  "Unexpected instr type to insert");
1543 
1544  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1545  // control-flow pattern. The incoming instruction knows the destination vreg
1546  // to set, the condition code register to branch on, the true/false values to
1547  // select between, and a branch opcode to use.
1548  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

1551  // thisMBB:
1552  // ...
1553  // TrueVal = ...
1554  // cmpTY ccX, r1, r2
1555  // bCC copy1MBB
1556  // fallthrough --> copy0MBB
1557  MachineBasicBlock *thisMBB = BB;
1558  MachineFunction *F = BB->getParent();
1559  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1560  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
1561  F->insert(It, copy0MBB);
1562  F->insert(It, sinkMBB);
1563 
1564  // Transfer the remainder of BB and its successor edges to sinkMBB.
1565  sinkMBB->splice(sinkMBB->begin(), BB,
1566  std::next(MachineBasicBlock::iterator(MI)), BB->end());
1567  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
1568 
1569  // Next, add the true and fallthrough blocks as its successors.
1570  BB->addSuccessor(copy0MBB);
1571  BB->addSuccessor(sinkMBB);
1572 
1573  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
1574  .addReg(MI.getOperand(1).getReg())
1575  .addMBB(sinkMBB);
1576 
1577  // copy0MBB:
1578  // %FalseValue = ...
1579  // # fallthrough to sinkMBB
1580  BB = copy0MBB;
1581 
1582  // Update machine-CFG edges
1583  BB->addSuccessor(sinkMBB);
1584 
1585  // sinkMBB:
1586  // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1587  // ...
1588  BB = sinkMBB;
1589  BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
1590  .addReg(MI.getOperand(3).getReg())
1591  .addMBB(copy0MBB)
1592  .addReg(MI.getOperand(2).getReg())
1593  .addMBB(thisMBB);
1594 
1595  MI.eraseFromParent(); // The pseudo instruction is gone now.
1596  return BB;
1597 }
1598 
1599 //===----------------------------------------------------------------------===//
1600 // Target Optimization Hooks
1601 //===----------------------------------------------------------------------===//
1602 
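/// Target-specific DAG combines. The cases below narrow the operands of the
/// outt / outct / chkct / setpt intrinsics to the low bits they actually use,
/// and simplify LADD / LSUB nodes whose operands are constant or known to be
/// zero.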
1603 SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
1604  DAGCombinerInfo &DCI) const {
1605  SelectionDAG &DAG = DCI.DAG;
1606  SDLoc dl(N);
1607  switch (N->getOpcode()) {
1608  default: break;
1609  case ISD::INTRINSIC_VOID:
1610  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
1611  case Intrinsic::xcore_outt:
1612  case Intrinsic::xcore_outct:
1613  case Intrinsic::xcore_chkct: {
1614  SDValue OutVal = N->getOperand(3);
1615  // These instructions ignore the high bits.
1616  if (OutVal.hasOneUse()) {
1617  unsigned BitWidth = OutVal.getValueSizeInBits();
1618  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
1619  KnownBits Known;
1620  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1621  !DCI.isBeforeLegalizeOps());
1622  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1623  if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
1624  TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
1625  DCI.CommitTargetLoweringOpt(TLO);
1626  }
1627  break;
1628  }
1629  case Intrinsic::xcore_setpt: {
1630  SDValue Time = N->getOperand(3);
1631  // This instruction ignores the high bits.
1632  if (Time.hasOneUse()) {
1633  unsigned BitWidth = Time.getValueSizeInBits();
1634  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
1635  KnownBits Known;
1636  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
1637  !DCI.isBeforeLegalizeOps());
1638  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1639  if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
1640  TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
1641  DCI.CommitTargetLoweringOpt(TLO);
1642  }
1643  break;
1644  }
1645  }
1646  break;
1647  case XCoreISD::LADD: {
1648  SDValue N0 = N->getOperand(0);
1649  SDValue N1 = N->getOperand(1);
1650  SDValue N2 = N->getOperand(2);
1651  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1652  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1653  EVT VT = N0.getValueType();
1654 
1655  // canonicalize constant to RHS
1656  if (N0C && !N1C)
1657  return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);
1658 
1659  // fold (ladd 0, 0, x) -> 0, x & 1
1660  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1661  SDValue Carry = DAG.getConstant(0, dl, VT);
1662  SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
1663  DAG.getConstant(1, dl, VT));
1664  SDValue Ops[] = { Result, Carry };
1665  return DAG.getMergeValues(Ops, dl);
1666  }
1667 
1668  // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
1669  // low bit set
1670  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1671  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1672  VT.getSizeInBits() - 1);
1673  KnownBits Known = DAG.computeKnownBits(N2);
1674  if ((Known.Zero & Mask) == Mask) {
1675  SDValue Carry = DAG.getConstant(0, dl, VT);
1676  SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
1677  SDValue Ops[] = { Result, Carry };
1678  return DAG.getMergeValues(Ops, dl);
1679  }
1680  }
1681  }
1682  break;
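// Illustrative reference model of the LADD node (an assumption for
// exposition; needs <cstdint> and <utility> if compiled standalone):
//
//   static std::pair<uint32_t, uint32_t> laddRef(uint32_t a, uint32_t b,
//                                                uint32_t cin) {
//     uint64_t s = uint64_t(a) + uint64_t(b) + (cin & 1);
//     return {uint32_t(s), uint32_t(s >> 32)}; // {sum, carry-out}
//   }
//
// With a == b == 0 the sum collapses to (cin & 1) and the carry-out is 0,
// which is the (ladd 0, 0, x) fold above; with b == 0, a dead carry-out and
// a carry-in known to be 0 or 1, the node is a plain add of a and the carry,
// which is the second fold.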
1683  case XCoreISD::LSUB: {
1684  SDValue N0 = N->getOperand(0);
1685  SDValue N1 = N->getOperand(1);
1686  SDValue N2 = N->getOperand(2);
1687  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1688  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1689  EVT VT = N0.getValueType();
1690 
1691  // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
1692  if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
1693  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1694  VT.getSizeInBits() - 1);
1695  KnownBits Known = DAG.computeKnownBits(N2);
1696  if ((Known.Zero & Mask) == Mask) {
1697  SDValue Borrow = N2;
1698  SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
1699  DAG.getConstant(0, dl, VT), N2);
1700  SDValue Ops[] = { Result, Borrow };
1701  return DAG.getMergeValues(Ops, dl);
1702  }
1703  }
1704 
1705  // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
1706  // low bit set
1707  if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
1708  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
1709  VT.getSizeInBits() - 1);
1710  KnownBits Known = DAG.computeKnownBits(N2);
1711  if ((Known.Zero & Mask) == Mask) {
1712  SDValue Borrow = DAG.getConstant(0, dl, VT);
1713  SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
1714  SDValue Ops[] = { Result, Borrow };
1715  return DAG.getMergeValues(Ops, dl);
1716  }
1717  }
1718  }
1719  break;
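// Illustrative reference model of the LSUB node (an assumption for
// exposition; needs <cstdint> and <utility> if compiled standalone):
//
//   static std::pair<uint32_t, uint32_t> lsubRef(uint32_t a, uint32_t b,
//                                                uint32_t bin) {
//     uint64_t d = uint64_t(a) - uint64_t(b) - (bin & 1);
//     return {uint32_t(d), uint32_t((d >> 32) & 1)}; // {diff, borrow-out}
//   }
//
// For a == b == 0 and a borrow-in known to be 0 or 1, the difference is
// -borrow-in (mod 2^32) and the borrow-out equals the borrow-in, which is the
// first fold above; the second fold mirrors the LADD case with subtraction.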
1720  case XCoreISD::LMUL: {
1721  SDValue N0 = N->getOperand(0);
1722  SDValue N1 = N->getOperand(1);
1723  SDValue N2 = N->getOperand(2);
1724  SDValue N3 = N->getOperand(3);
1725  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
1726  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
1727  EVT VT = N0.getValueType();
1728  // Canonicalize multiplicative constant to RHS. If both multiplicative
1729  // operands are constant canonicalize smallest to RHS.
1730  if ((N0C && !N1C) ||
1731  (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
1732  return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
1733  N1, N0, N2, N3);
1734 
1735  // lmul(x, 0, a, b)
1736  if (N1C && N1C->isNullValue()) {
1737  // If the high result is unused fold to add(a, b)
1738  if (N->hasNUsesOfValue(0, 0)) {
1739  SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
1740  SDValue Ops[] = { Lo, Lo };
1741  return DAG.getMergeValues(Ops, dl);
1742  }
1743  // Otherwise fold to ladd(a, b, 0)
1744  SDValue Result =
1745  DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
1746  SDValue Carry(Result.getNode(), 1);
1747  SDValue Ops[] = { Carry, Result };
1748  return DAG.getMergeValues(Ops, dl);
1749  }
1750  }
1751  break;
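// Illustrative reference model of the LMUL node (an assumption for
// exposition; needs <cstdint> and <utility> if compiled standalone):
//
//   static std::pair<uint32_t, uint32_t> lmulRef(uint32_t x, uint32_t y,
//                                                uint32_t a, uint32_t b) {
//     uint64_t v = uint64_t(x) * y + a + b;
//     return {uint32_t(v >> 32), uint32_t(v)}; // {high, low}
//   }
//
// With y == 0 the value is just a + b: the low half is add(a, b) and the
// high half is that addition's carry, so the code above uses a plain ADD when
// the high result is dead and ladd(a, b, 0) when it is not.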
1752  case ISD::ADD: {
1753  // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
1754  // lmul(x, y, a, b). The high result of lmul will be ignored.
1755  // This is only profitable if the intermediate results are unused
1756  // elsewhere.
1757  SDValue Mul0, Mul1, Addend0, Addend1;
1758  if (N->getValueType(0) == MVT::i32 &&
1759  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
1760  SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
1761  DAG.getVTList(MVT::i32, MVT::i32), Mul0,
1762  Mul1, Addend0, Addend1);
1763  SDValue Result(Ignored.getNode(), 1);
1764  return Result;
1765  }
1766  APInt HighMask = APInt::getHighBitsSet(64, 32);
1767  // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
1768  // lmul(x, y, a, b) if all operands are zero-extended. We do this
1769  // before type legalization as it is messy to match the operands after
1770  // that.
1771  if (N->getValueType(0) == MVT::i64 &&
1772  isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
1773  DAG.MaskedValueIsZero(Mul0, HighMask) &&
1774  DAG.MaskedValueIsZero(Mul1, HighMask) &&
1775  DAG.MaskedValueIsZero(Addend0, HighMask) &&
1776  DAG.MaskedValueIsZero(Addend1, HighMask)) {
1777  SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1778  Mul0, DAG.getConstant(0, dl, MVT::i32));
1779  SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1780  Mul1, DAG.getConstant(0, dl, MVT::i32));
1781  SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1782  Addend0, DAG.getConstant(0, dl, MVT::i32));
1783  SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
1784  Addend1, DAG.getConstant(0, dl, MVT::i32));
1785  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
1786  DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
1787  Addend0L, Addend1L);
1788  SDValue Lo(Hi.getNode(), 1);
1789  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
1790  }
1791  }
1792  break;
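// Illustrative source shapes this combine targets (an assumption for
// exposition, not code from this file):
//
//   uint32_t macc32(uint32_t x, uint32_t y, uint32_t a, uint32_t b) {
//     return x * y + a + b;           // single lmul, high result ignored
//   }
//   uint64_t macc64(uint32_t x, uint32_t y, uint32_t a, uint32_t b) {
//     return (uint64_t)x * y + a + b; // lmul producing both 32-bit halves
//   }
//
// The 64-bit form is only matched when every operand is known zero-extended
// (the MaskedValueIsZero checks above) and before type legalization, since
// the operands are awkward to re-match once i64 has been split.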
1793  case ISD::STORE: {
1794  // Replace unaligned store of unaligned load with memmove.
1795  StoreSDNode *ST = cast<StoreSDNode>(N);
1796  if (!DCI.isBeforeLegalize() ||
1797  allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
1798  ST->getAddressSpace(),
1799  ST->getAlignment()) ||
1800  ST->isVolatile() || ST->isIndexed()) {
1801  break;
1802  }
1803  SDValue Chain = ST->getChain();
1804 
1805  unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1806  assert((StoreBits % 8) == 0 &&
1807  "Store size in bits must be a multiple of 8");
1808  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
1809  ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
1810  unsigned Alignment = ST->getAlignment();
1811  if (Alignment >= ABIAlignment) {
1812  break;
1813  }
1814 
1815  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
1816  if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1817  LD->getAlignment() == Alignment &&
1818  !LD->isVolatile() && !LD->isIndexed() &&
1819  Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
1820  bool isTail = isInTailCallPosition(DAG, ST, Chain);
1821  return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
1822  LD->getBasePtr(),
1823  DAG.getConstant(StoreBits/8, dl, MVT::i32),
1824  Alignment, false, isTail, ST->getPointerInfo(),
1825  LD->getPointerInfo());
1826  }
1827  }
1828  break;
1829  }
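// Illustrative source shape for the store combine above (an assumption for
// exposition, not code from this file): a word copied through pointers with
// no alignment guarantee,
//
//   void copyWord(char *dst, const char *src) {
//     int tmp;
//     memcpy(&tmp, src, sizeof(tmp)); // lowered as a misaligned i32 load
//     memcpy(dst, &tmp, sizeof(tmp)); // lowered as a misaligned i32 store
//   }
//
// When the load feeds only this store, both are non-volatile, non-indexed and
// below the ABI alignment for the type, the pair is rewritten as a memmove of
// StoreBits/8 bytes instead of being expanded into individual narrow accesses.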
1830  }
1831  return SDValue();
1832 }
1833 
1834 void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1835  KnownBits &Known,
1836  const APInt &DemandedElts,
1837  const SelectionDAG &DAG,
1838  unsigned Depth) const {
1839  Known.resetAll();
1840  switch (Op.getOpcode()) {
1841  default: break;
1842  case XCoreISD::LADD:
1843  case XCoreISD::LSUB:
1844  if (Op.getResNo() == 1) {
1845  // Top bits of carry / borrow are clear.
1846  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1847  Known.getBitWidth() - 1);
1848  }
1849  break;
1850  case ISD::INTRINSIC_W_CHAIN:
1851  {
1852  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1853  switch (IntNo) {
1854  case Intrinsic::xcore_getts:
1855  // High bits are known to be zero.
1856  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1857  Known.getBitWidth() - 16);
1858  break;
1859  case Intrinsic::xcore_int:
1860  case Intrinsic::xcore_inct:
1861  // High bits are known to be zero.
1862  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1863  Known.getBitWidth() - 8);
1864  break;
1865  case Intrinsic::xcore_testct:
1866  // Result is either 0 or 1.
1867  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1868  Known.getBitWidth() - 1);
1869  break;
1870  case Intrinsic::xcore_testwct:
1871  // Result is in the range 0 - 4.
1872  Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
1873  Known.getBitWidth() - 3);
1874  break;
1875  }
1876  }
1877  break;
1878  }
1879 }
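// Worked example for the hook above: a 32-bit result of the xcore_testwct
// intrinsic lies in [0, 4], so the case above reports
//   Known.Zero = APInt::getHighBitsSet(32, 32 - 3)   // == 0xFFFFFFF8
// i.e. all but the low three bits are known zero, which lets generic combines
// remove redundant masking such as (x & 7) of the intrinsic's result.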
1880 
1881 //===----------------------------------------------------------------------===//
1882 // Addressing mode description hooks
1883 //===----------------------------------------------------------------------===//
1884 
1885 static inline bool isImmUs(int64_t val)
1886 {
1887  return (val >= 0 && val <= 11);
1888 }
1889 
1890 static inline bool isImmUs2(int64_t val)
1891 {
1892  return (val%2 == 0 && isImmUs(val/2));
1893 }
1894 
1895 static inline bool isImmUs4(int64_t val)
1896 {
1897  return (val%4 == 0 && isImmUs(val/4));
1898 }
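// Worked examples for the three predicates above (byte offsets):
//   isImmUs(7)   -> true    (0 <= 7 <= 11)
//   isImmUs(12)  -> false   (out of range)
//   isImmUs2(14) -> true    (even, 14/2 == 7 is in range)
//   isImmUs2(13) -> false   (odd)
//   isImmUs4(44) -> true    (44/4 == 11)
//   isImmUs4(48) -> false   (48/4 == 12 is out of range)
// Judging by their names, they model the scaled short unsigned immediate
// accepted by the XCore load/store addressing forms.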
1899 
1900 /// isLegalAddressingMode - Return true if the addressing mode represented
1901 /// by AM is legal for this target, for a load/store of the specified type.
1902 bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1903  const AddrMode &AM, Type *Ty,
1904  unsigned AS,
1905  Instruction *I) const {
1906  if (Ty->getTypeID() == Type::VoidTyID)
1907  return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
1908 
1909  unsigned Size = DL.getTypeAllocSize(Ty);
1910  if (AM.BaseGV) {
1911  return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
1912  AM.BaseOffs%4 == 0;
1913  }
1914 
1915  switch (Size) {
1916  case 1:
1917  // reg + imm
1918  if (AM.Scale == 0) {
1919  return isImmUs(AM.BaseOffs);
1920  }
1921  // reg + reg
1922  return AM.Scale == 1 && AM.BaseOffs == 0;
1923  case 2:
1924  case 3:
1925  // reg + imm
1926  if (AM.Scale == 0) {
1927  return isImmUs2(AM.BaseOffs);
1928  }
1929  // reg + reg<<1
1930  return AM.Scale == 2 && AM.BaseOffs == 0;
1931  default:
1932  // reg + imm
1933  if (AM.Scale == 0) {
1934  return isImmUs4(AM.BaseOffs);
1935  }
1936  // reg + reg<<2
1937  return AM.Scale == 4 && AM.BaseOffs == 0;
1938  }
1939 }
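// Worked example for the hook above: for a 4-byte (i32) access with no
// global base,
//   BaseOffs == 40, Scale == 0 -> legal   (isImmUs4(40), 40/4 == 10)
//   BaseOffs == 42, Scale == 0 -> illegal (42 is not a multiple of 4)
//   BaseOffs == 0,  Scale == 4 -> legal   (reg + reg << 2 form)
//   BaseOffs == 0,  Scale == 2 -> illegal for 4-byte accesses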
1940 
1941 //===----------------------------------------------------------------------===//
1942 // XCore Inline Assembly Support
1943 //===----------------------------------------------------------------------===//
1944 
1945 std::pair<unsigned, const TargetRegisterClass *>
1946 XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1947  StringRef Constraint,
1948  MVT VT) const {
1949  if (Constraint.size() == 1) {
1950  switch (Constraint[0]) {
1951  default : break;
1952  case 'r':
1953  return std::make_pair(0U, &XCore::GRRegsRegClass);
1954  }
1955  }
1956  // Use the default implementation in TargetLowering to convert the register
1957  // constraint into a member of a register class.
1958  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1959 }
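// Illustrative use from C source (an assumption, not from this file): the
// only single-letter constraint handled specially is 'r', which maps to the
// general-purpose register class:
//
//   int in = 5, out;
//   __asm__("add %0, %1, %1" : "=r"(out) : "r"(in));
//
// Any other constraint falls through to the default TargetLowering handling.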